Dataset schema (one record per function; string columns show min-max length, numeric columns show min-max value):

repo              string   7-55 chars
path              string   4-223 chars
url               string   87-315 chars
code              string   75-104k chars
code_tokens       list
docstring         string   1-46.9k chars
docstring_tokens  list
language          string   1 distinct value
partition         string   3 distinct values
avg_line_len      float64  7.91-980
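For orientation, a minimal sketch of iterating over records with this schema. It assumes the dump is serialized as JSON Lines, one record per line; the file name is hypothetical.

import json

def iter_records(path):
    """Yield one dict per record, with the fields listed above."""
    with open(path, encoding='utf-8') as fh:
        for line in fh:
            yield json.loads(line)

# hypothetical file name
for rec in iter_records('code_dataset.jsonl'):
    print(rec['repo'], rec['path'], rec['partition'], rec['avg_line_len'])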
repo: Workiva/furious
path: furious/async.py
url: https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/async.py#L515-L521
code:
def context_id(self):
    """Return this Async's Context Id if it exists."""
    if not self._context_id:
        self._context_id = self._get_context_id()
        self.update_options(context_id=self._context_id)

    return self._context_id
language: python
partition: train
avg_line_len: 36.428571
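The method above is a hand-rolled compute-once accessor (it also persists the id via update_options). For the caching half of that pattern, functools.cached_property is the modern shorthand; a self-contained sketch, where uuid4 is an illustrative stand-in for how furious actually derives ids and the update_options side effect is dropped:

import uuid
from functools import cached_property  # Python 3.8+

class Job:
    @cached_property
    def context_id(self):
        # computed on first access, then cached on the instance;
        # uuid4 is illustrative, not how furious derives ids
        return uuid.uuid4().hex

job = Job()
assert job.context_id == job.context_id  # same cached value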
repo: gem/oq-engine
path: openquake/risklib/scientific.py
url: https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/risklib/scientific.py#L1306-L1345
code:
def losses_by_period(losses, return_periods, num_events=None, eff_time=None):
    """
    :param losses: array of simulated losses
    :param return_periods: return periods of interest
    :param num_events: the number of events (>= the number of losses)
    :param eff_time: investigation_time * ses_per_logic_tree_path
    :returns: interpolated losses for the return periods, possibly with NaN

    NB: the return periods must be ordered integers >= 1. The interpolated
    losses are defined inside the interval min_time < time < eff_time,
    where min_time = eff_time / num_events. Outside the interval they
    have NaN values. Here is an example:

    >>> losses = [3, 2, 3.5, 4, 3, 23, 11, 2, 1, 4, 5, 7, 8, 9, 13]
    >>> losses_by_period(losses, [1, 2, 5, 10, 20, 50, 100], 20)
    array([ nan, nan, 0. , 3.5, 8. , 13. , 23. ])

    If num_events is not passed, it is inferred from the number of losses;
    if eff_time is not passed, it is inferred from the longest return period.
    """
    if len(losses) == 0:  # zero-curve
        return numpy.zeros(len(return_periods))
    if num_events is None:
        num_events = len(losses)
    elif num_events < len(losses):
        raise ValueError(
            'There are not enough events (%d) to compute the loss curve '
            'from %d losses' % (num_events, len(losses)))
    if eff_time is None:
        eff_time = return_periods[-1]
    losses = numpy.sort(losses)
    num_zeros = num_events - len(losses)
    if num_zeros:
        losses = numpy.concatenate(
            [numpy.zeros(num_zeros, losses.dtype), losses])
    periods = eff_time / numpy.arange(num_events, 0., -1)
    rperiods = [rp if periods[0] <= rp <= periods[-1] else numpy.nan
                for rp in return_periods]
    curve = numpy.interp(numpy.log(rperiods), numpy.log(periods), losses)
    return curve
language: python
partition: train
avg_line_len: 45.4
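The doctest above can be run standalone, with the arithmetic behind the leading NaNs spelled out; this assumes the oq-engine package is installed so the function can be imported:

from openquake.risklib.scientific import losses_by_period

losses = [3, 2, 3.5, 4, 3, 23, 11, 2, 1, 4, 5, 7, 8, 9, 13]
# eff_time defaults to the longest return period (100), so the shortest
# resolvable period is eff_time / num_events = 100 / 20 = 5: return
# periods 1 and 2 fall below it and come back as NaN
curve = losses_by_period(losses, [1, 2, 5, 10, 20, 50, 100], num_events=20)
print(curve)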
repo: gem/oq-engine
path: openquake/commands/plot_memory.py
url: https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commands/plot_memory.py#L22-L38
code:
def make_figure(plots):
    """
    :param plots: list of pairs (task_name, memory array)
    """
    # NB: matplotlib is imported inside since it is a costly import
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.grid(True)
    ax.set_xlabel('tasks')
    ax.set_ylabel('GB')
    start = 0
    for task_name, mem in plots:
        ax.plot(range(start, start + len(mem)), mem, label=task_name)
        start += len(mem)
    ax.legend()
    return plt
language: python
partition: train
avg_line_len: 26.941176
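A usage sketch with synthetic memory traces (the arrays below are made up; any sequence of per-task GB readings works), assuming oq-engine is installed for the import:

import numpy as np
from openquake.commands.plot_memory import make_figure

plots = [('classical', np.array([0.8, 1.2, 1.9, 2.4])),
         ('post_processing', np.array([0.5, 0.6]))]
plt = make_figure(plots)
plt.show()  # tasks are laid end to end on the x axis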
repo: mitsei/dlkit
path: dlkit/json_/cataloging/sessions.py
url: https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/cataloging/sessions.py#L379-L403
code:
def get_catalogs_by_query(self, catalog_query):
    """Gets a list of ``Catalogs`` matching the given catalog query.

    arg:    catalog_query (osid.cataloging.CatalogQuery): the catalog query
    return: (osid.cataloging.CatalogList) - the returned ``CatalogList``
    raise:  NullArgument - ``catalog_query`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - ``catalog_query`` is not of this service
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinQuerySession.get_bins_by_query_template
    if self._catalog_session is not None:
        return self._catalog_session.get_catalogs_by_query(catalog_query)
    query_terms = dict(catalog_query._query_terms)
    collection = JSONClientValidated('cataloging',
                                     collection='Catalog',
                                     runtime=self._runtime)
    result = collection.find(query_terms).sort('_id', DESCENDING)
    return objects.CatalogList(result, runtime=self._runtime)
language: python
partition: train
avg_line_len: 48.88
repo: hydpy-dev/hydpy
path: hydpy/auxs/anntools.py
url: https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/anntools.py#L1323-L1370
code:
def refresh(self) -> None:
    """Prepare the actual |anntools.SeasonalANN| object for calculations.

    Despite all the automated refreshing explained in the general
    documentation on class |anntools.SeasonalANN|, it is still possible
    to destroy the inner consistency of a |anntools.SeasonalANN| instance,
    as it stores its |anntools.ANN| objects by reference.  This is shown
    by the following example:

    >>> from hydpy import SeasonalANN, ann
    >>> seasonalann = SeasonalANN(None)
    >>> seasonalann.simulationstep = '1d'
    >>> jan = ann(nmb_inputs=1, nmb_neurons=(1,), nmb_outputs=1,
    ...           weights_input=0.0, weights_output=0.0,
    ...           intercepts_hidden=0.0, intercepts_output=1.0)
    >>> seasonalann(_1_1_12=jan)
    >>> jan.nmb_inputs, jan.nmb_outputs = 2, 3
    >>> jan.nmb_inputs, jan.nmb_outputs
    (2, 3)
    >>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
    (1, 1)

    Due to the C level implementation of the mathematical core of both
    |anntools.ANN| and |anntools.SeasonalANN| in module |annutils|, such
    an inconsistency might result in a program crash without any
    informative error message.  Whenever you are afraid some inconsistency
    might have crept in, and you want to repair it, call method
    |anntools.SeasonalANN.refresh| explicitly:

    >>> seasonalann.refresh()
    >>> jan.nmb_inputs, jan.nmb_outputs
    (2, 3)
    >>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
    (2, 3)
    """
    # pylint: disable=unsupported-assignment-operation
    if self._do_refresh:
        if self.anns:
            self.__sann = annutils.SeasonalANN(self.anns)
            setattr(self.fastaccess, self.name, self._sann)
            self._set_shape((None, self._sann.nmb_anns))
            if self._sann.nmb_anns > 1:
                self._interp()
            else:
                self._sann.ratios[:, 0] = 1.
            self.verify()
        else:
            self.__sann = None
language: python
partition: train
avg_line_len: 43.416667
repo: bwhite/hadoopy
path: hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winmanifest.py
url: https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winmanifest.py#L884-L887
code:
def update_resources(self, dstpath, names=None, languages=None):
    """ Update or add manifest resource in dll/exe file dstpath """
    UpdateManifestResourcesFromXML(dstpath, self.toprettyxml(), names,
                                   languages)
language: python
partition: train
avg_line_len: 64.75
repo: SoCo/SoCo
path: soco/core.py
url: https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/core.py#L1950-L1977
code:
def get_sonos_playlist_by_attr(self, attr_name, match):
    """Return the first Sonos Playlist DidlPlaylistContainer that
    matches the attribute specified.

    Args:
        attr_name (str): DidlPlaylistContainer attribute to compare. The
            most useful being: 'title' and 'item_id'.
        match (str): Value to match.

    Returns:
        (:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            first matching playlist object.

    Raises:
        (AttributeError): If indicated attribute name does not exist.
        (ValueError): If a match can not be found.

    Example::

        device.get_sonos_playlist_by_attr('title', 'Foo')
        device.get_sonos_playlist_by_attr('item_id', 'SQ:3')

    """
    for sonos_playlist in self.get_sonos_playlists():
        if getattr(sonos_playlist, attr_name) == match:
            return sonos_playlist
    raise ValueError('No match on "{0}" for value "{1}"'.format(attr_name,
                                                                match))
language: python
partition: train
avg_line_len: 38.785714
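In context, device is a soco.SoCo instance; a fuller sketch of the docstring's example (the IP address and playlist title are placeholders for your own network):

from soco import SoCo

device = SoCo('192.168.1.68')  # placeholder speaker IP
try:
    playlist = device.get_sonos_playlist_by_attr('title', 'Party Mix')
except ValueError:
    playlist = None  # no playlist with that title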
repo: apache/incubator-superset
path: superset/views/core.py
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L106-L123
code:
def check_datasource_perms(self, datasource_type=None, datasource_id=None):
    """
    Check if user can access a cached response from explore_json.

    This function takes `self` since it must have the same signature as
    the decorated method.
    """
    form_data = get_form_data()[0]
    datasource_id, datasource_type = get_datasource_info(
        datasource_id, datasource_type, form_data)
    viz_obj = get_viz(
        datasource_type=datasource_type,
        datasource_id=datasource_id,
        form_data=form_data,
        force=False,
    )
    security_manager.assert_datasource_permission(viz_obj.datasource)
language: python
partition: train
avg_line_len: 34.166667
repo: spacetelescope/pysynphot
path: pysynphot/observation.py
url: https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/observation.py#L26-L78
code:
def check_overlap(a, b):
    """Check for wavelength overlap between two spectra.

    .. note::

        Generalized from
        :meth:`pysynphot.spectrum.SpectralElement.check_overlap`.

    Parameters
    ----------
    a, b : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
        Typically a source spectrum, spectral element, observation,
        or bandpass from observation mode.

    Returns
    -------
    result : {'full', 'partial', 'none'}
        Full, partial, or no overlap.

    Raises
    ------
    AttributeError
        Given spectrum does not have flux or throughput.

    """
    if a.isAnalytic or b.isAnalytic:
        # then it's defined everywhere
        result = 'full'
    else:
        # get the wavelength arrays
        waves = list()
        for x in (a, b):
            if hasattr(x, 'throughput'):
                wv = x.wave[np.where(x.throughput != 0)]
            elif hasattr(x, 'flux'):
                wv = x.wave
            else:
                raise AttributeError("neither flux nor throughput in %s" % x)
            waves.append(wv)

        # get the endpoints
        a1, a2 = waves[0].min(), waves[0].max()
        b1, b2 = waves[1].min(), waves[1].max()

        # do the comparison
        if a1 >= b1 and a2 <= b2:
            result = 'full'
        elif (a2 < b1) or (b2 < a1):
            result = 'none'
        else:
            result = 'partial'

    return result
language: python
partition: train
avg_line_len: 26.188679
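check_overlap only touches the isAnalytic flag plus a wave array and either throughput or flux, so it can be exercised with duck-typed stand-ins rather than real pysynphot spectra (assuming pysynphot is installed for the import):

from types import SimpleNamespace
import numpy as np
from pysynphot.observation import check_overlap

band = SimpleNamespace(isAnalytic=False,
                       wave=np.arange(4000., 5000.),
                       throughput=np.ones(1000))
spec = SimpleNamespace(isAnalytic=False,
                       wave=np.arange(3000., 9000.),
                       flux=np.ones(6000))
print(check_overlap(band, spec))  # 'full': the bandpass sits inside the spectrum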
repo: aio-libs/yarl
path: yarl/__init__.py
url: https://github.com/aio-libs/yarl/blob/e47da02c00ad764e030ca7647a9565548c97d362/yarl/__init__.py#L790-L809
code:
def with_port(self, port):
    """Return a new URL with port replaced.

    Clear port to default if None is passed.

    """
    # N.B. doesn't cleanup query/fragment
    if port is not None and not isinstance(port, int):
        raise TypeError("port should be int or None, got {}".format(type(port)))
    if not self.is_absolute():
        raise ValueError("port replacement is not allowed "
                         "for relative URLs")
    val = self._val
    return URL(
        self._val._replace(
            netloc=self._make_netloc(
                val.username, val.password, val.hostname, port, encode=False
            )
        ),
        encoded=True,
    )
language: python
partition: train
avg_line_len: 34.8
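Usage against yarl's public API (the URL below is arbitrary):

from yarl import URL

url = URL('http://example.com:8080/path')
print(url.with_port(9000))   # http://example.com:9000/path
print(url.with_port(None))   # http://example.com/path (port cleared)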
repo: rameshg87/pyremotevbox
path: pyremotevbox/ZSI/schema.py
url: https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/schema.py#L164-L189
code:
def getElementDeclaration(cls, namespaceURI, name, isref=False, lazy=False):
    '''Grab an element declaration, returns a typecode instance
    representation or a typecode class definition.  An element
    reference has its own facets, and is local so it will not be
    cached.

    Parameters:
        namespaceURI --
        name --
        isref -- if element reference, return class definition.
    '''
    key = (namespaceURI, name)
    if isref:
        klass = cls.elements.get(key, None)
        if klass is not None and lazy is True:
            return _Mirage(klass)
        return klass

    typecode = cls.element_typecode_cache.get(key, None)
    if typecode is None:
        tcls = cls.elements.get(key, None)
        if tcls is not None:
            typecode = cls.element_typecode_cache[key] = tcls()
            typecode.typed = False
    return typecode
language: python
partition: train
avg_line_len: 36.615385
repo: awentzonline/keras-vgg-buddy
path: keras_vgg_buddy/models.py
url: https://github.com/awentzonline/keras-vgg-buddy/blob/716cb66396b839a66ec8dc66998066b360a8f395/keras_vgg_buddy/models.py#L48-L53
code:
def get_layer_output(self, name):
    '''Get symbolic output of a layer.'''
    if name not in self._f_layer_outputs:
        layer = self.net.get_layer(name)
        self._f_layer_outputs[name] = layer.output
    return self._f_layer_outputs[name]
language: python
partition: test
avg_line_len: 43.833333
repo: ministryofjustice/django-moj-irat
path: moj_irat/healthchecks.py
url: https://github.com/ministryofjustice/django-moj-irat/blob/c1588426fffce783bef6d8b9d73395a5e9a833c9/moj_irat/healthchecks.py#L196-L228
code:
def run_healthchecks(self):
    """
    Runs all registered healthchecks and returns a list of HealthcheckResponse.
    """
    if not self._registry_loaded:
        self.load_healthchecks()

    def get_healthcheck_name(hc):
        if hasattr(hc, 'name'):
            return hc.name
        return hc.__name__

    responses = []
    for healthcheck in self._registry:
        try:
            if inspect.isclass(healthcheck):
                healthcheck = healthcheck()
            response = healthcheck()
            if isinstance(response, bool):
                response = HealthcheckResponse(
                    name=get_healthcheck_name(healthcheck),
                    status=response,
                )
        except Exception as e:
            response = HealthcheckResponse(
                name=get_healthcheck_name(healthcheck),
                status=False,
                exception=str(e),
                exception_class=e.__class__.__name__,
            )
        responses.append(response)
    return responses
language: python
partition: train
avg_line_len: 34.121212
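run_healthchecks accepts functions or classes; a callable may return a bool (wrapped into a HealthcheckResponse named from a name attribute or __name__), and raised exceptions become failed responses. An illustrative callable satisfying that contract; the sqlite probe is a hypothetical stand-in for a real dependency check:

def database_healthcheck():
    # hypothetical probe; any exception raised here is caught by
    # run_healthchecks and reported as a failed HealthcheckResponse
    import sqlite3
    sqlite3.connect(':memory:').execute('SELECT 1')
    return True

database_healthcheck.name = 'database'  # optional; __name__ is the fallback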
repo: QUANTAXIS/QUANTAXIS
path: QUANTAXIS/QAData/base_datastruct.py
url: https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/base_datastruct.py#L513-L518
code:
def mean_harmonic(self):
    'Return the harmonic mean of DataStruct.price'  # original docstring: 返回DataStruct.price的调和平均数
    res = self.price.groupby(level=1
                             ).apply(lambda x: statistics.harmonic_mean(x))
    res.name = 'mean_harmonic'
    return res
language: python
partition: train
avg_line_len: 38
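The groupby/apply idiom above works on any pandas Series with a (date, code) MultiIndex; a self-contained illustration with synthetic prices, not QUANTAXIS data structures:

import statistics
import pandas as pd

idx = pd.MultiIndex.from_product(
    [['2019-01-01', '2019-01-02'], ['000001', '000002']],
    names=['date', 'code'])
price = pd.Series([10.0, 20.0, 11.0, 19.0], index=idx)
# harmonic mean of each stock's prices across dates
print(price.groupby(level=1).apply(lambda x: statistics.harmonic_mean(x)))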
repo: foremast/foremast
path: src/foremast/awslambda/api_gateway_event/api_gateway_event.py
url: https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/awslambda/api_gateway_event/api_gateway_event.py#L167-L180
code:
def create_api_key(self):
    """Create API Key for API access."""
    apikeys = self.client.get_api_keys()
    for key in apikeys['items']:
        if key['name'] == self.app_name:
            self.log.info("Key %s already exists", self.app_name)
            break
    else:
        self.client.create_api_key(
            name=self.app_name,
            enabled=True,
            stageKeys=[{
                'restApiId': self.api_id,
                'stageName': self.env
            }])
        self.log.info("Successfully created API Key %s. Look in the AWS console for the key", self.app_name)
language: python
partition: train
avg_line_len: 43.857143
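Here self.client is an API Gateway client, and get_api_keys/create_api_key are the corresponding boto3 calls; the for/else only creates a key when the loop finds no existing key with the app's name. A standalone sketch (region and app name are placeholders, and stageKeys is omitted):

import boto3

client = boto3.client('apigateway', region_name='us-east-1')  # placeholder region
app_name = 'my-app'  # placeholder

for key in client.get_api_keys()['items']:
    if key['name'] == app_name:
        break  # key already exists, so the else clause is skipped
else:
    client.create_api_key(name=app_name, enabled=True)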
repo: saltstack/salt
path: salt/modules/at.py
url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/at.py#L63-L163
code:
def atq(tag=None):
    '''
    List all queued and running jobs or only those with
    an optional 'tag'.

    CLI Example:

    .. code-block:: bash

        salt '*' at.atq
        salt '*' at.atq [tag]
        salt '*' at.atq [job number]
    '''
    jobs = []

    # Shim to produce output similar to what __virtual__() should do
    # but __salt__ isn't available in __virtual__()
    # Tested on CentOS 5.8
    if __grains__['os_family'] == 'RedHat':
        output = _cmd('at', '-l')
    else:
        output = _cmd('atq')

    if output is None:
        return '\'at.atq\' is not available.'

    # No jobs so return
    if output == '':
        return {'jobs': jobs}

    # Jobs created with at.at() will use the following
    # comment to denote a tagged job.
    job_kw_regex = re.compile(r'^### SALT: (\w+)')

    # Split each job into a dictionary and handle
    # pulling out tags or only listing jobs with a certain
    # tag
    for line in output.splitlines():
        job_tag = ''

        # Redhat/CentOS
        if __grains__['os_family'] == 'RedHat':
            job, spec = line.split('\t')
            specs = spec.split()
        elif __grains__['os'] == 'OpenBSD':
            if line.startswith(' Rank'):
                continue
            else:
                tmp = line.split()
                timestr = ' '.join(tmp[1:5])
                job = tmp[6]
                specs = datetime.datetime(*(time.strptime(
                    timestr, '%b %d, %Y %H:%M')[0:5])).isoformat().split('T')
                specs.append(tmp[7])
                specs.append(tmp[5])
        elif __grains__['os'] == 'FreeBSD':
            if line.startswith('Date'):
                continue
            else:
                tmp = line.split()
                timestr = ' '.join(tmp[1:6])
                job = tmp[8]
                specs = datetime.datetime(*(time.strptime(
                    timestr, '%b %d %H:%M:%S %Z %Y')[0:5])).isoformat().split('T')
                specs.append(tmp[7])
                specs.append(tmp[6])
        else:
            job, spec = line.split('\t')
            tmp = spec.split()
            timestr = ' '.join(tmp[0:5])
            specs = datetime.datetime(*(time.strptime(timestr)
                                        [0:5])).isoformat().split('T')
            specs.append(tmp[5])
            specs.append(tmp[6])

        # Search for any tags
        atc_out = _cmd('at', '-c', job)
        for line in atc_out.splitlines():
            tmp = job_kw_regex.match(line)
            if tmp:
                job_tag = tmp.groups()[0]

        if __grains__['os'] in BSD:
            job = six.text_type(job)
        else:
            job = int(job)

        # If a tag is supplied, only list jobs with that tag
        if tag:
            # TODO: Looks like there is a difference between salt and salt-call
            # If I don't wrap job in an int(), it fails on salt but works on
            # salt-call. With the int(), it fails with salt-call but not salt.
            if tag == job_tag or tag == job:
                jobs.append({'job': job, 'date': specs[0], 'time': specs[1],
                             'queue': specs[2], 'user': specs[3],
                             'tag': job_tag})
        else:
            jobs.append({'job': job, 'date': specs[0], 'time': specs[1],
                         'queue': specs[2], 'user': specs[3],
                         'tag': job_tag})
    return {'jobs': jobs}
language: python
partition: train
avg_line_len: 32.445545
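Tagged jobs are recognized by scanning the job's script (at -c) for a '### SALT: <tag>' comment; the regex can be checked in isolation:

import re

job_kw_regex = re.compile(r'^### SALT: (\w+)')

script = "#!/bin/sh\n### SALT: nightly_backup\n/usr/local/bin/backup.sh\n"
for line in script.splitlines():
    match = job_kw_regex.match(line)
    if match:
        print(match.groups()[0])  # nightly_backup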
repo: JukeboxPipeline/jukebox-core
path: src/jukeboxcore/gui/widgets/filebrowser.py
url: https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/filebrowser.py#L767-L787
code:
def get_current_selection(self, i=None):
    """Get the :class:`TaskFileInfo` for the file selected in the active tab.

    :param i: If None, returns selection of active tab. If 0, assetselection.
              If 1, shotselection.
    :type i: int | None
    :returns: The taskfile info in the currently active tab
    :rtype: :class:`TaskFileInfo` | None
    :raises: None
    """
    taskfile = None
    if (i is None and self.selection_tabw.currentIndex() == 0) or (i is not None and i == 0):
        indexes = self.assetverbrws.selected_indexes(0)
        if indexes and indexes[0].isValid():
            item = indexes[0].internalPointer()
            taskfile = item.internal_data()
    elif (i is None and self.selection_tabw.currentIndex() == 1) or (i is not None and i == 1):
        indexes = self.shotverbrws.selected_indexes(0)
        if indexes and indexes[0].isValid():
            item = indexes[0].internalPointer()
            taskfile = item.internal_data()
    return taskfile
language: python
partition: train
avg_line_len: 48.904762
repo: mikekatz04/BOWIE
path: bowie/plotutils/plottypes.py
url: https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/bowie/plotutils/plottypes.py#L194-L223
code:
def make_plot(self):
    """Make the horizon plot."""
    self.get_contour_values()
    # sets levels of main contour plot
    colors1 = ['blue', 'green', 'red', 'purple', 'orange',
               'gold', 'magenta']

    # set contour value. Default is SNR_CUT.
    self.snr_contour_value = (self.SNR_CUT if self.snr_contour_value is None
                              else self.snr_contour_value)

    # plot contours
    for j in range(len(self.zvals)):
        hz = self.axis.contour(self.xvals[j], self.yvals[j], self.zvals[j],
                               np.array([self.snr_contour_value]),
                               colors=colors1[j], linewidths=1.,
                               linestyles='solid')

        # plot invisible lines for purpose of creating a legend
        if self.legend_labels != []:
            # plot a curve off of the grid with same color for legend label.
            self.axis.plot([0.1, 0.2], [0.1, 0.2], color=colors1[j],
                           label=self.legend_labels[j])

    if self.add_legend:
        self.axis.legend(**self.legend_kwargs)
    return
language: python
partition: train
avg_line_len: 37.9
repo: PmagPy/PmagPy
path: pmagpy/ipmag.py
url: https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L2736-L2977
code:
def ani_depthplot2(ani_file='rmag_anisotropy.txt', meas_file='magic_measurements.txt',
                   samp_file='er_samples.txt', age_file=None, sum_file=None,
                   fmt='svg', dmin=-1, dmax=-1, depth_scale='sample_core_depth',
                   dir_path='.'):
    """
    Returns a matplotlib figure with anisotropy data plotted against depth.

    Available depth scales: 'sample_composite_depth', 'sample_core_depth',
    or 'age' (you must provide an age file to use this option).
    """
    pcol = 4
    tint = 9
    plots = 0

    # format files to use full path
    # os.path.join(dir_path, ani_file)
    ani_file = pmag.resolve_file_name(ani_file, dir_path)
    if not os.path.isfile(ani_file):
        print("Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file))
        return False, "Could not find rmag_anisotropy type file: {}.\nPlease provide a valid file path and try again".format(ani_file)
    # os.path.join(dir_path, meas_file)
    meas_file = pmag.resolve_file_name(meas_file, dir_path)

    if age_file:
        if not os.path.isfile(age_file):
            print('Warning: you have provided an invalid age file. Attempting to use sample file instead')
            age_file = None
            depth_scale = 'sample_core_depth'
            # os.path.join(dir_path, samp_file)
            samp_file = pmag.resolve_file_name(samp_file, dir_path)
        else:
            # os.path.join(dir_path, age_file)
            samp_file = pmag.resolve_file_name(samp_file, dir_path)
            depth_scale = 'age'
            print('Warning: you have provided an er_ages format file, which will take precedence over er_samples')
    else:
        samp_file = pmag.resolve_file_name(samp_file, dir_path)

    label = 1
    if sum_file:
        sum_file = os.path.join(dir_path, sum_file)
    dmin, dmax = float(dmin), float(dmax)

    # get data read in
    isbulk = 0  # tests if there are bulk susceptibility measurements
    AniData, file_type = pmag.magic_read(ani_file)  # read in tensor elements
    if not age_file:
        # read in sample depth info from er_sample.txt format file
        Samps, file_type = pmag.magic_read(samp_file)
    else:
        # read in sample age info from er_ages.txt format file
        Samps, file_type = pmag.magic_read(samp_file)
        age_unit = Samps[0]['age_unit']
    for s in Samps:
        # change to upper case for every sample name
        s['er_sample_name'] = s['er_sample_name'].upper()
    Meas, file_type = pmag.magic_read(meas_file)
    # print 'meas_file', meas_file
    # print 'file_type', file_type
    if file_type == 'magic_measurements':
        isbulk = 1

    Data = []
    Bulks = []
    BulkDepths = []
    for rec in AniData:
        # look for depth record for this sample
        samprecs = pmag.get_dictitem(Samps, 'er_sample_name',
                                     rec['er_sample_name'].upper(), 'T')
        # see if there are non-blank depth data
        sampdepths = pmag.get_dictitem(samprecs, depth_scale, '', 'F')
        if dmax != -1:
            # fishes out records within depth bounds
            sampdepths = pmag.get_dictitem(sampdepths, depth_scale, dmax, 'max')
            sampdepths = pmag.get_dictitem(sampdepths, depth_scale, dmin, 'min')
        if len(sampdepths) > 0:  # if there are any....
            # set the core depth of this record
            rec['core_depth'] = sampdepths[0][depth_scale]
            Data.append(rec)  # fish out data with core_depth
            if isbulk:  # if there are bulk data
                chis = pmag.get_dictitem(Meas, 'er_specimen_name',
                                         rec['er_specimen_name'], 'T')
                # get the non-zero values for this specimen
                chis = pmag.get_dictitem(chis, 'measurement_chi_volume', '', 'F')
                if len(chis) > 0:  # if there are any....
                    # put in microSI
                    Bulks.append(1e6 * float(chis[0]['measurement_chi_volume']))
                    BulkDepths.append(float(sampdepths[0][depth_scale]))

    if len(Bulks) > 0:
        # set min and max bulk values
        bmin = min(Bulks)
        bmax = max(Bulks)
    xlab = "Depth (m)"
    if len(Data) > 0:
        location = Data[0]['er_location_name']
    else:
        return False, 'no data to plot'

    # collect the data for plotting tau V3_inc and V1_dec
    Depths, Tau1, Tau2, Tau3, V3Incs, P, V1Decs = [], [], [], [], [], [], []
    F23s = []
    Axs = []  # collect the plot ids
    # START HERE
    if len(Bulks) > 0:
        pcol += 1
    # get all the s1 values from Data as floats
    s1 = pmag.get_dictkey(Data, 'anisotropy_s1', 'f')
    s2 = pmag.get_dictkey(Data, 'anisotropy_s2', 'f')
    s3 = pmag.get_dictkey(Data, 'anisotropy_s3', 'f')
    s4 = pmag.get_dictkey(Data, 'anisotropy_s4', 'f')
    s5 = pmag.get_dictkey(Data, 'anisotropy_s5', 'f')
    s6 = pmag.get_dictkey(Data, 'anisotropy_s6', 'f')
    nmeas = pmag.get_dictkey(Data, 'anisotropy_n', 'int')
    sigma = pmag.get_dictkey(Data, 'anisotropy_sigma', 'f')
    Depths = pmag.get_dictkey(Data, 'core_depth', 'f')
    # Ss=np.array([s1,s4,s5,s4,s2,s6,s5,s6,s3]).transpose() # make an array
    Ss = np.array([s1, s2, s3, s4, s5, s6]).transpose()  # make an array
    # Ts=np.reshape(Ss,(len(Ss),3,-1)) # and re-shape to be n-length array of
    # 3x3 sub-arrays
    for k in range(len(Depths)):
        # tau,Evecs= pmag.tauV(Ts[k]) # get the sorted eigenvalues and eigenvectors
        # v3=pmag.cart2dir(Evecs[2])[1] # convert to inclination of the minimum
        # eigenvector
        fpars = pmag.dohext(nmeas[k] - 6, sigma[k], Ss[k])
        V3Incs.append(fpars['v3_inc'])
        V1Decs.append(fpars['v1_dec'])
        Tau1.append(fpars['t1'])
        Tau2.append(fpars['t2'])
        Tau3.append(fpars['t3'])
        P.append(old_div(Tau1[-1], Tau3[-1]))
        F23s.append(fpars['F23'])

    if len(Depths) > 0:
        if dmax == -1:
            dmax = max(Depths)
            dmin = min(Depths)
        tau_min = 1
        for t in Tau3:
            if t > 0 and t < tau_min:
                tau_min = t
        tau_max = max(Tau1)
        # tau_min=min(Tau3)
        P_max = max(P)
        P_min = min(P)
        # dmax=dmax+.05*dmax
        # dmin=dmin-.05*dmax
        main_plot = plt.figure(1, figsize=(10, 8))  # make the figure
        version_num = pmag.get_version()
        plt.figtext(.02, .01, version_num)  # attach the pmagpy version number
        ax = plt.subplot(1, pcol, 1)  # make the first column
        Axs.append(ax)
        ax.plot(Tau1, Depths, 'rs')
        ax.plot(Tau2, Depths, 'b^')
        ax.plot(Tau3, Depths, 'ko')
        if sum_file:
            core_depth_key, core_label_key, Cores = read_core_csv_file(sum_file)
            for core in Cores:
                depth = float(core[core_depth_key])
                if depth > dmin and depth < dmax:
                    plt.plot([0, 90], [depth, depth], 'b--')
        ax.axis([tau_min, tau_max, dmax, dmin])
        ax.set_xlabel('Eigenvalues')
        if depth_scale == 'sample_core_depth':
            ax.set_ylabel('Depth (mbsf)')
        elif depth_scale == 'age':
            ax.set_ylabel('Age (' + age_unit + ')')
        else:
            ax.set_ylabel('Depth (mcd)')
        ax2 = plt.subplot(1, pcol, 2)  # make the second column
        ax2.plot(P, Depths, 'rs')
        ax2.axis([P_min, P_max, dmax, dmin])
        ax2.set_xlabel('P')
        ax2.set_title(location)
        if sum_file:
            for core in Cores:
                depth = float(core[core_depth_key])
                if depth > dmin and depth < dmax:
                    plt.plot([0, 90], [depth, depth], 'b--')
        Axs.append(ax2)
        ax3 = plt.subplot(1, pcol, 3)
        Axs.append(ax3)
        ax3.plot(V3Incs, Depths, 'ko')
        ax3.axis([0, 90, dmax, dmin])
        ax3.set_xlabel('V3 Inclination')
        if sum_file:
            for core in Cores:
                depth = float(core[core_depth_key])
                if depth > dmin and depth < dmax:
                    plt.plot([0, 90], [depth, depth], 'b--')
        ax4 = plt.subplot(1, np.abs(pcol), 4)
        Axs.append(ax4)
        ax4.plot(V1Decs, Depths, 'rs')
        ax4.axis([0, 360, dmax, dmin])
        ax4.set_xlabel('V1 Declination')
        if sum_file:
            for core in Cores:
                depth = float(core[core_depth_key])
                if depth >= dmin and depth <= dmax:
                    plt.plot([0, 360], [depth, depth], 'b--')
                    if pcol == 4 and label == 1:
                        plt.text(360, depth + tint, core[core_label_key])
        # ax5=plt.subplot(1,np.abs(pcol),5)
        # Axs.append(ax5)
        # ax5.plot(F23s,Depths,'rs')
        # bounds=ax5.axis()
        # ax5.axis([bounds[0],bounds[1],dmax,dmin])
        # ax5.set_xlabel('F_23')
        # ax5.semilogx()
        # if sum_file:
        #     for core in Cores:
        #         depth=float(core[core_depth_key])
        #         if depth>=dmin and depth<=dmax:
        #             plt.plot([bounds[0],bounds[1]],[depth,depth],'b--')
        #             if pcol==5 and label==1:plt.text(bounds[1],depth+tint,core[core_label_key])
        # if pcol==6:
        if pcol == 5:
            # ax6=plt.subplot(1,pcol,6)
            ax6 = plt.subplot(1, pcol, 5)
            Axs.append(ax6)
            ax6.plot(Bulks, BulkDepths, 'bo')
            ax6.axis([bmin - 1, 1.1 * bmax, dmax, dmin])
            ax6.set_xlabel('Bulk Susc. (uSI)')
            if sum_file:
                for core in Cores:
                    depth = float(core[core_depth_key])
                    if depth >= dmin and depth <= dmax:
                        plt.plot([0, bmax], [depth, depth], 'b--')
                        if label == 1:
                            plt.text(1.1 * bmax, depth + tint, core[core_label_key])
        for x in Axs:
            # this makes the x-tick labels more reasonable - they were
            # overcrowded using the defaults
            pmagplotlib.delticks(x)
        fig_name = location + '_ani_depthplot.' + fmt
        return main_plot, fig_name
    else:
        return False, "No data to plot"
returns matplotlib figure with anisotropy data plotted against depth. Available depth scales: 'sample_composite_depth', 'sample_core_depth', or 'age' (you must provide an age file to use this option)
[ "returns", "matplotlib", "figure", "with", "anisotropy", "data", "plotted", "against", "depth", "available", "depth", "scales", ":", "sample_composite_depth", "sample_core_depth", "or", "age", "(", "you", "must", "provide", "an", "age", "file", "to", "use", "this", "option", ")" ]
python
train
41.657025
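A minimal invocation sketch for the ani_depthplot2 entry above (file names and depth bounds here are placeholders, not taken from the source):

    fig, fig_name = ani_depthplot2(ani_file='rmag_anisotropy.txt',
                                   meas_file='magic_measurements.txt',
                                   samp_file='er_samples.txt',
                                   dmin=0, dmax=50, fmt='svg',
                                   depth_scale='sample_core_depth')
    if fig:
        fig.savefig(fig_name)  # on failure the first element is False, the second an error message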
quantopian/alphalens
alphalens/performance.py
https://github.com/quantopian/alphalens/blob/d43eac871bb061e956df936794d3dd514da99e44/alphalens/performance.py#L1021-L1088
def factor_cumulative_returns(factor_data,
                              period,
                              long_short=True,
                              group_neutral=False,
                              equal_weight=False,
                              quantiles=None,
                              groups=None):
    """
    Simulate a portfolio using the factor in input and returns the cumulative
    returns of the simulated portfolio

    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
        containing the values for a single alpha factor, forward returns for
        each period, the factor quantile/bin that factor value belongs to, and
        (optionally) the group the asset belongs to.
        - See full explanation in utils.get_clean_factor_and_forward_returns
    period : string
        'factor_data' column name corresponding to the 'period' returns to be
        used in the computation of portfolio returns
    long_short : bool, optional
        if True then simulates a dollar neutral long-short portfolio
        - see performance.create_pyfolio_input for more details
    group_neutral : bool, optional
        If True then simulates a group neutral portfolio
        - see performance.create_pyfolio_input for more details
    equal_weight : bool, optional
        Control the assets weights:
        - see performance.create_pyfolio_input for more details
    quantiles: sequence[int], optional
        Use only specific quantiles in the computation. By default all
        quantiles are used
    groups: sequence[string], optional
        Use only specific groups in the computation. By default all groups
        are used

    Returns
    -------
    Cumulative returns series : pd.Series
        Example:
            2015-07-16 09:30:00  -0.012143
            2015-07-16 12:30:00   0.012546
            2015-07-17 09:30:00   0.045350
            2015-07-17 12:30:00   0.065897
            2015-07-20 09:30:00   0.030957
    """
    fwd_ret_cols = utils.get_forward_returns_columns(factor_data.columns)

    if period not in fwd_ret_cols:
        raise ValueError("Period '%s' not found" % period)

    todrop = list(fwd_ret_cols)
    todrop.remove(period)
    portfolio_data = factor_data.drop(todrop, axis=1)

    if quantiles is not None:
        portfolio_data = portfolio_data[portfolio_data['factor_quantile'].isin(
            quantiles)]

    if groups is not None:
        portfolio_data = portfolio_data[portfolio_data['group'].isin(groups)]

    returns = \
        factor_returns(portfolio_data, long_short, group_neutral, equal_weight)

    return cumulative_returns(returns[period], period)
[ "def", "factor_cumulative_returns", "(", "factor_data", ",", "period", ",", "long_short", "=", "True", ",", "group_neutral", "=", "False", ",", "equal_weight", "=", "False", ",", "quantiles", "=", "None", ",", "groups", "=", "None", ")", ":", "fwd_ret_cols", "=", "utils", ".", "get_forward_returns_columns", "(", "factor_data", ".", "columns", ")", "if", "period", "not", "in", "fwd_ret_cols", ":", "raise", "ValueError", "(", "\"Period '%s' not found\"", "%", "period", ")", "todrop", "=", "list", "(", "fwd_ret_cols", ")", "todrop", ".", "remove", "(", "period", ")", "portfolio_data", "=", "factor_data", ".", "drop", "(", "todrop", ",", "axis", "=", "1", ")", "if", "quantiles", "is", "not", "None", ":", "portfolio_data", "=", "portfolio_data", "[", "portfolio_data", "[", "'factor_quantile'", "]", ".", "isin", "(", "quantiles", ")", "]", "if", "groups", "is", "not", "None", ":", "portfolio_data", "=", "portfolio_data", "[", "portfolio_data", "[", "'group'", "]", ".", "isin", "(", "groups", ")", "]", "returns", "=", "factor_returns", "(", "portfolio_data", ",", "long_short", ",", "group_neutral", ",", "equal_weight", ")", "return", "cumulative_returns", "(", "returns", "[", "period", "]", ",", "period", ")" ]
Simulate a portfolio using the factor in input and returns the cumulative
returns of the simulated portfolio

Parameters
----------
factor_data : pd.DataFrame - MultiIndex
    A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
    containing the values for a single alpha factor, forward returns for
    each period, the factor quantile/bin that factor value belongs to, and
    (optionally) the group the asset belongs to.
    - See full explanation in utils.get_clean_factor_and_forward_returns
period : string
    'factor_data' column name corresponding to the 'period' returns to be
    used in the computation of portfolio returns
long_short : bool, optional
    if True then simulates a dollar neutral long-short portfolio
    - see performance.create_pyfolio_input for more details
group_neutral : bool, optional
    If True then simulates a group neutral portfolio
    - see performance.create_pyfolio_input for more details
equal_weight : bool, optional
    Control the assets weights:
    - see performance.create_pyfolio_input for more details
quantiles: sequence[int], optional
    Use only specific quantiles in the computation. By default all
    quantiles are used
groups: sequence[string], optional
    Use only specific groups in the computation. By default all groups
    are used

Returns
-------
Cumulative returns series : pd.Series
    Example:
        2015-07-16 09:30:00  -0.012143
        2015-07-16 12:30:00   0.012546
        2015-07-17 09:30:00   0.045350
        2015-07-17 12:30:00   0.065897
        2015-07-20 09:30:00   0.030957
[ "Simulate", "a", "portfolio", "using", "the", "factor", "in", "input", "and", "returns", "the", "cumulative", "returns", "of", "the", "simulated", "portfolio" ]
python
train
39.279412
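A usage sketch for factor_cumulative_returns, assuming factor_data was prepared elsewhere with utils.get_clean_factor_and_forward_returns and that a '1D' forward-return column exists (both assumptions, not shown in the source):

    # cumulative returns of a long-short portfolio built from the bottom and top quintiles
    cum_rets = factor_cumulative_returns(factor_data, '1D',
                                         long_short=True, quantiles=[1, 5])
    cum_rets.plot(title='Simulated factor portfolio')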
jeremymcrae/denovonear
denovonear/ensembl_requester.py
https://github.com/jeremymcrae/denovonear/blob/feaab0fc77e89d70b31e8092899e4f0e68bac9fe/denovonear/ensembl_requester.py#L308-L322
def get_chrom_for_transcript(self, transcript_id, hgnc_id):
    """ obtain the chromosome for a transcript from ensembl
    """
    headers = {"content-type": "application/json"}
    self.attempt = 0
    ext = "/overlap/id/{}?feature=gene".format(transcript_id)
    r = self.ensembl_request(ext, headers)
    for gene in json.loads(r):
        if gene["external_name"] == hgnc_id:
            return gene["seq_region_name"]
    return None
[ "def", "get_chrom_for_transcript", "(", "self", ",", "transcript_id", ",", "hgnc_id", ")", ":", "headers", "=", "{", "\"content-type\"", ":", "\"application/json\"", "}", "self", ".", "attempt", "=", "0", "ext", "=", "\"/overlap/id/{}?feature=gene\"", ".", "format", "(", "transcript_id", ")", "r", "=", "self", ".", "ensembl_request", "(", "ext", ",", "headers", ")", "for", "gene", "in", "json", ".", "loads", "(", "r", ")", ":", "if", "gene", "[", "\"external_name\"", "]", "==", "hgnc_id", ":", "return", "gene", "[", "\"seq_region_name\"", "]", "return", "None" ]
obtain the chromosome for a transcript from ensembl
[ "obtain", "the", "sequence", "for", "a", "transcript", "from", "ensembl" ]
python
train
33.333333
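The method above wraps Ensembl's public REST "overlap" endpoint; here is a self-contained sketch of the same lookup using requests directly (the class's ensembl_request retry handling is omitted, and network access is required):

    import requests

    def chrom_for_transcript(transcript_id, hgnc_symbol):
        url = 'https://rest.ensembl.org/overlap/id/{}?feature=gene'.format(transcript_id)
        r = requests.get(url, headers={'content-type': 'application/json'})
        r.raise_for_status()
        for gene in r.json():
            if gene.get('external_name') == hgnc_symbol:
                return gene['seq_region_name']
        return None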
costastf/toonlib
toonlib/toonlib.py
https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L162-L188
def _state(self):
    """The internal state of the object.

    The API responses are not consistent, so a number of retries is
    performed on every call, each response updating the internally saved
    state and refreshing the data. The info is cached for
    STATE_CACHING_SECONDS.

    :return: The current state of the toon's information.
    """
    state = {}
    required_keys = ('deviceStatusInfo', 'gasUsage', 'powerUsage',
                     'thermostatInfo', 'thermostatStates')
    try:
        for _ in range(self._state_retries):
            state.update(self._get_data('/client/auth/retrieveToonState'))
    except TypeError:
        self._logger.exception('Could not get answer from service.')
    message = ('Updating internal state with retrieved '
               'state:{state}').format(state=state)
    self._logger.debug(message)
    self._state_.update(state)
    if not all([key in self._state_.keys() for key in required_keys]):
        raise IncompleteResponse(state)
    return self._state_
[ "def", "_state", "(", "self", ")", ":", "state", "=", "{", "}", "required_keys", "=", "(", "'deviceStatusInfo'", ",", "'gasUsage'", ",", "'powerUsage'", ",", "'thermostatInfo'", ",", "'thermostatStates'", ")", "try", ":", "for", "_", "in", "range", "(", "self", ".", "_state_retries", ")", ":", "state", ".", "update", "(", "self", ".", "_get_data", "(", "'/client/auth/retrieveToonState'", ")", ")", "except", "TypeError", ":", "self", ".", "_logger", ".", "exception", "(", "'Could not get answer from service.'", ")", "message", "=", "(", "'Updating internal state with retrieved '", "'state:{state}'", ")", ".", "format", "(", "state", "=", "state", ")", "self", ".", "_logger", ".", "debug", "(", "message", ")", "self", ".", "_state_", ".", "update", "(", "state", ")", "if", "not", "all", "(", "[", "key", "in", "self", ".", "_state_", ".", "keys", "(", ")", "for", "key", "in", "required_keys", "]", ")", ":", "raise", "IncompleteResponse", "(", "state", ")", "return", "self", ".", "_state_" ]
The internal state of the object.

The API responses are not consistent, so a number of retries is performed on every call, each response updating the internally saved state and refreshing the data. The info is cached for STATE_CACHING_SECONDS.

:return: The current state of the toon's information.
[ "The", "internal", "state", "of", "the", "object", "." ]
python
train
42.222222
softlayer/softlayer-python
SoftLayer/managers/network.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/network.py#L599-L634
def summary_by_datacenter(self): """Summary of the networks on the account, grouped by data center. The resultant dictionary is primarily useful for statistical purposes. It contains count information rather than raw data. If you want raw information, see the :func:`list_vlans` method instead. :returns: A dictionary keyed by data center with the data containing a set of counts for subnets, hardware, virtual servers, and other objects residing within that data center. """ datacenters = collections.defaultdict(lambda: { 'hardware_count': 0, 'public_ip_count': 0, 'subnet_count': 0, 'virtual_guest_count': 0, 'vlan_count': 0, }) for vlan in self.list_vlans(): name = utils.lookup(vlan, 'primaryRouter', 'datacenter', 'name') datacenters[name]['vlan_count'] += 1 datacenters[name]['public_ip_count'] += ( vlan['totalPrimaryIpAddressCount']) datacenters[name]['subnet_count'] += vlan['subnetCount'] # NOTE(kmcdonald): Only count hardware/guests once if vlan.get('networkSpace') == 'PRIVATE': datacenters[name]['hardware_count'] += ( vlan['hardwareCount']) datacenters[name]['virtual_guest_count'] += ( vlan['virtualGuestCount']) return dict(datacenters)
[ "def", "summary_by_datacenter", "(", "self", ")", ":", "datacenters", "=", "collections", ".", "defaultdict", "(", "lambda", ":", "{", "'hardware_count'", ":", "0", ",", "'public_ip_count'", ":", "0", ",", "'subnet_count'", ":", "0", ",", "'virtual_guest_count'", ":", "0", ",", "'vlan_count'", ":", "0", ",", "}", ")", "for", "vlan", "in", "self", ".", "list_vlans", "(", ")", ":", "name", "=", "utils", ".", "lookup", "(", "vlan", ",", "'primaryRouter'", ",", "'datacenter'", ",", "'name'", ")", "datacenters", "[", "name", "]", "[", "'vlan_count'", "]", "+=", "1", "datacenters", "[", "name", "]", "[", "'public_ip_count'", "]", "+=", "(", "vlan", "[", "'totalPrimaryIpAddressCount'", "]", ")", "datacenters", "[", "name", "]", "[", "'subnet_count'", "]", "+=", "vlan", "[", "'subnetCount'", "]", "# NOTE(kmcdonald): Only count hardware/guests once", "if", "vlan", ".", "get", "(", "'networkSpace'", ")", "==", "'PRIVATE'", ":", "datacenters", "[", "name", "]", "[", "'hardware_count'", "]", "+=", "(", "vlan", "[", "'hardwareCount'", "]", ")", "datacenters", "[", "name", "]", "[", "'virtual_guest_count'", "]", "+=", "(", "vlan", "[", "'virtualGuestCount'", "]", ")", "return", "dict", "(", "datacenters", ")" ]
Summary of the networks on the account, grouped by data center. The resultant dictionary is primarily useful for statistical purposes. It contains count information rather than raw data. If you want raw information, see the :func:`list_vlans` method instead. :returns: A dictionary keyed by data center with the data containing a set of counts for subnets, hardware, virtual servers, and other objects residing within that data center.
[ "Summary", "of", "the", "networks", "on", "the", "account", "grouped", "by", "data", "center", "." ]
python
train
40.611111
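The counting pattern above, reduced to a runnable example: collections.defaultdict with a lambda factory gives every datacenter key a zeroed counter dict on first touch (the sample VLAN records are invented):

    import collections

    counters = collections.defaultdict(lambda: {'vlan_count': 0, 'subnet_count': 0})
    sample_vlans = [{'dc': 'dal05', 'subnetCount': 3},
                    {'dc': 'dal05', 'subnetCount': 1},
                    {'dc': 'ams01', 'subnetCount': 2}]
    for vlan in sample_vlans:
        counters[vlan['dc']]['vlan_count'] += 1
        counters[vlan['dc']]['subnet_count'] += vlan['subnetCount']
    print(dict(counters))
    # {'dal05': {'vlan_count': 2, 'subnet_count': 4}, 'ams01': {'vlan_count': 1, 'subnet_count': 2}}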
FNNDSC/pfmisc
pfmisc/C_snode.py
https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L589-L612
def pwd(self, **kwargs): """ Returns the cwd Optional kwargs: node = <node> If specified, return only the directory name at depth <node>. """ b_node = False node = 0 for key,val in kwargs.items(): if key == 'node': b_node = True node = int(val) str_path = self.cwd() if b_node: l_path = str_path.split('/') if len(l_path) >= node+1: str_path = str_path.split('/')[node] return str_path
[ "def", "pwd", "(", "self", ",", "*", "*", "kwargs", ")", ":", "b_node", "=", "False", "node", "=", "0", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "if", "key", "==", "'node'", ":", "b_node", "=", "True", "node", "=", "int", "(", "val", ")", "str_path", "=", "self", ".", "cwd", "(", ")", "if", "b_node", ":", "l_path", "=", "str_path", ".", "split", "(", "'/'", ")", "if", "len", "(", "l_path", ")", ">=", "node", "+", "1", ":", "str_path", "=", "str_path", ".", "split", "(", "'/'", ")", "[", "node", "]", "return", "str_path" ]
Returns the cwd Optional kwargs: node = <node> If specified, return only the directory name at depth <node>.
[ "Returns", "the", "cwd" ]
python
train
26.791667
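The node selection in pwd boils down to indexing the split path; a standalone sketch with an invented path:

    str_path = '/neuro/users/rudolph/data'
    l_path = str_path.split('/')   # ['', 'neuro', 'users', 'rudolph', 'data']
    node = 1
    if len(l_path) >= node + 1:
        print(l_path[node])        # 'neuro' -- index 0 is the empty string before the leading '/'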
globus/globus-cli
globus_cli/commands/ls.py
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/commands/ls.py#L84-L150
def ls_command( endpoint_plus_path, recursive_depth_limit, recursive, long_output, show_hidden, filter_val, ): """ Executor for `globus ls` """ endpoint_id, path = endpoint_plus_path # do autoactivation before the `ls` call so that recursive invocations # won't do this repeatedly, and won't have to instantiate new clients client = get_client() autoactivate(client, endpoint_id, if_expires_in=60) # create the query paramaters to send to operation_ls ls_params = {"show_hidden": int(show_hidden)} if path: ls_params["path"] = path if filter_val: # this char has special meaning in the LS API's filter clause # can't be part of the pattern (but we don't support globbing across # dir structures anyway) if "/" in filter_val: raise click.UsageError('--filter cannot contain "/"') # format into a simple filter clause which operates on filenames ls_params["filter"] = "name:{}".format(filter_val) # get the `ls` result if recursive: # NOTE: # --recursive and --filter have an interplay that some users may find # surprising # if we're asked to change or "improve" the behavior in the future, we # could do so with "type:dir" or "type:file" filters added in, and # potentially work out some viable behavior based on what people want res = client.recursive_operation_ls( endpoint_id, depth=recursive_depth_limit, **ls_params ) else: res = client.operation_ls(endpoint_id, **ls_params) def cleaned_item_name(item): return item["name"] + ("/" if item["type"] == "dir" else "") # and then print it, per formatting rules formatted_print( res, fields=[ ("Permissions", "permissions"), ("User", "user"), ("Group", "group"), ("Size", "size"), ("Last Modified", "last_modified"), ("File Type", "type"), ("Filename", cleaned_item_name), ], simple_text=( None if long_output or is_verbose() or not outformat_is_text() else "\n".join(cleaned_item_name(x) for x in res) ), json_converter=iterable_response_to_dict, )
[ "def", "ls_command", "(", "endpoint_plus_path", ",", "recursive_depth_limit", ",", "recursive", ",", "long_output", ",", "show_hidden", ",", "filter_val", ",", ")", ":", "endpoint_id", ",", "path", "=", "endpoint_plus_path", "# do autoactivation before the `ls` call so that recursive invocations", "# won't do this repeatedly, and won't have to instantiate new clients", "client", "=", "get_client", "(", ")", "autoactivate", "(", "client", ",", "endpoint_id", ",", "if_expires_in", "=", "60", ")", "# create the query paramaters to send to operation_ls", "ls_params", "=", "{", "\"show_hidden\"", ":", "int", "(", "show_hidden", ")", "}", "if", "path", ":", "ls_params", "[", "\"path\"", "]", "=", "path", "if", "filter_val", ":", "# this char has special meaning in the LS API's filter clause", "# can't be part of the pattern (but we don't support globbing across", "# dir structures anyway)", "if", "\"/\"", "in", "filter_val", ":", "raise", "click", ".", "UsageError", "(", "'--filter cannot contain \"/\"'", ")", "# format into a simple filter clause which operates on filenames", "ls_params", "[", "\"filter\"", "]", "=", "\"name:{}\"", ".", "format", "(", "filter_val", ")", "# get the `ls` result", "if", "recursive", ":", "# NOTE:", "# --recursive and --filter have an interplay that some users may find", "# surprising", "# if we're asked to change or \"improve\" the behavior in the future, we", "# could do so with \"type:dir\" or \"type:file\" filters added in, and", "# potentially work out some viable behavior based on what people want", "res", "=", "client", ".", "recursive_operation_ls", "(", "endpoint_id", ",", "depth", "=", "recursive_depth_limit", ",", "*", "*", "ls_params", ")", "else", ":", "res", "=", "client", ".", "operation_ls", "(", "endpoint_id", ",", "*", "*", "ls_params", ")", "def", "cleaned_item_name", "(", "item", ")", ":", "return", "item", "[", "\"name\"", "]", "+", "(", "\"/\"", "if", "item", "[", "\"type\"", "]", "==", "\"dir\"", "else", "\"\"", ")", "# and then print it, per formatting rules", "formatted_print", "(", "res", ",", "fields", "=", "[", "(", "\"Permissions\"", ",", "\"permissions\"", ")", ",", "(", "\"User\"", ",", "\"user\"", ")", ",", "(", "\"Group\"", ",", "\"group\"", ")", ",", "(", "\"Size\"", ",", "\"size\"", ")", ",", "(", "\"Last Modified\"", ",", "\"last_modified\"", ")", ",", "(", "\"File Type\"", ",", "\"type\"", ")", ",", "(", "\"Filename\"", ",", "cleaned_item_name", ")", ",", "]", ",", "simple_text", "=", "(", "None", "if", "long_output", "or", "is_verbose", "(", ")", "or", "not", "outformat_is_text", "(", ")", "else", "\"\\n\"", ".", "join", "(", "cleaned_item_name", "(", "x", ")", "for", "x", "in", "res", ")", ")", ",", "json_converter", "=", "iterable_response_to_dict", ",", ")" ]
Executor for `globus ls`
[ "Executor", "for", "globus", "ls" ]
python
train
33.835821
laymonage/kbbi-python
kbbi/kbbi.py
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L172-L184
def _nama(self):
    """Return the string representation of this entry's name.

    :returns: String representation of the entry's name
    :rtype: str
    """
    hasil = self.nama
    if self.nomor:
        hasil += " [{}]".format(self.nomor)
    if self.kata_dasar:
        hasil = " » ".join(self.kata_dasar) + " » " + hasil
    return hasil
[ "def", "_nama", "(", "self", ")", ":", "hasil", "=", "self", ".", "nama", "if", "self", ".", "nomor", ":", "hasil", "+=", "\" [{}]\"", ".", "format", "(", "self", ".", "nomor", ")", "if", "self", ".", "kata_dasar", ":", "hasil", "=", "\" » \".", "j", "oin(", "s", "elf.", "k", "ata_dasar)", " ", " ", " » \" +", "h", "sil", "return", "hasil" ]
Return the string representation of this entry's name. :returns: String representation of the entry's name :rtype: str
[ "Mengembalikan", "representasi", "string", "untuk", "nama", "entri", "ini", "." ]
python
train
28
rvswift/EB
EB/builder/utilities/classification.py
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/utilities/classification.py#L358-L380
def calculate_auc_covar(auc_structure1, auc_structure2): """ determine AUC covariance due to actives (covar_a) and decoys (covar_d) :param auc_structure1: list [(id, best_score, best_query, status, fpf, tpf), ...,] :param auc_structure2: list [(id, best_score, best_query, status, fpf, tpf), ...,] :return (covar_a, covar_d): tuple """ # split data by activity class actives1, decoys1 = splitter(auc_structure1) actives2, decoys2 = splitter(auc_structure2) # covariance due to actives = E[{fpf2 - E(fpf2)a} * {fpf1 - E(fpf1)a}]a fpf1 = [x[4] for x in actives1] fpf2 = [x[4] for x in actives2] covara = np.cov(fpf1,fpf2)[0][1] # covariance due to decoys = E[{tpf2 - E(tpf2)d} * {tpf1 - E(tpf1)d}] tpf1 = [x[5] for x in decoys1] tpf2 = [x[5] for x in decoys2] covard = np.cov(tpf1,tpf2)[0][1] # this is only compatible with versions >= 1.5 return covara, covard
[ "def", "calculate_auc_covar", "(", "auc_structure1", ",", "auc_structure2", ")", ":", "# split data by activity class", "actives1", ",", "decoys1", "=", "splitter", "(", "auc_structure1", ")", "actives2", ",", "decoys2", "=", "splitter", "(", "auc_structure2", ")", "# covariance due to actives = E[{fpf2 - E(fpf2)a} * {fpf1 - E(fpf1)a}]a", "fpf1", "=", "[", "x", "[", "4", "]", "for", "x", "in", "actives1", "]", "fpf2", "=", "[", "x", "[", "4", "]", "for", "x", "in", "actives2", "]", "covara", "=", "np", ".", "cov", "(", "fpf1", ",", "fpf2", ")", "[", "0", "]", "[", "1", "]", "# covariance due to decoys = E[{tpf2 - E(tpf2)d} * {tpf1 - E(tpf1)d}]", "tpf1", "=", "[", "x", "[", "5", "]", "for", "x", "in", "decoys1", "]", "tpf2", "=", "[", "x", "[", "5", "]", "for", "x", "in", "decoys2", "]", "covard", "=", "np", ".", "cov", "(", "tpf1", ",", "tpf2", ")", "[", "0", "]", "[", "1", "]", "# this is only compatible with versions >= 1.5", "return", "covara", ",", "covard" ]
determine AUC covariance due to actives (covar_a) and decoys (covar_d) :param auc_structure1: list [(id, best_score, best_query, status, fpf, tpf), ...,] :param auc_structure2: list [(id, best_score, best_query, status, fpf, tpf), ...,] :return (covar_a, covar_d): tuple
[ "determine", "AUC", "covariance", "due", "to", "actives", "(", "covar_a", ")", "and", "decoys", "(", "covar_d", ")", ":", "param", "auc_structure1", ":", "list", "[", "(", "id", "best_score", "best_query", "status", "fpf", "tpf", ")", "...", "]", ":", "param", "auc_structure2", ":", "list", "[", "(", "id", "best_score", "best_query", "status", "fpf", "tpf", ")", "...", "]", ":", "return", "(", "covar_a", "covar_d", ")", ":", "tuple" ]
python
train
39.826087
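The covariance step above relies on np.cov returning the 2x2 covariance matrix, whose off-diagonal [0][1] entry is the covariance between the two series; a runnable miniature with invented fraction values:

    import numpy as np

    fpf1 = [0.1, 0.2, 0.4, 0.7]
    fpf2 = [0.1, 0.3, 0.5, 0.6]
    covara = np.cov(fpf1, fpf2)[0][1]
    print(covara)  # sample covariance of the two fraction series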
aws/aws-encryption-sdk-python
src/aws_encryption_sdk/streaming_client.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/streaming_client.py#L859-L899
def _read_bytes_from_framed_body(self, b): """Reads the requested number of bytes from a streaming framed message body. :param int b: Number of bytes to read :returns: Bytes read from source stream and decrypted :rtype: bytes """ plaintext = b"" final_frame = False _LOGGER.debug("collecting %d bytes", b) while len(plaintext) < b and not final_frame: _LOGGER.debug("Reading frame") frame_data, final_frame = deserialize_frame( stream=self.source_stream, header=self._header, verifier=self.verifier ) _LOGGER.debug("Read complete for frame %d", frame_data.sequence_number) if frame_data.sequence_number != self.last_sequence_number + 1: raise SerializationError("Malformed message: frames out of order") self.last_sequence_number += 1 aad_content_string = aws_encryption_sdk.internal.utils.get_aad_content_string( content_type=self._header.content_type, is_final_frame=frame_data.final_frame ) associated_data = assemble_content_aad( message_id=self._header.message_id, aad_content_string=aad_content_string, seq_num=frame_data.sequence_number, length=len(frame_data.ciphertext), ) plaintext += decrypt( algorithm=self._header.algorithm, key=self._derived_data_key, encrypted_data=frame_data, associated_data=associated_data, ) plaintext_length = len(plaintext) _LOGGER.debug("bytes collected: %d", plaintext_length) if final_frame: _LOGGER.debug("Reading footer") self.footer = deserialize_footer(stream=self.source_stream, verifier=self.verifier) return plaintext
[ "def", "_read_bytes_from_framed_body", "(", "self", ",", "b", ")", ":", "plaintext", "=", "b\"\"", "final_frame", "=", "False", "_LOGGER", ".", "debug", "(", "\"collecting %d bytes\"", ",", "b", ")", "while", "len", "(", "plaintext", ")", "<", "b", "and", "not", "final_frame", ":", "_LOGGER", ".", "debug", "(", "\"Reading frame\"", ")", "frame_data", ",", "final_frame", "=", "deserialize_frame", "(", "stream", "=", "self", ".", "source_stream", ",", "header", "=", "self", ".", "_header", ",", "verifier", "=", "self", ".", "verifier", ")", "_LOGGER", ".", "debug", "(", "\"Read complete for frame %d\"", ",", "frame_data", ".", "sequence_number", ")", "if", "frame_data", ".", "sequence_number", "!=", "self", ".", "last_sequence_number", "+", "1", ":", "raise", "SerializationError", "(", "\"Malformed message: frames out of order\"", ")", "self", ".", "last_sequence_number", "+=", "1", "aad_content_string", "=", "aws_encryption_sdk", ".", "internal", ".", "utils", ".", "get_aad_content_string", "(", "content_type", "=", "self", ".", "_header", ".", "content_type", ",", "is_final_frame", "=", "frame_data", ".", "final_frame", ")", "associated_data", "=", "assemble_content_aad", "(", "message_id", "=", "self", ".", "_header", ".", "message_id", ",", "aad_content_string", "=", "aad_content_string", ",", "seq_num", "=", "frame_data", ".", "sequence_number", ",", "length", "=", "len", "(", "frame_data", ".", "ciphertext", ")", ",", ")", "plaintext", "+=", "decrypt", "(", "algorithm", "=", "self", ".", "_header", ".", "algorithm", ",", "key", "=", "self", ".", "_derived_data_key", ",", "encrypted_data", "=", "frame_data", ",", "associated_data", "=", "associated_data", ",", ")", "plaintext_length", "=", "len", "(", "plaintext", ")", "_LOGGER", ".", "debug", "(", "\"bytes collected: %d\"", ",", "plaintext_length", ")", "if", "final_frame", ":", "_LOGGER", ".", "debug", "(", "\"Reading footer\"", ")", "self", ".", "footer", "=", "deserialize_footer", "(", "stream", "=", "self", ".", "source_stream", ",", "verifier", "=", "self", ".", "verifier", ")", "return", "plaintext" ]
Reads the requested number of bytes from a streaming framed message body. :param int b: Number of bytes to read :returns: Bytes read from source stream and decrypted :rtype: bytes
[ "Reads", "the", "requested", "number", "of", "bytes", "from", "a", "streaming", "framed", "message", "body", "." ]
python
train
45.926829
NarrativeScience/lsi
src/lsi/utils/hosts.py
https://github.com/NarrativeScience/lsi/blob/7d901b03fdb1a34ef795e5412bfe9685d948e32d/src/lsi/utils/hosts.py#L327-L344
def format_string(self, fmat_string): """ Takes a string containing 0 or more {variables} and formats it according to this instance's attributes. :param fmat_string: A string, e.g. '{name}-foo.txt' :type fmat_string: ``str`` :return: The string formatted according to this instance. E.g. 'production-runtime-foo.txt' :rtype: ``str`` """ try: return fmat_string.format(**vars(self)) except KeyError as e: raise ValueError('Invalid format string: {0}. Instance has no ' 'attribute {1}.'.format(repr(fmat_string), repr(e)))
[ "def", "format_string", "(", "self", ",", "fmat_string", ")", ":", "try", ":", "return", "fmat_string", ".", "format", "(", "*", "*", "vars", "(", "self", ")", ")", "except", "KeyError", "as", "e", ":", "raise", "ValueError", "(", "'Invalid format string: {0}. Instance has no '", "'attribute {1}.'", ".", "format", "(", "repr", "(", "fmat_string", ")", ",", "repr", "(", "e", ")", ")", ")" ]
Takes a string containing 0 or more {variables} and formats it according to this instance's attributes. :param fmat_string: A string, e.g. '{name}-foo.txt' :type fmat_string: ``str`` :return: The string formatted according to this instance. E.g. 'production-runtime-foo.txt' :rtype: ``str``
[ "Takes", "a", "string", "containing", "0", "or", "more", "{", "variables", "}", "and", "formats", "it", "according", "to", "this", "instance", "s", "attributes", "." ]
python
test
39.333333
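The formatting trick in format_string is str.format(**vars(self)); a self-contained version with an illustrative class:

    class Env(object):
        def __init__(self):
            self.name = 'production'
            self.role = 'runtime'

    print('{name}-{role}-foo.txt'.format(**vars(Env())))  # production-runtime-foo.txt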
ungarj/mapchete
mapchete/formats/default/png.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/formats/default/png.py#L181-L195
def for_web(self, data): """ Convert data to web output. Parameters ---------- data : array Returns ------- web data : array """ rgba = self._prepare_array_for_png(data) data = ma.masked_where(rgba == self.nodata, rgba) return memory_file(data, self.profile()), 'image/png'
[ "def", "for_web", "(", "self", ",", "data", ")", ":", "rgba", "=", "self", ".", "_prepare_array_for_png", "(", "data", ")", "data", "=", "ma", ".", "masked_where", "(", "rgba", "==", "self", ".", "nodata", ",", "rgba", ")", "return", "memory_file", "(", "data", ",", "self", ".", "profile", "(", ")", ")", ",", "'image/png'" ]
Convert data to web output. Parameters ---------- data : array Returns ------- web data : array
[ "Convert", "data", "to", "web", "output", "." ]
python
valid
23.8
marrow/mongo
web/session/mongo.py
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/web/session/mongo.py#L92-L98
def persist(self, context): """Update or insert the session document into the configured collection""" D = self._Document document = context.session[self.name] D.get_collection().replace_one(D.id == document.id, document, True)
[ "def", "persist", "(", "self", ",", "context", ")", ":", "D", "=", "self", ".", "_Document", "document", "=", "context", ".", "session", "[", "self", ".", "name", "]", "D", ".", "get_collection", "(", ")", ".", "replace_one", "(", "D", ".", "id", "==", "document", ".", "id", ",", "document", ",", "True", ")" ]
Update or insert the session document into the configured collection
[ "Update", "or", "insert", "the", "session", "document", "into", "the", "configured", "collection" ]
python
train
33.571429
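The replace-or-insert above is pymongo's replace_one with upsert=True (passed positionally in the source); a direct pymongo sketch with invented connection details:

    from pymongo import MongoClient

    sessions = MongoClient('mongodb://localhost:27017').mydb.sessions
    document = {'_id': 'abc123', 'user': 'alice'}
    # inserts the document when no match exists, otherwise replaces it wholesale
    sessions.replace_one({'_id': document['_id']}, document, upsert=True)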
chrisspen/burlap
burlap/deploy.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/deploy.py#L247-L275
def preview(self, components=None, ask=0): """ Inspects differences between the last deployment and the current code state. """ ask = int(ask) self.init() component_order, plan_funcs = self.get_component_funcs(components=components) print('\n%i changes found for host %s.\n' % (len(component_order), self.genv.host_string)) if component_order and plan_funcs: if self.verbose: print('These components have changed:\n') for component in sorted(component_order): print((' '*4)+component) print('Deployment plan for host %s:\n' % self.genv.host_string) for func_name, _ in plan_funcs: print(success_str((' '*4)+func_name)) if component_order: print() if ask and self.genv.host_string == self.genv.hosts[-1]: if component_order: if not raw_input('Begin deployment? [yn] ').strip().lower().startswith('y'): sys.exit(0) else: sys.exit(0)
[ "def", "preview", "(", "self", ",", "components", "=", "None", ",", "ask", "=", "0", ")", ":", "ask", "=", "int", "(", "ask", ")", "self", ".", "init", "(", ")", "component_order", ",", "plan_funcs", "=", "self", ".", "get_component_funcs", "(", "components", "=", "components", ")", "print", "(", "'\\n%i changes found for host %s.\\n'", "%", "(", "len", "(", "component_order", ")", ",", "self", ".", "genv", ".", "host_string", ")", ")", "if", "component_order", "and", "plan_funcs", ":", "if", "self", ".", "verbose", ":", "print", "(", "'These components have changed:\\n'", ")", "for", "component", "in", "sorted", "(", "component_order", ")", ":", "print", "(", "(", "' '", "*", "4", ")", "+", "component", ")", "print", "(", "'Deployment plan for host %s:\\n'", "%", "self", ".", "genv", ".", "host_string", ")", "for", "func_name", ",", "_", "in", "plan_funcs", ":", "print", "(", "success_str", "(", "(", "' '", "*", "4", ")", "+", "func_name", ")", ")", "if", "component_order", ":", "print", "(", ")", "if", "ask", "and", "self", ".", "genv", ".", "host_string", "==", "self", ".", "genv", ".", "hosts", "[", "-", "1", "]", ":", "if", "component_order", ":", "if", "not", "raw_input", "(", "'Begin deployment? [yn] '", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", ".", "startswith", "(", "'y'", ")", ":", "sys", ".", "exit", "(", "0", ")", "else", ":", "sys", ".", "exit", "(", "0", ")" ]
Inspects differences between the last deployment and the current code state.
[ "Inspects", "differences", "between", "the", "last", "deployment", "and", "the", "current", "code", "state", "." ]
python
valid
37.206897
CalebBell/fluids
fluids/particle_size_distribution.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/particle_size_distribution.py#L1360-L1417
def dn(self, fraction, n=None): r'''Computes the diameter at which a specified `fraction` of the distribution falls under. Utilizes a bounded solver to search for the desired diameter. Parameters ---------- fraction : float Fraction of the distribution which should be under the calculated diameter, [-] n : int, optional None (for the `order` specified when the distribution was created), 0 (number), 1 (length), 2 (area), 3 (volume/mass), or any integer, [-] Returns ------- d : float Particle size diameter, [m] Examples -------- >>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3) >>> psd.dn(.5) 5e-06 >>> psd.dn(1) 0.00029474365335233776 >>> psd.dn(0) 0.0 ''' if fraction == 1.0: # Avoid returning the maximum value of the search interval fraction = 1.0 - epsilon if fraction < 0: raise ValueError('Fraction must be more than 0') elif fraction == 0: # pragma: no cover if self.truncated: return self.d_min return 0.0 # Solve to float prevision limit - works well, but is there a real # point when with mpmath it would never happen? # dist.cdf(dist.dn(0)-1e-35) == 0 # dist.cdf(dist.dn(0)-1e-36) == input # dn(0) == 1.9663615597466143e-20 # def err(d): # cdf = self.cdf(d, n=n) # if cdf == 0: # cdf = -1 # return cdf # return brenth(err, self.d_minimum, self.d_excessive, maxiter=1000, xtol=1E-200) elif fraction > 1: raise ValueError('Fraction less than 1') # As the dn may be incredibly small, it is required for the absolute # tolerance to not be happy - it needs to continue iterating as long # as necessary to pin down the answer return brenth(lambda d:self.cdf(d, n=n) -fraction, self.d_minimum, self.d_excessive, maxiter=1000, xtol=1E-200)
[ "def", "dn", "(", "self", ",", "fraction", ",", "n", "=", "None", ")", ":", "if", "fraction", "==", "1.0", ":", "# Avoid returning the maximum value of the search interval", "fraction", "=", "1.0", "-", "epsilon", "if", "fraction", "<", "0", ":", "raise", "ValueError", "(", "'Fraction must be more than 0'", ")", "elif", "fraction", "==", "0", ":", "# pragma: no cover", "if", "self", ".", "truncated", ":", "return", "self", ".", "d_min", "return", "0.0", "# Solve to float prevision limit - works well, but is there a real", "# point when with mpmath it would never happen?", "# dist.cdf(dist.dn(0)-1e-35) == 0", "# dist.cdf(dist.dn(0)-1e-36) == input", "# dn(0) == 1.9663615597466143e-20", "# def err(d): ", "# cdf = self.cdf(d, n=n)", "# if cdf == 0:", "# cdf = -1", "# return cdf", "# return brenth(err, self.d_minimum, self.d_excessive, maxiter=1000, xtol=1E-200)", "elif", "fraction", ">", "1", ":", "raise", "ValueError", "(", "'Fraction less than 1'", ")", "# As the dn may be incredibly small, it is required for the absolute ", "# tolerance to not be happy - it needs to continue iterating as long", "# as necessary to pin down the answer", "return", "brenth", "(", "lambda", "d", ":", "self", ".", "cdf", "(", "d", ",", "n", "=", "n", ")", "-", "fraction", ",", "self", ".", "d_minimum", ",", "self", ".", "d_excessive", ",", "maxiter", "=", "1000", ",", "xtol", "=", "1E-200", ")" ]
r'''Computes the diameter at which a specified `fraction` of the distribution falls under. Utilizes a bounded solver to search for the desired diameter. Parameters ---------- fraction : float Fraction of the distribution which should be under the calculated diameter, [-] n : int, optional None (for the `order` specified when the distribution was created), 0 (number), 1 (length), 2 (area), 3 (volume/mass), or any integer, [-] Returns ------- d : float Particle size diameter, [m] Examples -------- >>> psd = PSDLognormal(s=0.5, d_characteristic=5E-6, order=3) >>> psd.dn(.5) 5e-06 >>> psd.dn(1) 0.00029474365335233776 >>> psd.dn(0) 0.0
[ "r", "Computes", "the", "diameter", "at", "which", "a", "specified", "fraction", "of", "the", "distribution", "falls", "under", ".", "Utilizes", "a", "bounded", "solver", "to", "search", "for", "the", "desired", "diameter", "." ]
python
train
37.448276
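The bounded solve in dn is ordinary root-finding on cdf(d) - fraction; the same idea with plain scipy primitives and invented bracket bounds:

    from scipy.optimize import brenth
    from scipy.stats import lognorm

    dist = lognorm(s=0.5, scale=5e-6)          # lognormal akin to the docstring example
    fraction = 0.5
    d = brenth(lambda x: dist.cdf(x) - fraction, 1e-12, 1e-2)
    print(d)                                   # ~5e-06, the median of this distribution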
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxfile.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile.py#L256-L271
def _new(self, dx_hash, media_type=None, **kwargs): """ :param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes. :type dx_hash: dict :param media_type: Internet Media Type :type media_type: string Creates a new remote file with media type *media_type*, if given. """ if media_type is not None: dx_hash["media"] = media_type resp = dxpy.api.file_new(dx_hash, **kwargs) self.set_ids(resp["id"], dx_hash["project"])
[ "def", "_new", "(", "self", ",", "dx_hash", ",", "media_type", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "media_type", "is", "not", "None", ":", "dx_hash", "[", "\"media\"", "]", "=", "media_type", "resp", "=", "dxpy", ".", "api", ".", "file_new", "(", "dx_hash", ",", "*", "*", "kwargs", ")", "self", ".", "set_ids", "(", "resp", "[", "\"id\"", "]", ",", "dx_hash", "[", "\"project\"", "]", ")" ]
:param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes. :type dx_hash: dict :param media_type: Internet Media Type :type media_type: string Creates a new remote file with media type *media_type*, if given.
[ ":", "param", "dx_hash", ":", "Standard", "hash", "populated", "in", ":", "func", ":", "dxpy", ".", "bindings", ".", "DXDataObject", ".", "new", "()", "containing", "attributes", "common", "to", "all", "data", "object", "classes", ".", ":", "type", "dx_hash", ":", "dict", ":", "param", "media_type", ":", "Internet", "Media", "Type", ":", "type", "media_type", ":", "string" ]
python
train
36.0625
peri-source/peri
peri/util.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/util.py#L572-L585
def pad(self, pad): """ Pad this tile by an equal amount on each side as specified by pad >>> Tile(10).pad(2) Tile [-2, -2, -2] -> [12, 12, 12] ([14, 14, 14]) >>> Tile(10).pad([1,2,3]) Tile [-1, -2, -3] -> [11, 12, 13] ([12, 14, 16]) """ tile = self.copy() tile.l -= pad tile.r += pad return tile
[ "def", "pad", "(", "self", ",", "pad", ")", ":", "tile", "=", "self", ".", "copy", "(", ")", "tile", ".", "l", "-=", "pad", "tile", ".", "r", "+=", "pad", "return", "tile" ]
Pad this tile by an equal amount on each side as specified by pad >>> Tile(10).pad(2) Tile [-2, -2, -2] -> [12, 12, 12] ([14, 14, 14]) >>> Tile(10).pad([1,2,3]) Tile [-1, -2, -3] -> [11, 12, 13] ([12, 14, 16])
[ "Pad", "this", "tile", "by", "an", "equal", "amount", "on", "each", "side", "as", "specified", "by", "pad" ]
python
valid
26.642857
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_lldp_ext.py#L212-L228
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_port_description(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail") config = get_lldp_neighbor_detail output = ET.SubElement(get_lldp_neighbor_detail, "output") lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail") local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name") local_interface_name_key.text = kwargs.pop('local_interface_name') remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name") remote_interface_name_key.text = kwargs.pop('remote_interface_name') remote_port_description = ET.SubElement(lldp_neighbor_detail, "remote-port-description") remote_port_description.text = kwargs.pop('remote_port_description') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_port_description", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_lldp_neighbor_detail", "=", "ET", ".", "Element", "(", "\"get_lldp_neighbor_detail\"", ")", "config", "=", "get_lldp_neighbor_detail", "output", "=", "ET", ".", "SubElement", "(", "get_lldp_neighbor_detail", ",", "\"output\"", ")", "lldp_neighbor_detail", "=", "ET", ".", "SubElement", "(", "output", ",", "\"lldp-neighbor-detail\"", ")", "local_interface_name_key", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"local-interface-name\"", ")", "local_interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'local_interface_name'", ")", "remote_interface_name_key", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"remote-interface-name\"", ")", "remote_interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'remote_interface_name'", ")", "remote_port_description", "=", "ET", ".", "SubElement", "(", "lldp_neighbor_detail", ",", "\"remote-port-description\"", ")", "remote_port_description", ".", "text", "=", "kwargs", ".", "pop", "(", "'remote_port_description'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
60.647059
postlund/pyatv
pyatv/mrp/srp.py
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/mrp/srp.py#L129-L143
def verify2(self): """Last verification step. The derived keys (output, input) are returned here. """ output_key = hkdf_expand('MediaRemote-Salt', 'MediaRemote-Write-Encryption-Key', self._shared) input_key = hkdf_expand('MediaRemote-Salt', 'MediaRemote-Read-Encryption-Key', self._shared) log_binary(_LOGGER, 'Keys', Output=output_key, Input=input_key) return output_key, input_key
[ "def", "verify2", "(", "self", ")", ":", "output_key", "=", "hkdf_expand", "(", "'MediaRemote-Salt'", ",", "'MediaRemote-Write-Encryption-Key'", ",", "self", ".", "_shared", ")", "input_key", "=", "hkdf_expand", "(", "'MediaRemote-Salt'", ",", "'MediaRemote-Read-Encryption-Key'", ",", "self", ".", "_shared", ")", "log_binary", "(", "_LOGGER", ",", "'Keys'", ",", "Output", "=", "output_key", ",", "Input", "=", "input_key", ")", "return", "output_key", ",", "input_key" ]
Last verification step. The derived keys (output, input) are returned here.
[ "Last", "verification", "step", "." ]
python
train
37.133333
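hkdf_expand is not shown in this entry; one plausible shape, sketched with the cryptography package (the hash choice and 32-byte length are assumptions, not pyatv's confirmed parameters):

    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.hkdf import HKDF

    def hkdf_expand(salt, info, shared):
        # derive a key from the shared secret, binding it to the salt/info labels
        return HKDF(algorithm=hashes.SHA512(), length=32,
                    salt=salt.encode(), info=info.encode()).derive(shared)

    output_key = hkdf_expand('MediaRemote-Salt',
                             'MediaRemote-Write-Encryption-Key', b'\x00' * 32)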
dead-beef/markovchain
markovchain/text/scanner.py
https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/text/scanner.py#L166-L177
def save(self): """Convert to JSON. Returns ------- `dict` JSON data. """ data = super().save() data['end_chars'] = self.end_chars data['default_end'] = self.default_end return data
[ "def", "save", "(", "self", ")", ":", "data", "=", "super", "(", ")", ".", "save", "(", ")", "data", "[", "'end_chars'", "]", "=", "self", ".", "end_chars", "data", "[", "'default_end'", "]", "=", "self", ".", "default_end", "return", "data" ]
Convert to JSON. Returns ------- `dict` JSON data.
[ "Convert", "to", "JSON", "." ]
python
train
21.25
ioam/parambokeh
parambokeh/widgets.py
https://github.com/ioam/parambokeh/blob/fb9744f216273c7b24e65d037b1d621c08d7fde6/parambokeh/widgets.py#L16-L20
def TextWidget(*args, **kw): """Forces a parameter value to be text""" kw['value'] = str(kw['value']) kw.pop('options', None) return TextInput(*args,**kw)
[ "def", "TextWidget", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "kw", "[", "'value'", "]", "=", "str", "(", "kw", "[", "'value'", "]", ")", "kw", ".", "pop", "(", "'options'", ",", "None", ")", "return", "TextInput", "(", "*", "args", ",", "*", "*", "kw", ")" ]
Forces a parameter value to be text
[ "Forces", "a", "parameter", "value", "to", "be", "text" ]
python
test
33.2
tvgrabbers/DataTree
DataTreeGrab.py
https://github.com/tvgrabbers/DataTree/blob/f3ee73645147a5fd5158eb10877c7c3861a4d146/DataTreeGrab.py#L73-L130
def is_data_value(searchpath, searchtree, dtype = None, empty_is_false = False): """ Follow searchpath through the datatree in searchtree and report if there exists a value of type dtype searchpath is a list of keys/indices If dtype is None check for any value you can also supply a tuple to dtype """ if isinstance(searchpath, (str, unicode, int)): searchpath = [searchpath] if not isinstance(searchpath, (list, tuple)): return False for d in searchpath: if isinstance(searchtree, dict): if not d in searchtree.keys(): return False elif isinstance(searchtree, (list, tuple)): if (not isinstance(d, int) or (d >= 0 and d >= len(searchtree)) or (d < 0 and -d > len(searchtree))): return False else: return False searchtree = searchtree[d] if dtype == None and not (empty_is_false and searchtree == None): return True if empty_is_false and searchtree in (None, "", {}, []): return False if isinstance(dtype, tuple): dtype = list(dtype) elif not isinstance(dtype, list): dtype = [dtype] if float in dtype and not int in dtype: dtype.append(int) if str in dtype or unicode in dtype or 'string' in dtype: for dtp in (str, unicode, 'string'): while dtp in dtype: dtype.remove(dtp) dtype.extend([str, unicode]) if list in dtype or tuple in dtype or 'list' in dtype: for dtp in (list, tuple, 'list'): while dtp in dtype: dtype.remove(dtp) dtype.extend([list, tuple]) dtype = tuple(dtype) return bool(isinstance(searchtree, dtype))
[ "def", "is_data_value", "(", "searchpath", ",", "searchtree", ",", "dtype", "=", "None", ",", "empty_is_false", "=", "False", ")", ":", "if", "isinstance", "(", "searchpath", ",", "(", "str", ",", "unicode", ",", "int", ")", ")", ":", "searchpath", "=", "[", "searchpath", "]", "if", "not", "isinstance", "(", "searchpath", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "False", "for", "d", "in", "searchpath", ":", "if", "isinstance", "(", "searchtree", ",", "dict", ")", ":", "if", "not", "d", "in", "searchtree", ".", "keys", "(", ")", ":", "return", "False", "elif", "isinstance", "(", "searchtree", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "(", "not", "isinstance", "(", "d", ",", "int", ")", "or", "(", "d", ">=", "0", "and", "d", ">=", "len", "(", "searchtree", ")", ")", "or", "(", "d", "<", "0", "and", "-", "d", ">", "len", "(", "searchtree", ")", ")", ")", ":", "return", "False", "else", ":", "return", "False", "searchtree", "=", "searchtree", "[", "d", "]", "if", "dtype", "==", "None", "and", "not", "(", "empty_is_false", "and", "searchtree", "==", "None", ")", ":", "return", "True", "if", "empty_is_false", "and", "searchtree", "in", "(", "None", ",", "\"\"", ",", "{", "}", ",", "[", "]", ")", ":", "return", "False", "if", "isinstance", "(", "dtype", ",", "tuple", ")", ":", "dtype", "=", "list", "(", "dtype", ")", "elif", "not", "isinstance", "(", "dtype", ",", "list", ")", ":", "dtype", "=", "[", "dtype", "]", "if", "float", "in", "dtype", "and", "not", "int", "in", "dtype", ":", "dtype", ".", "append", "(", "int", ")", "if", "str", "in", "dtype", "or", "unicode", "in", "dtype", "or", "'string'", "in", "dtype", ":", "for", "dtp", "in", "(", "str", ",", "unicode", ",", "'string'", ")", ":", "while", "dtp", "in", "dtype", ":", "dtype", ".", "remove", "(", "dtp", ")", "dtype", ".", "extend", "(", "[", "str", ",", "unicode", "]", ")", "if", "list", "in", "dtype", "or", "tuple", "in", "dtype", "or", "'list'", "in", "dtype", ":", "for", "dtp", "in", "(", "list", ",", "tuple", ",", "'list'", ")", ":", "while", "dtp", "in", "dtype", ":", "dtype", ".", "remove", "(", "dtp", ")", "dtype", ".", "extend", "(", "[", "list", ",", "tuple", "]", ")", "dtype", "=", "tuple", "(", "dtype", ")", "return", "bool", "(", "isinstance", "(", "searchtree", ",", "dtype", ")", ")" ]
Follow searchpath through the datatree in searchtree and report if there exists a value of type dtype. searchpath is a list of keys/indices. If dtype is None, check for any value; you can also supply a tuple to dtype.
[ "Follow", "searchpath", "through", "the", "datatree", "in", "searchtree", "and", "report", "if", "there", "exists", "a", "value", "of", "type", "dtype", "searchpath", "is", "a", "list", "of", "keys", "/", "indices", "If", "dtype", "is", "None", "check", "for", "any", "value", "you", "can", "also", "supply", "a", "tuple", "to", "dtype" ]
python
train
29.310345
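A quick walk-through of is_data_value on a small tree (assumes the function is in scope, e.g. imported from DataTreeGrab, which also supplies the py2-style unicode alias it references):

    tree = {'channel': {'programs': [{'title': 'News', 'length': 30}]}}
    print(is_data_value(['channel', 'programs', 0, 'title'], tree, str))     # True
    print(is_data_value(['channel', 'programs', 0, 'length'], tree, float))  # True: int is accepted where float is asked
    print(is_data_value(['channel', 'programs', 1], tree))                   # False: index out of range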
marshmallow-code/marshmallow
src/marshmallow/schema.py
https://github.com/marshmallow-code/marshmallow/blob/a6b6c4151f1fbf16f3774d4052ca2bddf6903750/src/marshmallow/schema.py#L896-L913
def __apply_nested_option(self, option_name, field_names, set_operation): """Apply nested options to nested fields""" # Split nested field names on the first dot. nested_fields = [name.split('.', 1) for name in field_names if '.' in name] # Partition the nested field names by parent field. nested_options = defaultdict(list) for parent, nested_names in nested_fields: nested_options[parent].append(nested_names) # Apply the nested field options. for key, options in iter(nested_options.items()): new_options = self.set_class(options) original_options = getattr(self.declared_fields[key], option_name, ()) if original_options: if set_operation == 'union': new_options |= self.set_class(original_options) if set_operation == 'intersection': new_options &= self.set_class(original_options) setattr(self.declared_fields[key], option_name, new_options)
[ "def", "__apply_nested_option", "(", "self", ",", "option_name", ",", "field_names", ",", "set_operation", ")", ":", "# Split nested field names on the first dot.", "nested_fields", "=", "[", "name", ".", "split", "(", "'.'", ",", "1", ")", "for", "name", "in", "field_names", "if", "'.'", "in", "name", "]", "# Partition the nested field names by parent field.", "nested_options", "=", "defaultdict", "(", "list", ")", "for", "parent", ",", "nested_names", "in", "nested_fields", ":", "nested_options", "[", "parent", "]", ".", "append", "(", "nested_names", ")", "# Apply the nested field options.", "for", "key", ",", "options", "in", "iter", "(", "nested_options", ".", "items", "(", ")", ")", ":", "new_options", "=", "self", ".", "set_class", "(", "options", ")", "original_options", "=", "getattr", "(", "self", ".", "declared_fields", "[", "key", "]", ",", "option_name", ",", "(", ")", ")", "if", "original_options", ":", "if", "set_operation", "==", "'union'", ":", "new_options", "|=", "self", ".", "set_class", "(", "original_options", ")", "if", "set_operation", "==", "'intersection'", ":", "new_options", "&=", "self", ".", "set_class", "(", "original_options", ")", "setattr", "(", "self", ".", "declared_fields", "[", "key", "]", ",", "option_name", ",", "new_options", ")" ]
Apply nested options to nested fields
[ "Apply", "nested", "options", "to", "nested", "fields" ]
python
train
57.055556
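The dotted-name partitioning in __apply_nested_option is easy to check in isolation. A minimal standalone sketch (plain dicts, no marshmallow dependency; the field names are invented):

from collections import defaultdict

field_names = ['id', 'author.first', 'author.last', 'comments.body']

# Split nested field names on the first dot, keeping only dotted ones.
nested_fields = [name.split('.', 1) for name in field_names if '.' in name]

# Partition the nested names by their parent field.
nested_options = defaultdict(list)
for parent, nested_name in nested_fields:
    nested_options[parent].append(nested_name)

print(dict(nested_options))  # {'author': ['first', 'last'], 'comments': ['body']}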
rocky/python3-trepan
trepan/bwcli.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/bwcli.py#L73-L90
def _postprocess_options(dbg, opts): ''' Handle options (`opts') that feed into the debugger (`dbg')''' # Set dbg.settings['printset'] print_events = [] if opts.fntrace: print_events = ['c_call', 'c_return', 'call', 'return'] # if opts.linetrace: print_events += ['line'] if len(print_events): dbg.settings['printset'] = frozenset(print_events) pass for setting in ('basename', 'different',): dbg.settings[setting] = getattr(opts, setting) pass dbg.settings['highlight'] = 'plain' Mdebugger.debugger_obj = dbg return
[ "def", "_postprocess_options", "(", "dbg", ",", "opts", ")", ":", "# Set dbg.settings['printset']", "print_events", "=", "[", "]", "if", "opts", ".", "fntrace", ":", "print_events", "=", "[", "'c_call'", ",", "'c_return'", ",", "'call'", ",", "'return'", "]", "# if opts.linetrace: print_events += ['line']", "if", "len", "(", "print_events", ")", ":", "dbg", ".", "settings", "[", "'printset'", "]", "=", "frozenset", "(", "print_events", ")", "pass", "for", "setting", "in", "(", "'basename'", ",", "'different'", ",", ")", ":", "dbg", ".", "settings", "[", "setting", "]", "=", "getattr", "(", "opts", ",", "setting", ")", "pass", "dbg", ".", "settings", "[", "'highlight'", "]", "=", "'plain'", "Mdebugger", ".", "debugger_obj", "=", "dbg", "return" ]
Handle options (`opts') that feed into the debugger (`dbg')
[ "Handle", "options", "(", "opts", ")", "that", "feed", "into", "the", "debugger", "(", "dbg", ")" ]
python
test
32
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/build/build_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/build/build_client.py#L1645-L1664
def add_definition_tags(self, tags, project, definition_id): """AddDefinitionTags. [Preview API] Adds multiple tags to a definition. :param [str] tags: The tags to add. :param str project: Project ID or project name :param int definition_id: The ID of the definition. :rtype: [str] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if definition_id is not None: route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int') content = self._serialize.body(tags, '[str]') response = self._send(http_method='POST', location_id='cb894432-134a-4d31-a839-83beceaace4b', version='5.0-preview.2', route_values=route_values, content=content) return self._deserialize('[str]', self._unwrap_collection(response))
[ "def", "add_definition_tags", "(", "self", ",", "tags", ",", "project", ",", "definition_id", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "definition_id", "is", "not", "None", ":", "route_values", "[", "'definitionId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'definition_id'", ",", "definition_id", ",", "'int'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "tags", ",", "'[str]'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'POST'", ",", "location_id", "=", "'cb894432-134a-4d31-a839-83beceaace4b'", ",", "version", "=", "'5.0-preview.2'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'[str]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
AddDefinitionTags. [Preview API] Adds multiple tags to a definition. :param [str] tags: The tags to add. :param str project: Project ID or project name :param int definition_id: The ID of the definition. :rtype: [str]
[ "AddDefinitionTags", ".", "[", "Preview", "API", "]", "Adds", "multiple", "tags", "to", "a", "definition", ".", ":", "param", "[", "str", "]", "tags", ":", "The", "tags", "to", "add", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "int", "definition_id", ":", "The", "ID", "of", "the", "definition", ".", ":", "rtype", ":", "[", "str", "]" ]
python
train
51.35
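A hypothetical call sketch for add_definition_tags, following the usual azure-devops client setup; the organization URL, token, project name, and definition id are placeholders:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

# Placeholder credentials and organization.
connection = Connection(base_url='https://dev.azure.com/myorg',
                        creds=BasicAuthentication('', 'personal-access-token'))
build_client = connection.clients.get_build_client()

# Add two tags to build definition 42; returns the definition's full tag list.
all_tags = build_client.add_definition_tags(['nightly', 'release'],
                                            project='MyProject',
                                            definition_id=42)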
jreinhardt/handkerchief
handkerchief/handkerchief.py
https://github.com/jreinhardt/handkerchief/blob/450291314ccbbf557b41a30ce9c523587758fe76/handkerchief/handkerchief.py#L248-L286
def collect_reponames():
    """
    Try to figure out a list of repos to consider by default from the contents
    of the working directory.
    """
    reponames = []

    #try to figure out the repo from git repo in current directory
    try:
        with open(os.devnull) as devnull:
            remote_data = subprocess.check_output(["git","remote","-v","show"],stderr=devnull)
        branches = {}
        for line in remote_data.decode('utf-8').split("\n"):
            if line.strip() == "":
                continue
            remote_match = re_mote.match(line)
            if remote_match is not None:
                branches[remote_match.group(1)] = remote_match.group(5)
        if len(branches) > 0:
            if "origin" in branches:
                reponames.append(branches["origin"])
            else:
                #values() is a view in Python 3, so index via list()
                reponames.append(list(branches.values())[0])
    except OSError:
        pass
    except subprocess.CalledProcessError:
        pass

    #scan html files for further repos to consider
    for fname in glob.iglob("*.html"):
        #builtin open() takes encoding as a keyword, not a positional argument
        with open(fname, "r", encoding="utf8") as fid:
            #check the second line for the repo marker
            fid.readline()
            line = fid.readline()
        match = re.match(repo_marker_re,line)
        if match is not None:
            reponames.append(match.group(1))

    reponames = list(set(reponames))

    return reponames
[ "def", "collect_reponames", "(", ")", ":", "reponames", "=", "[", "]", "#try to figure out the repo from git repo in current directory", "try", ":", "with", "open", "(", "os", ".", "devnull", ")", "as", "devnull", ":", "remote_data", "=", "subprocess", ".", "check_output", "(", "[", "\"git\"", ",", "\"remote\"", ",", "\"-v\"", ",", "\"show\"", "]", ",", "stderr", "=", "devnull", ")", "branches", "=", "{", "}", "for", "line", "in", "remote_data", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "\"\\n\"", ")", ":", "if", "line", ".", "strip", "(", ")", "==", "\"\"", ":", "continue", "remote_match", "=", "re_mote", ".", "match", "(", "line", ")", "if", "not", "remote_match", "is", "None", ":", "branches", "[", "remote_match", ".", "group", "(", "1", ")", "]", "=", "remote_match", ".", "group", "(", "5", ")", "if", "len", "(", "branches", ")", ">", "0", ":", "if", "\"origin\"", "in", "branches", ":", "reponames", ".", "append", "(", "branches", "[", "\"origin\"", "]", ")", "else", ":", "reponames", ".", "append", "(", "branches", ".", "values", "(", ")", "[", "0", "]", ")", "except", "OSError", ":", "pass", "except", "subprocess", ".", "CalledProcessError", ":", "pass", "#scan html files for further repos to consider", "for", "fname", "in", "glob", ".", "iglob", "(", "\"*.html\"", ")", ":", "fid", "=", "open", "(", "fname", ",", "\"r\"", ",", "\"utf8\"", ")", "#check the second line for the repo marker", "fid", ".", "readline", "(", ")", "line", "=", "fid", ".", "readline", "(", ")", "match", "=", "re", ".", "match", "(", "repo_marker_re", ",", "line", ")", "if", "not", "match", "is", "None", ":", "reponames", ".", "append", "(", "match", ".", "group", "(", "1", ")", ")", "reponames", "=", "list", "(", "set", "(", "reponames", ")", ")", "return", "reponames" ]
Try to figure out a list of repos to consider by default from the contents of the working directory.
[ "Try", "to", "figure", "out", "a", "list", "of", "repos", "to", "consider", "by", "default", "from", "the", "contents", "of", "the", "working", "directory", "." ]
python
train
28.666667
mongodb/mongo-python-driver
pymongo/database.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/database.py#L1243-L1269
def remove_user(self, name, session=None): """**DEPRECATED**: Remove user `name` from this :class:`Database`. User `name` will no longer have permissions to access this :class:`Database`. .. note:: remove_user is deprecated and will be removed in PyMongo 4.0. Use the dropUser command instead:: db.command("dropUser", "user") :Parameters: - `name`: the name of the user to remove - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. .. versionchanged:: 3.6 Added ``session`` parameter. Deprecated remove_user. """ warnings.warn("remove_user is deprecated and will be removed in " "PyMongo 4.0. Use db.command with dropUser " "instead", DeprecationWarning, stacklevel=2) cmd = SON([("dropUser", name)]) # Don't send {} as writeConcern. if self.write_concern.acknowledged and self.write_concern.document: cmd["writeConcern"] = self.write_concern.document self.command(cmd, session=session)
[ "def", "remove_user", "(", "self", ",", "name", ",", "session", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"remove_user is deprecated and will be removed in \"", "\"PyMongo 4.0. Use db.command with dropUser \"", "\"instead\"", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "cmd", "=", "SON", "(", "[", "(", "\"dropUser\"", ",", "name", ")", "]", ")", "# Don't send {} as writeConcern.", "if", "self", ".", "write_concern", ".", "acknowledged", "and", "self", ".", "write_concern", ".", "document", ":", "cmd", "[", "\"writeConcern\"", "]", "=", "self", ".", "write_concern", ".", "document", "self", ".", "command", "(", "cmd", ",", "session", "=", "session", ")" ]
**DEPRECATED**: Remove user `name` from this :class:`Database`. User `name` will no longer have permissions to access this :class:`Database`. .. note:: remove_user is deprecated and will be removed in PyMongo 4.0. Use the dropUser command instead:: db.command("dropUser", "user") :Parameters: - `name`: the name of the user to remove - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. .. versionchanged:: 3.6 Added ``session`` parameter. Deprecated remove_user.
[ "**", "DEPRECATED", "**", ":", "Remove", "user", "name", "from", "this", ":", "class", ":", "Database", "." ]
python
train
40.888889
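The deprecation note names its own replacement; a short sketch of both forms (the connection string and user name are placeholders):

from pymongo import MongoClient

db = MongoClient('mongodb://localhost:27017')['mydb']

# Deprecated form -- emits a DeprecationWarning and is gone in PyMongo 4.0:
db.remove_user('app_user')

# Recommended form, straight from the docstring:
db.command('dropUser', 'app_user')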
michaelkuty/django-service-templates
django_service_templates/engine.py
https://github.com/michaelkuty/django-service-templates/blob/4f85bd812aeac8e01e1031f2118a68b344793118/django_service_templates/engine.py#L11-L25
def render(self, name=None, template=None, context={}):
        '''Render Template meta from jinja2 templates.
        '''

        if isinstance(template, Template):
            _template = template
        else:
            _template = Template.objects.get(name=name)

        # Maybe cache or save local ?
        response = self.env.from_string(
            _template.content).render(context)

        return response
[ "def", "render", "(", "self", ",", "name", "=", "None", ",", "template", "=", "None", ",", "context", "=", "{", "}", ")", ":", "if", "isinstance", "(", "template", ",", "Template", ")", ":", "_template", "=", "template", "else", ":", "_template", "=", "Template", ".", "objects", ".", "get", "(", "name", "=", "name", ")", "# Maybe cache or save local ?", "response", "=", "self", ".", "env", ".", "from_string", "(", "_template", ".", "content", ")", ".", "render", "(", "context", ")", "return", "response" ]
Render Template meta from jinja2 templates.
[ "Render", "Template", "meta", "from", "jinja2", "templates", "." ]
python
train
27.266667
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/mavplayback.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/tools/mavplayback.py#L150-L155
def slew(self, value): '''move to a given position in the file''' if float(value) != self.filepos: pos = float(value) * self.filesize self.mlog.f.seek(int(pos)) self.find_message()
[ "def", "slew", "(", "self", ",", "value", ")", ":", "if", "float", "(", "value", ")", "!=", "self", ".", "filepos", ":", "pos", "=", "float", "(", "value", ")", "*", "self", ".", "filesize", "self", ".", "mlog", ".", "f", ".", "seek", "(", "int", "(", "pos", ")", ")", "self", ".", "find_message", "(", ")" ]
move to a given position in the file
[ "move", "to", "a", "given", "position", "in", "the", "file" ]
python
train
37.833333
tjcsl/cslbot
cslbot/helpers/handler.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/handler.py#L318-L343
def do_mode(self, target, msg, nick, send): """reop and handle guard violations.""" mode_changes = irc.modes.parse_channel_modes(msg) with self.data_lock: for change in mode_changes: if change[1] == 'v': self.voiced[target][change[2]] = True if change[0] == '+' else False if change[1] == 'o': self.opers[target][change[2]] = True if change[0] == '+' else False # reop # FIXME: handle -o+o msbobBot msbobBot if [x for x in mode_changes if self.check_mode(x)]: send("%s: :(" % nick, target=target) # Assume bot admins know what they're doing. if not self.is_admin(None, nick): send("OP %s" % target, target='ChanServ') send("UNBAN %s" % target, target='ChanServ') if len(self.guarded) > 0: # if user is guarded and quieted, devoiced, or deopped, fix that regex = r"(.*(-v|-o|\+q|\+b)[^ ]*) (%s)" % "|".join(self.guarded) match = re.search(regex, msg) if match and nick not in [match.group(3), self.connection.real_nickname]: modestring = "+voe-qb %s" % (" ".join([match.group(3)] * 5)) self.connection.mode(target, modestring) send('Mode %s on %s by the guard system' % (modestring, target), target=self.config['core']['ctrlchan'])
[ "def", "do_mode", "(", "self", ",", "target", ",", "msg", ",", "nick", ",", "send", ")", ":", "mode_changes", "=", "irc", ".", "modes", ".", "parse_channel_modes", "(", "msg", ")", "with", "self", ".", "data_lock", ":", "for", "change", "in", "mode_changes", ":", "if", "change", "[", "1", "]", "==", "'v'", ":", "self", ".", "voiced", "[", "target", "]", "[", "change", "[", "2", "]", "]", "=", "True", "if", "change", "[", "0", "]", "==", "'+'", "else", "False", "if", "change", "[", "1", "]", "==", "'o'", ":", "self", ".", "opers", "[", "target", "]", "[", "change", "[", "2", "]", "]", "=", "True", "if", "change", "[", "0", "]", "==", "'+'", "else", "False", "# reop", "# FIXME: handle -o+o msbobBot msbobBot", "if", "[", "x", "for", "x", "in", "mode_changes", "if", "self", ".", "check_mode", "(", "x", ")", "]", ":", "send", "(", "\"%s: :(\"", "%", "nick", ",", "target", "=", "target", ")", "# Assume bot admins know what they're doing.", "if", "not", "self", ".", "is_admin", "(", "None", ",", "nick", ")", ":", "send", "(", "\"OP %s\"", "%", "target", ",", "target", "=", "'ChanServ'", ")", "send", "(", "\"UNBAN %s\"", "%", "target", ",", "target", "=", "'ChanServ'", ")", "if", "len", "(", "self", ".", "guarded", ")", ">", "0", ":", "# if user is guarded and quieted, devoiced, or deopped, fix that", "regex", "=", "r\"(.*(-v|-o|\\+q|\\+b)[^ ]*) (%s)\"", "%", "\"|\"", ".", "join", "(", "self", ".", "guarded", ")", "match", "=", "re", ".", "search", "(", "regex", ",", "msg", ")", "if", "match", "and", "nick", "not", "in", "[", "match", ".", "group", "(", "3", ")", ",", "self", ".", "connection", ".", "real_nickname", "]", ":", "modestring", "=", "\"+voe-qb %s\"", "%", "(", "\" \"", ".", "join", "(", "[", "match", ".", "group", "(", "3", ")", "]", "*", "5", ")", ")", "self", ".", "connection", ".", "mode", "(", "target", ",", "modestring", ")", "send", "(", "'Mode %s on %s by the guard system'", "%", "(", "modestring", ",", "target", ")", ",", "target", "=", "self", ".", "config", "[", "'core'", "]", "[", "'ctrlchan'", "]", ")" ]
reop and handle guard violations.
[ "reop", "and", "handle", "guard", "violations", "." ]
python
train
54.230769
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L4146-L4175
def randomWalkFunction(requestContext, name, step=60): """ Short Alias: randomWalk() Returns a random walk starting at 0. This is great for testing when there is no real data in whisper. Example:: &target=randomWalk("The.time.series") This would create a series named "The.time.series" that contains points where x(t) == x(t-1)+random()-0.5, and x(0) == 0. Accepts an optional second argument as step parameter (default step is 60 sec). """ delta = timedelta(seconds=step) when = requestContext["startTime"] values = [] current = 0 while when < requestContext["endTime"]: values.append(current) current += random.random() - 0.5 when += delta return [TimeSeries( name, int(epoch(requestContext["startTime"])), int(epoch(requestContext["endTime"])), step, values)]
[ "def", "randomWalkFunction", "(", "requestContext", ",", "name", ",", "step", "=", "60", ")", ":", "delta", "=", "timedelta", "(", "seconds", "=", "step", ")", "when", "=", "requestContext", "[", "\"startTime\"", "]", "values", "=", "[", "]", "current", "=", "0", "while", "when", "<", "requestContext", "[", "\"endTime\"", "]", ":", "values", ".", "append", "(", "current", ")", "current", "+=", "random", ".", "random", "(", ")", "-", "0.5", "when", "+=", "delta", "return", "[", "TimeSeries", "(", "name", ",", "int", "(", "epoch", "(", "requestContext", "[", "\"startTime\"", "]", ")", ")", ",", "int", "(", "epoch", "(", "requestContext", "[", "\"endTime\"", "]", ")", ")", ",", "step", ",", "values", ")", "]" ]
Short Alias: randomWalk() Returns a random walk starting at 0. This is great for testing when there is no real data in whisper. Example:: &target=randomWalk("The.time.series") This would create a series named "The.time.series" that contains points where x(t) == x(t-1)+random()-0.5, and x(0) == 0. Accepts an optional second argument as step parameter (default step is 60 sec).
[ "Short", "Alias", ":", "randomWalk", "()" ]
python
train
28.633333
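The recurrence in the docstring, x(t) == x(t-1) + random() - 0.5 with x(0) == 0, reproduced standalone over an invented ten-minute window at the default 60-second step:

import random

step, start, end = 60, 0, 600  # seconds; placeholder window
values, current, when = [], 0, start
while when < end:
    values.append(current)             # emit x(t)
    current += random.random() - 0.5   # x(t+1) = x(t) + random() - 0.5
    when += step

print(len(values), values[0])  # 10 points, starting at 0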
wtolson/gnsq
gnsq/nsqd.py
https://github.com/wtolson/gnsq/blob/0fd02578b2c9c5fa30626d78579db2a46c10edac/gnsq/nsqd.py#L600-L603
def unpause_topic(self, topic): """Resume message flow to channels of an existing, paused, topic.""" nsq.assert_valid_topic_name(topic) return self._request('POST', '/topic/unpause', fields={'topic': topic})
[ "def", "unpause_topic", "(", "self", ",", "topic", ")", ":", "nsq", ".", "assert_valid_topic_name", "(", "topic", ")", "return", "self", ".", "_request", "(", "'POST'", ",", "'/topic/unpause'", ",", "fields", "=", "{", "'topic'", ":", "topic", "}", ")" ]
Resume message flow to channels of an existing, paused, topic.
[ "Resume", "message", "flow", "to", "channels", "of", "an", "existing", "paused", "topic", "." ]
python
train
57
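A hypothetical usage sketch; the Nsqd client class, host, port, and topic name are assumptions inferred from the module path above, not verified API:

import gnsq

conn = gnsq.Nsqd('localhost', http_port=4151)  # placeholder nsqd address

conn.pause_topic('events')    # stop message flow to the topic's channels
conn.unpause_topic('events')  # resume it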
iotile/coretools
transport_plugins/jlink/iotile_transport_jlink/jlink.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/jlink/iotile_transport_jlink/jlink.py#L351-L374
def send_script_async(self, conn_id, data, progress_callback, callback):
        """Asynchronously send a script to this IOTile device

        Args:
            conn_id (int): A unique identifier that will refer to this connection
            data (string): the script to send to the device
            progress_callback (callable): A function to be called with status on our progress, called as:
                progress_callback(done_count, total_count)
            callback (callable): A callback for when we have finished sending the script. The callback will be called as:
                callback(connection_id, adapter_id, success, failure_reason)
                'connection_id': the connection id
                'adapter_id': this adapter's id
                'success': a bool indicating whether we received a response to our attempted RPC
                'failure_reason': a string with the reason for the failure if success == False
        """

        def _on_finished(_name, _retval, exception):
            if exception is not None:
                callback(conn_id, self.id, False, str(exception))
                return

            callback(conn_id, self.id, True, None)

        self._control_thread.command(JLinkControlThread.SEND_SCRIPT, _on_finished, self._device_info,
                                     self._control_info, data, progress_callback)
[ "def", "send_script_async", "(", "self", ",", "conn_id", ",", "data", ",", "progress_callback", ",", "callback", ")", ":", "def", "_on_finished", "(", "_name", ",", "_retval", ",", "exception", ")", ":", "if", "exception", "is", "not", "None", ":", "callback", "(", "conn_id", ",", "self", ".", "id", ",", "False", ",", "str", "(", "exception", ")", ")", "return", "callback", "(", "conn_id", ",", "self", ".", "id", ",", "True", ",", "None", ")", "self", ".", "_control_thread", ".", "command", "(", "JLinkControlThread", ".", "SEND_SCRIPT", ",", "_on_finished", ",", "self", ".", "_device_info", ",", "self", ".", "_control_info", ",", "data", ",", "progress_callback", ")" ]
Asynchronously send a script to this IOTile device

        Args:
            conn_id (int): A unique identifier that will refer to this connection
            data (string): the script to send to the device
            progress_callback (callable): A function to be called with status on our progress, called as:
                progress_callback(done_count, total_count)
            callback (callable): A callback for when we have finished sending the script. The callback will be called as:
                callback(connection_id, adapter_id, success, failure_reason)
                'connection_id': the connection id
                'adapter_id': this adapter's id
                'success': a bool indicating whether we received a response to our attempted RPC
                'failure_reason': a string with the reason for the failure if success == False
[ "Asynchronously", "send", "a", "a", "script", "to", "this", "IOTile", "device" ]
python
train
54.916667
roamanalytics/mittens
mittens/np_mittens.py
https://github.com/roamanalytics/mittens/blob/dbf0c3f8d18651475cf7e21ab1ceb824c5f89150/mittens/np_mittens.py#L136-L154
def _apply_updates(self, gradients):
        """Apply AdaGrad update to parameters.

        Parameters
        ----------
        gradients : dict
            Gradients keyed by parameter name: 'W', 'C', 'bw' and 'bc'.

        Returns
        -------
        None
            The parameters are updated in place.
        """
        if not hasattr(self, 'optimizers'):
            self.optimizers = \
                {obj: AdaGradOptimizer(self.learning_rate)
                 for obj in ['W', 'C', 'bw', 'bc']}
        self.W -= self.optimizers['W'].get_step(gradients['W'])
        self.C -= self.optimizers['C'].get_step(gradients['C'])
        self.bw -= self.optimizers['bw'].get_step(gradients['bw'])
        self.bc -= self.optimizers['bc'].get_step(gradients['bc'])
[ "def", "_apply_updates", "(", "self", ",", "gradients", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'optimizers'", ")", ":", "self", ".", "optimizers", "=", "{", "obj", ":", "AdaGradOptimizer", "(", "self", ".", "learning_rate", ")", "for", "obj", "in", "[", "'W'", ",", "'C'", ",", "'bw'", ",", "'bc'", "]", "}", "self", ".", "W", "-=", "self", ".", "optimizers", "[", "'W'", "]", ".", "get_step", "(", "gradients", "[", "'W'", "]", ")", "self", ".", "C", "-=", "self", ".", "optimizers", "[", "'C'", "]", ".", "get_step", "(", "gradients", "[", "'C'", "]", ")", "self", ".", "bw", "-=", "self", ".", "optimizers", "[", "'bw'", "]", ".", "get_step", "(", "gradients", "[", "'bw'", "]", ")", "self", ".", "bc", "-=", "self", ".", "optimizers", "[", "'bc'", "]", ".", "get_step", "(", "gradients", "[", "'bc'", "]", ")" ]
Apply AdaGrad update to parameters.

        Parameters
        ----------
        gradients : dict
            Gradients keyed by parameter name: 'W', 'C', 'bw' and 'bc'.

        Returns
        -------
        None
            The parameters are updated in place.
[ "Apply", "AdaGrad", "update", "to", "parameters", "." ]
python
train
32.473684
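For context, the step an optimizer like AdaGradOptimizer.get_step typically returns is the standard AdaGrad rule: scale the learning rate by the root of the accumulated squared gradients. A minimal NumPy sketch; the accumulator and epsilon are assumptions, not mittens internals:

import numpy as np

class AdaGrad:
    def __init__(self, learning_rate, eps=1e-8):
        self.learning_rate = learning_rate
        self.eps = eps
        self._sq_sum = 0.0  # running sum of squared gradients

    def get_step(self, grad):
        self._sq_sum = self._sq_sum + grad ** 2
        return self.learning_rate * grad / (np.sqrt(self._sq_sum) + self.eps)

opt = AdaGrad(0.05)
W = np.array([1.0, -2.0])
W -= opt.get_step(np.array([0.3, -0.1]))  # same update shape as _apply_updates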
ejeschke/ginga
ginga/ImageView.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L386-L389
def configure(self, width, height): """See :meth:`set_window_size`.""" self._imgwin_set = True self.set_window_size(width, height)
[ "def", "configure", "(", "self", ",", "width", ",", "height", ")", ":", "self", ".", "_imgwin_set", "=", "True", "self", ".", "set_window_size", "(", "width", ",", "height", ")" ]
See :meth:`set_window_size`.
[ "See", ":", "meth", ":", "set_window_size", "." ]
python
train
37.75
hvac/hvac
hvac/api/secrets_engines/identity.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/secrets_engines/identity.py#L309-L338
def merge_entities(self, from_entity_ids, to_entity_id, force=False, mount_point=DEFAULT_MOUNT_POINT):
        """Merge many entities into one entity.

        Supported methods:
            POST: /{mount_point}/entity/merge. Produces: 204 (empty body)

        :param from_entity_ids: Entity IDs which need to get merged.
        :type from_entity_ids: array
        :param to_entity_id: Entity ID into which all the other entities need to get merged.
        :type to_entity_id: str | unicode
        :param force: Setting this will follow the 'mine' strategy for merging MFA secrets. If there are secrets of the
            same type both in entities that are merged from and in entity into which all others are getting merged,
            secrets in the destination will be unaltered. If not set, this API will throw an error containing all the
            conflicts.
        :type force: bool
        :param mount_point: The "path" the method/backend was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
        """
        params = {
            'from_entity_ids': from_entity_ids,
            'to_entity_id': to_entity_id,
            'force': force,
        }
        api_path = '/v1/{mount_point}/entity/merge'.format(mount_point=mount_point)
        return self._adapter.post(
            url=api_path,
            json=params,
        )
[ "def", "merge_entities", "(", "self", ",", "from_entity_ids", ",", "to_entity_id", ",", "force", "=", "False", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'from_entity_ids'", ":", "from_entity_ids", ",", "'to_entity_id'", ":", "to_entity_id", ",", "'force'", ":", "force", ",", "}", "api_path", "=", "'/v1/{mount_point}/entity/merge'", ".", "format", "(", "mount_point", "=", "mount_point", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
Merge many entities into one entity.

        Supported methods:
            POST: /{mount_point}/entity/merge. Produces: 204 (empty body)

        :param from_entity_ids: Entity IDs which need to get merged.
        :type from_entity_ids: array
        :param to_entity_id: Entity ID into which all the other entities need to get merged.
        :type to_entity_id: str | unicode
        :param force: Setting this will follow the 'mine' strategy for merging MFA secrets. If there are secrets of the
            same type both in entities that are merged from and in entity into which all others are getting merged,
            secrets in the destination will be unaltered. If not set, this API will throw an error containing all the
            conflicts.
        :type force: bool
        :param mount_point: The "path" the method/backend was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
[ "Merge", "many", "entities", "into", "one", "entity", "." ]
python
train
46.766667
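A hypothetical call sketch for the merge, following hvac's usual client layout; the Vault address, token, and entity IDs are placeholders:

import hvac

client = hvac.Client(url='https://vault.example.com:8200', token='placeholder')

response = client.secrets.identity.merge_entities(
    from_entity_ids=['entity-a', 'entity-b'],
    to_entity_id='entity-canonical',
    force=False,  # surface MFA-secret conflicts instead of keeping 'mine'
)
print(response.status_code)  # 204 (empty body) per the docstring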
mongodb/mongo-python-driver
pymongo/bulk.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/bulk.py#L322-L350
def execute_command(self, generator, write_concern, session): """Execute using write commands. """ # nModified is only reported for write commands, not legacy ops. full_result = { "writeErrors": [], "writeConcernErrors": [], "nInserted": 0, "nUpserted": 0, "nMatched": 0, "nModified": 0, "nRemoved": 0, "upserted": [], } op_id = _randint() def retryable_bulk(session, sock_info, retryable): self._execute_command( generator, write_concern, session, sock_info, op_id, retryable, full_result) client = self.collection.database.client with client._tmp_session(session) as s: client._retry_with_session( self.is_retryable, retryable_bulk, s, self) if full_result["writeErrors"] or full_result["writeConcernErrors"]: _raise_bulk_write_error(full_result) return full_result
[ "def", "execute_command", "(", "self", ",", "generator", ",", "write_concern", ",", "session", ")", ":", "# nModified is only reported for write commands, not legacy ops.", "full_result", "=", "{", "\"writeErrors\"", ":", "[", "]", ",", "\"writeConcernErrors\"", ":", "[", "]", ",", "\"nInserted\"", ":", "0", ",", "\"nUpserted\"", ":", "0", ",", "\"nMatched\"", ":", "0", ",", "\"nModified\"", ":", "0", ",", "\"nRemoved\"", ":", "0", ",", "\"upserted\"", ":", "[", "]", ",", "}", "op_id", "=", "_randint", "(", ")", "def", "retryable_bulk", "(", "session", ",", "sock_info", ",", "retryable", ")", ":", "self", ".", "_execute_command", "(", "generator", ",", "write_concern", ",", "session", ",", "sock_info", ",", "op_id", ",", "retryable", ",", "full_result", ")", "client", "=", "self", ".", "collection", ".", "database", ".", "client", "with", "client", ".", "_tmp_session", "(", "session", ")", "as", "s", ":", "client", ".", "_retry_with_session", "(", "self", ".", "is_retryable", ",", "retryable_bulk", ",", "s", ",", "self", ")", "if", "full_result", "[", "\"writeErrors\"", "]", "or", "full_result", "[", "\"writeConcernErrors\"", "]", ":", "_raise_bulk_write_error", "(", "full_result", ")", "return", "full_result" ]
Execute using write commands.
[ "Execute", "using", "write", "commands", "." ]
python
train
34.827586
manns/pyspread
pyspread/src/gui/_grid.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L607-L617
def OnCellFontSize(self, event): """Cell font size event handler""" with undo.group(_("Font size")): self.grid.actions.set_attr("pointsize", event.size) self.grid.ForceRefresh() self.grid.update_attribute_toolbar() event.Skip()
[ "def", "OnCellFontSize", "(", "self", ",", "event", ")", ":", "with", "undo", ".", "group", "(", "_", "(", "\"Font size\"", ")", ")", ":", "self", ".", "grid", ".", "actions", ".", "set_attr", "(", "\"pointsize\"", ",", "event", ".", "size", ")", "self", ".", "grid", ".", "ForceRefresh", "(", ")", "self", ".", "grid", ".", "update_attribute_toolbar", "(", ")", "event", ".", "Skip", "(", ")" ]
Cell font size event handler
[ "Cell", "font", "size", "event", "handler" ]
python
train
24.818182
nicolargo/glances
glances/attribute.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/attribute.py#L133-L135
def history_json(self, nb=0): """Return the history in ISO JSON format""" return [(i[0].isoformat(), i[1]) for i in self._history[-nb:]]
[ "def", "history_json", "(", "self", ",", "nb", "=", "0", ")", ":", "return", "[", "(", "i", "[", "0", "]", ".", "isoformat", "(", ")", ",", "i", "[", "1", "]", ")", "for", "i", "in", "self", ".", "_history", "[", "-", "nb", ":", "]", "]" ]
Return the history in ISO JSON format
[ "Return", "the", "history", "in", "ISO", "JSON", "format" ]
python
train
50
starling-lab/rnlp
rnlp/parse.py
https://github.com/starling-lab/rnlp/blob/72054cc2c0cbaea1d281bf3d56b271d4da29fc4a/rnlp/parse.py#L38-L46
def _writeBlock(block, blockID): '''writes the block to a file with the id''' with open("blockIDs.txt", "a") as fp: fp.write("blockID: " + str(blockID) + "\n") sentences = "" for sentence in block: sentences += sentence+"," fp.write("block sentences: "+sentences[:-1]+"\n") fp.write("\n")
[ "def", "_writeBlock", "(", "block", ",", "blockID", ")", ":", "with", "open", "(", "\"blockIDs.txt\"", ",", "\"a\"", ")", "as", "fp", ":", "fp", ".", "write", "(", "\"blockID: \"", "+", "str", "(", "blockID", ")", "+", "\"\\n\"", ")", "sentences", "=", "\"\"", "for", "sentence", "in", "block", ":", "sentences", "+=", "sentence", "+", "\",\"", "fp", ".", "write", "(", "\"block sentences: \"", "+", "sentences", "[", ":", "-", "1", "]", "+", "\"\\n\"", ")", "fp", ".", "write", "(", "\"\\n\"", ")" ]
writes the block to a file with the id
[ "writes", "the", "block", "to", "a", "file", "with", "the", "id" ]
python
train
37.777778
reingart/gui2py
gui/component.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/component.py#L741-L761
def _calc_dimension(self, dim_val, dim_max, font_dim):
        "Calculate final pos and size (auto, absolute in pixels & relative)"
        if dim_val is None:
            return -1   # let wx automatic pos/size
        elif isinstance(dim_val, int):
            return dim_val  # use fixed pixel value (absolute)
        elif isinstance(dim_val, basestring):
            if dim_val.endswith("%"):
                # percentage, relative to parent max size:
                dim_val = int(dim_val[:-1])
                dim_val = dim_val / 100.0 * dim_max
            elif dim_val.endswith("em"):
                # use current font size (support fractions):
                dim_val = float(dim_val[:-2])
                dim_val = dim_val * font_dim
            elif dim_val.endswith("px"):
                # fixed pixels
                dim_val = dim_val[:-2]
            elif dim_val == "" or dim_val == "auto":
                dim_val = -1
        return int(dim_val)
[ "def", "_calc_dimension", "(", "self", ",", "dim_val", ",", "dim_max", ",", "font_dim", ")", ":", "if", "dim_val", "is", "None", ":", "return", "-", "1", "# let wx automatic pos/size\r", "elif", "isinstance", "(", "dim_val", ",", "int", ")", ":", "return", "dim_val", "# use fixed pixel value (absolute)\r", "elif", "isinstance", "(", "dim_val", ",", "basestring", ")", ":", "if", "dim_val", ".", "endswith", "(", "\"%\"", ")", ":", "# percentaje, relative to parent max size:\r", "dim_val", "=", "int", "(", "dim_val", "[", ":", "-", "1", "]", ")", "dim_val", "=", "dim_val", "/", "100.0", "*", "dim_max", "elif", "dim_val", ".", "endswith", "(", "\"em\"", ")", ":", "# use current font size (suport fractions):\r", "dim_val", "=", "float", "(", "dim_val", "[", ":", "-", "2", "]", ")", "dim_val", "=", "dim_val", "*", "font_dim", "elif", "dim_val", ".", "endswith", "(", "\"px\"", ")", ":", "# fixed pixels\r", "dim_val", "=", "dim_val", "[", ":", "-", "2", "]", "elif", "dim_val", "==", "\"\"", "or", "dim_val", "==", "\"auto\"", ":", "dim_val", "=", "-", "1", "return", "int", "(", "dim_val", ")" ]
Calculate final pos and size (auto, absolute in pixels & relative)
[ "Calculate", "final", "pos", "and", "size", "(", "auto", "absolute", "in", "pixels", "&", "relativa", ")" ]
python
test
46.142857
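Worked examples of the three conversion rules above, assuming a 400 px parent and a 12 px font (both invented):

dim_max, font_dim = 400, 12

# "50%"   -> int(50) / 100.0 * 400 = 200 px (relative to parent)
# "1.5em" -> float(1.5) * 12      = 18 px  (relative to font size)
# "120px" -> 120 px                        (absolute)
# "auto"  -> -1                            (let wx decide)
print(int(50 / 100.0 * dim_max), int(1.5 * font_dim))  # 200 18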
veltzer/pypitools
pypitools/common.py
https://github.com/veltzer/pypitools/blob/5f097be21e9bc65578eed5b6b7855c1945540701/pypitools/common.py#L207-L217
def upload(self): """ upload via the method configured :return: """ if self.upload_method == "setup": self.upload_by_setup() if self.upload_method == "twine": self.upload_by_twine() if self.upload_method == "gemfury": self.upload_by_gemfury()
[ "def", "upload", "(", "self", ")", ":", "if", "self", ".", "upload_method", "==", "\"setup\"", ":", "self", ".", "upload_by_setup", "(", ")", "if", "self", ".", "upload_method", "==", "\"twine\"", ":", "self", ".", "upload_by_twine", "(", ")", "if", "self", ".", "upload_method", "==", "\"gemfury\"", ":", "self", ".", "upload_by_gemfury", "(", ")" ]
upload via the method configured :return:
[ "upload", "via", "the", "method", "configured", ":", "return", ":" ]
python
train
29.454545
pypa/pipenv
pipenv/vendor/distlib/_backport/shutil.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/shutil.py#L300-L338
def move(src, dst): """Recursively move a file or directory to another location. This is similar to the Unix "mv" command. If the destination is a directory or a symlink to a directory, the source is moved inside the directory. The destination path must not already exist. If the destination already exists but is not a directory, it may be overwritten depending on os.rename() semantics. If the destination is on our current filesystem, then rename() is used. Otherwise, src is copied to the destination and then removed. A lot more could be done here... A look at a mv.c shows a lot of the issues this implementation glosses over. """ real_dst = dst if os.path.isdir(dst): if _samefile(src, dst): # We might be on a case insensitive filesystem, # perform the rename anyway. os.rename(src, dst) return real_dst = os.path.join(dst, _basename(src)) if os.path.exists(real_dst): raise Error("Destination path '%s' already exists" % real_dst) try: os.rename(src, real_dst) except OSError: if os.path.isdir(src): if _destinsrc(src, dst): raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) copytree(src, real_dst, symlinks=True) rmtree(src) else: copy2(src, real_dst) os.unlink(src)
[ "def", "move", "(", "src", ",", "dst", ")", ":", "real_dst", "=", "dst", "if", "os", ".", "path", ".", "isdir", "(", "dst", ")", ":", "if", "_samefile", "(", "src", ",", "dst", ")", ":", "# We might be on a case insensitive filesystem,", "# perform the rename anyway.", "os", ".", "rename", "(", "src", ",", "dst", ")", "return", "real_dst", "=", "os", ".", "path", ".", "join", "(", "dst", ",", "_basename", "(", "src", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "real_dst", ")", ":", "raise", "Error", "(", "\"Destination path '%s' already exists\"", "%", "real_dst", ")", "try", ":", "os", ".", "rename", "(", "src", ",", "real_dst", ")", "except", "OSError", ":", "if", "os", ".", "path", ".", "isdir", "(", "src", ")", ":", "if", "_destinsrc", "(", "src", ",", "dst", ")", ":", "raise", "Error", "(", "\"Cannot move a directory '%s' into itself '%s'.\"", "%", "(", "src", ",", "dst", ")", ")", "copytree", "(", "src", ",", "real_dst", ",", "symlinks", "=", "True", ")", "rmtree", "(", "src", ")", "else", ":", "copy2", "(", "src", ",", "real_dst", ")", "os", ".", "unlink", "(", "src", ")" ]
Recursively move a file or directory to another location. This is similar to the Unix "mv" command. If the destination is a directory or a symlink to a directory, the source is moved inside the directory. The destination path must not already exist. If the destination already exists but is not a directory, it may be overwritten depending on os.rename() semantics. If the destination is on our current filesystem, then rename() is used. Otherwise, src is copied to the destination and then removed. A lot more could be done here... A look at a mv.c shows a lot of the issues this implementation glosses over.
[ "Recursively", "move", "a", "file", "or", "directory", "to", "another", "location", ".", "This", "is", "similar", "to", "the", "Unix", "mv", "command", "." ]
python
train
36.333333
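The directory-destination semantics in miniature, using the stdlib shutil (the same behavior this backport mirrors); all paths are temporary:

import os, shutil, tempfile

base = tempfile.mkdtemp()
src = os.path.join(base, 'notes.txt')
dst_dir = os.path.join(base, 'archive')
open(src, 'w').close()
os.mkdir(dst_dir)

shutil.move(src, dst_dir)  # dst is a directory, so src lands inside it
print(os.path.exists(os.path.join(dst_dir, 'notes.txt')))  # True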
ArchiveTeam/wpull
wpull/warc/recorder.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/warc/recorder.py#L410-L433
def _write_cdx_header(self): '''Write the CDX header. It writes the fields: 1. a: original URL 2. b: UNIX timestamp 3. m: MIME Type from the HTTP Content-type 4. s: response code 5. k: new style checksum 6. S: raw file record size 7. V: offset in raw file 8. g: filename of raw file 9. u: record ID ''' with open(self._cdx_filename, mode='a', encoding='utf-8') as out_file: out_file.write(self.CDX_DELIMINATOR) out_file.write(self.CDX_DELIMINATOR.join(( 'CDX', 'a', 'b', 'm', 's', 'k', 'S', 'V', 'g', 'u' ))) out_file.write('\n')
[ "def", "_write_cdx_header", "(", "self", ")", ":", "with", "open", "(", "self", ".", "_cdx_filename", ",", "mode", "=", "'a'", ",", "encoding", "=", "'utf-8'", ")", "as", "out_file", ":", "out_file", ".", "write", "(", "self", ".", "CDX_DELIMINATOR", ")", "out_file", ".", "write", "(", "self", ".", "CDX_DELIMINATOR", ".", "join", "(", "(", "'CDX'", ",", "'a'", ",", "'b'", ",", "'m'", ",", "'s'", ",", "'k'", ",", "'S'", ",", "'V'", ",", "'g'", ",", "'u'", ")", ")", ")", "out_file", ".", "write", "(", "'\\n'", ")" ]
Write the CDX header. It writes the fields: 1. a: original URL 2. b: UNIX timestamp 3. m: MIME Type from the HTTP Content-type 4. s: response code 5. k: new style checksum 6. S: raw file record size 7. V: offset in raw file 8. g: filename of raw file 9. u: record ID
[ "Write", "the", "CDX", "header", "." ]
python
train
30.166667
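Assuming CDX_DELIMINATOR is the single space of classic CDX files (an assumption — the constant's value is not shown here), the header written above comes out as:

CDX_DELIMINATOR = ' '  # assumed value

fields = ('CDX', 'a', 'b', 'm', 's', 'k', 'S', 'V', 'g', 'u')
header = CDX_DELIMINATOR + CDX_DELIMINATOR.join(fields) + '\n'
print(repr(header))  # ' CDX a b m s k S V g u\n'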
robmarkcole/HASS-data-detective
detective/config.py
https://github.com/robmarkcole/HASS-data-detective/blob/f67dfde9dd63a3af411944d1857b0835632617c5/detective/config.py#L36-L49
def _secret_yaml(loader, node): """Load secrets and embed it into the configuration YAML.""" fname = os.path.join(os.path.dirname(loader.name), "secrets.yaml") try: with open(fname, encoding="utf-8") as secret_file: secrets = YAML(typ="safe").load(secret_file) except FileNotFoundError: raise ValueError("Secrets file {} not found".format(fname)) from None try: return secrets[node.value] except KeyError: raise ValueError("Secret {} not found".format(node.value)) from None
[ "def", "_secret_yaml", "(", "loader", ",", "node", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "loader", ".", "name", ")", ",", "\"secrets.yaml\"", ")", "try", ":", "with", "open", "(", "fname", ",", "encoding", "=", "\"utf-8\"", ")", "as", "secret_file", ":", "secrets", "=", "YAML", "(", "typ", "=", "\"safe\"", ")", ".", "load", "(", "secret_file", ")", "except", "FileNotFoundError", ":", "raise", "ValueError", "(", "\"Secrets file {} not found\"", ".", "format", "(", "fname", ")", ")", "from", "None", "try", ":", "return", "secrets", "[", "node", ".", "value", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Secret {} not found\"", ".", "format", "(", "node", ".", "value", ")", ")", "from", "None" ]
Load secrets and embed it into the configuration YAML.
[ "Load", "secrets", "and", "embed", "it", "into", "the", "configuration", "YAML", "." ]
python
train
37.928571
digmore/pypushed
pushed/pushed.py
https://github.com/digmore/pypushed/blob/4240fc27323b89d59f0c652dcea4b65f78437c5b/pushed/pushed.py#L37-L50
def push_channel(self, content, channel, content_url=None): '''Push a notification to a Pushed channel. Param: content -> content of Pushed notification message channel -> string identifying a Pushed channel content_url (optional) -> enrich message with URL Returns Shipment ID as string ''' parameters = { 'app_key': self.app_key, 'app_secret': self.app_secret, 'target_alias': channel } return self._push(content, 'channel', parameters, content_url)
[ "def", "push_channel", "(", "self", ",", "content", ",", "channel", ",", "content_url", "=", "None", ")", ":", "parameters", "=", "{", "'app_key'", ":", "self", ".", "app_key", ",", "'app_secret'", ":", "self", ".", "app_secret", ",", "'target_alias'", ":", "channel", "}", "return", "self", ".", "_push", "(", "content", ",", "'channel'", ",", "parameters", ",", "content_url", ")" ]
Push a notification to a Pushed channel. Param: content -> content of Pushed notification message channel -> string identifying a Pushed channel content_url (optional) -> enrich message with URL Returns Shipment ID as string
[ "Push", "a", "notification", "to", "a", "Pushed", "channel", "." ]
python
train
40.071429
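A hypothetical usage sketch; the Pushed client class and the credentials are placeholders inferred from the module above:

from pushed import Pushed

client = Pushed('my-app-key', 'my-app-secret')  # placeholder credentials

# Returns a Shipment ID string, per the docstring.
shipment_id = client.push_channel('Deploy finished', 'ops-alerts',
                                  content_url='https://example.com/status')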
hozn/coilmq
coilmq/store/sa/__init__.py
https://github.com/hozn/coilmq/blob/76b7fcf347144b3a5746423a228bed121dc564b5/coilmq/store/sa/__init__.py#L151-L167
def has_frames(self, destination):
        """
        Whether specified queue has any frames.

        @param destination: The queue name (destination).
        @type destination: C{str}

        @return: Whether there are any frames in the specified queue.
        @rtype: C{bool}
        """
        session = meta.Session()
        sel = select([model.frames_table.c.message_id]).where(
            model.frames_table.c.destination == destination)
        result = session.execute(sel)
        first = result.fetchone()
        return first is not None
[ "def", "has_frames", "(", "self", ",", "destination", ")", ":", "session", "=", "meta", ".", "Session", "(", ")", "sel", "=", "select", "(", "[", "model", ".", "frames_table", ".", "c", ".", "message_id", "]", ")", ".", "where", "(", "model", ".", "frames_table", ".", "c", ".", "destination", "==", "destination", ")", "result", "=", "session", ".", "execute", "(", "sel", ")", "first", "=", "result", ".", "fetchone", "(", ")", "return", "first", "is", "not", "None" ]
Whether specified queue has any frames.

        @param destination: The queue name (destination).
        @type destination: C{str}

        @return: Whether there are any frames in the specified queue.
        @rtype: C{bool}
[ "Whether", "specified", "queue", "has", "any", "frames", "." ]
python
train
32.235294
Nekroze/librarian
librarian/deck.py
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/deck.py#L56-L62
def move_top_cards(self, other, number=1):
        """
        Move the top `number` of cards to the top of some `other` deck.

        By default only one card will be moved if `number` is not specified.
        """
        # extend() keeps individual cards (append() would nest the iterator),
        # and deleting the slice removes the moved cards from this deck
        other.cards.extend(reversed(self.cards[-number:]))
        del self.cards[-number:]
[ "def", "move_top_cards", "(", "self", ",", "other", ",", "number", "=", "1", ")", ":", "other", ".", "cards", ".", "append", "(", "reversed", "(", "self", ".", "cards", "[", "-", "number", ":", "]", ")", ")" ]
Move the top `number` of cards to the top of some `other` deck. By default only one card will be moved if `number` is not specified.
[ "Move", "the", "top", "number", "of", "cards", "to", "the", "top", "of", "some", "other", "deck", "." ]
python
train
38.428571
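A quick check of the list mechanics behind the fix above — extend with the reversed top slice, then delete that slice from the source:

deck, pile = [1, 2, 3, 4, 5], []  # top of the deck is the end of the list

number = 2
pile.extend(reversed(deck[-number:]))  # top card first: [5, 4]
del deck[-number:]

print(deck, pile)  # [1, 2, 3] [5, 4]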
ejeschke/ginga
ginga/ImageView.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/ImageView.py#L3447-L3495
def set_onscreen_message(self, text, redraw=True): """Called by a subclass to update the onscreen message. Parameters ---------- text : str The text to show in the display. """ width, height = self.get_window_size() font = self.t_.get('onscreen_font', 'sans serif') font_size = self.t_.get('onscreen_font_size', None) if font_size is None: font_size = self._calc_font_size(width) # TODO: need some way to accurately estimate text extents # without actually putting text on the canvas ht, wd = font_size, font_size if text is not None: wd = len(text) * font_size * 1.1 x = (width // 2) - (wd // 2) y = ((height // 3) * 2) - (ht // 2) tag = '_$onscreen_msg' canvas = self.get_private_canvas() try: message = canvas.get_object_by_tag(tag) if text is None: message.text = '' else: message.x = x message.y = y message.text = text message.fontsize = font_size except KeyError: if text is None: text = '' Text = canvas.get_draw_class('text') canvas.add(Text(x, y, text=text, font=font, fontsize=font_size, color=self.img_fg, coord='window'), tag=tag, redraw=False) if redraw: canvas.update_canvas(whence=3)
[ "def", "set_onscreen_message", "(", "self", ",", "text", ",", "redraw", "=", "True", ")", ":", "width", ",", "height", "=", "self", ".", "get_window_size", "(", ")", "font", "=", "self", ".", "t_", ".", "get", "(", "'onscreen_font'", ",", "'sans serif'", ")", "font_size", "=", "self", ".", "t_", ".", "get", "(", "'onscreen_font_size'", ",", "None", ")", "if", "font_size", "is", "None", ":", "font_size", "=", "self", ".", "_calc_font_size", "(", "width", ")", "# TODO: need some way to accurately estimate text extents", "# without actually putting text on the canvas", "ht", ",", "wd", "=", "font_size", ",", "font_size", "if", "text", "is", "not", "None", ":", "wd", "=", "len", "(", "text", ")", "*", "font_size", "*", "1.1", "x", "=", "(", "width", "//", "2", ")", "-", "(", "wd", "//", "2", ")", "y", "=", "(", "(", "height", "//", "3", ")", "*", "2", ")", "-", "(", "ht", "//", "2", ")", "tag", "=", "'_$onscreen_msg'", "canvas", "=", "self", ".", "get_private_canvas", "(", ")", "try", ":", "message", "=", "canvas", ".", "get_object_by_tag", "(", "tag", ")", "if", "text", "is", "None", ":", "message", ".", "text", "=", "''", "else", ":", "message", ".", "x", "=", "x", "message", ".", "y", "=", "y", "message", ".", "text", "=", "text", "message", ".", "fontsize", "=", "font_size", "except", "KeyError", ":", "if", "text", "is", "None", ":", "text", "=", "''", "Text", "=", "canvas", ".", "get_draw_class", "(", "'text'", ")", "canvas", ".", "add", "(", "Text", "(", "x", ",", "y", ",", "text", "=", "text", ",", "font", "=", "font", ",", "fontsize", "=", "font_size", ",", "color", "=", "self", ".", "img_fg", ",", "coord", "=", "'window'", ")", ",", "tag", "=", "tag", ",", "redraw", "=", "False", ")", "if", "redraw", ":", "canvas", ".", "update_canvas", "(", "whence", "=", "3", ")" ]
Called by a subclass to update the onscreen message. Parameters ---------- text : str The text to show in the display.
[ "Called", "by", "a", "subclass", "to", "update", "the", "onscreen", "message", "." ]
python
train
30.918367
adamhadani/python-yelp
yelp/api.py
https://github.com/adamhadani/python-yelp/blob/7694ccb7274cc3c5783250ed0c3396cda2fcfa1a/yelp/api.py#L233-L243
def by_geopoint(self, lat, long): """ Perform a Yelp Neighborhood API Search based on a geopoint. Args: lat - geopoint latitude long - geopoint longitude """ header, content = self._http_request(self.BASE_URL, lat=lat, long=long) return json.loads(content)
[ "def", "by_geopoint", "(", "self", ",", "lat", ",", "long", ")", ":", "header", ",", "content", "=", "self", ".", "_http_request", "(", "self", ".", "BASE_URL", ",", "lat", "=", "lat", ",", "long", "=", "long", ")", "return", "json", ".", "loads", "(", "content", ")" ]
Perform a Yelp Neighborhood API Search based on a geopoint. Args: lat - geopoint latitude long - geopoint longitude
[ "Perform", "a", "Yelp", "Neighborhood", "API", "Search", "based", "on", "a", "geopoint", "." ]
python
train
30
python-bugzilla/python-bugzilla
bugzilla/base.py
https://github.com/python-bugzilla/python-bugzilla/blob/7de8b225104f24a1eee3e837bf1e02d60aefe69f/bugzilla/base.py#L581-L621
def login(self, user=None, password=None, restrict_login=None): """ Attempt to log in using the given username and password. Subsequent method calls will use this username and password. Returns False if login fails, otherwise returns some kind of login info - typically either a numeric userid, or a dict of user info. If user is not set, the value of Bugzilla.user will be used. If *that* is not set, ValueError will be raised. If login fails, BugzillaError will be raised. The login session can be restricted to current user IP address with restrict_login argument. (Bugzilla 4.4+) This method will be called implicitly at the end of connect() if user and password are both set. So under most circumstances you won't need to call this yourself. """ if self.api_key: raise ValueError("cannot login when using an API key") if user: self.user = user if password: self.password = password if not self.user: raise ValueError("missing username") if not self.password: raise ValueError("missing password") if restrict_login: log.info("logging in with restrict_login=True") try: ret = self._login(self.user, self.password, restrict_login) self.password = '' log.info("login successful for user=%s", self.user) return ret except Fault as e: raise BugzillaError("Login failed: %s" % str(e.faultString))
[ "def", "login", "(", "self", ",", "user", "=", "None", ",", "password", "=", "None", ",", "restrict_login", "=", "None", ")", ":", "if", "self", ".", "api_key", ":", "raise", "ValueError", "(", "\"cannot login when using an API key\"", ")", "if", "user", ":", "self", ".", "user", "=", "user", "if", "password", ":", "self", ".", "password", "=", "password", "if", "not", "self", ".", "user", ":", "raise", "ValueError", "(", "\"missing username\"", ")", "if", "not", "self", ".", "password", ":", "raise", "ValueError", "(", "\"missing password\"", ")", "if", "restrict_login", ":", "log", ".", "info", "(", "\"logging in with restrict_login=True\"", ")", "try", ":", "ret", "=", "self", ".", "_login", "(", "self", ".", "user", ",", "self", ".", "password", ",", "restrict_login", ")", "self", ".", "password", "=", "''", "log", ".", "info", "(", "\"login successful for user=%s\"", ",", "self", ".", "user", ")", "return", "ret", "except", "Fault", "as", "e", ":", "raise", "BugzillaError", "(", "\"Login failed: %s\"", "%", "str", "(", "e", ".", "faultString", ")", ")" ]
Attempt to log in using the given username and password. Subsequent method calls will use this username and password. Returns False if login fails, otherwise returns some kind of login info - typically either a numeric userid, or a dict of user info. If user is not set, the value of Bugzilla.user will be used. If *that* is not set, ValueError will be raised. If login fails, BugzillaError will be raised. The login session can be restricted to current user IP address with restrict_login argument. (Bugzilla 4.4+) This method will be called implicitly at the end of connect() if user and password are both set. So under most circumstances you won't need to call this yourself.
[ "Attempt", "to", "log", "in", "using", "the", "given", "username", "and", "password", ".", "Subsequent", "method", "calls", "will", "use", "this", "username", "and", "password", ".", "Returns", "False", "if", "login", "fails", "otherwise", "returns", "some", "kind", "of", "login", "info", "-", "typically", "either", "a", "numeric", "userid", "or", "a", "dict", "of", "user", "info", "." ]
python
train
38.268293
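A hypothetical login sketch; the URL and credentials are placeholders, and Bugzilla is python-bugzilla's usual entry point:

from bugzilla import Bugzilla

bzapi = Bugzilla('https://bugzilla.example.com/xmlrpc.cgi')

# Explicit login; raises BugzillaError on failure, per the docstring.
login_info = bzapi.login(user='me@example.com', password='placeholder',
                         restrict_login=True)  # tie the session to this IP (4.4+)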
dasevilla/rovi-python
roviclient/base.py
https://github.com/dasevilla/rovi-python/blob/46039d6ebfcf2ff20b4edb4636cb972682cf6af4/roviclient/base.py#L52-L66
def make_request(self, resource, params=None): """ Performs the API request. Most methods are a wrapper around this one. """ if params is None: params = {} url = self.request_url(resource) params['format'] = 'json' r = self.session.get(url=url, params=params) r.raise_for_status() return r
[ "def", "make_request", "(", "self", ",", "resource", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "url", "=", "self", ".", "request_url", "(", "resource", ")", "params", "[", "'format'", "]", "=", "'json'", "r", "=", "self", ".", "session", ".", "get", "(", "url", "=", "url", ",", "params", "=", "params", ")", "r", ".", "raise_for_status", "(", ")", "return", "r" ]
Performs the API request. Most methods are a wrapper around this one.
[ "Performs", "the", "API", "request", ".", "Most", "methods", "are", "a", "wrapper", "around", "this", "one", "." ]
python
train
24.2
Cairnarvon/uptime
src/__init__.py
https://github.com/Cairnarvon/uptime/blob/1ddfd06bb300c00e6dc4bd2a9ddf9bf1aa27b1bb/src/__init__.py#L351-L365
def boottime(): """Returns boot time if remotely possible, or None if not.""" global __boottime if __boottime is None: up = uptime() if up is None: return None if __boottime is None: _boottime_linux() if datetime is None: raise RuntimeError('datetime module required.') return datetime.fromtimestamp(__boottime or time.time() - up)
[ "def", "boottime", "(", ")", ":", "global", "__boottime", "if", "__boottime", "is", "None", ":", "up", "=", "uptime", "(", ")", "if", "up", "is", "None", ":", "return", "None", "if", "__boottime", "is", "None", ":", "_boottime_linux", "(", ")", "if", "datetime", "is", "None", ":", "raise", "RuntimeError", "(", "'datetime module required.'", ")", "return", "datetime", ".", "fromtimestamp", "(", "__boottime", "or", "time", ".", "time", "(", ")", "-", "up", ")" ]
Returns boot time if remotely possible, or None if not.
[ "Returns", "boot", "time", "if", "remotely", "possible", "or", "None", "if", "not", "." ]
python
valid
25.866667
dswah/pyGAM
pygam/core.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/core.py#L156-L179
def set_params(self, deep=False, force=False, **parameters):
        """
        sets an object's parameters

        Parameters
        ----------
        deep : boolean, default: False
            when True, also sets non-user-facing parameters

        force : boolean, default: False
            when True, also sets parameters that the object does not already
            have

        **parameters : parameters to set

        Returns
        -------
        self
        """
        param_names = self.get_params(deep=deep).keys()
        for parameter, value in parameters.items():
            if (parameter in param_names
                or force
                or (hasattr(self, parameter)
                    and parameter == parameter.strip('_'))):
                setattr(self, parameter, value)
        return self
[ "def", "set_params", "(", "self", ",", "deep", "=", "False", ",", "force", "=", "False", ",", "*", "*", "parameters", ")", ":", "param_names", "=", "self", ".", "get_params", "(", "deep", "=", "deep", ")", ".", "keys", "(", ")", "for", "parameter", ",", "value", "in", "parameters", ".", "items", "(", ")", ":", "if", "(", "parameter", "in", "param_names", "or", "force", "or", "(", "hasattr", "(", "self", ",", "parameter", ")", "and", "parameter", "==", "parameter", ".", "strip", "(", "'_'", ")", ")", ")", ":", "setattr", "(", "self", ",", "parameter", ",", "value", ")", "return", "self" ]
sets an object's parameters

        Parameters
        ----------
        deep : boolean, default: False
            when True, also sets non-user-facing parameters

        force : boolean, default: False
            when True, also sets parameters that the object does not already
            have

        **parameters : parameters to set

        Returns
        -------
        self
[ "sets", "an", "object", "s", "paramters" ]
python
train
32.5
tweepy/tweepy
tweepy/streaming.py
https://github.com/tweepy/tweepy/blob/cc3894073905811c4d9fd816202f93454ed932da/tweepy/streaming.py#L167-L182
def read_line(self, sep=six.b('\n')):
        """Read the data stream until a given separator is found (default \n)

        :param sep: Separator to read until. Must be of the bytes type
            (str in python 2, bytes in python 3)
        :return: The str of the data read until sep
        """
        start = 0
        while not self._stream.closed:
            loc = self._buffer.find(sep, start)
            if loc >= 0:
                return self._pop(loc + len(sep))
            else:
                start = len(self._buffer)
            self._buffer += self._stream.read(self._chunk_size)
        return six.b('')
[ "def", "read_line", "(", "self", ",", "sep", "=", "six", ".", "b", "(", "'\\n'", ")", ")", ":", "start", "=", "0", "while", "not", "self", ".", "_stream", ".", "closed", ":", "loc", "=", "self", ".", "_buffer", ".", "find", "(", "sep", ",", "start", ")", "if", "loc", ">=", "0", ":", "return", "self", ".", "_pop", "(", "loc", "+", "len", "(", "sep", ")", ")", "else", ":", "start", "=", "len", "(", "self", ".", "_buffer", ")", "self", ".", "_buffer", "+=", "self", ".", "_stream", ".", "read", "(", "self", ".", "_chunk_size", ")", "return", "six", ".", "b", "(", "''", ")" ]
Read the data stream until a given separator is found (default \n) :param sep: Separator to read until. Must be of the bytes type (str in python 2, bytes in python 3) :return: The str of the data read until sep
[ "Read", "the", "data", "stream", "until", "a", "given", "separator", "is", "found", "(", "default", "\\", "n", ")" ]
python
train
38.3125
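Editor's note — a self-contained sketch of the buffered read-until-separator pattern used by read_line(); io.BytesIO stands in for tweepy's HTTP stream, and an empty chunk stands in for the stream-closed check.
import io

class LineReader(object):
    def __init__(self, stream, chunk_size=16):
        self._stream = stream
        self._chunk_size = chunk_size
        self._buffer = b''

    def _pop(self, length):
        data, self._buffer = self._buffer[:length], self._buffer[length:]
        return data

    def read_line(self, sep=b'\n'):
        start = 0
        while True:
            loc = self._buffer.find(sep, start)
            if loc >= 0:
                return self._pop(loc + len(sep))
            start = len(self._buffer)
            chunk = self._stream.read(self._chunk_size)
            if not chunk:  # stream exhausted (stands in for .closed)
                return b''
            self._buffer += chunk

reader = LineReader(io.BytesIO(b'first\nsecond\n'))
print(reader.read_line())   # b'first\n'
print(reader.read_line())   # b'second\n'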
MacHu-GWU/uszipcode-project
uszipcode/search.py
https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L804-L819
def by_median_household_income(self, lower=-1, upper=2 ** 31, zipcode_type=ZipcodeType.Standard, sort_by=SimpleZipcode.median_household_income.name, ascending=False, returns=DEFAULT_LIMIT): """ Search zipcode information by median household income. """ return self.query( median_household_income_lower=lower, median_household_income_upper=upper, sort_by=sort_by, zipcode_type=zipcode_type, ascending=ascending, returns=returns, )
[ "def", "by_median_household_income", "(", "self", ",", "lower", "=", "-", "1", ",", "upper", "=", "2", "**", "31", ",", "zipcode_type", "=", "ZipcodeType", ".", "Standard", ",", "sort_by", "=", "SimpleZipcode", ".", "median_household_income", ".", "name", ",", "ascending", "=", "False", ",", "returns", "=", "DEFAULT_LIMIT", ")", ":", "return", "self", ".", "query", "(", "median_household_income_lower", "=", "lower", ",", "median_household_income_upper", "=", "upper", ",", "sort_by", "=", "sort_by", ",", "zipcode_type", "=", "zipcode_type", ",", "ascending", "=", "ascending", ",", "returns", "=", "returns", ",", ")" ]
Search zipcode information by median household income.
[ "Search", "zipcode", "information", "by", "median", "household", "income", "." ]
python
train
44.5
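Editor's note — a hedged usage sketch for the record above; it requires the uszipcode package and its bundled database, and the exact results depend on the installed data.
from uszipcode import SearchEngine

search = SearchEngine()
# Zipcodes whose median household income falls in [60000, 100000],
# highest first, capped at 5 results.
for z in search.by_median_household_income(lower=60000, upper=100000,
                                           returns=5):
    print(z.zipcode, z.median_household_income)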
PmagPy/PmagPy
dialogs/drop_down_menus3.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/drop_down_menus3.py#L284-L397
def on_left_click(self, event, grid, choices): """ creates popup menu when user clicks on the column if that column is in the list of choices that get a drop-down menu. allows user to edit the column, but only from available values """ row, col = event.GetRow(), event.GetCol() if col == 0 and self.grid.name != 'ages': default_val = self.grid.GetCellValue(row, col) msg = "Choose a new name for {}.\nThe new value will propagate throughout the contribution.".format(default_val) dia = wx.TextEntryDialog(self.grid, msg, "Rename {}".format(self.grid.name, default_val), default_val) res = dia.ShowModal() if res == wx.ID_OK: new_val = dia.GetValue() # update the contribution with new name self.contribution.rename_item(self.grid.name, default_val, new_val) # don't propagate changes if we are just assigning a new name # and not really renaming # (i.e., if a blank row was added then named) if default_val == '': self.grid.SetCellValue(row, 0, new_val) return # update the current grid with new name for row in range(self.grid.GetNumberRows()): cell_value = self.grid.GetCellValue(row, 0) if cell_value == default_val: self.grid.SetCellValue(row, 0, new_val) else: continue return color = self.grid.GetCellBackgroundColour(event.GetRow(), event.GetCol()) # allow user to cherry-pick cells for editing. # gets selection of meta key for mac, ctrl key for pc if event.ControlDown() or event.MetaDown(): row, col = event.GetRow(), event.GetCol() if (row, col) not in self.dispersed_selection: self.dispersed_selection.append((row, col)) self.grid.SetCellBackgroundColour(row, col, 'light blue') else: self.dispersed_selection.remove((row, col)) self.grid.SetCellBackgroundColour(row, col, color)# 'white' self.grid.ForceRefresh() return if event.ShiftDown(): # allow user to highlight multiple consecutive cells in a column previous_col = self.grid.GetGridCursorCol() previous_row = self.grid.GetGridCursorRow() col = event.GetCol() row = event.GetRow() if col != previous_col: return else: if row > previous_row: row_range = list(range(previous_row, row+1)) else: row_range = list(range(row, previous_row+1)) for r in row_range: self.grid.SetCellBackgroundColour(r, col, 'light blue') self.selection.append((r, col)) self.grid.ForceRefresh() return selection = False if self.dispersed_selection: is_dispersed = True selection = self.dispersed_selection if self.selection: is_dispersed = False selection = self.selection try: col = event.GetCol() row = event.GetRow() except AttributeError: row, col = selection[0][0], selection[0][1] self.grid.SetGridCursor(row, col) if col in list(choices.keys()): # column should have a pop-up menu menu = wx.Menu() two_tiered = choices[col][1] choices = choices[col][0] if not two_tiered: # menu is one tiered if 'CLEAR cell of all values' not in choices: choices.insert(0, 'CLEAR cell of all values') for choice in choices: if not choice: choice = " " # prevents error if choice is an empty string menuitem = menu.Append(wx.ID_ANY, str(choice)) self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem) self.show_menu(event, menu) else: # menu is two_tiered clear = menu.Append(-1, 'CLEAR cell of all values') self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), clear) for choice in sorted(choices.items()): submenu = wx.Menu() for item in choice[1]: menuitem = submenu.Append(-1, str(item)) self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem) 
menu.Append(-1, choice[0], submenu) self.show_menu(event, menu) if selection: # re-whiten the cells that were previously highlighted for row, col in selection: self.grid.SetCellBackgroundColour(row, col, self.col_color) self.dispersed_selection = [] self.selection = [] self.grid.ForceRefresh()
[ "def", "on_left_click", "(", "self", ",", "event", ",", "grid", ",", "choices", ")", ":", "row", ",", "col", "=", "event", ".", "GetRow", "(", ")", ",", "event", ".", "GetCol", "(", ")", "if", "col", "==", "0", "and", "self", ".", "grid", ".", "name", "!=", "'ages'", ":", "default_val", "=", "self", ".", "grid", ".", "GetCellValue", "(", "row", ",", "col", ")", "msg", "=", "\"Choose a new name for {}.\\nThe new value will propagate throughout the contribution.\"", ".", "format", "(", "default_val", ")", "dia", "=", "wx", ".", "TextEntryDialog", "(", "self", ".", "grid", ",", "msg", ",", "\"Rename {}\"", ".", "format", "(", "self", ".", "grid", ".", "name", ",", "default_val", ")", ",", "default_val", ")", "res", "=", "dia", ".", "ShowModal", "(", ")", "if", "res", "==", "wx", ".", "ID_OK", ":", "new_val", "=", "dia", ".", "GetValue", "(", ")", "# update the contribution with new name", "self", ".", "contribution", ".", "rename_item", "(", "self", ".", "grid", ".", "name", ",", "default_val", ",", "new_val", ")", "# don't propagate changes if we are just assigning a new name", "# and not really renaming", "# (i.e., if a blank row was added then named)", "if", "default_val", "==", "''", ":", "self", ".", "grid", ".", "SetCellValue", "(", "row", ",", "0", ",", "new_val", ")", "return", "# update the current grid with new name", "for", "row", "in", "range", "(", "self", ".", "grid", ".", "GetNumberRows", "(", ")", ")", ":", "cell_value", "=", "self", ".", "grid", ".", "GetCellValue", "(", "row", ",", "0", ")", "if", "cell_value", "==", "default_val", ":", "self", ".", "grid", ".", "SetCellValue", "(", "row", ",", "0", ",", "new_val", ")", "else", ":", "continue", "return", "color", "=", "self", ".", "grid", ".", "GetCellBackgroundColour", "(", "event", ".", "GetRow", "(", ")", ",", "event", ".", "GetCol", "(", ")", ")", "# allow user to cherry-pick cells for editing.", "# gets selection of meta key for mac, ctrl key for pc", "if", "event", ".", "ControlDown", "(", ")", "or", "event", ".", "MetaDown", "(", ")", ":", "row", ",", "col", "=", "event", ".", "GetRow", "(", ")", ",", "event", ".", "GetCol", "(", ")", "if", "(", "row", ",", "col", ")", "not", "in", "self", ".", "dispersed_selection", ":", "self", ".", "dispersed_selection", ".", "append", "(", "(", "row", ",", "col", ")", ")", "self", ".", "grid", ".", "SetCellBackgroundColour", "(", "row", ",", "col", ",", "'light blue'", ")", "else", ":", "self", ".", "dispersed_selection", ".", "remove", "(", "(", "row", ",", "col", ")", ")", "self", ".", "grid", ".", "SetCellBackgroundColour", "(", "row", ",", "col", ",", "color", ")", "# 'white'", "self", ".", "grid", ".", "ForceRefresh", "(", ")", "return", "if", "event", ".", "ShiftDown", "(", ")", ":", "# allow user to highlight multiple consecutive cells in a column", "previous_col", "=", "self", ".", "grid", ".", "GetGridCursorCol", "(", ")", "previous_row", "=", "self", ".", "grid", ".", "GetGridCursorRow", "(", ")", "col", "=", "event", ".", "GetCol", "(", ")", "row", "=", "event", ".", "GetRow", "(", ")", "if", "col", "!=", "previous_col", ":", "return", "else", ":", "if", "row", ">", "previous_row", ":", "row_range", "=", "list", "(", "range", "(", "previous_row", ",", "row", "+", "1", ")", ")", "else", ":", "row_range", "=", "list", "(", "range", "(", "row", ",", "previous_row", "+", "1", ")", ")", "for", "r", "in", "row_range", ":", "self", ".", "grid", ".", "SetCellBackgroundColour", "(", "r", ",", "col", ",", "'light blue'", ")", "self", ".", "selection", ".", "append", "(", "(", 
"r", ",", "col", ")", ")", "self", ".", "grid", ".", "ForceRefresh", "(", ")", "return", "selection", "=", "False", "if", "self", ".", "dispersed_selection", ":", "is_dispersed", "=", "True", "selection", "=", "self", ".", "dispersed_selection", "if", "self", ".", "selection", ":", "is_dispersed", "=", "False", "selection", "=", "self", ".", "selection", "try", ":", "col", "=", "event", ".", "GetCol", "(", ")", "row", "=", "event", ".", "GetRow", "(", ")", "except", "AttributeError", ":", "row", ",", "col", "=", "selection", "[", "0", "]", "[", "0", "]", ",", "selection", "[", "0", "]", "[", "1", "]", "self", ".", "grid", ".", "SetGridCursor", "(", "row", ",", "col", ")", "if", "col", "in", "list", "(", "choices", ".", "keys", "(", ")", ")", ":", "# column should have a pop-up menu", "menu", "=", "wx", ".", "Menu", "(", ")", "two_tiered", "=", "choices", "[", "col", "]", "[", "1", "]", "choices", "=", "choices", "[", "col", "]", "[", "0", "]", "if", "not", "two_tiered", ":", "# menu is one tiered", "if", "'CLEAR cell of all values'", "not", "in", "choices", ":", "choices", ".", "insert", "(", "0", ",", "'CLEAR cell of all values'", ")", "for", "choice", "in", "choices", ":", "if", "not", "choice", ":", "choice", "=", "\" \"", "# prevents error if choice is an empty string", "menuitem", "=", "menu", ".", "Append", "(", "wx", ".", "ID_ANY", ",", "str", "(", "choice", ")", ")", "self", ".", "window", ".", "Bind", "(", "wx", ".", "EVT_MENU", ",", "lambda", "event", ":", "self", ".", "on_select_menuitem", "(", "event", ",", "grid", ",", "row", ",", "col", ",", "selection", ")", ",", "menuitem", ")", "self", ".", "show_menu", "(", "event", ",", "menu", ")", "else", ":", "# menu is two_tiered", "clear", "=", "menu", ".", "Append", "(", "-", "1", ",", "'CLEAR cell of all values'", ")", "self", ".", "window", ".", "Bind", "(", "wx", ".", "EVT_MENU", ",", "lambda", "event", ":", "self", ".", "on_select_menuitem", "(", "event", ",", "grid", ",", "row", ",", "col", ",", "selection", ")", ",", "clear", ")", "for", "choice", "in", "sorted", "(", "choices", ".", "items", "(", ")", ")", ":", "submenu", "=", "wx", ".", "Menu", "(", ")", "for", "item", "in", "choice", "[", "1", "]", ":", "menuitem", "=", "submenu", ".", "Append", "(", "-", "1", ",", "str", "(", "item", ")", ")", "self", ".", "window", ".", "Bind", "(", "wx", ".", "EVT_MENU", ",", "lambda", "event", ":", "self", ".", "on_select_menuitem", "(", "event", ",", "grid", ",", "row", ",", "col", ",", "selection", ")", ",", "menuitem", ")", "menu", ".", "Append", "(", "-", "1", ",", "choice", "[", "0", "]", ",", "submenu", ")", "self", ".", "show_menu", "(", "event", ",", "menu", ")", "if", "selection", ":", "# re-whiten the cells that were previously highlighted", "for", "row", ",", "col", "in", "selection", ":", "self", ".", "grid", ".", "SetCellBackgroundColour", "(", "row", ",", "col", ",", "self", ".", "col_color", ")", "self", ".", "dispersed_selection", "=", "[", "]", "self", ".", "selection", "=", "[", "]", "self", ".", "grid", ".", "ForceRefresh", "(", ")" ]
creates popup menu when user clicks on the column if that column is in the list of choices that get a drop-down menu. allows user to edit the column, but only from available values
[ "creates", "popup", "menu", "when", "user", "clicks", "on", "the", "column", "if", "that", "column", "is", "in", "the", "list", "of", "choices", "that", "get", "a", "drop", "-", "down", "menu", ".", "allows", "user", "to", "edit", "the", "column", "but", "only", "from", "available", "values" ]
python
train
46.280702
20c/tmpl
tmpl/__init__.py
https://github.com/20c/tmpl/blob/ed24d3b744353c93735f370a2b989ed322960ed9/tmpl/__init__.py#L10-L19
def get_engine(name): """ get an engine from string (engine class without Engine) """ name = name.capitalize() + 'Engine' if name in globals(): return globals()[name] raise KeyError("engine '%s' does not exist" % name)
[ "def", "get_engine", "(", "name", ")", ":", "name", "=", "name", ".", "capitalize", "(", ")", "+", "'Engine'", "if", "name", "in", "globals", "(", ")", ":", "return", "globals", "(", ")", "[", "name", "]", "raise", "KeyError", "(", "\"engine '%s' does not exist\"", "%", "name", ")" ]
get an engine from string (engine class without Engine)
[ "get", "an", "engine", "from", "string", "(", "engine", "class", "without", "Engine", ")" ]
python
train
24.3
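Editor's note — the lookup in get_engine() can be reproduced standalone; JinjaEngine and MakoEngine below are dummy placeholders standing in for tmpl's real engine classes.
class JinjaEngine(object):
    pass

class MakoEngine(object):
    pass

def get_engine(name):
    # Engines are classes named <Name>Engine living in the module's globals().
    name = name.capitalize() + 'Engine'
    if name in globals():
        return globals()[name]
    raise KeyError("engine '%s' does not exist" % name)

print(get_engine('jinja'))  # <class '__main__.JinjaEngine'>
print(get_engine('MAKO'))   # capitalize() lowercases the rest: MakoEngine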
objectrocket/python-client
objectrocket/util.py
https://github.com/objectrocket/python-client/blob/a65868c7511ff49a5fbe304e53bf592b7fc6d5ef/objectrocket/util.py#L23-L31
def register_extension_method(ext, base, *args, **kwargs): """Register the given extension method as a public attribute of the given base. README: The expected protocol here is that the given extension method is an unbound function. It will be bound to the specified base as a method, and then set as a public attribute of that base. """ bound_method = create_bound_method(ext.plugin, base) setattr(base, ext.name.lstrip('_'), bound_method)
[ "def", "register_extension_method", "(", "ext", ",", "base", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "bound_method", "=", "create_bound_method", "(", "ext", ".", "plugin", ",", "base", ")", "setattr", "(", "base", ",", "ext", ".", "name", ".", "lstrip", "(", "'_'", ")", ",", "bound_method", ")" ]
Register the given extension method as a public attribute of the given base. README: The expected protocol here is that the given extension method is an unbound function. It will be bound to the specified base as a method, and then set as a public attribute of that base.
[ "Register", "the", "given", "extension", "method", "as", "a", "public", "attribute", "of", "the", "given", "base", "." ]
python
train
51.222222
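Editor's note — the binding step of register_extension_method() in isolation; on Python 3, six's create_bound_method is types.MethodType, and lstrip('_') mirrors how a private plugin name becomes a public attribute. Client and _ping are hypothetical stand-ins.
import types

class Client(object):
    pass

def _ping(self):
    return 'pong from %s' % type(self).__name__

client = Client()
# Bind the free function to the instance and publish it without the underscore.
setattr(client, '_ping'.lstrip('_'), types.MethodType(_ping, client))
print(client.ping())   # pong from Client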
bram85/topydo
topydo/ui/columns/Main.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/ui/columns/Main.py#L479-L493
def _viewdata_to_view(self, p_data): """ Converts a dictionary describing a view to an actual UIView instance. """ sorter = Sorter(p_data['sortexpr'], p_data['groupexpr']) filters = [] if not p_data['show_all']: filters.append(DependencyFilter(self.todolist)) filters.append(RelevanceFilter()) filters.append(HiddenTagFilter()) filters += get_filter_list(p_data['filterexpr'].split()) return UIView(sorter, filters, self.todolist, p_data)
[ "def", "_viewdata_to_view", "(", "self", ",", "p_data", ")", ":", "sorter", "=", "Sorter", "(", "p_data", "[", "'sortexpr'", "]", ",", "p_data", "[", "'groupexpr'", "]", ")", "filters", "=", "[", "]", "if", "not", "p_data", "[", "'show_all'", "]", ":", "filters", ".", "append", "(", "DependencyFilter", "(", "self", ".", "todolist", ")", ")", "filters", ".", "append", "(", "RelevanceFilter", "(", ")", ")", "filters", ".", "append", "(", "HiddenTagFilter", "(", ")", ")", "filters", "+=", "get_filter_list", "(", "p_data", "[", "'filterexpr'", "]", ".", "split", "(", ")", ")", "return", "UIView", "(", "sorter", ",", "filters", ",", "self", ".", "todolist", ",", "p_data", ")" ]
Converts a dictionary describing a view to an actual UIView instance.
[ "Converts", "a", "dictionary", "describing", "a", "view", "to", "an", "actual", "UIView", "instance", "." ]
python
train
35.133333
pywbem/pywbem
pywbem/cim_obj.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L1302-L1310
def namespace(self, namespace): """Setter method; for a description see the getter method.""" # pylint: disable=attribute-defined-outside-init self._namespace = _ensure_unicode(namespace) if self._namespace is not None: # In Python 3, a byte string cannot be stripped by a unicode char # Therefore, the stripping needs to be done after the unicode # conversion. self._namespace = self._namespace.strip('/')
[ "def", "namespace", "(", "self", ",", "namespace", ")", ":", "# pylint: disable=attribute-defined-outside-init", "self", ".", "_namespace", "=", "_ensure_unicode", "(", "namespace", ")", "if", "self", ".", "_namespace", "is", "not", "None", ":", "# In Python 3, a byte string cannot be stripped by a unicode char", "# Therefore, the stripping needs to be done after the unicode", "# conversion.", "self", ".", "_namespace", "=", "self", ".", "_namespace", ".", "strip", "(", "'/'", ")" ]
Setter method; for a description see the getter method.
[ "Setter", "method", ";", "for", "a", "description", "see", "the", "getter", "method", "." ]
python
train
53.111111
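Editor's note — a quick demonstration of why the comment in the setter matters: on Python 3 a bytes object cannot be stripped with a str argument, so the unicode conversion must come before strip('/').
ns = b'/root/cimv2/'
try:
    ns.strip('/')                      # TypeError on Python 3
except TypeError as exc:
    print(exc)
print(ns.decode('utf-8').strip('/'))   # 'root/cimv2'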
fridiculous/django-estimators
estimators/models/base.py
https://github.com/fridiculous/django-estimators/blob/5dd72694dab6725335214543a59104c4de504037/estimators/models/base.py#L30-L41
def _extract_model_params(self, defaults, **kwargs): """this method allows django managers to use `objects.get_or_create` and `objects.update_or_create` on a hashable object. """ obj = kwargs.pop(self.object_property_name, None) if obj is not None: kwargs['object_hash'] = self.model._compute_hash(obj) lookup, params = super()._extract_model_params(defaults, **kwargs) if obj is not None: params[self.object_property_name] = obj del params['object_hash'] return lookup, params
[ "def", "_extract_model_params", "(", "self", ",", "defaults", ",", "*", "*", "kwargs", ")", ":", "obj", "=", "kwargs", ".", "pop", "(", "self", ".", "object_property_name", ",", "None", ")", "if", "obj", "is", "not", "None", ":", "kwargs", "[", "'object_hash'", "]", "=", "self", ".", "model", ".", "_compute_hash", "(", "obj", ")", "lookup", ",", "params", "=", "super", "(", ")", ".", "_extract_model_params", "(", "defaults", ",", "*", "*", "kwargs", ")", "if", "obj", "is", "not", "None", ":", "params", "[", "self", ".", "object_property_name", "]", "=", "obj", "del", "params", "[", "'object_hash'", "]", "return", "lookup", ",", "params" ]
this method allows django managers to use `objects.get_or_create` and `objects.update_or_create` on a hashable object.
[ "this", "method", "allows", "django", "managers", "use", "objects", ".", "get_or_create", "and", "objects", ".", "update_or_create", "on", "a", "hashable", "object", "." ]
python
train
46.916667
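Editor's note — a standalone sketch of the swap the manager performs: the rich object is replaced by its hash for the lookup, then restored into the creation params. compute_hash below is a hypothetical stand-in for the model's _compute_hash.
import hashlib
import pickle

def compute_hash(obj):
    # Stand-in: hash the pickled payload.
    return hashlib.sha256(pickle.dumps(obj)).hexdigest()

kwargs = {'estimator': {'alpha': 0.1}}
obj = kwargs.pop('estimator', None)
if obj is not None:
    kwargs['object_hash'] = compute_hash(obj)   # query by hash...
print(kwargs)                                    # ...create with the object restored later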
Rapptz/discord.py
discord/ext/commands/help.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/ext/commands/help.py#L103-L133
def add_line(self, line='', *, empty=False): """Adds a line to the current page. If the line exceeds the :attr:`max_size` then an exception is raised. Parameters ----------- line: :class:`str` The line to add. empty: :class:`bool` Indicates if another empty line should be added. Raises ------ RuntimeError The line was too big for the current :attr:`max_size`. """ max_page_size = self.max_size - self._prefix_len - 2 if len(line) > max_page_size: raise RuntimeError('Line exceeds maximum page size %s' % (max_page_size)) if self._count + len(line) + 1 > self.max_size: self.close_page() self._count += len(line) + 1 self._current_page.append(line) if empty: self._current_page.append('') self._count += 1
[ "def", "add_line", "(", "self", ",", "line", "=", "''", ",", "*", ",", "empty", "=", "False", ")", ":", "max_page_size", "=", "self", ".", "max_size", "-", "self", ".", "_prefix_len", "-", "2", "if", "len", "(", "line", ")", ">", "max_page_size", ":", "raise", "RuntimeError", "(", "'Line exceeds maximum page size %s'", "%", "(", "max_page_size", ")", ")", "if", "self", ".", "_count", "+", "len", "(", "line", ")", "+", "1", ">", "self", ".", "max_size", ":", "self", ".", "close_page", "(", ")", "self", ".", "_count", "+=", "len", "(", "line", ")", "+", "1", "self", ".", "_current_page", ".", "append", "(", "line", ")", "if", "empty", ":", "self", ".", "_current_page", ".", "append", "(", "''", ")", "self", ".", "_count", "+=", "1" ]
Adds a line to the current page. If the line exceeds the :attr:`max_size` then an exception is raised. Parameters ----------- line: :class:`str` The line to add. empty: :class:`bool` Indicates if another empty line should be added. Raises ------ RuntimeError The line was too big for the current :attr:`max_size`.
[ "Adds", "a", "line", "to", "the", "current", "page", "." ]
python
train
29.096774
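Editor's note — the size accounting of add_line() in miniature: every line costs len(line) + 1 for its newline, and a page closes as soon as the next line would overflow. This toy omits the prefix handling of discord.py's real Paginator.
class Paginator(object):
    def __init__(self, max_size=12):
        self.max_size = max_size
        self._current_page = []
        self._count = 0
        self.pages = []

    def close_page(self):
        self.pages.append('\n'.join(self._current_page))
        self._current_page = []
        self._count = 0

    def add_line(self, line=''):
        if self._count + len(line) + 1 > self.max_size:
            self.close_page()
        self._count += len(line) + 1
        self._current_page.append(line)

p = Paginator(max_size=12)
for line in ('aaaa', 'bbbb', 'cccc'):
    p.add_line(line)
p.close_page()
print(p.pages)   # ['aaaa\nbbbb', 'cccc']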
offu/WeRoBot
werobot/pay.py
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/pay.py#L60-L86
def create_js_pay_params(self, **package): """ Sign the parameters needed for JS pay. For details, please refer to the payment development docs. :: wxclient.create_js_pay_params( body=order_title, out_trade_no=local_order_no, total_fee=price_in_fen, notify_url=notify_url, spbill_create_ip=payer_ip, ) :param package: the parameters to be signed :return: the object needed for payment """ pay_param, sign, sign_type = self._pay_sign_dict( package=self.create_js_pay_package(**package) ) pay_param['paySign'] = sign pay_param['signType'] = sign_type # Tencent wants these keys capitalized before JS accepts them for key in ['appId', 'timeStamp', 'nonceStr']: pay_param[key] = str(pay_param.pop(key.lower())) return pay_param
[ "def", "create_js_pay_params", "(", "self", ",", "*", "*", "package", ")", ":", "pay_param", ",", "sign", ",", "sign_type", "=", "self", ".", "_pay_sign_dict", "(", "package", "=", "self", ".", "create_js_pay_package", "(", "*", "*", "package", ")", ")", "pay_param", "[", "'paySign'", "]", "=", "sign", "pay_param", "[", "'signType'", "]", "=", "sign_type", "# 腾讯这个还得转成大写 JS 才认", "for", "key", "in", "[", "'appId'", ",", "'timeStamp'", ",", "'nonceStr'", "]", ":", "pay_param", "[", "key", "]", "=", "str", "(", "pay_param", ".", "pop", "(", "key", ".", "lower", "(", ")", ")", ")", "return", "pay_param" ]
Sign the parameters needed for JS pay. For details, please refer to the payment development docs. :: wxclient.create_js_pay_params( body=order_title, out_trade_no=local_order_no, total_fee=price_in_fen, notify_url=notify_url, spbill_create_ip=payer_ip, ) :param package: the parameters to be signed :return: the object needed for payment
[ "签名", "js", "需要的参数", "详情请参考", "支付开发文档" ]
python
train
26.777778
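Editor's note — the final key-renaming loop of create_js_pay_params() in isolation: the signed package carries lowercase keys, while WeChat's JS bridge expects camelCase (hence the translated comment in the code). Values here are dummies.
pay_param = {'appid': 'wx123', 'timestamp': 1400000000, 'noncestr': 'abc'}
for key in ['appId', 'timeStamp', 'nonceStr']:
    pay_param[key] = str(pay_param.pop(key.lower()))
print(pay_param)   # {'appId': 'wx123', 'timeStamp': '1400000000', 'nonceStr': 'abc'}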
Duke-GCB/DukeDSClient
ddsc/core/download.py
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/core/download.py#L464-L472
def _on_bytes_read(self, num_bytes_read): """ Record our progress so we can validate that we receive all the data :param num_bytes_read: int: number of bytes we received as part of one chunk """ self.actual_bytes_read += num_bytes_read if self.actual_bytes_read > self.bytes_to_read: raise TooLargeChunkDownloadError(self.actual_bytes_read, self.bytes_to_read, self.local_path) self.download_context.send_processed_message(num_bytes_read)
[ "def", "_on_bytes_read", "(", "self", ",", "num_bytes_read", ")", ":", "self", ".", "actual_bytes_read", "+=", "num_bytes_read", "if", "self", ".", "actual_bytes_read", ">", "self", ".", "bytes_to_read", ":", "raise", "TooLargeChunkDownloadError", "(", "self", ".", "actual_bytes_read", ",", "self", ".", "bytes_to_read", ",", "self", ".", "local_path", ")", "self", ".", "download_context", ".", "send_processed_message", "(", "num_bytes_read", ")" ]
Record our progress so we can validate that we receive all the data :param num_bytes_read: int: number of bytes we received as part of one chunk
[ "Record", "our", "progress", "so", "we", "can", "validate", "that", "we", "receive", "all", "the", "data", ":", "param", "num_bytes_read", ":", "int", ":", "number", "of", "bytes", "we", "received", "as", "part", "of", "one", "chunk" ]
python
train
55.333333
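Editor's note — a self-contained sketch of the overshoot guard in _on_bytes_read(): bytes are tallied per chunk and anything beyond the expected total raises. ValueError stands in for the project's TooLargeChunkDownloadError, and ChunkTracker is a hypothetical name.
class ChunkTracker(object):
    def __init__(self, bytes_to_read):
        self.bytes_to_read = bytes_to_read
        self.actual_bytes_read = 0

    def on_bytes_read(self, num_bytes_read):
        # Accumulate progress and fail fast on an overshoot.
        self.actual_bytes_read += num_bytes_read
        if self.actual_bytes_read > self.bytes_to_read:
            raise ValueError('received more bytes than expected')

t = ChunkTracker(bytes_to_read=10)
t.on_bytes_read(6)
t.on_bytes_read(4)   # exactly on target: fine; one more byte would raise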
h2oai/h2o-3
h2o-py/h2o/utils/debugging.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/utils/debugging.py#L238-L265
def _get_method_full_name(func): """ Return fully qualified function name. This method will attempt to find "full name" of the given function object. This full name is either of the form "<class name>.<method name>" if the function is a class method, or "<module name>.<func name>" if it's a regular function. Thus, this is an attempt to back-port func.__qualname__ to Python 2. :param func: a function object. :returns: string with the function's full name as explained above. """ # Python 3.3 already has this information available... if hasattr(func, "__qualname__"): return func.__qualname__ module = inspect.getmodule(func) if module is None: return "?.%s" % getattr(func, "__name__", "?") for cls_name in dir(module): cls = getattr(module, cls_name) if not inspect.isclass(cls): continue for method_name in dir(cls): cls_method = getattr(cls, method_name) if cls_method == func: return "%s.%s" % (cls_name, method_name) if hasattr(func, "__name__"): return "%s.%s" % (module.__name__, func.__name__) return "<unknown>"
[ "def", "_get_method_full_name", "(", "func", ")", ":", "# Python 3.3 already has this information available...", "if", "hasattr", "(", "func", ",", "\"__qualname__\"", ")", ":", "return", "func", ".", "__qualname__", "module", "=", "inspect", ".", "getmodule", "(", "func", ")", "if", "module", "is", "None", ":", "return", "\"?.%s\"", "%", "getattr", "(", "func", ",", "\"__name__\"", ",", "\"?\"", ")", "for", "cls_name", "in", "dir", "(", "module", ")", ":", "cls", "=", "getattr", "(", "module", ",", "cls_name", ")", "if", "not", "inspect", ".", "isclass", "(", "cls", ")", ":", "continue", "for", "method_name", "in", "dir", "(", "cls", ")", ":", "cls_method", "=", "getattr", "(", "cls", ",", "method_name", ")", "if", "cls_method", "==", "func", ":", "return", "\"%s.%s\"", "%", "(", "cls_name", ",", "method_name", ")", "if", "hasattr", "(", "func", ",", "\"__name__\"", ")", ":", "return", "\"%s.%s\"", "%", "(", "module", ".", "__name__", ",", "func", ".", "__name__", ")", "return", "\"<unknown>\"" ]
Return fully qualified function name. This method will attempt to find "full name" of the given function object. This full name is either of the form "<class name>.<method name>" if the function is a class method, or "<module name>.<func name>" if it's a regular function. Thus, this is an attempt to back-port func.__qualname__ to Python 2. :param func: a function object. :returns: string with the function's full name as explained above.
[ "Return", "fully", "qualified", "function", "name", "." ]
python
test
40.75
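Editor's note — the fast path the helper checks first: on Python 3, __qualname__ already carries the class-qualified name that the rest of the function reconstructs for Python 2. Widget and helper are illustrative names.
import inspect

class Widget(object):
    def render(self):
        pass

def helper():
    pass

print(Widget.render.__qualname__)           # Widget.render
print(inspect.getmodule(helper).__name__)   # __main__ (the module-name fallback)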
benvanwerkhoven/kernel_tuner
kernel_tuner/util.py
https://github.com/benvanwerkhoven/kernel_tuner/blob/cfcb5da5e510db494f8219c22566ab65d5fcbd9f/kernel_tuner/util.py#L34-L68
def check_argument_list(kernel_name, kernel_string, args): """ raise an exception if kernel arguments do not match host arguments """ kernel_arguments = list() collected_errors = list() for iterator in re.finditer(kernel_name + "[ \n\t]*" + "\(", kernel_string): kernel_start = iterator.end() kernel_end = kernel_string.find(")", kernel_start) if kernel_start != 0: kernel_arguments.append(kernel_string[kernel_start:kernel_end].split(",")) for arguments_set, arguments in enumerate(kernel_arguments): collected_errors.append(list()) if len(arguments) != len(args): collected_errors[arguments_set].append("Kernel and host argument lists do not match in size.") continue for (i, arg) in enumerate(args): kernel_argument = arguments[i] if not isinstance(arg, (numpy.ndarray, numpy.generic)): raise TypeError("Argument at position " + str(i) + " of type: " + str(type(arg)) + " should be of type numpy.ndarray or numpy scalar") correct = True if isinstance(arg, numpy.ndarray) and not "*" in kernel_argument: correct = False #array is passed to non-pointer kernel argument if correct and check_argument_type(str(arg.dtype), kernel_argument, i): continue collected_errors[arguments_set].append("Argument at position " + str(i) + " of dtype: " + str(arg.dtype) + " does not match " + kernel_argument + ".") if not collected_errors[arguments_set]: # We assume that if there is a possible list of arguments that matches with the provided one # it is the right one return for errors in collected_errors: warnings.warn(errors[0], UserWarning)
[ "def", "check_argument_list", "(", "kernel_name", ",", "kernel_string", ",", "args", ")", ":", "kernel_arguments", "=", "list", "(", ")", "collected_errors", "=", "list", "(", ")", "for", "iterator", "in", "re", ".", "finditer", "(", "kernel_name", "+", "\"[ \\n\\t]*\"", "+", "\"\\(\"", ",", "kernel_string", ")", ":", "kernel_start", "=", "iterator", ".", "end", "(", ")", "kernel_end", "=", "kernel_string", ".", "find", "(", "\")\"", ",", "kernel_start", ")", "if", "kernel_start", "!=", "0", ":", "kernel_arguments", ".", "append", "(", "kernel_string", "[", "kernel_start", ":", "kernel_end", "]", ".", "split", "(", "\",\"", ")", ")", "for", "arguments_set", ",", "arguments", "in", "enumerate", "(", "kernel_arguments", ")", ":", "collected_errors", ".", "append", "(", "list", "(", ")", ")", "if", "len", "(", "arguments", ")", "!=", "len", "(", "args", ")", ":", "collected_errors", "[", "arguments_set", "]", ".", "append", "(", "\"Kernel and host argument lists do not match in size.\"", ")", "continue", "for", "(", "i", ",", "arg", ")", "in", "enumerate", "(", "args", ")", ":", "kernel_argument", "=", "arguments", "[", "i", "]", "if", "not", "isinstance", "(", "arg", ",", "(", "numpy", ".", "ndarray", ",", "numpy", ".", "generic", ")", ")", ":", "raise", "TypeError", "(", "\"Argument at position \"", "+", "str", "(", "i", ")", "+", "\" of type: \"", "+", "str", "(", "type", "(", "arg", ")", ")", "+", "\" should be of type numpy.ndarray or numpy scalar\"", ")", "correct", "=", "True", "if", "isinstance", "(", "arg", ",", "numpy", ".", "ndarray", ")", "and", "not", "\"*\"", "in", "kernel_argument", ":", "correct", "=", "False", "#array is passed to non-pointer kernel argument", "if", "correct", "and", "check_argument_type", "(", "str", "(", "arg", ".", "dtype", ")", ",", "kernel_argument", ",", "i", ")", ":", "continue", "collected_errors", "[", "arguments_set", "]", ".", "append", "(", "\"Argument at position \"", "+", "str", "(", "i", ")", "+", "\" of dtype: \"", "+", "str", "(", "arg", ".", "dtype", ")", "+", "\" does not match \"", "+", "kernel_argument", "+", "\".\"", ")", "if", "not", "collected_errors", "[", "arguments_set", "]", ":", "# We assume that if there is a possible list of arguments that matches with the provided one", "# it is the right one", "return", "for", "errors", "in", "collected_errors", ":", "warnings", ".", "warn", "(", "errors", "[", "0", "]", ",", "UserWarning", ")" ]
raise an exception if kernel arguments do not match host arguments
[ "raise", "an", "exception", "if", "a", "kernel", "arguments", "do", "not", "match", "host", "arguments" ]
python
train
52.485714
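Editor's note — the signature-scraping step of check_argument_list() on its own; a toy kernel string shows how the regex locates the argument list that is later compared against the host arguments. The CUDA snippet is made up for the demo.
import re

kernel_name = "vector_add"
kernel_string = "__global__ void vector_add(float *c, float *a, int n) {}"
for match in re.finditer(kernel_name + r"[ \n\t]*\(", kernel_string):
    start = match.end()
    end = kernel_string.find(")", start)
    print([arg.strip() for arg in kernel_string[start:end].split(",")])
# ['float *c', 'float *a', 'int n']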
prompt-toolkit/ptpython
ptpython/contrib/asyncssh_repl.py
https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/contrib/asyncssh_repl.py#L118-L133
def _print(self, *data, **kw): """ _print(self, *data, sep=' ', end='\n', file=None) Alternative 'print' function that prints back into the SSH channel. """ # Pop keyword-only arguments. (We cannot use the syntax from the # signature. Otherwise, Python2 will give a syntax error message when # installing.) sep = kw.pop('sep', ' ') end = kw.pop('end', '\n') _ = kw.pop('file', None) assert not kw, 'Too many keyword-only arguments' data = sep.join(map(str, data)) self._chan.write(data + end)
[ "def", "_print", "(", "self", ",", "*", "data", ",", "*", "*", "kw", ")", ":", "# Pop keyword-only arguments. (We cannot use the syntax from the", "# signature. Otherwise, Python2 will give a syntax error message when", "# installing.)", "sep", "=", "kw", ".", "pop", "(", "'sep'", ",", "' '", ")", "end", "=", "kw", ".", "pop", "(", "'end'", ",", "'\\n'", ")", "_", "=", "kw", ".", "pop", "(", "'file'", ",", "None", ")", "assert", "not", "kw", ",", "'Too many keyword-only arguments'", "data", "=", "sep", ".", "join", "(", "map", "(", "str", ",", "data", ")", ")", "self", ".", "_chan", ".", "write", "(", "data", "+", "end", ")" ]
_print(self, *data, sep=' ', end='\n', file=None) Alternative 'print' function that prints back into the SSH channel.
[ "_print", "(", "self", "*", "data", "sep", "=", "end", "=", "\\", "n", "file", "=", "None", ")" ]
python
train
36.4375
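Editor's note — the **kw pop pattern used by _print(): a Python-2-compatible substitute for keyword-only arguments, since def f(*data, sep=' ') is Python-3-only syntax. join_print is a hypothetical name; it returns rather than writes to an SSH channel.
def join_print(*data, **kw):
    # Pop keyword-only arguments by hand, as _print() does.
    sep = kw.pop('sep', ' ')
    end = kw.pop('end', '\n')
    assert not kw, 'Too many keyword-only arguments'
    return sep.join(map(str, data)) + end

print(join_print(1, 2, 3, sep='-'), end='')   # 1-2-3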
philgyford/django-spectator
spectator/events/views.py
https://github.com/philgyford/django-spectator/blob/f3c72004f9caa1fde0f5a3b2f0d2bf285fc01ada/spectator/events/views.py#L147-L153
def get_work_kind(self): """ We'll have a kind_slug like 'movies'. We need to translate that into a work `kind` like 'movie'. """ slugs_to_kinds = {v:k for k,v in Work.KIND_SLUGS.items()} return slugs_to_kinds.get(self.kind_slug, None)
[ "def", "get_work_kind", "(", "self", ")", ":", "slugs_to_kinds", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "Work", ".", "KIND_SLUGS", ".", "items", "(", ")", "}", "return", "slugs_to_kinds", ".", "get", "(", "self", ".", "kind_slug", ",", "None", ")" ]
We'll have a kind_slug like 'movies'. We need to translate that into a work `kind` like 'movie'.
[ "We", "ll", "have", "a", "kind_slug", "like", "movies", ".", "We", "need", "to", "translate", "that", "into", "a", "work", "kind", "like", "movie", "." ]
python
train
39.571429
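Editor's note — the inversion trick in get_work_kind() on its own; the mapping below is a stand-in for the real Work.KIND_SLUGS.
KIND_SLUGS = {'movie': 'movies', 'play': 'plays'}
slugs_to_kinds = {v: k for k, v in KIND_SLUGS.items()}
print(slugs_to_kinds.get('movies'))    # movie
print(slugs_to_kinds.get('unknown'))   # None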
Terrance/SkPy
skpy/conn.py
https://github.com/Terrance/SkPy/blob/0f9489c94e8ec4d3effab4314497428872a80ad1/skpy/conn.py#L312-L327
def writeToken(self): """ Store details of the current connection in the named file. This can be used by :meth:`readToken` to re-authenticate at a later time. """ # Write token file privately. with os.fdopen(os.open(self.tokenFile, os.O_WRONLY | os.O_CREAT, 0o600), "w") as f: # When opening files via os, truncation must be done manually. f.truncate() f.write(self.userId + "\n") f.write(self.tokens["skype"] + "\n") f.write(str(int(time.mktime(self.tokenExpiry["skype"].timetuple()))) + "\n") f.write(self.tokens["reg"] + "\n") f.write(str(int(time.mktime(self.tokenExpiry["reg"].timetuple()))) + "\n") f.write(self.msgsHost + "\n")
[ "def", "writeToken", "(", "self", ")", ":", "# Write token file privately.", "with", "os", ".", "fdopen", "(", "os", ".", "open", "(", "self", ".", "tokenFile", ",", "os", ".", "O_WRONLY", "|", "os", ".", "O_CREAT", ",", "0o600", ")", ",", "\"w\"", ")", "as", "f", ":", "# When opening files via os, truncation must be done manually.", "f", ".", "truncate", "(", ")", "f", ".", "write", "(", "self", ".", "userId", "+", "\"\\n\"", ")", "f", ".", "write", "(", "self", ".", "tokens", "[", "\"skype\"", "]", "+", "\"\\n\"", ")", "f", ".", "write", "(", "str", "(", "int", "(", "time", ".", "mktime", "(", "self", ".", "tokenExpiry", "[", "\"skype\"", "]", ".", "timetuple", "(", ")", ")", ")", ")", "+", "\"\\n\"", ")", "f", ".", "write", "(", "self", ".", "tokens", "[", "\"reg\"", "]", "+", "\"\\n\"", ")", "f", ".", "write", "(", "str", "(", "int", "(", "time", ".", "mktime", "(", "self", ".", "tokenExpiry", "[", "\"reg\"", "]", ".", "timetuple", "(", ")", ")", ")", ")", "+", "\"\\n\"", ")", "f", ".", "write", "(", "self", ".", "msgsHost", "+", "\"\\n\"", ")" ]
Store details of the current connection in the named file. This can be used by :meth:`readToken` to re-authenticate at a later time.
[ "Store", "details", "of", "the", "current", "connection", "in", "the", "named", "file", "." ]
python
test
47.75
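Editor's note — the private-file idiom used by writeToken(), shown standalone: os.open with mode 0o600 creates the file owner-readable/writable only (subject to the process umask), and os.fdopen wraps the descriptor in a normal file object. The path is illustrative.
import os

path = 'token.txt'
with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
    f.truncate()          # drop any stale contents, as in writeToken()
    f.write('user\n')
print(oct(os.stat(path).st_mode & 0o777))   # typically '0o600'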
synw/dataswim
dataswim/data/clean.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/clean.py#L345-L365
def strip(self, col: str): """ Remove leading and trailing white spaces in a column's values :param col: name of the column :type col: str :example: ``ds.strip("mycol")`` """ def remove_ws(row): val = str(row[col]) if val != val.strip(): row[col] = val.strip() return row try: self.df = self.df.apply(remove_ws, axis=1) except Exception as e: self.err(e, "Can not remove white space in column") return self.ok("White space removed in column values")
[ "def", "strip", "(", "self", ",", "col", ":", "str", ")", ":", "def", "remove_ws", "(", "row", ")", ":", "val", "=", "str", "(", "row", "[", "col", "]", ")", "if", "\" \"", "in", "val", ".", "startswith", "(", "\" \"", ")", ":", "row", "[", "col", "]", "=", "val", ".", "strip", "(", ")", "return", "row", "try", ":", "self", ".", "df", ".", "apply", "(", "remove_ws", ")", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "\"Can not remove white space in column\"", ")", "return", "self", ".", "ok", "(", "\"White space removed in column values\"", ")" ]
Remove leading and trailing white spaces in a column's values :param col: name of the column :type col: str :example: ``ds.strip("mycol")``
[ "Remove", "leading", "and", "trailing", "white", "spaces", "in", "a", "column", "s", "values" ]
python
train
28.095238
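Editor's note — an equivalent vectorised form of the per-row strip above, assuming a pandas DataFrame; .str.strip() handles leading and trailing whitespace in one pass and avoids the row-wise apply.
import pandas as pd

df = pd.DataFrame({'mycol': ['  a ', 'b', ' c']})
df['mycol'] = df['mycol'].astype(str).str.strip()
print(df['mycol'].tolist())   # ['a', 'b', 'c']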
SiLab-Bonn/pyBAR
pybar/scans/calibrate_hit_or.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/scans/calibrate_hit_or.py#L20-L148
def create_hitor_calibration(output_filename, plot_pixel_calibrations=False): '''Generating HitOr calibration file (_calibration.h5) from raw data file and plotting of calibration data. Parameters ---------- output_filename : string Input raw data file name. plot_pixel_calibrations : bool, iterable If True, generate additional pixel calibration plots. If list of column and row tuples (from 1 to 80 / 336), print selected pixels. Returns ------- nothing ''' logging.info('Analyze HitOR calibration data and plot results of %s', output_filename) with AnalyzeRawData(raw_data_file=output_filename, create_pdf=True) as analyze_raw_data: # Interpret the raw data file analyze_raw_data.create_occupancy_hist = False # too many scan parameters to do in ram histogramming analyze_raw_data.create_hit_table = True analyze_raw_data.create_tdc_hist = True analyze_raw_data.align_at_tdc = True # align events at TDC words, first word of event has to be a tdc word analyze_raw_data.interpret_word_table() analyze_raw_data.interpreter.print_summary() analyze_raw_data.plot_histograms() n_injections = analyze_raw_data.n_injections # use later meta_data = analyze_raw_data.out_file_h5.root.meta_data[:] scan_parameters_dict = get_scan_parameter(meta_data) inner_loop_parameter_values = scan_parameters_dict[next(reversed(scan_parameters_dict))] # inner loop parameter name is unknown scan_parameter_names = scan_parameters_dict.keys() # col_row_combinations = get_unique_scan_parameter_combinations(analyze_raw_data.out_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True) meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameter_names) scan_parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameter_names) event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number']) event_ranges_per_parameter = np.column_stack((scan_parameter_values, event_number_ranges)) if analyze_raw_data.out_file_h5.root.Hits.nrows == 0: raise AnalysisError("Found no hits.") hits = analyze_raw_data.out_file_h5.root.Hits[:] event_numbers = hits['event_number'].copy() # create contiguous array, otherwise np.searchsorted too slow, http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays output_filename = os.path.splitext(output_filename)[0] with tb.open_file(output_filename + "_calibration.h5", mode="w") as calibration_data_file: logging.info('Create calibration') calibration_data = np.full(shape=(80, 336, len(inner_loop_parameter_values), 4), fill_value=np.nan, dtype='f4') # result of the calibration is a histogram with col_index, row_index, plsrDAC value, mean discrete tot, rms discrete tot, mean tot from TDC, rms tot from TDC progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(event_ranges_per_parameter), term_width=80) progress_bar.start() for index, (actual_scan_parameter_values, event_start, event_stop) in enumerate(event_ranges_per_parameter): if event_stop is None: # happens for the last chunk event_stop = hits[-1]['event_number'] + 1 array_index = np.searchsorted(event_numbers, np.array([event_start, event_stop])) actual_hits = hits[array_index[0]:array_index[1]] for item_index, item in enumerate(scan_parameter_names): if item == "column": actual_col = actual_scan_parameter_values[item_index]
elif item == "row": actual_row = actual_scan_parameter_values[item_index] elif item == "PlsrDAC": plser_dac = actual_scan_parameter_values[item_index] else: raise ValueError("Unknown scan parameter %s" % item) # Only pixel of actual column/row should be in the actual data chunk but since FIFO is not cleared for each scan step due to speed reasons and there might be noisy pixels this is not always the case n_wrong_pixel = np.count_nonzero(np.logical_or(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row)) if n_wrong_pixel != 0: logging.warning('%d hit(s) from other pixels for scan parameters %s', n_wrong_pixel, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)])) actual_hits = actual_hits[np.logical_and(actual_hits['column'] == actual_col, actual_hits['row'] == actual_row)] # Only take data from selected pixel actual_tdc_hits = actual_hits[(actual_hits['event_status'] & 0b0000111110011100) == 0b0000000100000000] # only take hits from good events (one TDC word only, no error) actual_tot_hits = actual_hits[(actual_hits['event_status'] & 0b0000100010011100) == 0b0000000000000000] # only take hits from good events for tot tot, tdc = actual_tot_hits['tot'], actual_tdc_hits['TDC'] if tdc.shape[0] < n_injections: logging.info('%d of %d expected TDC hits for scan parameters %s', tdc.shape[0], n_injections, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)])) if tot.shape[0] < n_injections: logging.info('%d of %d expected hits for scan parameters %s', tot.shape[0], n_injections, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)])) inner_loop_scan_parameter_index = np.where(plser_dac == inner_loop_parameter_values)[0][0] # translate the scan parameter value to an index for the result histogram # numpy mean and std return nan if array is empty calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 0] = np.mean(tot) calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 1] = np.mean(tdc) calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 2] = np.std(tot) calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 3] = np.std(tdc) progress_bar.update(index) progress_bar.finish() calibration_data_out = calibration_data_file.create_carray(calibration_data_file.root, name='HitOrCalibration', title='Hit OR calibration data', atom=tb.Atom.from_dtype(calibration_data.dtype), shape=calibration_data.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) calibration_data_out[:] = calibration_data calibration_data_out.attrs.dimensions = scan_parameter_names calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values calibration_data_out.flush() # with PdfPages(output_filename + "_calibration.pdf") as output_pdf: plot_scurves(calibration_data[:, :, :, 0], inner_loop_parameter_values, "ToT calibration", "ToT", 15, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf) plot_scurves(calibration_data[:, :, :, 1], inner_loop_parameter_values, "TDC calibration", "TDC [ns]", None, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf) tot_mean_all_pix = np.nanmean(calibration_data[:, :, :, 0], axis=(0, 1)) tot_error_all_pix = np.nanstd(calibration_data[:, :, :, 0], axis=(0, 1)) tdc_mean_all_pix = np.nanmean(calibration_data[:, :, :, 1], axis=(0, 1)) tdc_error_all_pix = 
np.nanstd(calibration_data[:, :, :, 1], axis=(0, 1)) plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_all_pix, tot_error=tot_error_all_pix, tdc_mean=tdc_mean_all_pix, tdc_error=tdc_error_all_pix, filename=analyze_raw_data.output_pdf, title="Mean charge calibration of %d pixel(s)" % np.count_nonzero(~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2))) # plotting individual pixels if plot_pixel_calibrations is True: # selecting pixels with non-nan entries col_row_non_nan = np.nonzero(~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2)) plot_pixel_calibrations = np.dstack(col_row_non_nan)[0] elif plot_pixel_calibrations is False: plot_pixel_calibrations = np.array([], dtype=np.int) else: # assuming list of column / row tuples plot_pixel_calibrations = np.array(plot_pixel_calibrations) - 1 # generate index array pixel_indices = np.arange(plot_pixel_calibrations.shape[0]) plot_n_pixels = 10 # number of pixels at the beginning, center and end of the array np.random.seed(0) # select random pixels if pixel_indices.size - 2 * plot_n_pixels >= 0: random_pixel_indices = np.sort(np.random.choice(pixel_indices[plot_n_pixels:-plot_n_pixels], min(plot_n_pixels, pixel_indices.size - 2 * plot_n_pixels), replace=False)) else: random_pixel_indices = np.array([], dtype=np.int) selected_pixel_indices = np.unique(np.hstack([pixel_indices[:plot_n_pixels], random_pixel_indices, pixel_indices[-plot_n_pixels:]])) # plotting individual pixels for (column, row) in plot_pixel_calibrations[selected_pixel_indices]: logging.info("Plotting charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1)) tot_mean_single_pix = calibration_data[column, row, :, 0] tot_std_single_pix = calibration_data[column, row, :, 2] tdc_mean_single_pix = calibration_data[column, row, :, 1] tdc_std_single_pix = calibration_data[column, row, :, 3] plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_single_pix, tot_error=tot_std_single_pix, tdc_mean=tdc_mean_single_pix, tdc_error=tdc_std_single_pix, filename=analyze_raw_data.output_pdf, title="Charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1))
[ "def", "create_hitor_calibration", "(", "output_filename", ",", "plot_pixel_calibrations", "=", "False", ")", ":", "logging", ".", "info", "(", "'Analyze HitOR calibration data and plot results of %s'", ",", "output_filename", ")", "with", "AnalyzeRawData", "(", "raw_data_file", "=", "output_filename", ",", "create_pdf", "=", "True", ")", "as", "analyze_raw_data", ":", "# Interpret the raw data file\r", "analyze_raw_data", ".", "create_occupancy_hist", "=", "False", "# too many scan parameters to do in ram histogramming\r", "analyze_raw_data", ".", "create_hit_table", "=", "True", "analyze_raw_data", ".", "create_tdc_hist", "=", "True", "analyze_raw_data", ".", "align_at_tdc", "=", "True", "# align events at TDC words, first word of event has to be a tdc word\r", "analyze_raw_data", ".", "interpret_word_table", "(", ")", "analyze_raw_data", ".", "interpreter", ".", "print_summary", "(", ")", "analyze_raw_data", ".", "plot_histograms", "(", ")", "n_injections", "=", "analyze_raw_data", ".", "n_injections", "# use later\r", "meta_data", "=", "analyze_raw_data", ".", "out_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "scan_parameters_dict", "=", "get_scan_parameter", "(", "meta_data", ")", "inner_loop_parameter_values", "=", "scan_parameters_dict", "[", "next", "(", "reversed", "(", "scan_parameters_dict", ")", ")", "]", "# inner loop parameter name is unknown\r", "scan_parameter_names", "=", "scan_parameters_dict", ".", "keys", "(", ")", "# col_row_combinations = get_unique_scan_parameter_combinations(analyze_raw_data.out_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True)\r", "meta_data_table_at_scan_parameter", "=", "get_unique_scan_parameter_combinations", "(", "meta_data", ",", "scan_parameters", "=", "scan_parameter_names", ")", "scan_parameter_values", "=", "get_scan_parameters_table_from_meta_data", "(", "meta_data_table_at_scan_parameter", ",", "scan_parameter_names", ")", "event_number_ranges", "=", "get_ranges_from_array", "(", "meta_data_table_at_scan_parameter", "[", "'event_number'", "]", ")", "event_ranges_per_parameter", "=", "np", ".", "column_stack", "(", "(", "scan_parameter_values", ",", "event_number_ranges", ")", ")", "if", "analyze_raw_data", ".", "out_file_h5", ".", "root", ".", "Hits", ".", "nrows", "==", "0", ":", "raise", "AnalysisError", "(", "\"Found no hits.\"", ")", "hits", "=", "analyze_raw_data", ".", "out_file_h5", ".", "root", ".", "Hits", "[", ":", "]", "event_numbers", "=", "hits", "[", "'event_number'", "]", ".", "copy", "(", ")", "# create contigous array, otherwise np.searchsorted too slow, http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays\r", "output_filename", "=", "os", ".", "path", ".", "splitext", "(", "output_filename", ")", "[", "0", "]", "with", "tb", ".", "open_file", "(", "output_filename", "+", "\"_calibration.h5\"", ",", "mode", "=", "\"w\"", ")", "as", "calibration_data_file", ":", "logging", ".", "info", "(", "'Create calibration'", ")", "calibration_data", "=", "np", ".", "full", "(", "shape", "=", "(", "80", ",", "336", ",", "len", "(", "inner_loop_parameter_values", ")", ",", "4", ")", ",", "fill_value", "=", "np", ".", "nan", ",", "dtype", "=", "'f4'", ")", "# result of the calibration is a histogram with col_index, row_index, plsrDAC value, mean discrete tot, rms discrete tot, mean tot from TDC, rms tot from TDC\r", "progress_bar", "=", "progressbar", ".", "ProgressBar", "(", "widgets", "=", "[", "''", ",", "progressbar", 
".", "Percentage", "(", ")", ",", "' '", ",", "progressbar", ".", "Bar", "(", "marker", "=", "'*'", ",", "left", "=", "'|'", ",", "right", "=", "'|'", ")", ",", "' '", ",", "progressbar", ".", "AdaptiveETA", "(", ")", "]", ",", "maxval", "=", "len", "(", "event_ranges_per_parameter", ")", ",", "term_width", "=", "80", ")", "progress_bar", ".", "start", "(", ")", "for", "index", ",", "(", "actual_scan_parameter_values", ",", "event_start", ",", "event_stop", ")", "in", "enumerate", "(", "event_ranges_per_parameter", ")", ":", "if", "event_stop", "is", "None", ":", "# happens for the last chunk\r", "event_stop", "=", "hits", "[", "-", "1", "]", "[", "'event_number'", "]", "+", "1", "array_index", "=", "np", ".", "searchsorted", "(", "event_numbers", ",", "np", ".", "array", "(", "[", "event_start", ",", "event_stop", "]", ")", ")", "actual_hits", "=", "hits", "[", "array_index", "[", "0", "]", ":", "array_index", "[", "1", "]", "]", "for", "item_index", ",", "item", "in", "enumerate", "(", "scan_parameter_names", ")", ":", "if", "item", "==", "\"column\"", ":", "actual_col", "=", "actual_scan_parameter_values", "[", "item_index", "]", "elif", "item", "==", "\"row\"", ":", "actual_row", "=", "actual_scan_parameter_values", "[", "item_index", "]", "elif", "item", "==", "\"PlsrDAC\"", ":", "plser_dac", "=", "actual_scan_parameter_values", "[", "item_index", "]", "else", ":", "raise", "ValueError", "(", "\"Unknown scan parameter %s\"", "%", "item", ")", "# Only pixel of actual column/row should be in the actual data chunk but since FIFO is not cleared for each scan step due to speed reasons and there might be noisy pixels this is not always the case\r", "n_wrong_pixel", "=", "np", ".", "count_nonzero", "(", "np", ".", "logical_or", "(", "actual_hits", "[", "'column'", "]", "!=", "actual_col", ",", "actual_hits", "[", "'row'", "]", "!=", "actual_row", ")", ")", "if", "n_wrong_pixel", "!=", "0", ":", "logging", ".", "warning", "(", "'%d hit(s) from other pixels for scan parameters %s'", ",", "n_wrong_pixel", ",", "', '", ".", "join", "(", "[", "'%s=%s'", "%", "(", "name", ",", "value", ")", "for", "(", "name", ",", "value", ")", "in", "zip", "(", "scan_parameter_names", ",", "actual_scan_parameter_values", ")", "]", ")", ")", "actual_hits", "=", "actual_hits", "[", "np", ".", "logical_and", "(", "actual_hits", "[", "'column'", "]", "==", "actual_col", ",", "actual_hits", "[", "'row'", "]", "==", "actual_row", ")", "]", "# Only take data from selected pixel\r", "actual_tdc_hits", "=", "actual_hits", "[", "(", "actual_hits", "[", "'event_status'", "]", "&", "0b0000111110011100", ")", "==", "0b0000000100000000", "]", "# only take hits from good events (one TDC word only, no error)\r", "actual_tot_hits", "=", "actual_hits", "[", "(", "actual_hits", "[", "'event_status'", "]", "&", "0b0000100010011100", ")", "==", "0b0000000000000000", "]", "# only take hits from good events for tot\r", "tot", ",", "tdc", "=", "actual_tot_hits", "[", "'tot'", "]", ",", "actual_tdc_hits", "[", "'TDC'", "]", "if", "tdc", ".", "shape", "[", "0", "]", "<", "n_injections", ":", "logging", ".", "info", "(", "'%d of %d expected TDC hits for scan parameters %s'", ",", "tdc", ".", "shape", "[", "0", "]", ",", "n_injections", ",", "', '", ".", "join", "(", "[", "'%s=%s'", "%", "(", "name", ",", "value", ")", "for", "(", "name", ",", "value", ")", "in", "zip", "(", "scan_parameter_names", ",", "actual_scan_parameter_values", ")", "]", ")", ")", "if", "tot", ".", "shape", "[", "0", "]", "<", "n_injections", ":", "logging", ".", 
"info", "(", "'%d of %d expected hits for scan parameters %s'", ",", "tot", ".", "shape", "[", "0", "]", ",", "n_injections", ",", "', '", ".", "join", "(", "[", "'%s=%s'", "%", "(", "name", ",", "value", ")", "for", "(", "name", ",", "value", ")", "in", "zip", "(", "scan_parameter_names", ",", "actual_scan_parameter_values", ")", "]", ")", ")", "inner_loop_scan_parameter_index", "=", "np", ".", "where", "(", "plser_dac", "==", "inner_loop_parameter_values", ")", "[", "0", "]", "[", "0", "]", "# translate the scan parameter value to an index for the result histogram\r", "# numpy mean and std return nan if array is empty\r", "calibration_data", "[", "actual_col", "-", "1", ",", "actual_row", "-", "1", ",", "inner_loop_scan_parameter_index", ",", "0", "]", "=", "np", ".", "mean", "(", "tot", ")", "calibration_data", "[", "actual_col", "-", "1", ",", "actual_row", "-", "1", ",", "inner_loop_scan_parameter_index", ",", "1", "]", "=", "np", ".", "mean", "(", "tdc", ")", "calibration_data", "[", "actual_col", "-", "1", ",", "actual_row", "-", "1", ",", "inner_loop_scan_parameter_index", ",", "2", "]", "=", "np", ".", "std", "(", "tot", ")", "calibration_data", "[", "actual_col", "-", "1", ",", "actual_row", "-", "1", ",", "inner_loop_scan_parameter_index", ",", "3", "]", "=", "np", ".", "std", "(", "tdc", ")", "progress_bar", ".", "update", "(", "index", ")", "progress_bar", ".", "finish", "(", ")", "calibration_data_out", "=", "calibration_data_file", ".", "create_carray", "(", "calibration_data_file", ".", "root", ",", "name", "=", "'HitOrCalibration'", ",", "title", "=", "'Hit OR calibration data'", ",", "atom", "=", "tb", ".", "Atom", ".", "from_dtype", "(", "calibration_data", ".", "dtype", ")", ",", "shape", "=", "calibration_data", ".", "shape", ",", "filters", "=", "tb", ".", "Filters", "(", "complib", "=", "'blosc'", ",", "complevel", "=", "5", ",", "fletcher32", "=", "False", ")", ")", "calibration_data_out", "[", ":", "]", "=", "calibration_data", "calibration_data_out", ".", "attrs", ".", "dimensions", "=", "scan_parameter_names", "calibration_data_out", ".", "attrs", ".", "scan_parameter_values", "=", "inner_loop_parameter_values", "calibration_data_out", ".", "flush", "(", ")", "# with PdfPages(output_filename + \"_calibration.pdf\") as output_pdf:\r", "plot_scurves", "(", "calibration_data", "[", ":", ",", ":", ",", ":", ",", "0", "]", ",", "inner_loop_parameter_values", ",", "\"ToT calibration\"", ",", "\"ToT\"", ",", "15", ",", "\"Charge [PlsrDAC]\"", ",", "filename", "=", "analyze_raw_data", ".", "output_pdf", ")", "plot_scurves", "(", "calibration_data", "[", ":", ",", ":", ",", ":", ",", "1", "]", ",", "inner_loop_parameter_values", ",", "\"TDC calibration\"", ",", "\"TDC [ns]\"", ",", "None", ",", "\"Charge [PlsrDAC]\"", ",", "filename", "=", "analyze_raw_data", ".", "output_pdf", ")", "tot_mean_all_pix", "=", "np", ".", "nanmean", "(", "calibration_data", "[", ":", ",", ":", ",", ":", ",", "0", "]", ",", "axis", "=", "(", "0", ",", "1", ")", ")", "tot_error_all_pix", "=", "np", ".", "nanstd", "(", "calibration_data", "[", ":", ",", ":", ",", ":", ",", "0", "]", ",", "axis", "=", "(", "0", ",", "1", ")", ")", "tdc_mean_all_pix", "=", "np", ".", "nanmean", "(", "calibration_data", "[", ":", ",", ":", ",", ":", ",", "1", "]", ",", "axis", "=", "(", "0", ",", "1", ")", ")", "tdc_error_all_pix", "=", "np", ".", "nanstd", "(", "calibration_data", "[", ":", ",", ":", ",", ":", ",", "1", "]", ",", "axis", "=", "(", "0", ",", "1", ")", ")", "plot_tot_tdc_calibration", "(", 
"scan_parameters", "=", "inner_loop_parameter_values", ",", "tot_mean", "=", "tot_mean_all_pix", ",", "tot_error", "=", "tot_error_all_pix", ",", "tdc_mean", "=", "tdc_mean_all_pix", ",", "tdc_error", "=", "tdc_error_all_pix", ",", "filename", "=", "analyze_raw_data", ".", "output_pdf", ",", "title", "=", "\"Mean charge calibration of %d pixel(s)\"", "%", "np", ".", "count_nonzero", "(", "~", "np", ".", "all", "(", "np", ".", "isnan", "(", "calibration_data", "[", ":", ",", ":", ",", ":", ",", "0", "]", ")", ",", "axis", "=", "2", ")", ")", ")", "# plotting individual pixels\r", "if", "plot_pixel_calibrations", "is", "True", ":", "# selecting pixels with non-nan entries\r", "col_row_non_nan", "=", "np", ".", "nonzero", "(", "~", "np", ".", "all", "(", "np", ".", "isnan", "(", "calibration_data", "[", ":", ",", ":", ",", ":", ",", "0", "]", ")", ",", "axis", "=", "2", ")", ")", "plot_pixel_calibrations", "=", "np", ".", "dstack", "(", "col_row_non_nan", ")", "[", "0", "]", "elif", "plot_pixel_calibrations", "is", "False", ":", "plot_pixel_calibrations", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "np", ".", "int", ")", "else", ":", "# assuming list of column / row tuples\r", "plot_pixel_calibrations", "=", "np", ".", "array", "(", "plot_pixel_calibrations", ")", "-", "1", "# generate index array\r", "pixel_indices", "=", "np", ".", "arange", "(", "plot_pixel_calibrations", ".", "shape", "[", "0", "]", ")", "plot_n_pixels", "=", "10", "# number of pixels at the beginning, center and end of the array\r", "np", ".", "random", ".", "seed", "(", "0", ")", "# select random pixels\r", "if", "pixel_indices", ".", "size", "-", "2", "*", "plot_n_pixels", ">=", "0", ":", "random_pixel_indices", "=", "np", ".", "sort", "(", "np", ".", "random", ".", "choice", "(", "pixel_indices", "[", "plot_n_pixels", ":", "-", "plot_n_pixels", "]", ",", "min", "(", "plot_n_pixels", ",", "pixel_indices", ".", "size", "-", "2", "*", "plot_n_pixels", ")", ",", "replace", "=", "False", ")", ")", "else", ":", "random_pixel_indices", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "np", ".", "int", ")", "selected_pixel_indices", "=", "np", ".", "unique", "(", "np", ".", "hstack", "(", "[", "pixel_indices", "[", ":", "plot_n_pixels", "]", ",", "random_pixel_indices", ",", "pixel_indices", "[", "-", "plot_n_pixels", ":", "]", "]", ")", ")", "# plotting individual pixels\r", "for", "(", "column", ",", "row", ")", "in", "plot_pixel_calibrations", "[", "selected_pixel_indices", "]", ":", "logging", ".", "info", "(", "\"Plotting charge calibration for pixel column \"", "+", "str", "(", "column", "+", "1", ")", "+", "\" / row \"", "+", "str", "(", "row", "+", "1", ")", ")", "tot_mean_single_pix", "=", "calibration_data", "[", "column", ",", "row", ",", ":", ",", "0", "]", "tot_std_single_pix", "=", "calibration_data", "[", "column", ",", "row", ",", ":", ",", "2", "]", "tdc_mean_single_pix", "=", "calibration_data", "[", "column", ",", "row", ",", ":", ",", "1", "]", "tdc_std_single_pix", "=", "calibration_data", "[", "column", ",", "row", ",", ":", ",", "3", "]", "plot_tot_tdc_calibration", "(", "scan_parameters", "=", "inner_loop_parameter_values", ",", "tot_mean", "=", "tot_mean_single_pix", ",", "tot_error", "=", "tot_std_single_pix", ",", "tdc_mean", "=", "tdc_mean_single_pix", ",", "tdc_error", "=", "tdc_std_single_pix", ",", "filename", "=", "analyze_raw_data", ".", "output_pdf", ",", "title", "=", "\"Charge calibration for pixel column \"", "+", "str", "(", "column", "+", "1", ")", "+", "\" / row 
\"", "+", "str", "(", "row", "+", "1", ")", ")" ]
Generate the HitOr calibration file (_calibration.h5) from a raw data file and plot the calibration data.

    Parameters
    ----------
    output_filename : string
        Input raw data file name.
    plot_pixel_calibrations : bool, iterable
        If True, generate additional pixel calibration plots. If a list of column and row tuples (from 1 to 80 / 336), plot the selected pixels.

    Returns
    -------
    nothing
[ "Generating", "HitOr", "calibration", "file", "(", "_calibration", ".", "h5", ")", "from", "raw", "data", "file", "and", "plotting", "of", "calibration", "data", ".", "Parameters", "----------", "output_filename", ":", "string", "Input", "raw", "data", "file", "name", ".", "plot_pixel_calibrations", ":", "bool", "iterable", "If", "True", "genearating", "additional", "pixel", "calibration", "plots", ".", "If", "list", "of", "column", "and", "row", "tuples", "(", "from", "1", "to", "80", "/", "336", ")", "print", "selected", "pixels", ".", "Returns", "-------", "nothing" ]
python
train
83.51938
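Since the record above shows how the HitOrCalibration carray is written, a short reading sketch may help. The input file name is a placeholder; the node name, attributes and axis layout (columns x rows x charge steps x [ToT mean, TDC mean, ToT std, TDC std]) follow the create_carray call in the code.

import numpy as np
import tables as tb

# Placeholder file name; the function writes its output with a
# '_calibration.h5' suffix next to the raw data file.
with tb.open_file('module_0_calibration.h5', mode='r') as calibration_file:
    node = calibration_file.root.HitOrCalibration
    charges = node.attrs.scan_parameter_values  # inner-loop PlsrDAC values
    data = node[:]  # shape: (column, row, charge step, 4)
    tot_mean, tdc_mean = data[..., 0], data[..., 1]
    tot_std, tdc_std = data[..., 2], data[..., 3]
    # NaN marks charge steps without hits for a pixel (np.mean of empty data)
    print(np.nanmean(tdc_mean, axis=(0, 1)))  # mean TDC calibration curve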
nion-software/nionswift
nion/swift/Facade.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/Facade.py#L1080-L1089
def set_dimensional_calibrations(self, dimensional_calibrations: typing.List[CalibrationModule.Calibration]) -> None: """Set the dimensional calibrations. :param dimensional_calibrations: A list of calibrations, must match the dimensions of the data. .. versionadded:: 1.0 Scriptable: Yes """ self.__data_item.set_dimensional_calibrations(dimensional_calibrations)
[ "def", "set_dimensional_calibrations", "(", "self", ",", "dimensional_calibrations", ":", "typing", ".", "List", "[", "CalibrationModule", ".", "Calibration", "]", ")", "->", "None", ":", "self", ".", "__data_item", ".", "set_dimensional_calibrations", "(", "dimensional_calibrations", ")" ]
Set the dimensional calibrations. :param dimensional_calibrations: A list of calibrations, must match the dimensions of the data. .. versionadded:: 1.0 Scriptable: Yes
[ "Set", "the", "dimensional", "calibrations", "." ]
python
train
40.6
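A hedged usage sketch for the API above: the 'api' handle is assumed to come from the host application's scripting context, and the calibration values are invented for illustration; only set_dimensional_calibrations itself is taken from the record.

from nion.data.Calibration import Calibration

# 'api' is an assumption: in a running application it is provided by the
# scripting facade, e.g. through an api broker.
data_item = api.library.data_items[0]
# One calibration per data dimension, matching the data shape
data_item.set_dimensional_calibrations([
    Calibration(offset=0.0, scale=0.5, units='nm'),
    Calibration(offset=0.0, scale=0.5, units='nm'),
])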
bpython/curtsies
curtsies/events.py
https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/curtsies/events.py#L243-L260
def pp_event(seq): """Returns pretty representation of an Event or keypress""" if isinstance(seq, Event): return str(seq) # Get the original sequence back if seq is a pretty name already rev_curses = dict((v, k) for k, v in CURSES_NAMES.items()) rev_curtsies = dict((v, k) for k, v in CURTSIES_NAMES.items()) if seq in rev_curses: seq = rev_curses[seq] elif seq in rev_curtsies: seq = rev_curtsies[seq] pretty = curtsies_name(seq) if pretty != seq: return pretty return repr(seq).lstrip('u')[1:-1]
[ "def", "pp_event", "(", "seq", ")", ":", "if", "isinstance", "(", "seq", ",", "Event", ")", ":", "return", "str", "(", "seq", ")", "# Get the original sequence back if seq is a pretty name already", "rev_curses", "=", "dict", "(", "(", "v", ",", "k", ")", "for", "k", ",", "v", "in", "CURSES_NAMES", ".", "items", "(", ")", ")", "rev_curtsies", "=", "dict", "(", "(", "v", ",", "k", ")", "for", "k", ",", "v", "in", "CURTSIES_NAMES", ".", "items", "(", ")", ")", "if", "seq", "in", "rev_curses", ":", "seq", "=", "rev_curses", "[", "seq", "]", "elif", "seq", "in", "rev_curtsies", ":", "seq", "=", "rev_curtsies", "[", "seq", "]", "pretty", "=", "curtsies_name", "(", "seq", ")", "if", "pretty", "!=", "seq", ":", "return", "pretty", "return", "repr", "(", "seq", ")", ".", "lstrip", "(", "'u'", ")", "[", "1", ":", "-", "1", "]" ]
Returns pretty representation of an Event or keypress
[ "Returns", "pretty", "representation", "of", "an", "Event", "or", "keypress" ]
python
train
30.833333
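A quick check of the helper above. The pretty names come from curtsies' CURTSIES_NAMES table, so the '<UP>' output is an expectation rather than a guarantee; the fall-through case uses the cleaned-up repr, as the last line of the function shows.

from curtsies.events import pp_event

print(pp_event(u'\x1b[A'))  # arrow-key escape sequence; expected pretty name is '<UP>'
print(pp_event(u'<UP>'))    # a pretty name is mapped back to its sequence and round-trips
print(pp_event(u'a'))       # unnamed sequences fall back to their repr, here: a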
Alignak-monitoring/alignak
alignak/objects/schedulingitem.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/schedulingitem.py#L2562-L2610
def create_business_rules(self, hosts, services, hostgroups, servicegroups,
                              macromodulations, timeperiods, running=False):
        # pylint: disable=too-many-locals
        """Create business rules if necessary (cmd contains bp_rule)

        :param hosts: Hosts object to look for objects
        :type hosts: alignak.objects.host.Hosts
        :param services: Services object to look for objects
        :type services: alignak.objects.service.Services
        :param hostgroups: Hostgroups object to look for objects
        :param servicegroups: Servicegroups object to look for objects
        :param macromodulations: macro modulations used to resolve macros in the rule
        :param timeperiods: time periods used to resolve macros in the rule
        :param running: flag used in eval_cor_pattern function
        :type running: bool
        :return: None
        """
        cmdcall = getattr(self, 'check_command', None)

        # If we do not have a command, we bailout
        if cmdcall is None:
            return

        # we get our base command, like
        # bp_rule!(host,svc & host, svc) -> bp_rule
        cmd = cmdcall.call
        elts = cmd.split('!')
        base_cmd = elts[0]

        # If it's bp_rule, we got a rule :)
        if base_cmd == 'bp_rule':
            self.got_business_rule = True
            rule = ''
            if len(elts) >= 2:
                rule = '!'.join(elts[1:])

            # Only (re-)evaluate the business rule if it has never been
            # evaluated before, or it contains a macro.
            if re.match(r"\$[\w\d_-]+\$", rule) or self.business_rule is None:
                macroresolver = MacroResolver()
                data = self.get_data_for_checks(hosts)
                rule = macroresolver.resolve_simple_macros_in_string(rule, data,
                                                                     macromodulations,
                                                                     timeperiods)
                prev = getattr(self, "processed_business_rule", "")

                if rule == prev:
                    # Business rule did not change (no macro was modulated)
                    return

                fact = DependencyNodeFactory(self)
                node = fact.eval_cor_pattern(rule, hosts, services, hostgroups,
                                             servicegroups, running)
                self.processed_business_rule = rule
                self.business_rule = node
[ "def", "create_business_rules", "(", "self", ",", "hosts", ",", "services", ",", "hostgroups", ",", "servicegroups", ",", "macromodulations", ",", "timeperiods", ",", "running", "=", "False", ")", ":", "# pylint: disable=too-many-locals", "cmdcall", "=", "getattr", "(", "self", ",", "'check_command'", ",", "None", ")", "# If we do not have a command, we bailout", "if", "cmdcall", "is", "None", ":", "return", "# we get our base command, like", "# bp_rule!(host,svc & host, svc) -> bp_rule", "cmd", "=", "cmdcall", ".", "call", "elts", "=", "cmd", ".", "split", "(", "'!'", ")", "base_cmd", "=", "elts", "[", "0", "]", "# If it's bp_rule, we got a rule :)", "if", "base_cmd", "==", "'bp_rule'", ":", "self", ".", "got_business_rule", "=", "True", "rule", "=", "''", "if", "len", "(", "elts", ")", ">=", "2", ":", "rule", "=", "'!'", ".", "join", "(", "elts", "[", "1", ":", "]", ")", "# Only (re-)evaluate the business rule if it has never been", "# evaluated before, or it contains a macro.", "if", "re", ".", "match", "(", "r\"\\$[\\w\\d_-]+\\$\"", ",", "rule", ")", "or", "self", ".", "business_rule", "is", "None", ":", "macroresolver", "=", "MacroResolver", "(", ")", "data", "=", "self", ".", "get_data_for_checks", "(", "hosts", ")", "rule", "=", "macroresolver", ".", "resolve_simple_macros_in_string", "(", "rule", ",", "data", ",", "macromodulations", ",", "timeperiods", ")", "prev", "=", "getattr", "(", "self", ",", "\"processed_business_rule\"", ",", "\"\"", ")", "if", "rule", "==", "prev", ":", "# Business rule did not changed (no macro was modulated)", "return", "fact", "=", "DependencyNodeFactory", "(", "self", ")", "node", "=", "fact", ".", "eval_cor_pattern", "(", "rule", ",", "hosts", ",", "services", ",", "hostgroups", ",", "servicegroups", ",", "running", ")", "self", ".", "processed_business_rule", "=", "rule", "self", ".", "business_rule", "=", "node" ]
Create business rules if necessary (cmd contains bp_rule)

        :param hosts: Hosts object to look for objects
        :type hosts: alignak.objects.host.Hosts
        :param services: Services object to look for objects
        :type services: alignak.objects.service.Services
        :param hostgroups: Hostgroups object to look for objects
        :param servicegroups: Servicegroups object to look for objects
        :param macromodulations: macro modulations used to resolve macros in the rule
        :param timeperiods: time periods used to resolve macros in the rule
        :param running: flag used in eval_cor_pattern function
        :type running: bool
        :return: None
[ "Create", "business", "rules", "if", "necessary", "(", "cmd", "contains", "bp_rule", ")" ]
python
train
44.244898
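The objects this method needs require a full Alignak configuration, but the command-parsing step can be illustrated stand-alone; the host and service names below are invented.

# How a business-rule check_command is recognized and split (names invented)
cmd = 'bp_rule!(websrv1,HTTP & websrv2,HTTP)'
elts = cmd.split('!')
base_cmd = elts[0]
assert base_cmd == 'bp_rule'
rule = '!'.join(elts[1:])
print(rule)  # this string is what eval_cor_pattern turns into a dependency tree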
Gandi/gandi.cli
gandi/cli/commands/mail.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/mail.py#L140-L158
def purge(gandi, email, background, force, alias): """Purge a mailbox.""" login, domain = email if alias: if not force: proceed = click.confirm('Are you sure to purge all aliases for ' 'mailbox %s@%s ?' % (login, domain)) if not proceed: return result = gandi.mail.set_alias(domain, login, []) else: if not force: proceed = click.confirm('Are you sure to purge mailbox %s@%s ?' % (login, domain)) if not proceed: return result = gandi.mail.purge(domain, login, background) return result
[ "def", "purge", "(", "gandi", ",", "email", ",", "background", ",", "force", ",", "alias", ")", ":", "login", ",", "domain", "=", "email", "if", "alias", ":", "if", "not", "force", ":", "proceed", "=", "click", ".", "confirm", "(", "'Are you sure to purge all aliases for '", "'mailbox %s@%s ?'", "%", "(", "login", ",", "domain", ")", ")", "if", "not", "proceed", ":", "return", "result", "=", "gandi", ".", "mail", ".", "set_alias", "(", "domain", ",", "login", ",", "[", "]", ")", "else", ":", "if", "not", "force", ":", "proceed", "=", "click", ".", "confirm", "(", "'Are you sure to purge mailbox %s@%s ?'", "%", "(", "login", ",", "domain", ")", ")", "if", "not", "proceed", ":", "return", "result", "=", "gandi", ".", "mail", ".", "purge", "(", "domain", ",", "login", ",", "background", ")", "return", "result" ]
Purge a mailbox.
[ "Purge", "a", "mailbox", "." ]
python
train
35.421053
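A runnable toy reproducing the confirmation gate used above: click.confirm prompts the user, and the command returns early unless the action is confirmed or --force is given. The command name and messages are illustrative, not part of gandi.cli.

import click

@click.command()
@click.option('--force', is_flag=True, help='Skip the confirmation prompt.')
def demo_purge(force):
    # Same pattern as the command above: confirm unless forced
    if not force:
        if not click.confirm('Are you sure to purge mailbox admin@example.com ?'):
            return
    click.echo('purging...')

if __name__ == '__main__':
    demo_purge()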
guyzmo/git-repo
git_repo/services/service.py
https://github.com/guyzmo/git-repo/blob/2974c3f52bc64fa8a467ac2b0e9a485ba7ed333b/git_repo/services/service.py#L340-L352
def fetch(self, remote, branch, local_branch=None, force=False):
        '''Fetch a repository

        :param remote: git-remote instance
        :param branch: name of the branch to fetch
        '''
        pb = ProgressBar()
        pb.setup(self.name)
        if local_branch:
            branch = ':'.join([branch, local_branch])
        remote.fetch(branch, update_head_ok=True, force=force, progress=pb)
        print()
[ "def", "fetch", "(", "self", ",", "remote", ",", "branch", ",", "local_branch", "=", "None", ",", "force", "=", "False", ")", ":", "pb", "=", "ProgressBar", "(", ")", "pb", ".", "setup", "(", "self", ".", "name", ")", "if", "local_branch", ":", "branch", "=", "':'", ".", "join", "(", "[", "branch", ",", "local_branch", "]", ")", "remote", ".", "fetch", "(", "branch", ",", "update_head_ok", "=", "True", ",", "force", "=", "force", ",", "progress", "=", "pb", ")", "print", "(", ")" ]
Fetch a repository

        :param remote: git-remote instance
        :param branch: name of the branch to fetch
[ "Pull", "a", "repository", ":", "param", "remote", ":", "git", "-", "remote", "instance", ":", "param", "branch", ":", "name", "of", "the", "branch", "to", "pull" ]
python
train
32
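Outside the service wrapper, the same refspec trick can be shown directly with GitPython; the repository path, remote name and branch names are assumptions.

from git import Repo

repo = Repo('.')               # assumes the current directory is a clone
remote = repo.remote('origin')
# 'branch:local_branch' fetches the remote branch into a local ref, which is
# what the method above builds with ':'.join([branch, local_branch])
remote.fetch('master:refs/heads/imported', update_head_ok=True)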
spyder-ide/spyder
spyder/plugins/plots/widgets/figurebrowser.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/plots/widgets/figurebrowser.py#L698-L704
def set_current_thumbnail(self, thumbnail): """Set the currently selected thumbnail.""" self.current_thumbnail = thumbnail self.figure_viewer.load_figure( thumbnail.canvas.fig, thumbnail.canvas.fmt) for thumbnail in self._thumbnails: thumbnail.highlight_canvas(thumbnail == self.current_thumbnail)
[ "def", "set_current_thumbnail", "(", "self", ",", "thumbnail", ")", ":", "self", ".", "current_thumbnail", "=", "thumbnail", "self", ".", "figure_viewer", ".", "load_figure", "(", "thumbnail", ".", "canvas", ".", "fig", ",", "thumbnail", ".", "canvas", ".", "fmt", ")", "for", "thumbnail", "in", "self", ".", "_thumbnails", ":", "thumbnail", ".", "highlight_canvas", "(", "thumbnail", "==", "self", ".", "current_thumbnail", ")" ]
Set the currently selected thumbnail.
[ "Set", "the", "currently", "selected", "thumbnail", "." ]
python
train
50.142857
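The widget itself needs a running Qt application, but the single-selection pattern it implements can be shown as a runnable toy; the class and names below are invented.

class Thumb(object):
    def __init__(self, name):
        self.name = name
        self.highlighted = False

    def highlight_canvas(self, highlight):
        self.highlighted = highlight

thumbnails = [Thumb('fig1'), Thumb('fig2'), Thumb('fig3')]
current = thumbnails[1]
# Every thumbnail is told whether it is the current one, exactly like the
# loop at the end of set_current_thumbnail
for thumb in thumbnails:
    thumb.highlight_canvas(thumb is current)
print([(t.name, t.highlighted) for t in thumbnails])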
lablup/backend.ai-client-py
src/ai/backend/client/cli/admin/images.py
https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/cli/admin/images.py#L72-L83
def dealias_image(alias): '''Remove an image alias.''' with Session() as session: try: result = session.Image.dealiasImage(alias) except Exception as e: print_error(e) sys.exit(1) if result['ok']: print("alias {0} removed.".format(alias)) else: print(result['msg'])
[ "def", "dealias_image", "(", "alias", ")", ":", "with", "Session", "(", ")", "as", "session", ":", "try", ":", "result", "=", "session", ".", "Image", ".", "dealiasImage", "(", "alias", ")", "except", "Exception", "as", "e", ":", "print_error", "(", "e", ")", "sys", ".", "exit", "(", "1", ")", "if", "result", "[", "'ok'", "]", ":", "print", "(", "\"alias {0} removed.\"", ".", "format", "(", "alias", ")", ")", "else", ":", "print", "(", "result", "[", "'msg'", "]", ")" ]
Remove an image alias.
[ "Remove", "an", "image", "alias", "." ]
python
train
29.5
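The same operation can be done programmatically, assuming client credentials are configured in the environment; the Session import path and the alias name are assumptions, while session.Image.dealiasImage is taken from the record above.

import sys

from ai.backend.client.session import Session  # assumed import path

with Session() as session:
    try:
        result = session.Image.dealiasImage('python-old')  # invented alias
    except Exception as e:
        print(e)
        sys.exit(1)
    print(result['ok'], result.get('msg', ''))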
wbond/asn1crypto
asn1crypto/keys.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/keys.py#L523-L588
def wrap(cls, private_key, algorithm): """ Wraps a private key in a PrivateKeyInfo structure :param private_key: A byte string or Asn1Value object of the private key :param algorithm: A unicode string of "rsa", "dsa" or "ec" :return: A PrivateKeyInfo object """ if not isinstance(private_key, byte_cls) and not isinstance(private_key, Asn1Value): raise TypeError(unwrap( ''' private_key must be a byte string or Asn1Value, not %s ''', type_name(private_key) )) if algorithm == 'rsa': if not isinstance(private_key, RSAPrivateKey): private_key = RSAPrivateKey.load(private_key) params = Null() elif algorithm == 'dsa': if not isinstance(private_key, DSAPrivateKey): private_key = DSAPrivateKey.load(private_key) params = DSAParams() params['p'] = private_key['p'] params['q'] = private_key['q'] params['g'] = private_key['g'] public_key = private_key['public_key'] private_key = private_key['private_key'] elif algorithm == 'ec': if not isinstance(private_key, ECPrivateKey): private_key = ECPrivateKey.load(private_key) else: private_key = private_key.copy() params = private_key['parameters'] del private_key['parameters'] else: raise ValueError(unwrap( ''' algorithm must be one of "rsa", "dsa", "ec", not %s ''', repr(algorithm) )) private_key_algo = PrivateKeyAlgorithm() private_key_algo['algorithm'] = PrivateKeyAlgorithmId(algorithm) private_key_algo['parameters'] = params container = cls() container._algorithm = algorithm container['version'] = Integer(0) container['private_key_algorithm'] = private_key_algo container['private_key'] = private_key # Here we save the DSA public key if possible since it is not contained # within the PKCS#8 structure for a DSA key if algorithm == 'dsa': container._public_key = public_key return container
[ "def", "wrap", "(", "cls", ",", "private_key", ",", "algorithm", ")", ":", "if", "not", "isinstance", "(", "private_key", ",", "byte_cls", ")", "and", "not", "isinstance", "(", "private_key", ",", "Asn1Value", ")", ":", "raise", "TypeError", "(", "unwrap", "(", "'''\n private_key must be a byte string or Asn1Value, not %s\n '''", ",", "type_name", "(", "private_key", ")", ")", ")", "if", "algorithm", "==", "'rsa'", ":", "if", "not", "isinstance", "(", "private_key", ",", "RSAPrivateKey", ")", ":", "private_key", "=", "RSAPrivateKey", ".", "load", "(", "private_key", ")", "params", "=", "Null", "(", ")", "elif", "algorithm", "==", "'dsa'", ":", "if", "not", "isinstance", "(", "private_key", ",", "DSAPrivateKey", ")", ":", "private_key", "=", "DSAPrivateKey", ".", "load", "(", "private_key", ")", "params", "=", "DSAParams", "(", ")", "params", "[", "'p'", "]", "=", "private_key", "[", "'p'", "]", "params", "[", "'q'", "]", "=", "private_key", "[", "'q'", "]", "params", "[", "'g'", "]", "=", "private_key", "[", "'g'", "]", "public_key", "=", "private_key", "[", "'public_key'", "]", "private_key", "=", "private_key", "[", "'private_key'", "]", "elif", "algorithm", "==", "'ec'", ":", "if", "not", "isinstance", "(", "private_key", ",", "ECPrivateKey", ")", ":", "private_key", "=", "ECPrivateKey", ".", "load", "(", "private_key", ")", "else", ":", "private_key", "=", "private_key", ".", "copy", "(", ")", "params", "=", "private_key", "[", "'parameters'", "]", "del", "private_key", "[", "'parameters'", "]", "else", ":", "raise", "ValueError", "(", "unwrap", "(", "'''\n algorithm must be one of \"rsa\", \"dsa\", \"ec\", not %s\n '''", ",", "repr", "(", "algorithm", ")", ")", ")", "private_key_algo", "=", "PrivateKeyAlgorithm", "(", ")", "private_key_algo", "[", "'algorithm'", "]", "=", "PrivateKeyAlgorithmId", "(", "algorithm", ")", "private_key_algo", "[", "'parameters'", "]", "=", "params", "container", "=", "cls", "(", ")", "container", ".", "_algorithm", "=", "algorithm", "container", "[", "'version'", "]", "=", "Integer", "(", "0", ")", "container", "[", "'private_key_algorithm'", "]", "=", "private_key_algo", "container", "[", "'private_key'", "]", "=", "private_key", "# Here we save the DSA public key if possible since it is not contained", "# within the PKCS#8 structure for a DSA key", "if", "algorithm", "==", "'dsa'", ":", "container", ".", "_public_key", "=", "public_key", "return", "container" ]
Wraps a private key in a PrivateKeyInfo structure :param private_key: A byte string or Asn1Value object of the private key :param algorithm: A unicode string of "rsa", "dsa" or "ec" :return: A PrivateKeyInfo object
[ "Wraps", "a", "private", "key", "in", "a", "PrivateKeyInfo", "structure" ]
python
train
35.166667
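A short sketch of the classmethod above: wrapping a DER-encoded RSA key into a PKCS#8 structure. The key file path is a placeholder; the bytes must be a valid RSAPrivateKey encoding for the load to succeed.

from asn1crypto.keys import PrivateKeyInfo

with open('rsa_private_key.der', 'rb') as key_file:  # placeholder path
    rsa_der = key_file.read()

pkcs8 = PrivateKeyInfo.wrap(rsa_der, 'rsa')
print(pkcs8['private_key_algorithm']['algorithm'].native)  # 'rsa'
pkcs8_der = pkcs8.dump()  # serialize the PKCS#8 structure back to DER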
zooniverse/panoptes-python-client
panoptes_client/collection.py
https://github.com/zooniverse/panoptes-python-client/blob/138d93cb03378501a8d349428e381ad73f928680/panoptes_client/collection.py#L28-L46
def find(cls, id='', slug=None): """ Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug as well as ID. Examples:: collection_1234 = Collection.find(1234) my_collection = Collection.find(slug="example/my-collection") """ if not id and not slug: return None try: return cls.where(id=id, slug=slug).next() except StopIteration: raise PanoptesAPIException( "Could not find collection with slug='{}'".format(slug) )
[ "def", "find", "(", "cls", ",", "id", "=", "''", ",", "slug", "=", "None", ")", ":", "if", "not", "id", "and", "not", "slug", ":", "return", "None", "try", ":", "return", "cls", ".", "where", "(", "id", "=", "id", ",", "slug", "=", "slug", ")", ".", "next", "(", ")", "except", "StopIteration", ":", "raise", "PanoptesAPIException", "(", "\"Could not find collection with slug='{}'\"", ".", "format", "(", "slug", ")", ")" ]
Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug as well as ID. Examples:: collection_1234 = Collection.find(1234) my_collection = Collection.find(slug="example/my-collection")
[ "Similar", "to", ":", "py", ":", "meth", ":", ".", "PanoptesObject", ".", "find", "but", "allows", "lookup", "by", "slug", "as", "well", "as", "ID", "." ]
python
train
29.789474
akatrevorjay/mainline
mainline/di.py
https://github.com/akatrevorjay/mainline/blob/8aa7f6ef6cad4051fcd5f8d43d2ba8cdad681986/mainline/di.py#L93-L111
def iresolve(self, *keys): ''' Iterates over resolved instances for given provider keys. :param keys: Provider keys :type keys: tuple :return: Iterator of resolved instances :rtype: generator ''' for key in keys: missing = self.get_missing_deps(key) if missing: raise UnresolvableError("Missing dependencies for %s: %s" % (key, missing)) provider = self._providers.get(key) if not provider: raise UnresolvableError("Provider does not exist for %s" % key) yield provider()
[ "def", "iresolve", "(", "self", ",", "*", "keys", ")", ":", "for", "key", "in", "keys", ":", "missing", "=", "self", ".", "get_missing_deps", "(", "key", ")", "if", "missing", ":", "raise", "UnresolvableError", "(", "\"Missing dependencies for %s: %s\"", "%", "(", "key", ",", "missing", ")", ")", "provider", "=", "self", ".", "_providers", ".", "get", "(", "key", ")", "if", "not", "provider", ":", "raise", "UnresolvableError", "(", "\"Provider does not exist for %s\"", "%", "key", ")", "yield", "provider", "(", ")" ]
Iterates over resolved instances for given provider keys. :param keys: Provider keys :type keys: tuple :return: Iterator of resolved instances :rtype: generator
[ "Iterates", "over", "resolved", "instances", "for", "given", "provider", "keys", "." ]
python
train
32.210526
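A runnable sketch against mainline's container. Only iresolve appears in the record above, so the register_factory decorator follows the project's documented registration API and is an assumption here.

from mainline import Di

di = Di()

@di.register_factory('apple')  # assumed registration API
def apple_factory():
    return 'apple instance'

# iresolve is a generator: one resolved instance per requested key
for instance in di.iresolve('apple'):
    print(instance)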