Dataset columns:

repo: stringlengths 7 to 55
path: stringlengths 4 to 223
url: stringlengths 87 to 315
code: stringlengths 75 to 104k
code_tokens: list
docstring: stringlengths 1 to 46.9k
docstring_tokens: list
language: stringclasses (1 value)
partition: stringclasses (3 values)
avg_line_len: float64, 7.91 to 980
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/gapic/firestore_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/gapic/firestore_client.py#L866-L942
def begin_transaction(
    self,
    database,
    options_=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Starts a new transaction.

    Example:
        >>> from google.cloud import firestore_v1beta1
        >>>
        >>> client = firestore_v1beta1.FirestoreClient()
        >>>
        >>> database = client.database_root_path('[PROJECT]', '[DATABASE]')
        >>>
        >>> response = client.begin_transaction(database)

    Args:
        database (str): The database name. In the format:
            ``projects/{project_id}/databases/{database_id}``.
        options_ (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]):
            The options for the transaction. Defaults to a read-write transaction.

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions`
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.firestore_v1beta1.types.BeginTransactionResponse` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    if "begin_transaction" not in self._inner_api_calls:
        self._inner_api_calls[
            "begin_transaction"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.begin_transaction,
            default_retry=self._method_configs["BeginTransaction"].retry,
            default_timeout=self._method_configs["BeginTransaction"].timeout,
            client_info=self._client_info,
        )

    request = firestore_pb2.BeginTransactionRequest(
        database=database, options=options_
    )
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    try:
        routing_header = [("database", database)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)

    return self._inner_api_calls["begin_transaction"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
[ "def", "begin_transaction", "(", "self", ",", "database", ",", "options_", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"begin_transaction\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"begin_transaction\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "begin_transaction", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"BeginTransaction\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"BeginTransaction\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "firestore_pb2", ".", "BeginTransactionRequest", "(", "database", "=", "database", ",", "options", "=", "options_", ")", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "]", "metadata", "=", "list", "(", "metadata", ")", "try", ":", "routing_header", "=", "[", "(", "\"database\"", ",", "database", ")", "]", "except", "AttributeError", ":", "pass", "else", ":", "routing_metadata", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "routing_header", ".", "to_grpc_metadata", "(", "routing_header", ")", "metadata", ".", "append", "(", "routing_metadata", ")", "return", "self", ".", "_inner_api_calls", "[", "\"begin_transaction\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
Starts a new transaction. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') >>> >>> response = client.begin_transaction(database) Args: database (str): The database name. In the format: ``projects/{project_id}/databases/{database_id}``. options_ (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): The options for the transaction. Defaults to a read-write transaction. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.firestore_v1beta1.types.BeginTransactionResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Starts", "a", "new", "transaction", "." ]
python
train
41.571429
portfors-lab/sparkle
sparkle/gui/qprotocol.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/qprotocol.py#L211-L222
def cursor(self, pos):
    """Returns a line at the nearest row split between tests.

    Re-implemented from
    :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.cursor>`
    """
    row = self.indexAt(pos).row()
    if row == -1:
        row = self.model().rowCount()
    row_height = self.rowHeight(0)
    y = row_height * row
    x = self.width()
    return QtCore.QLine(0, y, x, y)
[ "def", "cursor", "(", "self", ",", "pos", ")", ":", "row", "=", "self", ".", "indexAt", "(", "pos", ")", ".", "row", "(", ")", "if", "row", "==", "-", "1", ":", "row", "=", "self", ".", "model", "(", ")", ".", "rowCount", "(", ")", "row_height", "=", "self", ".", "rowHeight", "(", "0", ")", "y", "=", "row_height", "*", "row", "x", "=", "self", ".", "width", "(", ")", "return", "QtCore", ".", "QLine", "(", "0", ",", "y", ",", "x", ",", "y", ")" ]
Returns a line at the nearest row split between tests. Re-implemented from :meth:`AbstractDragView<sparkle.gui.abstract_drag_view.AbstractDragView.cursor>`
[ "Returns", "a", "line", "at", "the", "nearest", "row", "split", "between", "tests", "." ]
python
train
35.75
petl-developers/petl
petl/transform/regex.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/regex.py#L140-L172
def split(table, field, pattern, newfields=None, include_original=False,
          maxsplit=0, flags=0):
    """
    Add one or more new fields with values generated by splitting an
    existing value around occurrences of a regular expression. E.g.::

        >>> import petl as etl
        >>> table1 = [['id', 'variable', 'value'],
        ...           ['1', 'parad1', '12'],
        ...           ['2', 'parad2', '15'],
        ...           ['3', 'tempd1', '18'],
        ...           ['4', 'tempd2', '19']]
        >>> table2 = etl.split(table1, 'variable', 'd', ['variable', 'day'])
        >>> table2
        +-----+-------+----------+-----+
        | id  | value | variable | day |
        +=====+=======+==========+=====+
        | '1' | '12'  | 'para'   | '1' |
        +-----+-------+----------+-----+
        | '2' | '15'  | 'para'   | '2' |
        +-----+-------+----------+-----+
        | '3' | '18'  | 'temp'   | '1' |
        +-----+-------+----------+-----+
        | '4' | '19'  | 'temp'   | '2' |
        +-----+-------+----------+-----+

    By default the field on which the split is performed is omitted. It can
    be included using the `include_original` argument.

    """
    return SplitView(table, field, pattern, newfields, include_original,
                     maxsplit, flags)
[ "def", "split", "(", "table", ",", "field", ",", "pattern", ",", "newfields", "=", "None", ",", "include_original", "=", "False", ",", "maxsplit", "=", "0", ",", "flags", "=", "0", ")", ":", "return", "SplitView", "(", "table", ",", "field", ",", "pattern", ",", "newfields", ",", "include_original", ",", "maxsplit", ",", "flags", ")" ]
Add one or more new fields with values generated by splitting an existing value around occurrences of a regular expression. E.g.:: >>> import petl as etl >>> table1 = [['id', 'variable', 'value'], ... ['1', 'parad1', '12'], ... ['2', 'parad2', '15'], ... ['3', 'tempd1', '18'], ... ['4', 'tempd2', '19']] >>> table2 = etl.split(table1, 'variable', 'd', ['variable', 'day']) >>> table2 +-----+-------+----------+-----+ | id | value | variable | day | +=====+=======+==========+=====+ | '1' | '12' | 'para' | '1' | +-----+-------+----------+-----+ | '2' | '15' | 'para' | '2' | +-----+-------+----------+-----+ | '3' | '18' | 'temp' | '1' | +-----+-------+----------+-----+ | '4' | '19' | 'temp' | '2' | +-----+-------+----------+-----+ By default the field on which the split is performed is omitted. It can be included using the `include_original` argument.
[ "Add", "one", "or", "more", "new", "fields", "with", "values", "generated", "by", "splitting", "an", "existing", "value", "around", "occurrences", "of", "a", "regular", "expression", ".", "E", ".", "g", ".", "::" ]
python
train
38.848485
portfors-lab/sparkle
sparkle/gui/controlwindow.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/controlwindow.py#L295-L397
def loadInputs(self, fname):
    """Load previously saved input values, and load them to GUI widgets

    :param fname: file path where stashed input values are stored
    :type fname: str
    """
    inputsfname = os.path.join(systools.get_appdir(), fname)
    try:
        with open(inputsfname, 'r') as jf:
            inputsdict = json.load(jf)
    except:
        logger = logging.getLogger('main')
        logger.warning("Unable to load app data from file: {}".format(inputsfname))
        inputsdict = {}

    # self.display.spiketracePlot.setThreshold(inputsdict.get('threshold', 0.5))
    self._thesholds = inputsdict.get('threshold', {})
    self.stashedAisr = inputsdict.get('aifs', 100000)
    self.ui.aifsSpnbx.setValue(self.stashedAisr)
    self.ui.windowszSpnbx.setValue(inputsdict.get('windowsz', 0.1))
    self.ui.binszSpnbx.setValue(inputsdict.get('binsz', 0.005))
    self.saveformat = inputsdict.get('saveformat', 'hdf5')
    self.ui.exploreStimEditor.setReps((inputsdict.get('ex_nreps', 5)))
    self.ui.reprateSpnbx.setValue(inputsdict.get('reprate', 1))
    # self.display.spiketracePlot.setRasterBounds(inputsdict.get('raster_bounds', (0.5,1)))
    self.specArgs = inputsdict.get('specargs',
                                   {u'nfft': 512, u'window': u'hanning', u'overlap': 90,
                                    'colormap': {'lut': None, 'state': None, 'levels': None}})
    # self.display.setSpecArgs(**self.specArgs)
    SpecWidget.setSpecArgs(**self.specArgs)
    self.viewSettings = inputsdict.get('viewSettings',
                                       {'fontsz': 10, 'display_attributes': {}})
    self.ui.stimDetails.setDisplayAttributes(self.viewSettings['display_attributes'])
    font = QtGui.QFont()
    font.setPointSize(self.viewSettings['fontsz'])
    QtGui.QApplication.setFont(font)
    self.ui.calibrationWidget.ui.nrepsSpnbx.setValue(inputsdict.get('calreps', 5))
    self.calvals = inputsdict.get('calvals',
                                  {'calf': 20000, 'caldb': 100, 'calv': 0.1,
                                   'use_calfile': False, 'frange': (5000, 1e5),
                                   'calname': ''})
    self.calvals['use_calfile'] = False
    self.calvals['calname'] = ''
    self.ui.refDbSpnbx.setValue(self.calvals['caldb'])
    self.ui.mphoneSensSpnbx.setValue(inputsdict.get('mphonesens', 0.004))
    self.ui.mphoneDBSpnbx.setValue(MPHONE_CALDB)
    # self.ui.mphoneDBSpnbx.setValue(inputsdict.get('mphonedb', 94))
    Vocalization.paths = inputsdict.get('vocalpaths', [])

    # load the previous session's scaling
    self.tscale = inputsdict.get('tscale', SmartSpinBox.MilliSeconds)
    self.fscale = inputsdict.get('fscale', SmartSpinBox.kHz)
    try:
        self.updateUnitLabels(self.tscale, self.fscale)
    except:
        self.tscale = 'ms'
        self.fscale = 'kHz'
        self.updateUnitLabels(self.tscale, self.fscale)

    cal_template = inputsdict.get('calparams', None)
    if cal_template is not None:
        try:
            self.acqmodel.load_calibration_template(cal_template)
        except:
            logger = logging.getLogger('main')
            logger.exception("Unable to load previous calibration settings")
    else:
        logger = logging.getLogger('main')
        logger.debug('No saved calibration stimuli inputs')

    if 'explorestims' in inputsdict:
        self.ui.exploreStimEditor.loadTemplate(inputsdict['explorestims'])
    else:
        logger = logging.getLogger('main')
        logger.debug('No saved explore stimuli inputs')

    # load the previous session's Tuning Curve defaults
    TCFactory.defaultInputs.update(inputsdict.get('tuning_curve', TCFactory.defaultInputs))

    # set defaults then merge
    self.advanced_options = {'device_name': '',
                             'max_voltage': 1.5,
                             'device_max_voltage': 10.0,
                             'volt_amp_conversion': 0.1,
                             'use_attenuator': False}
    if 'advanced_options' in inputsdict:
        self.advanced_options.update(inputsdict['advanced_options'])
    StimulusModel.setMaxVoltage(self.advanced_options['max_voltage'],
                                self.advanced_options['device_max_voltage'])
    self.display.setAmpConversionFactor(self.advanced_options['volt_amp_conversion'])
    if self.advanced_options['use_attenuator']:
        self.acqmodel.attenuator_connection(True)
    else:
        self.acqmodel.attenuator_connection(False)

    self._aichans = inputsdict.get('aichans', [])
    self._aichan_details = inputsdict.get('aichan_details', {})
    for name, deets in self._aichan_details.items():
        # make sure all fields are present in the details for each channel
        self._aichan_details[name]['threshold'] = deets.get('threshold', 5)
        self._aichan_details[name]['polarity'] = deets.get('polarity', 1)
        self._aichan_details[name]['raster_bounds'] = deets.get('raster_bounds', (0.5, 0.9))
        self._aichan_details[name]['abs'] = deets.get('abs', True)
    self.reset_device_channels()

    stim_defaults = inputsdict.get('stim_view_defaults', {})
    for name, state in stim_defaults.items():
        StimulusView.updateDefaults(name, state)
[ "def", "loadInputs", "(", "self", ",", "fname", ")", ":", "inputsfname", "=", "os", ".", "path", ".", "join", "(", "systools", ".", "get_appdir", "(", ")", ",", "fname", ")", "try", ":", "with", "open", "(", "inputsfname", ",", "'r'", ")", "as", "jf", ":", "inputsdict", "=", "json", ".", "load", "(", "jf", ")", "except", ":", "logger", "=", "logging", ".", "getLogger", "(", "'main'", ")", "logger", ".", "warning", "(", "\"Unable to load app data from file: {}\"", ".", "format", "(", "inputsfname", ")", ")", "inputsdict", "=", "{", "}", "# self.display.spiketracePlot.setThreshold(inputsdict.get('threshold', 0.5))", "self", ".", "_thesholds", "=", "inputsdict", ".", "get", "(", "'threshold'", ",", "{", "}", ")", "self", ".", "stashedAisr", "=", "inputsdict", ".", "get", "(", "'aifs'", ",", "100000", ")", "self", ".", "ui", ".", "aifsSpnbx", ".", "setValue", "(", "self", ".", "stashedAisr", ")", "self", ".", "ui", ".", "windowszSpnbx", ".", "setValue", "(", "inputsdict", ".", "get", "(", "'windowsz'", ",", "0.1", ")", ")", "self", ".", "ui", ".", "binszSpnbx", ".", "setValue", "(", "inputsdict", ".", "get", "(", "'binsz'", ",", "0.005", ")", ")", "self", ".", "saveformat", "=", "inputsdict", ".", "get", "(", "'saveformat'", ",", "'hdf5'", ")", "self", ".", "ui", ".", "exploreStimEditor", ".", "setReps", "(", "(", "inputsdict", ".", "get", "(", "'ex_nreps'", ",", "5", ")", ")", ")", "self", ".", "ui", ".", "reprateSpnbx", ".", "setValue", "(", "inputsdict", ".", "get", "(", "'reprate'", ",", "1", ")", ")", "# self.display.spiketracePlot.setRasterBounds(inputsdict.get('raster_bounds', (0.5,1)))", "self", ".", "specArgs", "=", "inputsdict", ".", "get", "(", "'specargs'", ",", "{", "u'nfft'", ":", "512", ",", "u'window'", ":", "u'hanning'", ",", "u'overlap'", ":", "90", ",", "'colormap'", ":", "{", "'lut'", ":", "None", ",", "'state'", ":", "None", ",", "'levels'", ":", "None", "}", "}", ")", "# self.display.setSpecArgs(**self.specArgs) ", "SpecWidget", ".", "setSpecArgs", "(", "*", "*", "self", ".", "specArgs", ")", "self", ".", "viewSettings", "=", "inputsdict", ".", "get", "(", "'viewSettings'", ",", "{", "'fontsz'", ":", "10", ",", "'display_attributes'", ":", "{", "}", "}", ")", "self", ".", "ui", ".", "stimDetails", ".", "setDisplayAttributes", "(", "self", ".", "viewSettings", "[", "'display_attributes'", "]", ")", "font", "=", "QtGui", ".", "QFont", "(", ")", "font", ".", "setPointSize", "(", "self", ".", "viewSettings", "[", "'fontsz'", "]", ")", "QtGui", ".", "QApplication", ".", "setFont", "(", "font", ")", "self", ".", "ui", ".", "calibrationWidget", ".", "ui", ".", "nrepsSpnbx", ".", "setValue", "(", "inputsdict", ".", "get", "(", "'calreps'", ",", "5", ")", ")", "self", ".", "calvals", "=", "inputsdict", ".", "get", "(", "'calvals'", ",", "{", "'calf'", ":", "20000", ",", "'caldb'", ":", "100", ",", "'calv'", ":", "0.1", ",", "'use_calfile'", ":", "False", ",", "'frange'", ":", "(", "5000", ",", "1e5", ")", ",", "'calname'", ":", "''", "}", ")", "self", ".", "calvals", "[", "'use_calfile'", "]", "=", "False", "self", ".", "calvals", "[", "'calname'", "]", "=", "''", "self", ".", "ui", ".", "refDbSpnbx", ".", "setValue", "(", "self", ".", "calvals", "[", "'caldb'", "]", ")", "self", ".", "ui", ".", "mphoneSensSpnbx", ".", "setValue", "(", "inputsdict", ".", "get", "(", "'mphonesens'", ",", "0.004", ")", ")", "self", ".", "ui", ".", "mphoneDBSpnbx", ".", "setValue", "(", "MPHONE_CALDB", ")", "# self.ui.mphoneDBSpnbx.setValue(inputsdict.get('mphonedb', 94))", "Vocalization", ".", 
"paths", "=", "inputsdict", ".", "get", "(", "'vocalpaths'", ",", "[", "]", ")", "# load the previous sessions scaling", "self", ".", "tscale", "=", "inputsdict", ".", "get", "(", "'tscale'", ",", "SmartSpinBox", ".", "MilliSeconds", ")", "self", ".", "fscale", "=", "inputsdict", ".", "get", "(", "'fscale'", ",", "SmartSpinBox", ".", "kHz", ")", "try", ":", "self", ".", "updateUnitLabels", "(", "self", ".", "tscale", ",", "self", ".", "fscale", ")", "except", ":", "self", ".", "tscale", "=", "'ms'", "self", ".", "fscale", "=", "'kHz'", "self", ".", "updateUnitLabels", "(", "self", ".", "tscale", ",", "self", ".", "fscale", ")", "cal_template", "=", "inputsdict", ".", "get", "(", "'calparams'", ",", "None", ")", "if", "cal_template", "is", "not", "None", ":", "try", ":", "self", ".", "acqmodel", ".", "load_calibration_template", "(", "cal_template", ")", "except", ":", "logger", "=", "logging", ".", "getLogger", "(", "'main'", ")", "logger", ".", "exception", "(", "\"Unable to load previous calibration settings\"", ")", "else", ":", "logger", "=", "logging", ".", "getLogger", "(", "'main'", ")", "logger", ".", "debug", "(", "'No saved calibration stimului inputs'", ")", "if", "'explorestims'", "in", "inputsdict", ":", "self", ".", "ui", ".", "exploreStimEditor", ".", "loadTemplate", "(", "inputsdict", "[", "'explorestims'", "]", ")", "else", ":", "logger", "=", "logging", ".", "getLogger", "(", "'main'", ")", "logger", ".", "debug", "(", "'No saved explore stimului inputs'", ")", "# load the previous session's Tuning Curve defaults", "TCFactory", ".", "defaultInputs", ".", "update", "(", "inputsdict", ".", "get", "(", "'tuning_curve'", ",", "TCFactory", ".", "defaultInputs", ")", ")", "# set defaults then merge", "self", ".", "advanced_options", "=", "{", "'device_name'", ":", "''", ",", "'max_voltage'", ":", "1.5", ",", "'device_max_voltage'", ":", "10.0", ",", "'volt_amp_conversion'", ":", "0.1", ",", "'use_attenuator'", ":", "False", "}", "if", "'advanced_options'", "in", "inputsdict", ":", "self", ".", "advanced_options", ".", "update", "(", "inputsdict", "[", "'advanced_options'", "]", ")", "StimulusModel", ".", "setMaxVoltage", "(", "self", ".", "advanced_options", "[", "'max_voltage'", "]", ",", "self", ".", "advanced_options", "[", "'device_max_voltage'", "]", ")", "self", ".", "display", ".", "setAmpConversionFactor", "(", "self", ".", "advanced_options", "[", "'volt_amp_conversion'", "]", ")", "if", "self", ".", "advanced_options", "[", "'use_attenuator'", "]", ":", "self", ".", "acqmodel", ".", "attenuator_connection", "(", "True", ")", "else", ":", "self", ".", "acqmodel", ".", "attenuator_connection", "(", "False", ")", "self", ".", "_aichans", "=", "inputsdict", ".", "get", "(", "'aichans'", ",", "[", "]", ")", "self", ".", "_aichan_details", "=", "inputsdict", ".", "get", "(", "'aichan_details'", ",", "{", "}", ")", "for", "name", ",", "deets", "in", "self", ".", "_aichan_details", ".", "items", "(", ")", ":", "# make sure all field as present in details for each channel", "self", ".", "_aichan_details", "[", "name", "]", "[", "'threshold'", "]", "=", "deets", ".", "get", "(", "'threshold'", ",", "5", ")", "self", ".", "_aichan_details", "[", "name", "]", "[", "'polarity'", "]", "=", "deets", ".", "get", "(", "'polarity'", ",", "1", ")", "self", ".", "_aichan_details", "[", "name", "]", "[", "'raster_bounds'", "]", "=", "deets", ".", "get", "(", "'raster_bounds'", ",", "(", "0.5", ",", "0.9", ")", ")", "self", ".", "_aichan_details", "[", "name", "]", "[", "'abs'", "]", "=", 
"deets", ".", "get", "(", "'abs'", ",", "True", ")", "self", ".", "reset_device_channels", "(", ")", "stim_defaults", "=", "inputsdict", ".", "get", "(", "'stim_view_defaults'", ",", "{", "}", ")", "for", "name", ",", "state", "in", "stim_defaults", ".", "items", "(", ")", ":", "StimulusView", ".", "updateDefaults", "(", "name", ",", "state", ")" ]
Load previsouly saved input values, and load them to GUI widgets :param fname: file path where stashed input values are stored :type fname: str
[ "Load", "previsouly", "saved", "input", "values", "and", "load", "them", "to", "GUI", "widgets" ]
python
train
51.621359
nerdvegas/rez
src/rez/vendor/sortedcontainers/sorteddict.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/sortedcontainers/sorteddict.py#L569-L579
def index(self, value):
    """
    Return index of *value* in self.

    Raises ValueError if *value* is not found.
    """
    # pylint: disable=arguments-differ
    for idx, val in enumerate(self):
        if value == val:
            return idx
    raise ValueError('{0!r} is not in dict'.format(value))
[ "def", "index", "(", "self", ",", "value", ")", ":", "# pylint: disable=arguments-differ", "for", "idx", ",", "val", "in", "enumerate", "(", "self", ")", ":", "if", "value", "==", "val", ":", "return", "idx", "raise", "ValueError", "(", "'{0!r} is not in dict'", ".", "format", "(", "value", ")", ")" ]
Return index of *value* in self. Raises ValueError if *value* is not found.
[ "Return", "index", "of", "*", "value", "*", "in", "self", "." ]
python
train
30.272727
kajala/django-jacc
jacc/admin.py
https://github.com/kajala/django-jacc/blob/2c4356a46bc46430569136303488db6a9af65560/jacc/admin.py#L669-L679
def _format_date(self, obj) -> str:
    """
    Short date format.
    :param obj: date or datetime or None
    :return: str
    """
    if obj is None:
        return ''
    if isinstance(obj, datetime):
        obj = obj.date()
    return date_format(obj, 'SHORT_DATE_FORMAT')
[ "def", "_format_date", "(", "self", ",", "obj", ")", "->", "str", ":", "if", "obj", "is", "None", ":", "return", "''", "if", "isinstance", "(", "obj", ",", "datetime", ")", ":", "obj", "=", "obj", ".", "date", "(", ")", "return", "date_format", "(", "obj", ",", "'SHORT_DATE_FORMAT'", ")" ]
Short date format. :param obj: date or datetime or None :return: str
[ "Short", "date", "format", ".", ":", "param", "obj", ":", "date", "or", "datetime", "or", "None", ":", "return", ":", "str" ]
python
train
28
tijme/not-your-average-web-crawler
nyawc/QueueItem.py
https://github.com/tijme/not-your-average-web-crawler/blob/d77c14e1616c541bb3980f649a7e6f8ed02761fb/nyawc/QueueItem.py#L118-L152
def get_hash(self):
    """Generate and return the dict index hash of the given queue item.

    Note:
        Cookies should not be included in the hash calculation because
        otherwise requests are crawled multiple times with e.g. different
        session keys, causing infinite crawling recursion.

    Note:
        At this moment the keys do not actually get hashed since it works
        perfectly without it, and since hashing the keys would require us
        to build hash collision management.

    Returns:
        str: The hash of the given queue item.
    """
    if self.__index_hash:
        return self.__index_hash

    key = self.request.method
    key += URLHelper.get_protocol(self.request.url)
    key += URLHelper.get_subdomain(self.request.url)
    key += URLHelper.get_hostname(self.request.url)
    key += URLHelper.get_tld(self.request.url)
    key += URLHelper.get_path(self.request.url)
    key += str(URLHelper.get_ordered_params(self.request.url))

    if self.request.data is not None:
        key += str(self.request.data.keys())

    self.__index_hash = key
    return self.__index_hash
[ "def", "get_hash", "(", "self", ")", ":", "if", "self", ".", "__index_hash", ":", "return", "self", ".", "__index_hash", "key", "=", "self", ".", "request", ".", "method", "key", "+=", "URLHelper", ".", "get_protocol", "(", "self", ".", "request", ".", "url", ")", "key", "+=", "URLHelper", ".", "get_subdomain", "(", "self", ".", "request", ".", "url", ")", "key", "+=", "URLHelper", ".", "get_hostname", "(", "self", ".", "request", ".", "url", ")", "key", "+=", "URLHelper", ".", "get_tld", "(", "self", ".", "request", ".", "url", ")", "key", "+=", "URLHelper", ".", "get_path", "(", "self", ".", "request", ".", "url", ")", "key", "+=", "str", "(", "URLHelper", ".", "get_ordered_params", "(", "self", ".", "request", ".", "url", ")", ")", "if", "self", ".", "request", ".", "data", "is", "not", "None", ":", "key", "+=", "str", "(", "self", ".", "request", ".", "data", ".", "keys", "(", ")", ")", "self", ".", "__index_hash", "=", "key", "return", "self", ".", "__index_hash" ]
Generate and return the dict index hash of the given queue item. Note: Cookies should not be included in the hash calculation because otherwise requests are crawled multiple times with e.g. different session keys, causing infinite crawling recursion. Note: At this moment the keys do not actually get hashed since it works perfectly without and since hashing the keys requires us to built hash collision management. Returns: str: The hash of the given queue item.
[ "Generate", "and", "return", "the", "dict", "index", "hash", "of", "the", "given", "queue", "item", "." ]
python
train
33.542857
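The `get_hash` sample above deduplicates crawl requests by concatenating URL components into a key. A minimal standalone sketch of the same idea using only the standard library; `index_key` is a hypothetical helper for illustration, not part of nyawc:

```python
from urllib.parse import urlsplit, parse_qsl

def index_key(method, url, data=None):
    # Split the URL into the same kinds of components the QueueItem hash uses.
    parts = urlsplit(url)
    key = method + parts.scheme + parts.netloc + parts.path
    # Sort query parameters so parameter order does not create distinct keys.
    key += str(sorted(parse_qsl(parts.query)))
    if data is not None:
        key += str(sorted(data.keys()))
    return key

# Two URLs that differ only in parameter order map to the same key.
a = index_key("GET", "https://example.com/search?a=1&b=2")
b = index_key("GET", "https://example.com/search?b=2&a=1")
assert a == b
```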
NYUCCL/psiTurk
psiturk/amt_services_wrapper.py
https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/amt_services_wrapper.py#L202-L213
def worker_unreject(self, chosen_hit, assignment_ids=None):
    ''' Unreject worker '''
    if chosen_hit:
        workers = self.amt_services.get_workers("Rejected")
        assignment_ids = [worker['assignmentId'] for worker in workers if \
                          worker['hitId'] == chosen_hit]
    for assignment_id in assignment_ids:
        success = self.amt_services.unreject_worker(assignment_id)
        if success:
            print 'unrejected %s' % (assignment_id)
        else:
            print '*** failed to unreject', assignment_id
[ "def", "worker_unreject", "(", "self", ",", "chosen_hit", ",", "assignment_ids", "=", "None", ")", ":", "if", "chosen_hit", ":", "workers", "=", "self", ".", "amt_services", ".", "get_workers", "(", "\"Rejected\"", ")", "assignment_ids", "=", "[", "worker", "[", "'assignmentId'", "]", "for", "worker", "in", "workers", "if", "worker", "[", "'hitId'", "]", "==", "chosen_hit", "]", "for", "assignment_id", "in", "assignment_ids", ":", "success", "=", "self", ".", "amt_services", ".", "unreject_worker", "(", "assignment_id", ")", "if", "success", ":", "print", "'unrejected %s'", "%", "(", "assignment_id", ")", "else", ":", "print", "'*** failed to unreject'", ",", "assignment_id" ]
Unreject worker
[ "Unreject", "worker" ]
python
train
48.833333
Jammy2211/PyAutoLens
autolens/data/array/util/mapping_util.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/util/mapping_util.py#L197-L237
def map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d, shape, one_to_two):
    """For a 1D array that was computed by mapping unmasked values from a 2D array of shape (rows, columns), map its \
    values back to the original 2D array where masked values are set to zero.

    This uses a 1D array 'one_to_two' where each index gives the 2D pixel indexes of the 1D array's unmasked pixels, \
    for example:

    - If one_to_two[0] = [0,0], the first value of the 1D array maps to the pixel [0,0] of the 2D array.
    - If one_to_two[1] = [0,1], the second value of the 1D array maps to the pixel [0,1] of the 2D array.
    - If one_to_two[4] = [1,1], the fifth value of the 1D array maps to the pixel [1,1] of the 2D array.

    Parameters
    ----------
    array_1d : ndarray
        The 1D array of values which are mapped to a 2D array.
    shape : (int, int)
        The shape of the 2D array which the pixels are defined on.
    one_to_two : ndarray
        An array describing the 2D array index that every 1D array index maps to.

    Returns
    --------
    ndarray
        A 2D array of values mapped from the 1D array with dimensions shape.

    Examples
    --------
    one_to_two = np.array([[0,1], [1,0], [1,1], [1,2], [2,1]])

    array_1d = np.array([2.0, 4.0, 5.0, 6.0, 8.0])

    array_2d = map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d=array_1d, shape=(3,3),
                                                                                  one_to_two=one_to_two)
    """
    array_2d = np.zeros(shape)

    for index in range(len(one_to_two)):
        array_2d[one_to_two[index, 0], one_to_two[index, 1]] = array_1d[index]

    return array_2d
[ "def", "map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two", "(", "array_1d", ",", "shape", ",", "one_to_two", ")", ":", "array_2d", "=", "np", ".", "zeros", "(", "shape", ")", "for", "index", "in", "range", "(", "len", "(", "one_to_two", ")", ")", ":", "array_2d", "[", "one_to_two", "[", "index", ",", "0", "]", ",", "one_to_two", "[", "index", ",", "1", "]", "]", "=", "array_1d", "[", "index", "]", "return", "array_2d" ]
For a 1D array that was computed by mapping unmasked values from a 2D array of shape (rows, columns), map its \ values back to the original 2D array where masked values are set to zero. This uses a 1D array 'one_to_two' where each index gives the 2D pixel indexes of the 1D array's unmasked pixels, \ for example: - If one_to_two[0] = [0,0], the first value of the 1D array maps to the pixel [0,0] of the 2D array. - If one_to_two[1] = [0,1], the second value of the 1D array maps to the pixel [0,1] of the 2D array. - If one_to_two[4] = [1,1], the fifth value of the 1D array maps to the pixel [1,1] of the 2D array. Parameters ---------- array_1d : ndarray The 1D array of values which are mapped to a 2D array. shape : (int, int) The shape of the 2D array which the pixels are defined on. one_to_two : ndarray An array describing the 2D array index that every 1D array index maps too. Returns -------- ndarray A 2D array of values mapped from the 1D array with dimensions shape. Examples -------- one_to_two = np.array([[0,1], [1,0], [1,1], [1,2], [2,1]]) array_1d = np.array([[2.0, 4.0, 5.0, 6.0, 8.0]) array_2d = map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(array_1d=array_1d, shape=(3,3), one_to_two=one_to_two)
[ "For", "a", "1D", "array", "that", "was", "computed", "by", "mapping", "unmasked", "values", "from", "a", "2D", "array", "of", "shape", "(", "rows", "columns", ")", "map", "its", "\\", "values", "back", "to", "the", "original", "2D", "array", "where", "masked", "values", "are", "set", "to", "zero", "." ]
python
valid
41.146341
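The docstring's example can be run directly. A self-contained version with numpy, inlining the function's loop so the mapping is visible:

```python
import numpy as np

# Inputs mirroring the docstring example.
one_to_two = np.array([[0, 1], [1, 0], [1, 1], [1, 2], [2, 1]])
array_1d = np.array([2.0, 4.0, 5.0, 6.0, 8.0])

# Masked pixels stay zero; each 1D value lands at its stored 2D index.
array_2d = np.zeros((3, 3))
for index in range(len(one_to_two)):
    array_2d[one_to_two[index, 0], one_to_two[index, 1]] = array_1d[index]

print(array_2d)
# [[0. 2. 0.]
#  [4. 5. 6.]
#  [0. 8. 0.]]
```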
hivetech/dna
python/dna/apy/auth.py
https://github.com/hivetech/dna/blob/50ad00031be29765b2576fa407d35a36e0608de9/python/dna/apy/auth.py#L30-L33
def check_token(token):
    ''' Verify http header token authentication '''
    user = models.User.objects(api_key=token).first()
    return user or None
[ "def", "check_token", "(", "token", ")", ":", "user", "=", "models", ".", "User", ".", "objects", "(", "api_key", "=", "token", ")", ".", "first", "(", ")", "return", "user", "or", "None" ]
Verify http header token authentification
[ "Verify", "http", "header", "token", "authentification" ]
python
test
38
brentp/toolshed
toolshed/__init__.py
https://github.com/brentp/toolshed/blob/c9529d6872bf28207642896c3b416f68e79b1269/toolshed/__init__.py#L14-L35
def groupby(iterable, key=0, filter=None):
    """
    wrapper to itertools.groupby that returns a list of each group, rather
    than a generator and accepts integers or strings as the key and
    automatically converts them to callables with itemgetter(key)

    Arguments:
        iterable: iterable
        key: string, int or callable that tells how to group

    Returns:
        an iterable where each item is the key and a *list* of that
        group. (itertools.groupby returns a generator of that group).

    e.g. groupby(iterable, 0)
    """
    if isinstance(key, (basestring, int)):
        key = itemgetter(key)
    elif isinstance(key, (tuple, list)):
        key = itemgetter(*key)
    for label, grp in igroupby(iterable, key):
        yield label, list(grp)
[ "def", "groupby", "(", "iterable", ",", "key", "=", "0", ",", "filter", "=", "None", ")", ":", "if", "isinstance", "(", "key", ",", "(", "basestring", ",", "int", ")", ")", ":", "key", "=", "itemgetter", "(", "key", ")", "elif", "isinstance", "(", "key", ",", "(", "tuple", ",", "list", ")", ")", ":", "key", "=", "itemgetter", "(", "*", "key", ")", "for", "label", ",", "grp", "in", "igroupby", "(", "iterable", ",", "key", ")", ":", "yield", "label", ",", "list", "(", "grp", ")" ]
wrapper to itertools.groupby that returns a list of each group, rather than a generator and accepts integers or strings as the key and automatically converts them to callables with itemgetter(key) Arguments: iterable: iterable key: string, int or callable that tells how to group Returns: an iterable where each item is the key and a *list* of that group. (itertools.groupby returns a generator of that group). e.g. groupby(iterable, 0)
[ "wrapper", "to", "itertools", ".", "groupby", "that", "returns", "a", "list", "of", "each", "group", "rather", "than", "a", "generator", "and", "accepts", "integers", "or", "strings", "as", "the", "key", "and", "automatically", "converts", "them", "to", "callables", "with", "itemgetter", "(", "key", ")" ]
python
train
34.363636
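The `groupby` wrapper above reduces to `itertools.groupby` plus `operator.itemgetter` (the `basestring` check marks the sample as Python 2 code). The equivalent behavior in plain Python 3, on hypothetical sample rows:

```python
from itertools import groupby as igroupby
from operator import itemgetter

rows = [('a', 1), ('a', 2), ('b', 3)]
# Grouping by column 0, as in groupby(rows, 0); the input must already be
# sorted by the key, a requirement inherited from itertools.groupby.
for label, grp in igroupby(rows, itemgetter(0)):
    print(label, list(grp))
# a [('a', 1), ('a', 2)]
# b [('b', 3)]
```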
pyrogram/pyrogram
pyrogram/client/methods/decorators/on_disconnect.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/methods/decorators/on_disconnect.py#L25-L38
def on_disconnect(self=None) -> callable:
    """Use this decorator to automatically register a function for handling
    disconnections. This does the same thing as :meth:`add_handler` using the
    :class:`DisconnectHandler`.
    """

    def decorator(func: callable) -> Handler:
        handler = pyrogram.DisconnectHandler(func)

        if self is not None:
            self.add_handler(handler)

        return handler

    return decorator
[ "def", "on_disconnect", "(", "self", "=", "None", ")", "->", "callable", ":", "def", "decorator", "(", "func", ":", "callable", ")", "->", "Handler", ":", "handler", "=", "pyrogram", ".", "DisconnectHandler", "(", "func", ")", "if", "self", "is", "not", "None", ":", "self", ".", "add_handler", "(", "handler", ")", "return", "handler", "return", "decorator" ]
Use this decorator to automatically register a function for handling disconnections. This does the same thing as :meth:`add_handler` using the :class:`DisconnectHandler`.
[ "Use", "this", "decorator", "to", "automatically", "register", "a", "function", "for", "handling", "disconnections", ".", "This", "does", "the", "same", "thing", "as", ":", "meth", ":", "add_handler", "using", "the", ":", "class", ":", "DisconnectHandler", "." ]
python
train
33.285714
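A hedged usage sketch for the decorator above, assuming a `Client` instance named `app`; the session name is hypothetical and API credentials are assumed to be configured elsewhere:

```python
from pyrogram import Client

app = Client("my_account")  # hypothetical session name

@app.on_disconnect()
def handle_disconnect(client):
    # Called whenever the client loses its connection to Telegram.
    print("Disconnected from Telegram")
```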
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1897-L1911
def com_google_fonts_check_metadata_valid_copyright(font_metadata):
    """Copyright notices match canonical pattern in METADATA.pb"""
    import re
    string = font_metadata.copyright
    does_match = re.search(r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)',
                           string)
    if does_match:
        yield PASS, "METADATA.pb copyright string is good"
    else:
        yield FAIL, ("METADATA.pb: Copyright notices should match"
                     " a pattern similar to:"
                     " 'Copyright 2017 The Familyname"
                     " Project Authors (git url)'\n"
                     "But instead we have got:"
                     " '{}'").format(string)
[ "def", "com_google_fonts_check_metadata_valid_copyright", "(", "font_metadata", ")", ":", "import", "re", "string", "=", "font_metadata", ".", "copyright", "does_match", "=", "re", ".", "search", "(", "r'Copyright [0-9]{4} The .* Project Authors \\([^\\@]*\\)'", ",", "string", ")", "if", "does_match", ":", "yield", "PASS", ",", "\"METADATA.pb copyright string is good\"", "else", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb: Copyright notices should match\"", "\" a pattern similar to:\"", "\" 'Copyright 2017 The Familyname\"", "\" Project Authors (git url)'\\n\"", "\"But instead we have got:\"", "\" '{}'\"", ")", ".", "format", "(", "string", ")" ]
Copyright notices match canonical pattern in METADATA.pb
[ "Copyright", "notices", "match", "canonical", "pattern", "in", "METADATA", ".", "pb" ]
python
train
43.466667
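The check above hinges on one regular expression. A quick demonstration of what it accepts and rejects, using hypothetical copyright strings:

```python
import re

PATTERN = r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)'

good = "Copyright 2017 The Roboto Project Authors (https://github.com/google/roboto)"
bad = "Copyright (c) 2017 Some Foundry"

print(bool(re.search(PATTERN, good)))  # True: year, family name, and (url) present
print(bool(re.search(PATTERN, bad)))   # False: does not follow the canonical shape
```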
tensorflow/tensor2tensor
tensor2tensor/data_generators/wikisum/parallel_launch.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/parallel_launch.py#L98-L111
def remote_run(cmd, instance_name, detach=False, retries=1):
    """Run command on GCS instance, optionally detached."""
    if detach:
        cmd = SCREEN.format(command=cmd)
    args = SSH.format(instance_name=instance_name).split()
    args.append(cmd)
    for i in range(retries + 1):
        try:
            if i > 0:
                tf.logging.info("Retry %d for %s", i, args)
            return sp.check_call(args)
        except sp.CalledProcessError as e:
            if i == retries:
                raise e
[ "def", "remote_run", "(", "cmd", ",", "instance_name", ",", "detach", "=", "False", ",", "retries", "=", "1", ")", ":", "if", "detach", ":", "cmd", "=", "SCREEN", ".", "format", "(", "command", "=", "cmd", ")", "args", "=", "SSH", ".", "format", "(", "instance_name", "=", "instance_name", ")", ".", "split", "(", ")", "args", ".", "append", "(", "cmd", ")", "for", "i", "in", "range", "(", "retries", "+", "1", ")", ":", "try", ":", "if", "i", ">", "0", ":", "tf", ".", "logging", ".", "info", "(", "\"Retry %d for %s\"", ",", "i", ",", "args", ")", "return", "sp", ".", "check_call", "(", "args", ")", "except", "sp", ".", "CalledProcessError", "as", "e", ":", "if", "i", "==", "retries", ":", "raise", "e" ]
Run command on GCS instance, optionally detached.
[ "Run", "command", "on", "GCS", "instance", "optionally", "detached", "." ]
python
train
32.142857
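The retry shape in `remote_run` is independent of SSH: attempt `retries + 1` times and re-raise the last failure. A hypothetical local analogue, `run_with_retries`, showing the same attempt/re-raise logic on a plain command:

```python
import logging
import subprocess as sp

def run_with_retries(args, retries=1):
    # Same shape as remote_run: retries + 1 total attempts, and the final
    # CalledProcessError is re-raised if every attempt fails.
    for i in range(retries + 1):
        try:
            if i > 0:
                logging.info("Retry %d for %s", i, args)
            return sp.check_call(args)
        except sp.CalledProcessError:
            if i == retries:
                raise

run_with_retries(["true"], retries=2)  # succeeds on the first attempt (POSIX)
```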
tkem/cachetools
cachetools/ttl.py
https://github.com/tkem/cachetools/blob/1b67cddadccb89993e9d2567bac22e57e2b2b373/cachetools/ttl.py#L197-L209
def popitem(self):
    """Remove and return the `(key, value)` pair least recently used that
    has not already expired.

    """
    with self.__timer as time:
        self.expire(time)
        try:
            key = next(iter(self.__links))
        except StopIteration:
            raise KeyError('%s is empty' % self.__class__.__name__)
        else:
            return (key, self.pop(key))
[ "def", "popitem", "(", "self", ")", ":", "with", "self", ".", "__timer", "as", "time", ":", "self", ".", "expire", "(", "time", ")", "try", ":", "key", "=", "next", "(", "iter", "(", "self", ".", "__links", ")", ")", "except", "StopIteration", ":", "raise", "KeyError", "(", "'%s is empty'", "%", "self", ".", "__class__", ".", "__name__", ")", "else", ":", "return", "(", "key", ",", "self", ".", "pop", "(", "key", ")", ")" ]
Remove and return the `(key, value)` pair least recently used that has not already expired.
[ "Remove", "and", "return", "the", "(", "key", "value", ")", "pair", "least", "recently", "used", "that", "has", "not", "already", "expired", "." ]
python
train
32.846154
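A hedged usage sketch via cachetools' public `TTLCache`; with no intervening accesses, the first pair inserted is the least recently used, so it comes back first:

```python
from cachetools import TTLCache

cache = TTLCache(maxsize=10, ttl=60)
cache['a'] = 1
cache['b'] = 2

# Expired entries are purged by expire() before the LRU pair is chosen.
print(cache.popitem())  # ('a', 1)
print(cache.popitem())  # ('b', 2)
```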
awslabs/sockeye
sockeye/train.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/train.py#L105-L145
def check_resume(args: argparse.Namespace, output_folder: str) -> bool:
    """
    Check if we should resume a broken training run.

    :param args: Arguments as returned by argparse.
    :param output_folder: Main output folder for the model.
    :return: Flag signaling if we are resuming training and the directory with the training status.
    """
    resume_training = False
    training_state_dir = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)
    if os.path.exists(output_folder):
        if args.overwrite_output:
            logger.info("Removing existing output folder %s.", output_folder)
            shutil.rmtree(output_folder)
            os.makedirs(output_folder)
        elif os.path.exists(training_state_dir):
            old_args = vars(arguments.load_args(os.path.join(output_folder, C.ARGS_STATE_NAME)))
            arg_diffs = _dict_difference(vars(args), old_args) | _dict_difference(old_args, vars(args))
            # Remove args that may differ without affecting the training.
            arg_diffs -= set(C.ARGS_MAY_DIFFER)
            # allow different device-ids provided their total count is the same
            if 'device_ids' in arg_diffs and len(old_args['device_ids']) == len(vars(args)['device_ids']):
                arg_diffs.discard('device_ids')
            if not arg_diffs:
                resume_training = True
            else:
                # We do not have the logger yet
                logger.error("Mismatch in arguments for training continuation.")
                logger.error("Differing arguments: %s.", ", ".join(arg_diffs))
                sys.exit(1)
        elif os.path.exists(os.path.join(output_folder, C.PARAMS_BEST_NAME)):
            logger.error("Refusing to overwrite model folder %s as it seems to contain a trained model.", output_folder)
            sys.exit(1)
        else:
            logger.info("The output folder %s already exists, but no training state or parameter file was found. "
                        "Will start training from scratch.", output_folder)
    else:
        os.makedirs(output_folder)

    return resume_training
[ "def", "check_resume", "(", "args", ":", "argparse", ".", "Namespace", ",", "output_folder", ":", "str", ")", "->", "bool", ":", "resume_training", "=", "False", "training_state_dir", "=", "os", ".", "path", ".", "join", "(", "output_folder", ",", "C", ".", "TRAINING_STATE_DIRNAME", ")", "if", "os", ".", "path", ".", "exists", "(", "output_folder", ")", ":", "if", "args", ".", "overwrite_output", ":", "logger", ".", "info", "(", "\"Removing existing output folder %s.\"", ",", "output_folder", ")", "shutil", ".", "rmtree", "(", "output_folder", ")", "os", ".", "makedirs", "(", "output_folder", ")", "elif", "os", ".", "path", ".", "exists", "(", "training_state_dir", ")", ":", "old_args", "=", "vars", "(", "arguments", ".", "load_args", "(", "os", ".", "path", ".", "join", "(", "output_folder", ",", "C", ".", "ARGS_STATE_NAME", ")", ")", ")", "arg_diffs", "=", "_dict_difference", "(", "vars", "(", "args", ")", ",", "old_args", ")", "|", "_dict_difference", "(", "old_args", ",", "vars", "(", "args", ")", ")", "# Remove args that may differ without affecting the training.", "arg_diffs", "-=", "set", "(", "C", ".", "ARGS_MAY_DIFFER", ")", "# allow different device-ids provided their total count is the same", "if", "'device_ids'", "in", "arg_diffs", "and", "len", "(", "old_args", "[", "'device_ids'", "]", ")", "==", "len", "(", "vars", "(", "args", ")", "[", "'device_ids'", "]", ")", ":", "arg_diffs", ".", "discard", "(", "'device_ids'", ")", "if", "not", "arg_diffs", ":", "resume_training", "=", "True", "else", ":", "# We do not have the logger yet", "logger", ".", "error", "(", "\"Mismatch in arguments for training continuation.\"", ")", "logger", ".", "error", "(", "\"Differing arguments: %s.\"", ",", "\", \"", ".", "join", "(", "arg_diffs", ")", ")", "sys", ".", "exit", "(", "1", ")", "elif", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "output_folder", ",", "C", ".", "PARAMS_BEST_NAME", ")", ")", ":", "logger", ".", "error", "(", "\"Refusing to overwrite model folder %s as it seems to contain a trained model.\"", ",", "output_folder", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "logger", ".", "info", "(", "\"The output folder %s already exists, but no training state or parameter file was found. \"", "\"Will start training from scratch.\"", ",", "output_folder", ")", "else", ":", "os", ".", "makedirs", "(", "output_folder", ")", "return", "resume_training" ]
Check if we should resume a broken training run. :param args: Arguments as returned by argparse. :param output_folder: Main output folder for the model. :return: Flag signaling if we are resuming training and the directory with the training status.
[ "Check", "if", "we", "should", "resume", "a", "broken", "training", "run", "." ]
python
train
50.95122
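`check_resume` leans on a `_dict_difference` helper that is not shown in this record. A hypothetical sketch consistent with how it is used above (symmetric key differences built via the `|` union):

```python
def _dict_difference(first, second):
    # Hypothetical sketch of the helper check_resume relies on: the set of
    # keys whose values in `first` are missing from or differ in `second`.
    diffs = set()
    for k, v in first.items():
        if k not in second or second[k] != v:
            diffs.add(k)
    return diffs

old = {'lr': 0.0003, 'device_ids': [0]}
new = {'lr': 0.0002, 'device_ids': [0]}
print(_dict_difference(new, old) | _dict_difference(old, new))  # {'lr'}
```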
macacajs/wd.py
macaca/util.py
https://github.com/macacajs/wd.py/blob/6d3c52060013e01a67cd52b68b5230b387427bad/macaca/util.py#L40-L53
def format_map(self, format_string, mapping):
    """format a string by a map

    Args:
        format_string(str): A format string
        mapping(dict): A map to format the string

    Returns:
        A formatted string.

    Raises:
        KeyError: if key is not provided by the given map.
    """
    return self.vformat(format_string, args=None, kwargs=mapping)
[ "def", "format_map", "(", "self", ",", "format_string", ",", "mapping", ")", ":", "return", "self", ".", "vformat", "(", "format_string", ",", "args", "=", "None", ",", "kwargs", "=", "mapping", ")" ]
format a string by a map Args: format_string(str): A format string mapping(dict): A map to format the string Returns: A formatted string. Raises: KeyError: if key is not provided by the given map.
[ "format", "a", "string", "by", "a", "map" ]
python
valid
28.357143
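`format_map` delegates to `vformat`, which suggests the class builds on `string.Formatter`. A sketch of the underlying standard-library call, assuming named fields only (positional fields would require a real `args` sequence instead of `None`):

```python
from string import Formatter

fmt = Formatter()
result = fmt.vformat("{name} scored {score}", args=None,
                     kwargs={"name": "macaca", "score": 42})
print(result)  # macaca scored 42
```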
wright-group/WrightTools
WrightTools/_dataset.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/_dataset.py#L391-L406
def slices(self):
    """Returns a generator yielding tuple of slice objects.

    Order is not guaranteed.
    """
    if self.chunks is None:
        yield tuple(slice(None, s) for s in self.shape)
    else:
        ceilings = tuple(-(-s // c) for s, c in zip(self.shape, self.chunks))
        for idx in np.ndindex(ceilings):  # could also use itertools.product
            out = []
            for i, c, s in zip(idx, self.chunks, self.shape):
                start = i * c
                stop = min(start + c, s + 1)
                out.append(slice(start, stop, 1))
            yield tuple(out)
[ "def", "slices", "(", "self", ")", ":", "if", "self", ".", "chunks", "is", "None", ":", "yield", "tuple", "(", "slice", "(", "None", ",", "s", ")", "for", "s", "in", "self", ".", "shape", ")", "else", ":", "ceilings", "=", "tuple", "(", "-", "(", "-", "s", "//", "c", ")", "for", "s", ",", "c", "in", "zip", "(", "self", ".", "shape", ",", "self", ".", "chunks", ")", ")", "for", "idx", "in", "np", ".", "ndindex", "(", "ceilings", ")", ":", "# could also use itertools.product", "out", "=", "[", "]", "for", "i", ",", "c", ",", "s", "in", "zip", "(", "idx", ",", "self", ".", "chunks", ",", "self", ".", "shape", ")", ":", "start", "=", "i", "*", "c", "stop", "=", "min", "(", "start", "+", "c", ",", "s", "+", "1", ")", "out", ".", "append", "(", "slice", "(", "start", ",", "stop", ",", "1", ")", ")", "yield", "tuple", "(", "out", ")" ]
Returns a generator yielding tuple of slice objects. Order is not guaranteed.
[ "Returns", "a", "generator", "yielding", "tuple", "of", "slice", "objects", "." ]
python
train
40.125
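The chunk arithmetic in `slices` is ceiling division followed by a product over chunk indices. A plain-Python sketch of the same tiling, simplified to clamp each slice at the axis length:

```python
import itertools

shape, chunks = (5, 4), (2, 3)
# Ceiling division: the number of chunks needed along each axis.
ceilings = tuple(-(-s // c) for s, c in zip(shape, chunks))
print(ceilings)  # (3, 2)

# Walk every chunk index; the final slice on each axis is clamped at the end.
for idx in itertools.product(*(range(n) for n in ceilings)):
    print(tuple(slice(i * c, min(i * c + c, s), 1)
                for i, c, s in zip(idx, chunks, shape)))
```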
PMEAL/porespy
porespy/metrics/__funcs__.py
https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/metrics/__funcs__.py#L294-L334
def _radial_profile(autocorr, r_max, nbins=100):
    r"""
    Helper function to calculate the radial profile of the autocorrelation

    Masks the image in radial segments from the center and averages the values
    The distance values are normalized and 100 bins are used as default.

    Parameters
    ----------
    autocorr : ND-array
        The image of autocorrelation produced by FFT
    r_max : int or float
        The maximum radius in pixels to sum the image over

    Returns
    -------
    result : named_tuple
        A named tuple containing an array of ``bins`` of radial position
        and an array of ``counts`` in each bin.
    """
    if len(autocorr.shape) == 2:
        adj = sp.reshape(autocorr.shape, [2, 1, 1])
        inds = sp.indices(autocorr.shape) - adj/2
        dt = sp.sqrt(inds[0]**2 + inds[1]**2)
    elif len(autocorr.shape) == 3:
        adj = sp.reshape(autocorr.shape, [3, 1, 1, 1])
        inds = sp.indices(autocorr.shape) - adj/2
        dt = sp.sqrt(inds[0]**2 + inds[1]**2 + inds[2]**2)
    else:
        raise Exception('Image dimensions must be 2 or 3')
    bin_size = np.int(np.ceil(r_max/nbins))
    bins = np.arange(bin_size, r_max, step=bin_size)
    radial_sum = np.zeros_like(bins)
    for i, r in enumerate(bins):
        # Generate Radial Mask from dt using bins
        mask = (dt <= r) * (dt > (r-bin_size))
        radial_sum[i] = np.sum(autocorr[mask])/np.sum(mask)
    # Return normalized bin and radially summed autoc
    norm_autoc_radial = radial_sum/np.max(autocorr)
    tpcf = namedtuple('two_point_correlation_function',
                      ('distance', 'probability'))
    return tpcf(bins, norm_autoc_radial)
[ "def", "_radial_profile", "(", "autocorr", ",", "r_max", ",", "nbins", "=", "100", ")", ":", "if", "len", "(", "autocorr", ".", "shape", ")", "==", "2", ":", "adj", "=", "sp", ".", "reshape", "(", "autocorr", ".", "shape", ",", "[", "2", ",", "1", ",", "1", "]", ")", "inds", "=", "sp", ".", "indices", "(", "autocorr", ".", "shape", ")", "-", "adj", "/", "2", "dt", "=", "sp", ".", "sqrt", "(", "inds", "[", "0", "]", "**", "2", "+", "inds", "[", "1", "]", "**", "2", ")", "elif", "len", "(", "autocorr", ".", "shape", ")", "==", "3", ":", "adj", "=", "sp", ".", "reshape", "(", "autocorr", ".", "shape", ",", "[", "3", ",", "1", ",", "1", ",", "1", "]", ")", "inds", "=", "sp", ".", "indices", "(", "autocorr", ".", "shape", ")", "-", "adj", "/", "2", "dt", "=", "sp", ".", "sqrt", "(", "inds", "[", "0", "]", "**", "2", "+", "inds", "[", "1", "]", "**", "2", "+", "inds", "[", "2", "]", "**", "2", ")", "else", ":", "raise", "Exception", "(", "'Image dimensions must be 2 or 3'", ")", "bin_size", "=", "np", ".", "int", "(", "np", ".", "ceil", "(", "r_max", "/", "nbins", ")", ")", "bins", "=", "np", ".", "arange", "(", "bin_size", ",", "r_max", ",", "step", "=", "bin_size", ")", "radial_sum", "=", "np", ".", "zeros_like", "(", "bins", ")", "for", "i", ",", "r", "in", "enumerate", "(", "bins", ")", ":", "# Generate Radial Mask from dt using bins", "mask", "=", "(", "dt", "<=", "r", ")", "*", "(", "dt", ">", "(", "r", "-", "bin_size", ")", ")", "radial_sum", "[", "i", "]", "=", "np", ".", "sum", "(", "autocorr", "[", "mask", "]", ")", "/", "np", ".", "sum", "(", "mask", ")", "# Return normalized bin and radially summed autoc", "norm_autoc_radial", "=", "radial_sum", "/", "np", ".", "max", "(", "autocorr", ")", "tpcf", "=", "namedtuple", "(", "'two_point_correlation_function'", ",", "(", "'distance'", ",", "'probability'", ")", ")", "return", "tpcf", "(", "bins", ",", "norm_autoc_radial", ")" ]
r""" Helper functions to calculate the radial profile of the autocorrelation Masks the image in radial segments from the center and averages the values The distance values are normalized and 100 bins are used as default. Parameters ---------- autocorr : ND-array The image of autocorrelation produced by FFT r_max : int or float The maximum radius in pixels to sum the image over Returns ------- result : named_tuple A named tupling containing an array of ``bins`` of radial position and an array of ``counts`` in each bin.
[ "r", "Helper", "functions", "to", "calculate", "the", "radial", "profile", "of", "the", "autocorrelation", "Masks", "the", "image", "in", "radial", "segments", "from", "the", "center", "and", "averages", "the", "values", "The", "distance", "values", "are", "normalized", "and", "100", "bins", "are", "used", "as", "default", "." ]
python
train
40.121951
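A self-contained numpy sketch of the radial-profile idea, on a hypothetical radially symmetric image (the record's code routes through scipy as `sp`; plain numpy suffices here):

```python
import numpy as np

# Hypothetical radially symmetric "autocorrelation": a 2D Gaussian, whose
# radial profile should decay smoothly with distance from the center.
size = 64
y, x = np.indices((size, size)) - size // 2
autocorr = np.exp(-(x**2 + y**2) / (2 * 10.0**2))

r_max, nbins = 32, 8
dt = np.sqrt(x**2 + y**2)              # distance of every pixel from center
bin_size = int(np.ceil(r_max / nbins))
bins = np.arange(bin_size, r_max, step=bin_size)
profile = np.array([autocorr[(dt <= r) & (dt > r - bin_size)].mean()
                    for r in bins])
print(profile / autocorr.max())        # normalized, decreasing with radius
```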
StarlitGhost/pyhedrals
pyhedrals/pyhedrals.py
https://github.com/StarlitGhost/pyhedrals/blob/74b3a48ecc2b73a27ded913e4152273cd5ba9cc7/pyhedrals/pyhedrals.py#L442-L448
def _sumDiceRolls(self, rollList):
    """convert from dice roll structure to a single integer result"""
    if isinstance(rollList, RollList):
        self.rolls.append(rollList)
        return rollList.sum()
    else:
        return rollList
[ "def", "_sumDiceRolls", "(", "self", ",", "rollList", ")", ":", "if", "isinstance", "(", "rollList", ",", "RollList", ")", ":", "self", ".", "rolls", ".", "append", "(", "rollList", ")", "return", "rollList", ".", "sum", "(", ")", "else", ":", "return", "rollList" ]
convert from dice roll structure to a single integer result
[ "convert", "from", "dice", "roll", "structure", "to", "a", "single", "integer", "result" ]
python
train
37.285714
TrafficSenseMSD/SumoTools
traci/_polygon.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_polygon.py#L61-L69
def setType(self, polygonID, polygonType):
    """setType(string, string) -> None

    Sets the (abstract) type of the polygon.
    """
    self._connection._beginMessage(
        tc.CMD_SET_POLYGON_VARIABLE, tc.VAR_TYPE, polygonID,
        1 + 4 + len(polygonType))
    self._connection._packString(polygonType)
    self._connection._sendExact()
[ "def", "setType", "(", "self", ",", "polygonID", ",", "polygonType", ")", ":", "self", ".", "_connection", ".", "_beginMessage", "(", "tc", ".", "CMD_SET_POLYGON_VARIABLE", ",", "tc", ".", "VAR_TYPE", ",", "polygonID", ",", "1", "+", "4", "+", "len", "(", "polygonType", ")", ")", "self", ".", "_connection", ".", "_packString", "(", "polygonType", ")", "self", ".", "_connection", ".", "_sendExact", "(", ")" ]
setType(string, string) -> None Sets the (abstract) type of the polygon.
[ "setType", "(", "string", "string", ")", "-", ">", "None" ]
python
train
39.777778
sentinel-hub/sentinelhub-py
sentinelhub/geo_utils.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/geo_utils.py#L161-L175
def pixel_to_utm(row, column, transform):
    """ Convert pixel coordinate to UTM coordinate given a transform

    :param row: row pixel coordinate
    :type row: int or float
    :param column: column pixel coordinate
    :type column: int or float
    :param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`
    :type transform: tuple or list
    :return: east, north UTM coordinates
    :rtype: float, float
    """
    east = transform[0] + column * transform[1]
    north = transform[3] + row * transform[5]
    return east, north
[ "def", "pixel_to_utm", "(", "row", ",", "column", ",", "transform", ")", ":", "east", "=", "transform", "[", "0", "]", "+", "column", "*", "transform", "[", "1", "]", "north", "=", "transform", "[", "3", "]", "+", "row", "*", "transform", "[", "5", "]", "return", "east", ",", "north" ]
Convert pixel coordinate to UTM coordinate given a transform :param row: row pixel coordinate :type row: int or float :param column: column pixel coordinate :type column: int or float :param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)` :type transform: tuple or list :return: east, north UTM coordinates :rtype: float, float
[ "Convert", "pixel", "coordinate", "to", "UTM", "coordinate", "given", "a", "transform" ]
python
train
38.666667
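The transform above is a standard affine georeference. A worked example with a hypothetical 10 m resolution transform:

```python
# Hypothetical transform: upper-left corner at (500000, 4600000), 10 m pixels,
# with north decreasing as the row index grows (hence the -10).
transform = (500000, 10, 0, 4600000, 0, -10)

row, column = 3, 7
east = transform[0] + column * transform[1]   # 500000 + 7 * 10   = 500070
north = transform[3] + row * transform[5]     # 4600000 + 3 * -10 = 4599970
print(east, north)  # 500070 4599970
```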
libtcod/python-tcod
tcod/image.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/image.py#L160-L183
def get_mipmap_pixel(
    self, left: float, top: float, right: float, bottom: float
) -> Tuple[int, int, int]:
    """Get the average color of a rectangle in this Image.

    Parameters should stay within the following limits:

    * 0 <= left < right < Image.width
    * 0 <= top < bottom < Image.height

    Args:
        left (float): Left corner of the region.
        top (float): Top corner of the region.
        right (float): Right corner of the region.
        bottom (float): Bottom corner of the region.

    Returns:
        Tuple[int, int, int]:
            An (r, g, b) tuple containing the averaged color value.
            Values are in a 0 to 255 range.
    """
    color = lib.TCOD_image_get_mipmap_pixel(
        self.image_c, left, top, right, bottom
    )
    return (color.r, color.g, color.b)
[ "def", "get_mipmap_pixel", "(", "self", ",", "left", ":", "float", ",", "top", ":", "float", ",", "right", ":", "float", ",", "bottom", ":", "float", ")", "->", "Tuple", "[", "int", ",", "int", ",", "int", "]", ":", "color", "=", "lib", ".", "TCOD_image_get_mipmap_pixel", "(", "self", ".", "image_c", ",", "left", ",", "top", ",", "right", ",", "bottom", ")", "return", "(", "color", ".", "r", ",", "color", ".", "g", ",", "color", ".", "b", ")" ]
Get the average color of a rectangle in this Image. Parameters should stay within the following limits: * 0 <= left < right < Image.width * 0 <= top < bottom < Image.height Args: left (float): Left corner of the region. top (float): Top corner of the region. right (float): Right corner of the region. bottom (float): Bottom corner of the region. Returns: Tuple[int, int, int]: An (r, g, b) tuple containing the averaged color value. Values are in a 0 to 255 range.
[ "Get", "the", "average", "color", "of", "a", "rectangle", "in", "this", "Image", "." ]
python
train
36.375
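A minimal sketch of calling get_mipmap_pixel, assuming an 8x8 Image constructed directly via tcod.image.Image; a freshly created image should be all black, so the region average comes back as (0, 0, 0).

>>> import tcod.image
>>> img = tcod.image.Image(8, 8)
>>> img.get_mipmap_pixel(0.0, 0.0, 3.5, 3.5)  # average over the top-left quarter
(0, 0, 0)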
apache/airflow
airflow/contrib/hooks/aws_athena_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_athena_hook.py#L91-L107
def get_query_results(self, query_execution_id): """ Fetch submitted Athena query results. Returns None if the query is in an intermediate or failed/cancelled state, else a dict of query output. :param query_execution_id: Id of submitted athena query :type query_execution_id: str :return: dict """ query_state = self.check_query_status(query_execution_id) if query_state is None: self.log.error('Invalid Query state') return None elif query_state in self.INTERMEDIATE_STATES or query_state in self.FAILURE_STATES: self.log.error('Query is in {state} state. Cannot fetch results'.format(state=query_state)) return None return self.conn.get_query_results(QueryExecutionId=query_execution_id)
[ "def", "get_query_results", "(", "self", ",", "query_execution_id", ")", ":", "query_state", "=", "self", ".", "check_query_status", "(", "query_execution_id", ")", "if", "query_state", "is", "None", ":", "self", ".", "log", ".", "error", "(", "'Invalid Query state'", ")", "return", "None", "elif", "query_state", "in", "self", ".", "INTERMEDIATE_STATES", "or", "query_state", "in", "self", ".", "FAILURE_STATES", ":", "self", ".", "log", ".", "error", "(", "'Query is in {state} state. Cannot fetch results'", ".", "format", "(", "state", "=", "query_state", ")", ")", "return", "None", "return", "self", ".", "conn", ".", "get_query_results", "(", "QueryExecutionId", "=", "query_execution_id", ")" ]
Fetch submitted Athena query results. Returns None if the query is in an intermediate or failed/cancelled state, else a dict of query output. :param query_execution_id: Id of submitted athena query :type query_execution_id: str :return: dict
[ "Fetch", "submitted", "athena", "query", "results", ".", "returns", "none", "if", "query", "is", "in", "intermediate", "state", "or", "failed", "/", "cancelled", "state", "else", "dict", "of", "query", "output" ]
python
test
47.411765
pyvisa/pyvisa-sim
pyvisa-sim/highlevel.py
https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/highlevel.py#L44-L53
def get_debug_info(): """Return a list of lines with backend info. """ from . import __version__ from .parser import SPEC_VERSION d = OrderedDict() d['Version'] = '%s' % __version__ d['Spec version'] = SPEC_VERSION return d
[ "def", "get_debug_info", "(", ")", ":", "from", ".", "import", "__version__", "from", ".", "parser", "import", "SPEC_VERSION", "d", "=", "OrderedDict", "(", ")", "d", "[", "'Version'", "]", "=", "'%s'", "%", "__version__", "d", "[", "'Spec version'", "]", "=", "SPEC_VERSION", "return", "d" ]
Return a list of lines with backend info.
[ "Return", "a", "list", "of", "lines", "with", "backend", "info", "." ]
python
train
27.9
mieubrisse/wunderpy2
wunderpy2/positions_endpoints.py
https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/positions_endpoints.py#L39-L48
def update_list_positions_obj(client, positions_obj_id, revision, values): ''' Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out. See https://developer.wunderlist.com/documentation/endpoints/positions for more info Return: The updated ListPositionsObj-mapped object defining the order of list layout ''' return _update_positions_obj(client, client.api.Endpoints.LIST_POSITIONS, positions_obj_id, revision, values)
[ "def", "update_list_positions_obj", "(", "client", ",", "positions_obj_id", ",", "revision", ",", "values", ")", ":", "return", "_update_positions_obj", "(", "client", ",", "client", ".", "api", ".", "Endpoints", ".", "LIST_POSITIONS", ",", "positions_obj_id", ",", "revision", ",", "values", ")" ]
Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out. See https://developer.wunderlist.com/documentation/endpoints/positions for more info Return: The updated ListPositionsObj-mapped object defining the order of list layout
[ "Updates", "the", "ordering", "of", "lists", "to", "have", "the", "given", "value", ".", "The", "given", "ID", "and", "revision", "should", "match", "the", "singleton", "object", "defining", "how", "lists", "are", "laid", "out", "." ]
python
train
53.1
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L14063-L14095
def get_cpuid_leaf_by_ordinal(self, ordinal): """Used to enumerate CPUID information override values. in ordinal of type int The ordinal number of the leaf to get. out idx of type int CPUID leaf index. out idx_sub of type int CPUID leaf sub-index. out val_eax of type int CPUID leaf value for register eax. out val_ebx of type int CPUID leaf value for register ebx. out val_ecx of type int CPUID leaf value for register ecx. out val_edx of type int CPUID leaf value for register edx. raises :class:`OleErrorInvalidarg` Invalid ordinal number is out of range. """ if not isinstance(ordinal, baseinteger): raise TypeError("ordinal can only be an instance of type baseinteger") (idx, idx_sub, val_eax, val_ebx, val_ecx, val_edx) = self._call("getCPUIDLeafByOrdinal", in_p=[ordinal]) return (idx, idx_sub, val_eax, val_ebx, val_ecx, val_edx)
[ "def", "get_cpuid_leaf_by_ordinal", "(", "self", ",", "ordinal", ")", ":", "if", "not", "isinstance", "(", "ordinal", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"ordinal can only be an instance of type baseinteger\"", ")", "(", "idx", ",", "idx_sub", ",", "val_eax", ",", "val_ebx", ",", "val_ecx", ",", "val_edx", ")", "=", "self", ".", "_call", "(", "\"getCPUIDLeafByOrdinal\"", ",", "in_p", "=", "[", "ordinal", "]", ")", "return", "(", "idx", ",", "idx_sub", ",", "val_eax", ",", "val_ebx", ",", "val_ecx", ",", "val_edx", ")" ]
Used to enumerate CPUID information override values. in ordinal of type int The ordinal number of the leaf to get. out idx of type int CPUID leaf index. out idx_sub of type int CPUID leaf sub-index. out val_eax of type int CPUID leaf value for register eax. out val_ebx of type int CPUID leaf value for register ebx. out val_ecx of type int CPUID leaf value for register ecx. out val_edx of type int CPUID leaf value for register edx. raises :class:`OleErrorInvalidarg` Invalid ordinal number is out of range.
[ "Used", "to", "enumerate", "CPUID", "information", "override", "values", "." ]
python
train
31.969697
tensorflow/mesh
mesh_tensorflow/ops.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1121-L1155
def combine_slices(self, slices, tensor_shape, device=None): """Turns a set of slices into a single tensor. Args: slices: list of tf.Tensor with length self.size. tensor_shape: Shape. device: optional str. If absent, we use the devices of the slices. Returns: tf.Tensor. """ if tensor_shape.ndims == 0: return slices[0] ret = slices[:] tensor_layout = self.tensor_layout(tensor_shape) for mesh_dim, tensor_axis in zip( self.shape, tensor_layout.mesh_axis_to_tensor_axis(self.ndims)): slice_size = len(ret) // mesh_dim.size if tensor_axis is None: ret = ret[:slice_size] else: if device: devices = [device] * slice_size else: devices = [ret[i].device for i in xrange(slice_size)] concat_inputs = [] for i in xrange(slice_size): concat_inputs.append( [ret[i + slice_size * j] for j in xrange(mesh_dim.size)]) ret = parallel( devices, tf.concat, concat_inputs, axis=[tensor_axis] * len(devices)) assert len(ret) == 1 return ret[0]
[ "def", "combine_slices", "(", "self", ",", "slices", ",", "tensor_shape", ",", "device", "=", "None", ")", ":", "if", "tensor_shape", ".", "ndims", "==", "0", ":", "return", "slices", "[", "0", "]", "ret", "=", "slices", "[", ":", "]", "tensor_layout", "=", "self", ".", "tensor_layout", "(", "tensor_shape", ")", "for", "mesh_dim", ",", "tensor_axis", "in", "zip", "(", "self", ".", "shape", ",", "tensor_layout", ".", "mesh_axis_to_tensor_axis", "(", "self", ".", "ndims", ")", ")", ":", "slice_size", "=", "len", "(", "ret", ")", "//", "mesh_dim", ".", "size", "if", "tensor_axis", "is", "None", ":", "ret", "=", "ret", "[", ":", "slice_size", "]", "else", ":", "if", "device", ":", "devices", "=", "[", "device", "]", "*", "slice_size", "else", ":", "devices", "=", "[", "ret", "[", "i", "]", ".", "device", "for", "i", "in", "xrange", "(", "slice_size", ")", "]", "concat_inputs", "=", "[", "]", "for", "i", "in", "xrange", "(", "slice_size", ")", ":", "concat_inputs", ".", "append", "(", "[", "ret", "[", "i", "+", "slice_size", "*", "j", "]", "for", "j", "in", "xrange", "(", "mesh_dim", ".", "size", ")", "]", ")", "ret", "=", "parallel", "(", "devices", ",", "tf", ".", "concat", ",", "concat_inputs", ",", "axis", "=", "[", "tensor_axis", "]", "*", "len", "(", "devices", ")", ")", "assert", "len", "(", "ret", ")", "==", "1", "return", "ret", "[", "0", "]" ]
Turns a set of slices into a single tensor. Args: slices: list of tf.Tensor with length self.size. tensor_shape: Shape. device: optional str. If absent, we use the devices of the slices. Returns: tf.Tensor.
[ "Turns", "a", "set", "of", "slices", "into", "a", "single", "tensor", "." ]
python
train
31.657143
suds-community/suds
suds/xsd/sxbuiltin.py
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/xsd/sxbuiltin.py#L334-L347
def create(cls, schema, name): """ Create an object based on the root tag name. @param schema: A schema object. @type schema: L{schema.Schema} @param name: The name. @type name: str @return: The created object. @rtype: L{XBuiltin} """ fn = cls.tags.get(name, XBuiltin) return fn(schema, name)
[ "def", "create", "(", "cls", ",", "schema", ",", "name", ")", ":", "fn", "=", "cls", ".", "tags", ".", "get", "(", "name", ",", "XBuiltin", ")", "return", "fn", "(", "schema", ",", "name", ")" ]
Create an object based on the root tag name. @param schema: A schema object. @type schema: L{schema.Schema} @param name: The name. @type name: str @return: The created object. @rtype: L{XBuiltin}
[ "Create", "an", "object", "based", "on", "the", "root", "tag", "name", "." ]
python
train
26.357143
aleju/imgaug
imgaug/augmentables/polys.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/polys.py#L712-L725
def to_shapely_polygon(self): """ Convert this polygon to a Shapely polygon. Returns ------- shapely.geometry.Polygon The Shapely polygon matching this polygon's exterior. """ # load shapely lazily, which makes the dependency more optional import shapely.geometry return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])
[ "def", "to_shapely_polygon", "(", "self", ")", ":", "# load shapely lazily, which makes the dependency more optional", "import", "shapely", ".", "geometry", "return", "shapely", ".", "geometry", ".", "Polygon", "(", "[", "(", "point", "[", "0", "]", ",", "point", "[", "1", "]", ")", "for", "point", "in", "self", ".", "exterior", "]", ")" ]
Convert this polygon to a Shapely polygon. Returns ------- shapely.geometry.Polygon The Shapely polygon matching this polygon's exterior.
[ "Convert", "this", "polygon", "to", "a", "Shapely", "polygon", "." ]
python
valid
30
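A brief usage sketch for to_shapely_polygon above; the coordinates are illustrative, and shapely must be installed since the import happens lazily inside the method.

>>> from imgaug.augmentables.polys import Polygon
>>> poly = Polygon([(0, 0), (4, 0), (4, 4)])
>>> poly.to_shapely_polygon().area
8.0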
adafruit/Adafruit_Python_GPIO
Adafruit_GPIO/I2C.py
https://github.com/adafruit/Adafruit_Python_GPIO/blob/a92a23d6b5869663b2bc1ccf78bb11585076a9c4/Adafruit_GPIO/I2C.py#L148-L153
def readU8(self, register): """Read an unsigned byte from the specified register.""" result = self._bus.read_byte_data(self._address, register) & 0xFF self._logger.debug("Read 0x%02X from register 0x%02X", result, register) return result
[ "def", "readU8", "(", "self", ",", "register", ")", ":", "result", "=", "self", ".", "_bus", ".", "read_byte_data", "(", "self", ".", "_address", ",", "register", ")", "&", "0xFF", "self", ".", "_logger", ".", "debug", "(", "\"Read 0x%02X from register 0x%02X\"", ",", "result", ",", "register", ")", "return", "result" ]
Read an unsigned byte from the specified register.
[ "Read", "an", "unsigned", "byte", "from", "the", "specified", "register", "." ]
python
valid
47.5
ThreatConnect-Inc/tcex
tcex/tcex_ti_batch.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti_batch.py#L579-L592
def data_indicators(self, indicators, entity_count): """Process Indicator data.""" data = [] # process indicator objects for xid, indicator_data in indicators.items(): entity_count += 1 if isinstance(indicator_data, dict): data.append(indicator_data) else: data.append(indicator_data.data) del indicators[xid] if entity_count >= self._batch_max_chunk: break return data, entity_count
[ "def", "data_indicators", "(", "self", ",", "indicators", ",", "entity_count", ")", ":", "data", "=", "[", "]", "# process indicator objects", "for", "xid", ",", "indicator_data", "in", "indicators", ".", "items", "(", ")", ":", "entity_count", "+=", "1", "if", "isinstance", "(", "indicator_data", ",", "dict", ")", ":", "data", ".", "append", "(", "indicator_data", ")", "else", ":", "data", ".", "append", "(", "indicator_data", ".", "data", ")", "del", "indicators", "[", "xid", "]", "if", "entity_count", ">=", "self", ".", "_batch_max_chunk", ":", "break", "return", "data", ",", "entity_count" ]
Process Indicator data.
[ "Process", "Indicator", "data", "." ]
python
train
37
johnbywater/eventsourcing
eventsourcing/application/system.py
https://github.com/johnbywater/eventsourcing/blob/de2c22c653fdccf2f5ee96faea74453ff1847e42/eventsourcing/application/system.py#L223-L255
def run_followers(self, prompt): """ First caller adds a prompt to queue and runs followers until there are no more pending prompts. Subsequent callers just add a prompt to the queue, avoiding recursion. """ assert isinstance(prompt, Prompt) # Put the prompt on the queue. self.pending_prompts.put(prompt) if self.iteration_lock.acquire(False): start_time = time.time() i = 0 try: while True: try: prompt = self.pending_prompts.get(False) except Empty: break else: followers = self.system.followers[prompt.process_name] for follower_name in followers: follower = self.system.processes[follower_name] follower.run(prompt) i += 1 self.pending_prompts.task_done() finally: run_frequency = i / (time.time() - start_time) # print(f"Run frequency: {run_frequency}") self.iteration_lock.release()
[ "def", "run_followers", "(", "self", ",", "prompt", ")", ":", "assert", "isinstance", "(", "prompt", ",", "Prompt", ")", "# Put the prompt on the queue.", "self", ".", "pending_prompts", ".", "put", "(", "prompt", ")", "if", "self", ".", "iteration_lock", ".", "acquire", "(", "False", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "i", "=", "0", "try", ":", "while", "True", ":", "try", ":", "prompt", "=", "self", ".", "pending_prompts", ".", "get", "(", "False", ")", "except", "Empty", ":", "break", "else", ":", "followers", "=", "self", ".", "system", ".", "followers", "[", "prompt", ".", "process_name", "]", "for", "follower_name", "in", "followers", ":", "follower", "=", "self", ".", "system", ".", "processes", "[", "follower_name", "]", "follower", ".", "run", "(", "prompt", ")", "i", "+=", "1", "self", ".", "pending_prompts", ".", "task_done", "(", ")", "finally", ":", "run_frequency", "=", "i", "/", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "# print(f\"Run frequency: {run_frequency}\")", "self", ".", "iteration_lock", ".", "release", "(", ")" ]
First caller adds a prompt to queue and runs followers until there are no more pending prompts. Subsequent callers just add a prompt to the queue, avoiding recursion.
[ "First", "caller", "adds", "a", "prompt", "to", "queue", "and", "runs", "followers", "until", "there", "are", "no", "more", "pending", "prompts", "." ]
python
train
37.060606
googleapis/google-cloud-python
storage/google/cloud/storage/acl.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/acl.py#L133-L140
def revoke(self, role): """Remove a role from the entity. :type role: str :param role: The role to remove from the entity. """ if role in self.roles: self.roles.remove(role)
[ "def", "revoke", "(", "self", ",", "role", ")", ":", "if", "role", "in", "self", ".", "roles", ":", "self", ".", "roles", ".", "remove", "(", "role", ")" ]
Remove a role from the entity. :type role: str :param role: The role to remove from the entity.
[ "Remove", "a", "role", "from", "the", "entity", "." ]
python
train
27.375
KelSolaar/Umbra
umbra/ui/widgets/codeEditor_QPlainTextEdit.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/codeEditor_QPlainTextEdit.py#L1157-L1189
def toggle_comments(self): """ Toggles comments on the document selected lines. :return: Method success. :rtype: bool """ if not self.__comment_marker: return True cursor = self.textCursor() if not cursor.hasSelection(): cursor.movePosition(QTextCursor.StartOfBlock) line = foundations.strings.to_string(self.document().findBlockByNumber(cursor.blockNumber()).text()) if line.startswith(self.__comment_marker): foundations.common.repeat(cursor.deleteChar, len(self.__comment_marker)) else: cursor.insertText(self.__comment_marker) else: block = self.document().findBlock(cursor.selectionStart()) while True: block_cursor = self.textCursor() block_cursor.setPosition(block.position()) if foundations.strings.to_string(block.text()).startswith(self.__comment_marker): foundations.common.repeat(block_cursor.deleteChar, len(self.__comment_marker)) else: block_cursor.insertText(self.__comment_marker) if block.contains(cursor.selectionEnd()): break block = block.next() return True
[ "def", "toggle_comments", "(", "self", ")", ":", "if", "not", "self", ".", "__comment_marker", ":", "return", "True", "cursor", "=", "self", ".", "textCursor", "(", ")", "if", "not", "cursor", ".", "hasSelection", "(", ")", ":", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "StartOfBlock", ")", "line", "=", "foundations", ".", "strings", ".", "to_string", "(", "self", ".", "document", "(", ")", ".", "findBlockByNumber", "(", "cursor", ".", "blockNumber", "(", ")", ")", ".", "text", "(", ")", ")", "if", "line", ".", "startswith", "(", "self", ".", "__comment_marker", ")", ":", "foundations", ".", "common", ".", "repeat", "(", "cursor", ".", "deleteChar", ",", "len", "(", "self", ".", "__comment_marker", ")", ")", "else", ":", "cursor", ".", "insertText", "(", "self", ".", "__comment_marker", ")", "else", ":", "block", "=", "self", ".", "document", "(", ")", ".", "findBlock", "(", "cursor", ".", "selectionStart", "(", ")", ")", "while", "True", ":", "block_cursor", "=", "self", ".", "textCursor", "(", ")", "block_cursor", ".", "setPosition", "(", "block", ".", "position", "(", ")", ")", "if", "foundations", ".", "strings", ".", "to_string", "(", "block", ".", "text", "(", ")", ")", ".", "startswith", "(", "self", ".", "__comment_marker", ")", ":", "foundations", ".", "common", ".", "repeat", "(", "block_cursor", ".", "deleteChar", ",", "len", "(", "self", ".", "__comment_marker", ")", ")", "else", ":", "block_cursor", ".", "insertText", "(", "self", ".", "__comment_marker", ")", "if", "block", ".", "contains", "(", "cursor", ".", "selectionEnd", "(", ")", ")", ":", "break", "block", "=", "block", ".", "next", "(", ")", "return", "True" ]
Toggles comments on the document selected lines. :return: Method success. :rtype: bool
[ "Toggles", "comments", "on", "the", "document", "selected", "lines", "." ]
python
train
39.454545
matplotlib/cmocean
cmocean/plots.py
https://github.com/matplotlib/cmocean/blob/37edd4a209a733d87dea7fed9eb22adc1d5a57c8/cmocean/plots.py#L64-L115
def plot_gallery(saveplot=False): '''Make plot of colormaps and labels, like in the matplotlib gallery. :param saveplot=False: Whether to save the plot or not. ''' from colorspacious import cspace_converter gradient = np.linspace(0, 1, 256) gradient = np.vstack((gradient, gradient)) x = np.linspace(0.0, 1.0, 256) fig, axes = plt.subplots(nrows=int(len(cm.cmap_d)/2), ncols=1, figsize=(6, 12)) fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99, wspace=0.05) for ax, cmapname in zip(axes, cm.cmapnames): if '_r' in cmapname: # skip reversed versions for plot continue cmap = cm.cmap_d[cmapname] # get the colormap instance rgb = cmap(x)[np.newaxis, :, :3] # Find a good conversion to grayscale jch = cspace_converter("sRGB1", "CAM02-UCS")(rgb) # Not sure why to use JCh instead so using this. L = jch[0, :, 0] L = np.float32(np.vstack((L, L, L))) ax.imshow(gradient, aspect='auto', cmap=cmap) pos1 = ax.get_position() # get the original position pos2 = [pos1.x0, pos1.y0, pos1.width, pos1.height / 3.0] axbw = fig.add_axes(pos2) # colorbar axes axbw.set_axis_off() axbw.imshow(L, aspect='auto', cmap=cm.gray, vmin=0, vmax=100.) pos = list(ax.get_position().bounds) x_text = pos[0] - 0.01 y_text = pos[1] + pos[3]/2. fig.text(x_text, y_text, cmap.name, va='center', ha='right') # Turn off *all* ticks & spines, not just the ones with colormaps. for ax in axes: ax.set_axis_off() if saveplot: fig.savefig('figures/gallery.pdf', bbox_inches='tight') fig.savefig('figures/gallery.png', bbox_inches='tight') plt.show()
[ "def", "plot_gallery", "(", "saveplot", "=", "False", ")", ":", "from", "colorspacious", "import", "cspace_converter", "gradient", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "256", ")", "gradient", "=", "np", ".", "vstack", "(", "(", "gradient", ",", "gradient", ")", ")", "x", "=", "np", ".", "linspace", "(", "0.0", ",", "1.0", ",", "256", ")", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "nrows", "=", "int", "(", "len", "(", "cm", ".", "cmap_d", ")", "/", "2", ")", ",", "ncols", "=", "1", ",", "figsize", "=", "(", "6", ",", "12", ")", ")", "fig", ".", "subplots_adjust", "(", "top", "=", "0.99", ",", "bottom", "=", "0.01", ",", "left", "=", "0.2", ",", "right", "=", "0.99", ",", "wspace", "=", "0.05", ")", "for", "ax", ",", "cmapname", "in", "zip", "(", "axes", ",", "cm", ".", "cmapnames", ")", ":", "if", "'_r'", "in", "cmapname", ":", "# skip reversed versions for plot", "continue", "cmap", "=", "cm", ".", "cmap_d", "[", "cmapname", "]", "# get the colormap instance", "rgb", "=", "cmap", "(", "x", ")", "[", "np", ".", "newaxis", ",", ":", ",", ":", "3", "]", "# Find a good conversion to grayscale", "jch", "=", "cspace_converter", "(", "\"sRGB1\"", ",", "\"CAM02-UCS\"", ")", "(", "rgb", ")", "# Not sure why to use JCh instead so using this.", "L", "=", "jch", "[", "0", ",", ":", ",", "0", "]", "L", "=", "np", ".", "float32", "(", "np", ".", "vstack", "(", "(", "L", ",", "L", ",", "L", ")", ")", ")", "ax", ".", "imshow", "(", "gradient", ",", "aspect", "=", "'auto'", ",", "cmap", "=", "cmap", ")", "pos1", "=", "ax", ".", "get_position", "(", ")", "# get the original position", "pos2", "=", "[", "pos1", ".", "x0", ",", "pos1", ".", "y0", ",", "pos1", ".", "width", ",", "pos1", ".", "height", "/", "3.0", "]", "axbw", "=", "fig", ".", "add_axes", "(", "pos2", ")", "# colorbar axes", "axbw", ".", "set_axis_off", "(", ")", "axbw", ".", "imshow", "(", "L", ",", "aspect", "=", "'auto'", ",", "cmap", "=", "cm", ".", "gray", ",", "vmin", "=", "0", ",", "vmax", "=", "100.", ")", "pos", "=", "list", "(", "ax", ".", "get_position", "(", ")", ".", "bounds", ")", "x_text", "=", "pos", "[", "0", "]", "-", "0.01", "y_text", "=", "pos", "[", "1", "]", "+", "pos", "[", "3", "]", "/", "2.", "fig", ".", "text", "(", "x_text", ",", "y_text", ",", "cmap", ".", "name", ",", "va", "=", "'center'", ",", "ha", "=", "'right'", ")", "# Turn off *all* ticks & spines, not just the ones with colormaps.", "for", "ax", "in", "axes", ":", "ax", ".", "set_axis_off", "(", ")", "if", "saveplot", ":", "fig", ".", "savefig", "(", "'figures/gallery.pdf'", ",", "bbox_inches", "=", "'tight'", ")", "fig", ".", "savefig", "(", "'figures/gallery.png'", ",", "bbox_inches", "=", "'tight'", ")", "plt", ".", "show", "(", ")" ]
Make plot of colormaps and labels, like in the matplotlib gallery. :param saveplot=False: Whether to save the plot or not.
[ "Make", "plot", "of", "colormaps", "and", "labels", "like", "in", "the", "matplotlib", "gallery", "." ]
python
train
33.25
googledatalab/pydatalab
google/datalab/bigquery/_table.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_table.py#L803-L826
def snapshot(self, at): """ Return a new Table which is a snapshot of this table at the specified time. Args: at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta (relative to current time). The result must be after the table was created and no more than seven days in the past. Passing None will get a reference to the oldest snapshot. Note that using a datetime will get a snapshot at an absolute point in time, while a timedelta will provide a varying snapshot; any queries issued against such a Table will be done against a snapshot that has an age relative to the execution time of the query. Returns: A new Table object referencing the snapshot. Raises: An exception if this Table is already decorated, or if the time specified is invalid. """ if self._name_parts.decorator != '': raise Exception("Cannot use snapshot() on an already decorated table") value = Table._convert_decorator_time(at) return Table("%s@%s" % (self._full_name, str(value)), context=self._context)
[ "def", "snapshot", "(", "self", ",", "at", ")", ":", "if", "self", ".", "_name_parts", ".", "decorator", "!=", "''", ":", "raise", "Exception", "(", "\"Cannot use snapshot() on an already decorated table\"", ")", "value", "=", "Table", ".", "_convert_decorator_time", "(", "at", ")", "return", "Table", "(", "\"%s@%s\"", "%", "(", "self", ".", "_full_name", ",", "str", "(", "value", ")", ")", ",", "context", "=", "self", ".", "_context", ")" ]
Return a new Table which is a snapshot of this table at the specified time. Args: at: the time of the snapshot. This can be a Python datetime (absolute) or timedelta (relative to current time). The result must be after the table was created and no more than seven days in the past. Passing None will get a reference to the oldest snapshot. Note that using a datetime will get a snapshot at an absolute point in time, while a timedelta will provide a varying snapshot; any queries issued against such a Table will be done against a snapshot that has an age relative to the execution time of the query. Returns: A new Table object referencing the snapshot. Raises: An exception if this Table is already decorated, or if the time specified is invalid.
[ "Return", "a", "new", "Table", "which", "is", "a", "snapshot", "of", "this", "table", "at", "the", "specified", "time", "." ]
python
train
45.916667
remix/partridge
partridge/utilities.py
https://github.com/remix/partridge/blob/0ba80fa30035e5e09fd8d7a7bdf1f28b93d53d03/partridge/utilities.py#L20-L30
def remove_node_attributes(G: nx.DiGraph, attributes: Union[str, Iterable[str]]): """ Return a copy of the graph with the given attributes deleted from all nodes. """ G = G.copy() for _, data in G.nodes(data=True): for attribute in setwrap(attributes): if attribute in data: del data[attribute] return G
[ "def", "remove_node_attributes", "(", "G", ":", "nx", ".", "DiGraph", ",", "attributes", ":", "Union", "[", "str", ",", "Iterable", "[", "str", "]", "]", ")", ":", "G", "=", "G", ".", "copy", "(", ")", "for", "_", ",", "data", "in", "G", ".", "nodes", "(", "data", "=", "True", ")", ":", "for", "attribute", "in", "setwrap", "(", "attributes", ")", ":", "if", "attribute", "in", "data", ":", "del", "data", "[", "attribute", "]", "return", "G" ]
Return a copy of the graph with the given attributes deleted from all nodes.
[ "Return", "a", "copy", "of", "the", "graph", "with", "the", "given", "attributes", "deleted", "from", "all", "nodes", "." ]
python
train
32.454545
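A small sketch of remove_node_attributes' copy-and-delete behavior; the node names and attributes here are hypothetical, and the function is assumed to be imported from partridge.utilities.

>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('s1', dist=3.0, name='stop_1')
>>> H = remove_node_attributes(G, 'dist')
>>> dict(H.nodes['s1'])
{'name': 'stop_1'}
>>> 'dist' in G.nodes['s1']  # the original graph is left untouched
True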
apache/spark
python/pyspark/sql/functions.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1710-L1735
def split(str, pattern, limit=-1): """ Splits str around matches of the given pattern. :param str: a string expression to split :param pattern: a string representing a regular expression. The regex string should be a Java regular expression. :param limit: an integer which controls the number of times `pattern` is applied. * ``limit > 0``: The resulting array's length will not be more than `limit`, and the resulting array's last entry will contain all input beyond the last matched pattern. * ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting array can be of any size. .. versionchanged:: 3.0 `split` now takes an optional `limit` field. If not provided, default limit value is -1. >>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',]) >>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect() [Row(s=[u'one', u'twoBthreeC'])] >>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect() [Row(s=[u'one', u'two', u'three', u''])] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit))
[ "def", "split", "(", "str", ",", "pattern", ",", "limit", "=", "-", "1", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "split", "(", "_to_java_column", "(", "str", ")", ",", "pattern", ",", "limit", ")", ")" ]
Splits str around matches of the given pattern. :param str: a string expression to split :param pattern: a string representing a regular expression. The regex string should be a Java regular expression. :param limit: an integer which controls the number of times `pattern` is applied. * ``limit > 0``: The resulting array's length will not be more than `limit`, and the resulting array's last entry will contain all input beyond the last matched pattern. * ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting array can be of any size. .. versionchanged:: 3.0 `split` now takes an optional `limit` field. If not provided, default limit value is -1. >>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',]) >>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect() [Row(s=[u'one', u'twoBthreeC'])] >>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect() [Row(s=[u'one', u'two', u'three', u''])]
[ "Splits", "str", "around", "matches", "of", "the", "given", "pattern", "." ]
python
train
47.730769
fabioz/PyDev.Debugger
pydevd.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd.py#L729-L752
def _exclude_by_filter(self, frame, filename): ''' :param str filename: The filename to filter. :return: True if it should be excluded, False if it should be included and None if no rule matched the given file. ''' try: return self._exclude_by_filter_cache[filename] except KeyError: cache = self._exclude_by_filter_cache abs_real_path_and_basename = get_abs_path_real_path_and_base_from_file(filename) # pydevd files are always filtered out if self.get_file_type(abs_real_path_and_basename) == self.PYDEV_FILE: cache[filename] = True else: module_name = None if self._files_filtering.require_module: module_name = frame.f_globals.get('__name__') cache[filename] = self._files_filtering.exclude_by_filter(filename, module_name) return cache[filename]
[ "def", "_exclude_by_filter", "(", "self", ",", "frame", ",", "filename", ")", ":", "try", ":", "return", "self", ".", "_exclude_by_filter_cache", "[", "filename", "]", "except", "KeyError", ":", "cache", "=", "self", ".", "_exclude_by_filter_cache", "abs_real_path_and_basename", "=", "get_abs_path_real_path_and_base_from_file", "(", "filename", ")", "# pydevd files are always filtered out", "if", "self", ".", "get_file_type", "(", "abs_real_path_and_basename", ")", "==", "self", ".", "PYDEV_FILE", ":", "cache", "[", "filename", "]", "=", "True", "else", ":", "module_name", "=", "None", "if", "self", ".", "_files_filtering", ".", "require_module", ":", "module_name", "=", "frame", ".", "f_globals", ".", "get", "(", "'__name__'", ")", "cache", "[", "filename", "]", "=", "self", ".", "_files_filtering", ".", "exclude_by_filter", "(", "filename", ",", "module_name", ")", "return", "cache", "[", "filename", "]" ]
:param str filename: The filename to filter. :return: True if it should be excluded, False if it should be included and None if no rule matched the given file.
[ ":", "param", "str", "filename", ":", "The", "filename", "to", "filter", "." ]
python
train
40.416667
rsgalloway/grit
grit/server/cherrypy/__init__.py
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/server/cherrypy/__init__.py#L137-L183
def read_headers(rfile, hdict=None): """Read headers from the given stream into the given header dict. If hdict is None, a new header dict is created. Returns the populated header dict. Headers which are repeated are folded together using a comma if their specification so dictates. This function raises ValueError when the read bytes violate the HTTP spec. You should probably return "400 Bad Request" if this happens. """ if hdict is None: hdict = {} while True: line = rfile.readline() if not line: # No more data--illegal end of headers raise ValueError("Illegal end of headers.") if line == CRLF: # Normal end of headers break if not line.endswith(CRLF): raise ValueError("HTTP requires CRLF terminators") if line[0] in ' \t': # It's a continuation line. v = line.strip() else: try: k, v = line.split(":", 1) except ValueError: raise ValueError("Illegal header line.") # TODO: what about TE and WWW-Authenticate? k = k.strip().title() v = v.strip() hname = k if k in comma_separated_headers: existing = hdict.get(hname) if existing: v = ", ".join((existing, v)) hdict[hname] = v return hdict
[ "def", "read_headers", "(", "rfile", ",", "hdict", "=", "None", ")", ":", "if", "hdict", "is", "None", ":", "hdict", "=", "{", "}", "while", "True", ":", "line", "=", "rfile", ".", "readline", "(", ")", "if", "not", "line", ":", "# No more data--illegal end of headers", "raise", "ValueError", "(", "\"Illegal end of headers.\"", ")", "if", "line", "==", "CRLF", ":", "# Normal end of headers", "break", "if", "not", "line", ".", "endswith", "(", "CRLF", ")", ":", "raise", "ValueError", "(", "\"HTTP requires CRLF terminators\"", ")", "if", "line", "[", "0", "]", "in", "' \\t'", ":", "# It's a continuation line.", "v", "=", "line", ".", "strip", "(", ")", "else", ":", "try", ":", "k", ",", "v", "=", "line", ".", "split", "(", "\":\"", ",", "1", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Illegal header line.\"", ")", "# TODO: what about TE and WWW-Authenticate?", "k", "=", "k", ".", "strip", "(", ")", ".", "title", "(", ")", "v", "=", "v", ".", "strip", "(", ")", "hname", "=", "k", "if", "k", "in", "comma_separated_headers", ":", "existing", "=", "hdict", ".", "get", "(", "hname", ")", "if", "existing", ":", "v", "=", "\", \"", ".", "join", "(", "(", "existing", ",", "v", ")", ")", "hdict", "[", "hname", "]", "=", "v", "return", "hdict" ]
Read headers from the given stream into the given header dict. If hdict is None, a new header dict is created. Returns the populated header dict. Headers which are repeated are folded together using a comma if their specification so dictates. This function raises ValueError when the read bytes violate the HTTP spec. You should probably return "400 Bad Request" if this happens.
[ "Read", "headers", "from", "the", "given", "stream", "into", "the", "given", "header", "dict", ".", "If", "hdict", "is", "None", "a", "new", "header", "dict", "is", "created", ".", "Returns", "the", "populated", "header", "dict", ".", "Headers", "which", "are", "repeated", "are", "folded", "together", "using", "a", "comma", "if", "their", "specification", "so", "dictates", ".", "This", "function", "raises", "ValueError", "when", "the", "read", "bytes", "violate", "the", "HTTP", "spec", ".", "You", "should", "probably", "return", "400", "Bad", "Request", "if", "this", "happens", "." ]
python
train
30.765957
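A sketch of read_headers' folding behavior, assuming the module-level constants CRLF = '\r\n' and a comma_separated_headers list containing 'Accept' (both are defined elsewhere in the original file): repeated comma-separated headers are joined, others are kept as-is.

>>> from io import StringIO
>>> rfile = StringIO('Accept: text/html\r\nAccept: application/json\r\nHost: example.org\r\n\r\n')
>>> read_headers(rfile)
{'Accept': 'text/html, application/json', 'Host': 'example.org'}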
hydpy-dev/hydpy
hydpy/core/devicetools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/devicetools.py#L444-L494
def remove_device(self, device: Union[DeviceType, str]) -> None: """Remove the given |Node| or |Element| object from the actual |Nodes| or |Elements| object. You can pass either a string or a device: >>> from hydpy import Node, Nodes >>> nodes = Nodes('node_x', 'node_y') >>> node_x, node_y = nodes >>> nodes.remove_device(Node('node_y')) >>> nodes Nodes("node_x") >>> nodes.remove_device(Node('node_x')) >>> nodes Nodes() >>> nodes.remove_device(Node('node_z')) Traceback (most recent call last): ... ValueError: While trying to remove the device `node_z` from a \ Nodes object, the following error occurred: The actual Nodes object does \ not handle such a device. Method |Devices.remove_device| is disabled for immutable |Nodes| and |Elements| objects: >>> nodes.mutable = False >>> nodes.remove_device('node_z') Traceback (most recent call last): ... RuntimeError: While trying to remove the device `node_z` from a \ Nodes object, the following error occurred: Removing devices from \ immutable Nodes objects is not allowed. """ try: if self.mutable: _device = self.get_contentclass()(device) try: del self._name2device[_device.name] except KeyError: raise ValueError( f'The actual {objecttools.classname(self)} ' f'object does not handle such a device.') del _id2devices[_device][id(self)] else: raise RuntimeError( f'Removing devices from immutable ' f'{objecttools.classname(self)} objects is not allowed.') except BaseException: objecttools.augment_excmessage( f'While trying to remove the device `{device}` from a ' f'{objecttools.classname(self)} object')
[ "def", "remove_device", "(", "self", ",", "device", ":", "Union", "[", "DeviceType", ",", "str", "]", ")", "->", "None", ":", "try", ":", "if", "self", ".", "mutable", ":", "_device", "=", "self", ".", "get_contentclass", "(", ")", "(", "device", ")", "try", ":", "del", "self", ".", "_name2device", "[", "_device", ".", "name", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "f'The actual {objecttools.classname(self)} '", "f'object does not handle such a device.'", ")", "del", "_id2devices", "[", "_device", "]", "[", "id", "(", "self", ")", "]", "else", ":", "raise", "RuntimeError", "(", "f'Removing devices from immutable '", "f'{objecttools.classname(self)} objects is not allowed.'", ")", "except", "BaseException", ":", "objecttools", ".", "augment_excmessage", "(", "f'While trying to remove the device `{device}` from a '", "f'{objecttools.classname(self)} object'", ")" ]
Remove the given |Node| or |Element| object from the actual |Nodes| or |Elements| object. You can pass either a string or a device: >>> from hydpy import Node, Nodes >>> nodes = Nodes('node_x', 'node_y') >>> node_x, node_y = nodes >>> nodes.remove_device(Node('node_y')) >>> nodes Nodes("node_x") >>> nodes.remove_device(Node('node_x')) >>> nodes Nodes() >>> nodes.remove_device(Node('node_z')) Traceback (most recent call last): ... ValueError: While trying to remove the device `node_z` from a \ Nodes object, the following error occurred: The actual Nodes object does \ not handle such a device. Method |Devices.remove_device| is disabled for immutable |Nodes| and |Elements| objects: >>> nodes.mutable = False >>> nodes.remove_device('node_z') Traceback (most recent call last): ... RuntimeError: While trying to remove the device `node_z` from a \ Nodes object, the following error occurred: Removing devices from \ immutable Nodes objects is not allowed.
[ "Remove", "the", "given", "|Node|", "or", "|Element|", "object", "from", "the", "actual", "|Nodes|", "or", "|Elements|", "object", "." ]
python
train
39.27451
Cue/scales
src/greplin/scales/aggregation.py
https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/aggregation.py#L358-L386
def _aggregate(self, source, aggregators, data, result): """Performs aggregation at a specific node in the data/aggregator tree.""" if data is None: return if hasattr(aggregators, 'items'): # Keep walking the tree. for key, value in six.iteritems(aggregators): if isinstance(key, tuple): key, regex = key for dataKey, dataValue in six.iteritems(data): if regex.match(dataKey): result.setdefault(key, {}) self._aggregate(source, value, dataValue, result[key]) else: if key == '*': for dataKey, dataValue in six.iteritems(data): result.setdefault(dataKey, {}) self._aggregate(source, value, dataValue, result[dataKey]) elif key in data: result.setdefault(key, {}) self._aggregate(source, value, data[key], result[key]) else: # We found a leaf. for aggregator in aggregators: if aggregator.name not in result: result[aggregator.name] = aggregator.clone() result[aggregator.name].addValue(source, data)
[ "def", "_aggregate", "(", "self", ",", "source", ",", "aggregators", ",", "data", ",", "result", ")", ":", "if", "data", "is", "None", ":", "return", "if", "hasattr", "(", "aggregators", ",", "'items'", ")", ":", "# Keep walking the tree.", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "aggregators", ")", ":", "if", "isinstance", "(", "key", ",", "tuple", ")", ":", "key", ",", "regex", "=", "key", "for", "dataKey", ",", "dataValue", "in", "six", ".", "iteritems", "(", "data", ")", ":", "if", "regex", ".", "match", "(", "dataKey", ")", ":", "result", ".", "setdefault", "(", "key", ",", "{", "}", ")", "self", ".", "_aggregate", "(", "source", ",", "value", ",", "dataValue", ",", "result", "[", "key", "]", ")", "else", ":", "if", "key", "==", "'*'", ":", "for", "dataKey", ",", "dataValue", "in", "six", ".", "iteritems", "(", "data", ")", ":", "result", ".", "setdefault", "(", "dataKey", ",", "{", "}", ")", "self", ".", "_aggregate", "(", "source", ",", "value", ",", "dataValue", ",", "result", "[", "dataKey", "]", ")", "elif", "key", "in", "data", ":", "result", ".", "setdefault", "(", "key", ",", "{", "}", ")", "self", ".", "_aggregate", "(", "source", ",", "value", ",", "data", "[", "key", "]", ",", "result", "[", "key", "]", ")", "else", ":", "# We found a leaf.", "for", "aggregator", "in", "aggregators", ":", "if", "aggregator", ".", "name", "not", "in", "result", ":", "result", "[", "aggregator", ".", "name", "]", "=", "aggregator", ".", "clone", "(", ")", "result", "[", "aggregator", ".", "name", "]", ".", "addValue", "(", "source", ",", "data", ")" ]
Performs aggregation at a specific node in the data/aggregator tree.
[ "Performs", "aggregation", "at", "a", "specific", "node", "in", "the", "data", "/", "aggregator", "tree", "." ]
python
train
38.068966
gbowerman/azurerm
examples/vmssdisk_cliauth.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/vmssdisk_cliauth.py#L28-L82
def main(): '''Main routine.''' # validate command line arguments arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--vmssname', '-n', required=True, action='store', help='Scale set name') arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name') arg_parser.add_argument('--operation', '-o', required=True, action='store', help='Operation (attach/detach)') arg_parser.add_argument('--vmid', '-i', required=True, action='store', help='VM id') arg_parser.add_argument('--lun', '-l', required=True, action='store', help='lun id') arg_parser.add_argument('--diskname', '-d', required=False, action='store', help='Optional disk name') args = arg_parser.parse_args() vmssname = args.vmssname rgname = args.rgname operation = args.operation vmid = args.vmid lun = int(args.lun) diskname = args.diskname if operation != 'attach' and operation != 'detach': sys.exit('--operation must be attach or detach') if diskname is None and operation == 'attach': sys.exit('--diskname is required for attach operation.') subscription_id = azurerm.get_subscription_from_cli() # authenticate access_token = azurerm.get_access_token_from_cli() # do a get on the VM vmssvm_model = azurerm.get_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid) # check operation if operation == 'attach': new_model = attach_model(subscription_id, rgname, vmssvm_model, diskname, lun) elif operation == 'detach': new_model = detach_model(vmssvm_model, lun) # do a put on the VM rmreturn = azurerm.put_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid, new_model) if rmreturn.status_code != 201: sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text) print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': '))) # dump the parsed JSON body; the Response object itself is not JSON-serializable
[ "def", "main", "(", ")", ":", "# validate command line arguments", "arg_parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "arg_parser", ".", "add_argument", "(", "'--vmssname'", ",", "'-n'", ",", "required", "=", "True", ",", "action", "=", "'store'", ",", "help", "=", "'Scale set name'", ")", "arg_parser", ".", "add_argument", "(", "'--rgname'", ",", "'-g'", ",", "required", "=", "True", ",", "action", "=", "'store'", ",", "help", "=", "'Resource Group Name'", ")", "arg_parser", ".", "add_argument", "(", "'--operation'", ",", "'-o'", ",", "required", "=", "True", ",", "action", "=", "'store'", ",", "help", "=", "'Operation (attach/detach)'", ")", "arg_parser", ".", "add_argument", "(", "'--vmid'", ",", "'-i'", ",", "required", "=", "True", ",", "action", "=", "'store'", ",", "help", "=", "'VM id'", ")", "arg_parser", ".", "add_argument", "(", "'--lun'", ",", "'-l'", ",", "required", "=", "True", ",", "action", "=", "'store'", ",", "help", "=", "'lun id'", ")", "arg_parser", ".", "add_argument", "(", "'--diskname'", ",", "'-d'", ",", "required", "=", "False", ",", "action", "=", "'store'", ",", "help", "=", "'Optional password'", ")", "args", "=", "arg_parser", ".", "parse_args", "(", ")", "vmssname", "=", "args", ".", "vmssname", "rgname", "=", "args", ".", "rgname", "operation", "=", "args", ".", "operation", "vmid", "=", "args", ".", "vmid", "lun", "=", "int", "(", "args", ".", "lun", ")", "diskname", "=", "args", ".", "diskname", "if", "operation", "!=", "'attach'", "and", "operation", "!=", "'detach'", ":", "sys", ".", "exit", "(", "'--operation must be attach or detach'", ")", "if", "diskname", "is", "None", "and", "operation", "==", "'attach'", ":", "sys", ".", "exit", "(", "'--diskname is required for attach operation.'", ")", "subscription_id", "=", "azurerm", ".", "get_subscription_from_cli", "(", ")", "# authenticate", "access_token", "=", "azurerm", ".", "get_access_token_from_cli", "(", ")", "# do a get on the VM", "vmssvm_model", "=", "azurerm", ".", "get_vmss_vm", "(", "access_token", ",", "subscription_id", ",", "rgname", ",", "vmssname", ",", "vmid", ")", "# check operation", "if", "operation", "==", "'attach'", ":", "new_model", "=", "attach_model", "(", "subscription_id", ",", "rgname", ",", "vmssvm_model", ",", "diskname", ",", "lun", ")", "else", ":", "if", "operation", "==", "'detach'", ":", "new_model", "=", "detach_model", "(", "vmssvm_model", ",", "lun", ")", "# do a put on the VM", "rmreturn", "=", "azurerm", ".", "put_vmss_vm", "(", "access_token", ",", "subscription_id", ",", "rgname", ",", "vmssname", ",", "vmid", ",", "new_model", ")", "if", "rmreturn", ".", "status_code", "!=", "201", ":", "sys", ".", "exit", "(", "'Error '", "+", "str", "(", "rmreturn", ".", "status_code", ")", "+", "' creating VM. '", "+", "rmreturn", ".", "text", ")", "print", "(", "json", ".", "dumps", "(", "rmreturn", ",", "sort_keys", "=", "False", ",", "indent", "=", "2", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")" ]
Main routine.
[ "Main", "routine", "." ]
python
train
39
tamasgal/km3pipe
km3pipe/tools.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L332-L334
def zero_pad(m, n=1): """Pad a matrix with zeros, on all sides.""" return np.pad(m, (n, n), mode='constant', constant_values=[0])
[ "def", "zero_pad", "(", "m", ",", "n", "=", "1", ")", ":", "return", "np", ".", "pad", "(", "m", ",", "(", "n", ",", "n", ")", ",", "mode", "=", "'constant'", ",", "constant_values", "=", "[", "0", "]", ")" ]
Pad a matrix with zeros, on all sides.
[ "Pad", "a", "matrix", "with", "zeros", "on", "all", "sides", "." ]
python
train
45
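For example, zero_pad applied to a 2x2 matrix of ones adds a one-cell border of zeros on every side.

>>> import numpy as np
>>> zero_pad(np.ones((2, 2), dtype=int))
array([[0, 0, 0, 0],
       [0, 1, 1, 0],
       [0, 1, 1, 0],
       [0, 0, 0, 0]])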
twosigma/marbles
marbles/core/marbles/core/marbles.py
https://github.com/twosigma/marbles/blob/f0c668be8344c70d4d63bc57e82c6f2da43c6925/marbles/core/marbles/core/marbles.py#L398-L406
def _validate_annotation(self, annotation): '''Ensures that the annotation has the right fields.''' required_keys = set(self._required_keys) keys = set(key for key, val in annotation.items() if val) missing_keys = required_keys.difference(keys) if missing_keys: error = 'Annotation missing required fields: {0}'.format( missing_keys) raise AnnotationError(error)
[ "def", "_validate_annotation", "(", "self", ",", "annotation", ")", ":", "required_keys", "=", "set", "(", "self", ".", "_required_keys", ")", "keys", "=", "set", "(", "key", "for", "key", ",", "val", "in", "annotation", ".", "items", "(", ")", "if", "val", ")", "missing_keys", "=", "required_keys", ".", "difference", "(", "keys", ")", "if", "missing_keys", ":", "error", "=", "'Annotation missing required fields: {0}'", ".", "format", "(", "missing_keys", ")", "raise", "AnnotationError", "(", "error", ")" ]
Ensures that the annotation has the right fields.
[ "Ensures", "that", "the", "annotation", "has", "the", "right", "fields", "." ]
python
train
48.222222
pyblish/pyblish-qml
pyblish_qml/vendor/mock.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/vendor/mock.py#L849-L878
def assert_has_calls(self, calls, any_order=False): """assert the mock has been called with the specified calls. The `mock_calls` list is checked for the calls. If `any_order` is False (the default) then the calls must be sequential. There can be extra calls before or after the specified calls. If `any_order` is True then the calls can be in any order, but they must all appear in `mock_calls`.""" if not any_order: if calls not in self.mock_calls: raise AssertionError( 'Calls not found.\nExpected: %r\n' 'Actual: %r' % (calls, self.mock_calls) ) return all_calls = list(self.mock_calls) not_found = [] for kall in calls: try: all_calls.remove(kall) except ValueError: not_found.append(kall) if not_found: raise AssertionError( '%r not all found in call list' % (tuple(not_found),) )
[ "def", "assert_has_calls", "(", "self", ",", "calls", ",", "any_order", "=", "False", ")", ":", "if", "not", "any_order", ":", "if", "calls", "not", "in", "self", ".", "mock_calls", ":", "raise", "AssertionError", "(", "'Calls not found.\\nExpected: %r\\n'", "'Actual: %r'", "%", "(", "calls", ",", "self", ".", "mock_calls", ")", ")", "return", "all_calls", "=", "list", "(", "self", ".", "mock_calls", ")", "not_found", "=", "[", "]", "for", "kall", "in", "calls", ":", "try", ":", "all_calls", ".", "remove", "(", "kall", ")", "except", "ValueError", ":", "not_found", ".", "append", "(", "kall", ")", "if", "not_found", ":", "raise", "AssertionError", "(", "'%r not all found in call list'", "%", "(", "tuple", "(", "not_found", ")", ",", ")", ")" ]
assert the mock has been called with the specified calls. The `mock_calls` list is checked for the calls. If `any_order` is False (the default) then the calls must be sequential. There can be extra calls before or after the specified calls. If `any_order` is True then the calls can be in any order, but they must all appear in `mock_calls`.
[ "assert", "the", "mock", "has", "been", "called", "with", "the", "specified", "calls", ".", "The", "mock_calls", "list", "is", "checked", "for", "the", "calls", "." ]
python
train
34.966667
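A brief sketch of assert_has_calls' two modes, assuming Mock and the call helper come from the same vendored module; the recorded calls are illustrative.

m = Mock()
m(1); m(2); m(3)
m.assert_has_calls([call(2), call(3)])                  # consecutive subsequence: passes
m.assert_has_calls([call(3), call(1)], any_order=True)  # order ignored: passes
# m.assert_has_calls([call(3), call(2)]) would raise AssertionError: not sequential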
ellmetha/django-machina
machina/apps/forum_permission/viewmixins.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_permission/viewmixins.py#L84-L105
def check_permissions(self, request): """ Retrieves the controlled object and perform the permissions check. """ obj = ( hasattr(self, 'get_controlled_object') and self.get_controlled_object() or hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None) ) user = request.user # Get the permissions to check perms = self.get_required_permissions(self) # Check permissions has_permissions = self.perform_permissions_check(user, obj, perms) if not has_permissions and not user.is_authenticated: return HttpResponseRedirect('{}?{}={}'.format( resolve_url(self.login_url), self.redirect_field_name, urlquote(request.get_full_path()) )) elif not has_permissions: raise PermissionDenied
[ "def", "check_permissions", "(", "self", ",", "request", ")", ":", "obj", "=", "(", "hasattr", "(", "self", ",", "'get_controlled_object'", ")", "and", "self", ".", "get_controlled_object", "(", ")", "or", "hasattr", "(", "self", ",", "'get_object'", ")", "and", "self", ".", "get_object", "(", ")", "or", "getattr", "(", "self", ",", "'object'", ",", "None", ")", ")", "user", "=", "request", ".", "user", "# Get the permissions to check", "perms", "=", "self", ".", "get_required_permissions", "(", "self", ")", "# Check permissions", "has_permissions", "=", "self", ".", "perform_permissions_check", "(", "user", ",", "obj", ",", "perms", ")", "if", "not", "has_permissions", "and", "not", "user", ".", "is_authenticated", ":", "return", "HttpResponseRedirect", "(", "'{}?{}={}'", ".", "format", "(", "resolve_url", "(", "self", ".", "login_url", ")", ",", "self", ".", "redirect_field_name", ",", "urlquote", "(", "request", ".", "get_full_path", "(", ")", ")", ")", ")", "elif", "not", "has_permissions", ":", "raise", "PermissionDenied" ]
Retrieves the controlled object and perform the permissions check.
[ "Retrieves", "the", "controlled", "object", "and", "perform", "the", "permissions", "check", "." ]
python
train
39.727273
reiinakano/scikit-plot
scikitplot/estimators.py
https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/estimators.py#L135-L247
def plot_learning_curve(clf, X, y, title='Learning Curve', cv=None, shuffle=False, random_state=None, train_sizes=None, n_jobs=1, scoring=None, ax=None, figsize=None, title_fontsize="large", text_fontsize="medium"): """Generates a plot of the train and test learning curves for a classifier. Args: clf: Classifier instance that implements ``fit`` and ``predict`` methods. X (array-like, shape (n_samples, n_features)): Training vector, where n_samples is the number of samples and n_features is the number of features. y (array-like, shape (n_samples) or (n_samples, n_features)): Target relative to X for classification or regression; None for unsupervised learning. title (string, optional): Title of the generated plot. Defaults to "Learning Curve". cv (int, cross-validation generator, iterable, optional): Determines the cross-validation strategy to be used for splitting. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` is used. If the estimator is not a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. shuffle (bool, optional): Determines whether to shuffle the training data before splitting using cross-validation. Defaults to False. random_state (int :class:`RandomState`): Pseudo-random number generator state used for random sampling. train_sizes (iterable, optional): Determines the training sizes used to plot the learning curve. If None, ``np.linspace(.1, 1.0, 5)`` is used. n_jobs (int, optional): Number of jobs to run in parallel. Defaults to 1. scoring (string, callable or None, optional): default: None. A string (see scikit-learn model evaluation documentation) or a scorer callable object / function with signature scorer(estimator, X, y). ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. figsize (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``. title_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "large". text_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium". Returns: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn. Example: >>> import scikitplot as skplt >>> rf = RandomForestClassifier() >>> skplt.estimators.plot_learning_curve(rf, X, y) <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490> >>> plt.show() .. image:: _static/examples/plot_learning_curve.png :align: center :alt: Learning Curve """ if ax is None: fig, ax = plt.subplots(1, 1, figsize=figsize) if train_sizes is None: train_sizes = np.linspace(.1, 1.0, 5) ax.set_title(title, fontsize=title_fontsize) ax.set_xlabel("Training examples", fontsize=text_fontsize) ax.set_ylabel("Score", fontsize=text_fontsize) train_sizes, train_scores, test_scores = learning_curve( clf, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, scoring=scoring, shuffle=shuffle, random_state=random_state) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) ax.grid() ax.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") ax.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") ax.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score") ax.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score") ax.tick_params(labelsize=text_fontsize) ax.legend(loc="best", fontsize=text_fontsize) return ax
[ "def", "plot_learning_curve", "(", "clf", ",", "X", ",", "y", ",", "title", "=", "'Learning Curve'", ",", "cv", "=", "None", ",", "shuffle", "=", "False", ",", "random_state", "=", "None", ",", "train_sizes", "=", "None", ",", "n_jobs", "=", "1", ",", "scoring", "=", "None", ",", "ax", "=", "None", ",", "figsize", "=", "None", ",", "title_fontsize", "=", "\"large\"", ",", "text_fontsize", "=", "\"medium\"", ")", ":", "if", "ax", "is", "None", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ",", "1", ",", "figsize", "=", "figsize", ")", "if", "train_sizes", "is", "None", ":", "train_sizes", "=", "np", ".", "linspace", "(", ".1", ",", "1.0", ",", "5", ")", "ax", ".", "set_title", "(", "title", ",", "fontsize", "=", "title_fontsize", ")", "ax", ".", "set_xlabel", "(", "\"Training examples\"", ",", "fontsize", "=", "text_fontsize", ")", "ax", ".", "set_ylabel", "(", "\"Score\"", ",", "fontsize", "=", "text_fontsize", ")", "train_sizes", ",", "train_scores", ",", "test_scores", "=", "learning_curve", "(", "clf", ",", "X", ",", "y", ",", "cv", "=", "cv", ",", "n_jobs", "=", "n_jobs", ",", "train_sizes", "=", "train_sizes", ",", "scoring", "=", "scoring", ",", "shuffle", "=", "shuffle", ",", "random_state", "=", "random_state", ")", "train_scores_mean", "=", "np", ".", "mean", "(", "train_scores", ",", "axis", "=", "1", ")", "train_scores_std", "=", "np", ".", "std", "(", "train_scores", ",", "axis", "=", "1", ")", "test_scores_mean", "=", "np", ".", "mean", "(", "test_scores", ",", "axis", "=", "1", ")", "test_scores_std", "=", "np", ".", "std", "(", "test_scores", ",", "axis", "=", "1", ")", "ax", ".", "grid", "(", ")", "ax", ".", "fill_between", "(", "train_sizes", ",", "train_scores_mean", "-", "train_scores_std", ",", "train_scores_mean", "+", "train_scores_std", ",", "alpha", "=", "0.1", ",", "color", "=", "\"r\"", ")", "ax", ".", "fill_between", "(", "train_sizes", ",", "test_scores_mean", "-", "test_scores_std", ",", "test_scores_mean", "+", "test_scores_std", ",", "alpha", "=", "0.1", ",", "color", "=", "\"g\"", ")", "ax", ".", "plot", "(", "train_sizes", ",", "train_scores_mean", ",", "'o-'", ",", "color", "=", "\"r\"", ",", "label", "=", "\"Training score\"", ")", "ax", ".", "plot", "(", "train_sizes", ",", "test_scores_mean", ",", "'o-'", ",", "color", "=", "\"g\"", ",", "label", "=", "\"Cross-validation score\"", ")", "ax", ".", "tick_params", "(", "labelsize", "=", "text_fontsize", ")", "ax", ".", "legend", "(", "loc", "=", "\"best\"", ",", "fontsize", "=", "text_fontsize", ")", "return", "ax" ]
Generates a plot of the train and test learning curves for a classifier. Args: clf: Classifier instance that implements ``fit`` and ``predict`` methods. X (array-like, shape (n_samples, n_features)): Training vector, where n_samples is the number of samples and n_features is the number of features. y (array-like, shape (n_samples) or (n_samples, n_features)): Target relative to X for classification or regression; None for unsupervised learning. title (string, optional): Title of the generated plot. Defaults to "Learning Curve" cv (int, cross-validation generator, iterable, optional): Determines the cross-validation strategy to be used for splitting. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` is used. If the estimator is not a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. shuffle (bool, optional): Determines whether to shuffle the training data before splitting using cross-validation. Defaults to False. random_state (int :class:`RandomState`): Pseudo-random number generator state used for random sampling. train_sizes (iterable, optional): Determines the training sizes used to plot the learning curve. If None, ``np.linspace(.1, 1.0, 5)`` is used. n_jobs (int, optional): Number of jobs to run in parallel. Defaults to 1. scoring (string, callable or None, optional): default: None A string (see scikit-learn model evaluation documentation) or a scorer callable object / function with signature scorer(estimator, X, y). ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. figsize (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``. title_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "large". text_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium". Returns: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn. Example: >>> import scikitplot as skplt >>> rf = RandomForestClassifier() >>> skplt.estimators.plot_learning_curve(rf, X, y) <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490> >>> plt.show() .. image:: _static/examples/plot_learning_curve.png :align: center :alt: Learning Curve
[ "Generates", "a", "plot", "of", "the", "train", "and", "test", "learning", "curves", "for", "a", "classifier", "." ]
python
train
42.769912
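A minimal usage sketch for the plot_learning_curve record above, assuming scikit-plot, scikit-learn and matplotlib are installed; the synthetic dataset and estimator settings are illustrative only, not part of the original source.

    import matplotlib.pyplot as plt
    import scikitplot as skplt
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier

    # Hypothetical toy data; any estimator with fit/predict works here.
    X, y = make_classification(n_samples=500, n_features=10, random_state=0)
    clf = RandomForestClassifier(n_estimators=50, random_state=0)

    # Draws training and cross-validation score curves over growing train sizes.
    skplt.estimators.plot_learning_curve(clf, X, y, cv=5, shuffle=True,
                                         random_state=0)
    plt.show()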
adafruit/Adafruit_Python_DHT
Adafruit_DHT/platform_detect.py
https://github.com/adafruit/Adafruit_Python_DHT/blob/c9407aa0506321bbc63ec8ba3c59fc21291f4746/Adafruit_DHT/platform_detect.py#L61-L77
def pi_revision(): """Detect the revision number of a Raspberry Pi, useful for changing functionality like default I2C bus based on revision.""" # Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History with open('/proc/cpuinfo', 'r') as infile: for line in infile: # Match a line of the form "Revision : 0002" while ignoring extra # info in front of the revision (like 1000 when the Pi was over-volted). match = re.match('Revision\s+:\s+.*(\w{4})$', line, flags=re.IGNORECASE) if match and match.group(1) in ['0000', '0002', '0003']: # Return revision 1 if revision ends with 0000, 0002 or 0003. return 1 elif match: # Assume revision 2 if revision ends with any other 4 chars. return 2 # Couldn't find the revision, throw an exception. raise RuntimeError('Could not determine Raspberry Pi revision.')
[ "def", "pi_revision", "(", ")", ":", "# Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History", "with", "open", "(", "'/proc/cpuinfo'", ",", "'r'", ")", "as", "infile", ":", "for", "line", "in", "infile", ":", "# Match a line of the form \"Revision : 0002\" while ignoring extra", "# info in front of the revsion (like 1000 when the Pi was over-volted).", "match", "=", "re", ".", "match", "(", "'Revision\\s+:\\s+.*(\\w{4})$'", ",", "line", ",", "flags", "=", "re", ".", "IGNORECASE", ")", "if", "match", "and", "match", ".", "group", "(", "1", ")", "in", "[", "'0000'", ",", "'0002'", ",", "'0003'", "]", ":", "# Return revision 1 if revision ends with 0000, 0002 or 0003.", "return", "1", "elif", "match", ":", "# Assume revision 2 if revision ends with any other 4 chars.", "return", "2", "# Couldn't find the revision, throw an exception.", "raise", "RuntimeError", "(", "'Could not determine Raspberry Pi revision.'", ")" ]
Detect the revision number of a Raspberry Pi, useful for changing functionality like default I2C bus based on revision.
[ "Detect", "the", "revision", "number", "of", "a", "Raspberry", "Pi", "useful", "for", "changing", "functionality", "like", "default", "I2C", "bus", "based", "on", "revision", "." ]
python
train
57.764706
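A small illustration of the revision-matching regex used by pi_revision above; the /proc/cpuinfo lines are hypothetical samples, and only the trailing four word-characters decide the board revision.

    import re

    for line in ['Revision\t: 0002', 'Revision\t: 1000a02082']:
        match = re.match(r'Revision\s+:\s+.*(\w{4})$', line, flags=re.IGNORECASE)
        if match:
            # '0000', '0002' or '0003' => revision 1; anything else => revision 2.
            suffix = match.group(1)
            print(line, '->', 1 if suffix in ['0000', '0002', '0003'] else 2)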
ajdavis/mongo-mockup-db
mockupdb/__init__.py
https://github.com/ajdavis/mongo-mockup-db/blob/ff8a3f793def59e9037397ef60607fbda6949dac/mockupdb/__init__.py#L1283-L1308
def stop(self): """Stop serving. Always call this to clean up after yourself.""" self._stopped = True threads = [self._accept_thread] threads.extend(self._server_threads) self._listening_sock.close() for sock in list(self._server_socks): try: sock.shutdown(socket.SHUT_RDWR) except socket.error: pass try: sock.close() except socket.error: pass with self._unlock(): for thread in threads: thread.join(10) if self._uds_path: try: os.unlink(self._uds_path) except OSError: pass
[ "def", "stop", "(", "self", ")", ":", "self", ".", "_stopped", "=", "True", "threads", "=", "[", "self", ".", "_accept_thread", "]", "threads", ".", "extend", "(", "self", ".", "_server_threads", ")", "self", ".", "_listening_sock", ".", "close", "(", ")", "for", "sock", "in", "list", "(", "self", ".", "_server_socks", ")", ":", "try", ":", "sock", ".", "shutdown", "(", "socket", ".", "SHUT_RDWR", ")", "except", "socket", ".", "error", ":", "pass", "try", ":", "sock", ".", "close", "(", ")", "except", "socket", ".", "error", ":", "pass", "with", "self", ".", "_unlock", "(", ")", ":", "for", "thread", "in", "threads", ":", "thread", ".", "join", "(", "10", ")", "if", "self", ".", "_uds_path", ":", "try", ":", "os", ".", "unlink", "(", "self", ".", "_uds_path", ")", "except", "OSError", ":", "pass" ]
Stop serving. Always call this to clean up after yourself.
[ "Stop", "serving", ".", "Always", "call", "this", "to", "clean", "up", "after", "yourself", "." ]
python
train
27.423077
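The stop() method above leans on a common shutdown-then-close idiom. A generic sketch of that idiom, not MockupDB API: shutdown() unblocks any peer still reading, close() releases the descriptor, and either can race with teardown, so errors are swallowed.

    import socket

    def close_quietly(sock):
        # socket.error is an alias of OSError on Python 3.
        try:
            sock.shutdown(socket.SHUT_RDWR)
        except OSError:
            pass
        try:
            sock.close()
        except OSError:
            pass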
gem/oq-engine
openquake/hazardlib/gsim/montalva_2016.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/montalva_2016.py#L151-L173
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # extract dictionaries of coefficients specific to required # intensity measure type and for PGA C = self.COEFFS[imt] # For inslab GMPEs the correction term is fixed at -0.3 dc1 = -0.3 C_PGA = self.COEFFS[PGA()] # compute median pga on rock (vs30=1000), needed for site response # term calculation pga1000 = np.exp( self._compute_pga_rock(C_PGA, dc1, sites, rup, dists)) mean = (self._compute_magnitude_term(C, dc1, rup.mag) + self._compute_distance_term(C, rup.mag, dists) + self._compute_focal_depth_term(C, rup) + self._compute_forearc_backarc_term(C, sites, dists) + self._compute_site_response_term(C, sites, pga1000)) stddevs = self._get_stddevs(C, stddev_types, len(sites.vs30)) return mean, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "# extract dictionaries of coefficients specific to required", "# intensity measure type and for PGA", "C", "=", "self", ".", "COEFFS", "[", "imt", "]", "# For inslab GMPEs the correction term is fixed at -0.3", "dc1", "=", "-", "0.3", "C_PGA", "=", "self", ".", "COEFFS", "[", "PGA", "(", ")", "]", "# compute median pga on rock (vs30=1000), needed for site response", "# term calculation", "pga1000", "=", "np", ".", "exp", "(", "self", ".", "_compute_pga_rock", "(", "C_PGA", ",", "dc1", ",", "sites", ",", "rup", ",", "dists", ")", ")", "mean", "=", "(", "self", ".", "_compute_magnitude_term", "(", "C", ",", "dc1", ",", "rup", ".", "mag", ")", "+", "self", ".", "_compute_distance_term", "(", "C", ",", "rup", ".", "mag", ",", "dists", ")", "+", "self", ".", "_compute_focal_depth_term", "(", "C", ",", "rup", ")", "+", "self", ".", "_compute_forearc_backarc_term", "(", "C", ",", "sites", ",", "dists", ")", "+", "self", ".", "_compute_site_response_term", "(", "C", ",", "sites", ",", "pga1000", ")", ")", "stddevs", "=", "self", ".", "_get_stddevs", "(", "C", ",", "stddev_types", ",", "len", "(", "sites", ".", "vs30", ")", ")", "return", "mean", ",", "stddevs" ]
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
47.73913
pandas-dev/pandas
pandas/core/ops.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L1088-L1120
def fill_binop(left, right, fill_value): """ If a non-None fill_value is given, replace null entries in left and right with this value, but only in positions where _one_ of left/right is null, not both. Parameters ---------- left : array-like right : array-like fill_value : object Returns ------- left : array-like right : array-like Notes ----- Makes copies if fill_value is not None """ # TODO: can we make a no-copy implementation? if fill_value is not None: left_mask = isna(left) right_mask = isna(right) left = left.copy() right = right.copy() # one but not both mask = left_mask ^ right_mask left[left_mask & mask] = fill_value right[right_mask & mask] = fill_value return left, right
[ "def", "fill_binop", "(", "left", ",", "right", ",", "fill_value", ")", ":", "# TODO: can we make a no-copy implementation?", "if", "fill_value", "is", "not", "None", ":", "left_mask", "=", "isna", "(", "left", ")", "right_mask", "=", "isna", "(", "right", ")", "left", "=", "left", ".", "copy", "(", ")", "right", "=", "right", ".", "copy", "(", ")", "# one but not both", "mask", "=", "left_mask", "^", "right_mask", "left", "[", "left_mask", "&", "mask", "]", "=", "fill_value", "right", "[", "right_mask", "&", "mask", "]", "=", "fill_value", "return", "left", ",", "right" ]
If a non-None fill_value is given, replace null entries in left and right with this value, but only in positions where _one_ of left/right is null, not both. Parameters ---------- left : array-like right : array-like fill_value : object Returns ------- left : array-like right : array-like Notes ----- Makes copies if fill_value is not None
[ "If", "a", "non", "-", "None", "fill_value", "is", "given", "replace", "null", "entries", "in", "left", "and", "right", "with", "this", "value", "but", "only", "in", "positions", "where", "_one_", "of", "left", "/", "right", "is", "null", "not", "both", "." ]
python
train
24.484848
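A small numpy demonstration of the "one but not both" XOR mask at the heart of fill_binop above; np.isnan stands in for pandas' isna, and the arrays are toy data.

    import numpy as np

    left = np.array([1.0, np.nan, np.nan, 4.0])
    right = np.array([np.nan, 2.0, np.nan, 5.0])

    left_mask, right_mask = np.isnan(left), np.isnan(right)
    mask = left_mask ^ right_mask      # True where exactly one side is null

    left[left_mask & mask] = 0.0       # fill_value
    right[right_mask & mask] = 0.0

    print(left)   # [ 1.  0. nan  4.] -- the doubly-null slot stays null
    print(right)  # [ 0.  2. nan  5.]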
airspeed-velocity/asv
asv/plugins/regressions.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/plugins/regressions.py#L230-L269
def get_graph_data(self, graph, benchmark): """ Iterator over graph data sets Yields ------ param_idx Flat index to parameter permutations for parameterized benchmarks. None if benchmark is not parameterized. entry_name Name for the data set. If benchmark is non-parameterized, this is the benchmark name. steps Steps to consider in regression detection. threshold User-specified threshold for regression detection. """ if benchmark.get('params'): param_iter = enumerate(zip(itertools.product(*benchmark['params']), graph.get_steps())) else: param_iter = [(None, (None, graph.get_steps()))] for j, (param, steps) in param_iter: if param is None: entry_name = benchmark['name'] else: entry_name = benchmark['name'] + '({0})'.format(', '.join(param)) start_revision = self._get_start_revision(graph, benchmark, entry_name) threshold = self._get_threshold(graph, benchmark, entry_name) if start_revision is None: # Skip detection continue steps = [step for step in steps if step[1] >= start_revision] yield j, entry_name, steps, threshold
[ "def", "get_graph_data", "(", "self", ",", "graph", ",", "benchmark", ")", ":", "if", "benchmark", ".", "get", "(", "'params'", ")", ":", "param_iter", "=", "enumerate", "(", "zip", "(", "itertools", ".", "product", "(", "*", "benchmark", "[", "'params'", "]", ")", ",", "graph", ".", "get_steps", "(", ")", ")", ")", "else", ":", "param_iter", "=", "[", "(", "None", ",", "(", "None", ",", "graph", ".", "get_steps", "(", ")", ")", ")", "]", "for", "j", ",", "(", "param", ",", "steps", ")", "in", "param_iter", ":", "if", "param", "is", "None", ":", "entry_name", "=", "benchmark", "[", "'name'", "]", "else", ":", "entry_name", "=", "benchmark", "[", "'name'", "]", "+", "'({0})'", ".", "format", "(", "', '", ".", "join", "(", "param", ")", ")", "start_revision", "=", "self", ".", "_get_start_revision", "(", "graph", ",", "benchmark", ",", "entry_name", ")", "threshold", "=", "self", ".", "_get_threshold", "(", "graph", ",", "benchmark", ",", "entry_name", ")", "if", "start_revision", "is", "None", ":", "# Skip detection", "continue", "steps", "=", "[", "step", "for", "step", "in", "steps", "if", "step", "[", "1", "]", ">=", "start_revision", "]", "yield", "j", ",", "entry_name", ",", "steps", ",", "threshold" ]
Iterator over graph data sets Yields ------ param_idx Flat index to parameter permutations for parameterized benchmarks. None if benchmark is not parameterized. entry_name Name for the data set. If benchmark is non-parameterized, this is the benchmark name. steps Steps to consider in regression detection. threshold User-specified threshold for regression detection.
[ "Iterator", "over", "graph", "data", "sets" ]
python
train
34.65
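How the parameter permutations in get_graph_data above get their entry names, sketched in isolation: itertools.product enumerates every value combination and each is formatted as "name(p1, p2)". The benchmark name and parameter values here are hypothetical.

    import itertools

    params = [['10', '100'], ['dense', 'sparse']]
    for j, combo in enumerate(itertools.product(*params)):
        print(j, 'bench_mem({0})'.format(', '.join(combo)))
    # 0 bench_mem(10, dense) ... 3 bench_mem(100, sparse)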
apache/spark
python/pyspark/streaming/dstream.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L73-L78
def count(self): """ Return a new DStream in which each RDD has a single element generated by counting each RDD of this DStream. """ return self.mapPartitions(lambda i: [sum(1 for _ in i)]).reduce(operator.add)
[ "def", "count", "(", "self", ")", ":", "return", "self", ".", "mapPartitions", "(", "lambda", "i", ":", "[", "sum", "(", "1", "for", "_", "in", "i", ")", "]", ")", ".", "reduce", "(", "operator", ".", "add", ")" ]
Return a new DStream in which each RDD has a single element generated by counting each RDD of this DStream.
[ "Return", "a", "new", "DStream", "in", "which", "each", "RDD", "has", "a", "single", "element", "generated", "by", "counting", "each", "RDD", "of", "this", "DStream", "." ]
python
train
40.833333
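The per-partition counting trick used by count() above, reproduced without Spark: each partition contributes a single partial count, and the partials are combined with operator.add, mirroring mapPartitions followed by reduce.

    import operator
    from functools import reduce

    partitions = [[1, 2, 3], [4, 5], []]   # stand-in for an RDD's partitions
    partials = [sum(1 for _ in part) for part in partitions]
    print(reduce(operator.add, partials))   # 5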
probcomp/crosscat
src/LocalEngine.py
https://github.com/probcomp/crosscat/blob/4a05bddb06a45f3b7b3e05e095720f16257d1535/src/LocalEngine.py#L360-L377
def simple_predictive_probability_multistate( self, M_c, X_L_list, X_D_list, Y, Q): """Calculate probability of a cell taking a value given a latent state. :param Y: A list of constraints to apply when querying. Each constraint is a triplet of (r,d,v): r is the row index, d is the column index and v is the value of the constraint :type Y: list of lists :param Q: A list of values to query. Each value is a triplet of (r,d,v): r is the row index, d is the column index, and v is the value at which the density is evaluated. :type Q: list of lists :returns: list of floats -- probabilities of the values specified by Q """ return su.simple_predictive_probability_multistate( M_c, X_L_list, X_D_list, Y, Q)
[ "def", "simple_predictive_probability_multistate", "(", "self", ",", "M_c", ",", "X_L_list", ",", "X_D_list", ",", "Y", ",", "Q", ")", ":", "return", "su", ".", "simple_predictive_probability_multistate", "(", "M_c", ",", "X_L_list", ",", "X_D_list", ",", "Y", ",", "Q", ")" ]
Calculate probability of a cell taking a value given a latent state. :param Y: A list of constraints to apply when querying. Each constraint is a triplet of (r,d,v): r is the row index, d is the column index and v is the value of the constraint :type Y: list of lists :param Q: A list of values to query. Each value is a triplet of (r,d,v): r is the row index, d is the column index, and v is the value at which the density is evaluated. :type Q: list of lists :returns: list of floats -- probabilities of the values specified by Q
[ "Calculate", "probability", "of", "a", "cell", "taking", "a", "value", "given", "a", "latent", "state", "." ]
python
train
45.944444
DarkEnergySurvey/ugali
ugali/analysis/imf.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/imf.py#L155-L181
def pdf(cls, mass, log_mode=True): """ PDF for the Kroupa IMF. Normalization is set over the mass range from 0.1 Msun to 100 Msun """ log_mass = np.log10(mass) # From Eq 2 mb = mbreak = [0.08, 0.5] # Msun a = alpha = [0.3, 1.3, 2.3] # alpha # Normalization set from 0.1 -- 100 Msun norm = 0.27947743949440446 b = 1./norm c = b * mbreak[0]**(alpha[1]-alpha[0]) d = c * mbreak[1]**(alpha[2]-alpha[1]) dn_dm = b * (mass < 0.08) * mass**(-alpha[0]) dn_dm += c * (0.08 <= mass) * (mass < 0.5) * mass**(-alpha[1]) dn_dm += d * (0.5 <= mass) * mass**(-alpha[2]) if log_mode: # Number per logarithmic mass range, i.e., dN/dlog(M) return dn_dm * (mass * np.log(10)) else: # Number per linear mass range, i.e., dN/dM return dn_dm
[ "def", "pdf", "(", "cls", ",", "mass", ",", "log_mode", "=", "True", ")", ":", "log_mass", "=", "np", ".", "log10", "(", "mass", ")", "# From Eq 2", "mb", "=", "mbreak", "=", "[", "0.08", ",", "0.5", "]", "# Msun", "a", "=", "alpha", "=", "[", "0.3", ",", "1.3", ",", "2.3", "]", "# alpha", "# Normalization set from 0.1 -- 100 Msun", "norm", "=", "0.27947743949440446", "b", "=", "1.", "/", "norm", "c", "=", "b", "*", "mbreak", "[", "0", "]", "**", "(", "alpha", "[", "1", "]", "-", "alpha", "[", "0", "]", ")", "d", "=", "c", "*", "mbreak", "[", "1", "]", "**", "(", "alpha", "[", "2", "]", "-", "alpha", "[", "1", "]", ")", "dn_dm", "=", "b", "*", "(", "mass", "<", "0.08", ")", "*", "mass", "**", "(", "-", "alpha", "[", "0", "]", ")", "dn_dm", "+=", "c", "*", "(", "0.08", "<=", "mass", ")", "*", "(", "mass", "<", "0.5", ")", "*", "mass", "**", "(", "-", "alpha", "[", "1", "]", ")", "dn_dm", "+=", "d", "*", "(", "0.5", "<=", "mass", ")", "*", "mass", "**", "(", "-", "alpha", "[", "2", "]", ")", "if", "log_mode", ":", "# Number per logarithmic mass range, i.e., dN/dlog(M)", "return", "dn_dm", "*", "(", "mass", "*", "np", ".", "log", "(", "10", ")", ")", "else", ":", "# Number per linear mass range, i.e., dN/dM", "return", "dn_dm" ]
PDF for the Kroupa IMF. Normalization is set over the mass range from 0.1 Msun to 100 Msun
[ "PDF", "for", "the", "Kroupa", "IMF", "." ]
python
train
32.777778
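The broken power law from pdf() above evaluated at a few masses, re-stating the arithmetic already in the function (same break points, slopes, and normalization); the sample masses are arbitrary.

    import numpy as np

    alpha = [0.3, 1.3, 2.3]
    b = 1.0 / 0.27947743949440446
    c = b * 0.08 ** (alpha[1] - alpha[0])
    d = c * 0.5 ** (alpha[2] - alpha[1])

    mass = np.array([0.05, 0.3, 2.0])
    dn_dm = (b * (mass < 0.08) * mass ** -alpha[0]
             + c * (0.08 <= mass) * (mass < 0.5) * mass ** -alpha[1]
             + d * (0.5 <= mass) * mass ** -alpha[2])
    print(dn_dm)  # number per linear mass range, dN/dM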
joesecurity/jbxapi
jbxapi.py
https://github.com/joesecurity/jbxapi/blob/cea2f5edef9661d53fe3d58435d4e88701331f79/jbxapi.py#L222-L228
def submit_cookbook(self, cookbook, params={}, _extra_params={}): """ Submit a cookbook. """ self._check_user_parameters(params) files = {'cookbook': cookbook} return self._submit(params, files, _extra_params=_extra_params)
[ "def", "submit_cookbook", "(", "self", ",", "cookbook", ",", "params", "=", "{", "}", ",", "_extra_params", "=", "{", "}", ")", ":", "self", ".", "_check_user_parameters", "(", "params", ")", "files", "=", "{", "'cookbook'", ":", "cookbook", "}", "return", "self", ".", "_submit", "(", "params", ",", "files", ",", "_extra_params", "=", "_extra_params", ")" ]
Submit a cookbook.
[ "Submit", "a", "cookbook", "." ]
python
train
37.857143
pmacosta/pexdoc
pexdoc/exh.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/exh.py#L111-L147
def _merge_ex_dicts(sexdict, oexdict): """Merge callable look-up tables from two objects.""" # pylint: disable=R0101 for okey, ovalue in oexdict.items(): # Keys in other dictionary are callable paths or # callables ids depending on the full_cname argument # of the handler if okey in sexdict: svalue = sexdict[okey] # Each callable dictionary entry is itself a dictionary where the # key is (exception type, exception message) and the value is a # dictionary with keys 'function' (a list of callable paths to # the function), 'raised' (list of booleans indicating whether the # exception through each callable path was raised or not) and name for fkey, fvalue in ovalue.items(): if fkey not in svalue: # The other dictionary has a callable path not present in # the self dictionary, add it unmodified sexdict[okey][fkey] = fvalue else: # The other dictionary has a callable and exception # present in the self dictionary, have to check whether # the callable path is the same (in which case update # raise flag) or not (in which case add to 'function' and # 'raise' keys) iobj = zip(fvalue["function"], fvalue["raised"]) for func, raised in iobj: if func not in sexdict[okey][fkey]["function"]: sexdict[okey][fkey]["function"].append(func) sexdict[okey][fkey]["raised"].append(raised) else: idx = sexdict[okey][fkey]["function"].index(func) sraised = sexdict[okey][fkey]["raised"][idx] if sraised or raised: sexdict[okey][fkey]["raised"][idx] = True else: sexdict[okey] = ovalue
[ "def", "_merge_ex_dicts", "(", "sexdict", ",", "oexdict", ")", ":", "# pylint: disable=R0101", "for", "okey", ",", "ovalue", "in", "oexdict", ".", "items", "(", ")", ":", "# Keys in other dictionary are callable paths or", "# callables ids depending on the full_cname argument", "# of the handler", "if", "okey", "in", "sexdict", ":", "svalue", "=", "sexdict", "[", "okey", "]", "# Each callable dictionary entry is itself a dictionary where the", "# key is (exception type, exception message) and the value is a", "# dictionary with keys 'function' (a list of callable paths to", "# the function), 'raised' (list of booleans indicating whether the", "# exception though each callable path was raised or not) and name", "for", "fkey", ",", "fvalue", "in", "ovalue", ".", "items", "(", ")", ":", "if", "fkey", "not", "in", "svalue", ":", "# The other dictionary has a callable path not present in", "# the self dictionary, add it unmodified", "sexdict", "[", "okey", "]", "[", "fkey", "]", "=", "fvalue", "else", ":", "# The other dictionary has a callable and exception", "# present in the self dictionary, have to check whether", "# the callable path is the same (in which case update", "# raise flag) or not (in which case add to 'function' and", "# 'raise' keys", "iobj", "=", "zip", "(", "fvalue", "[", "\"function\"", "]", ",", "fvalue", "[", "\"raised\"", "]", ")", "for", "func", ",", "raised", "in", "iobj", ":", "if", "func", "not", "in", "sexdict", "[", "okey", "]", "[", "fkey", "]", "[", "\"function\"", "]", ":", "sexdict", "[", "okey", "]", "[", "fkey", "]", "[", "\"function\"", "]", ".", "append", "(", "func", ")", "sexdict", "[", "okey", "]", "[", "fkey", "]", "[", "\"raised\"", "]", ".", "append", "(", "raised", ")", "else", ":", "idx", "=", "sexdict", "[", "okey", "]", "[", "fkey", "]", "[", "\"function\"", "]", ".", "index", "(", "func", ")", "sraised", "=", "sexdict", "[", "okey", "]", "[", "fkey", "]", "[", "\"raised\"", "]", "[", "idx", "]", "if", "sraised", "or", "raised", ":", "sexdict", "[", "okey", "]", "[", "fkey", "]", "[", "\"raised\"", "]", "[", "idx", "]", "=", "True", "else", ":", "sexdict", "[", "okey", "]", "=", "ovalue" ]
Merge callable look-up tables from two objects.
[ "Merge", "callable", "look", "-", "up", "tables", "from", "two", "objects", "." ]
python
train
55.135135
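The innermost merge rule of _merge_ex_dicts above, isolated: for a matching callable path the raised flags are OR-ed together, otherwise the path is appended. The dictionaries here are toy stand-ins for the exception look-up tables.

    def merge_entry(sval, oval):
        for func, raised in zip(oval['function'], oval['raised']):
            if func in sval['function']:
                idx = sval['function'].index(func)
                sval['raised'][idx] = sval['raised'][idx] or raised
            else:
                sval['function'].append(func)
                sval['raised'].append(raised)

    entry = {'function': ['mod.f'], 'raised': [False]}
    merge_entry(entry, {'function': ['mod.f', 'mod.g'], 'raised': [True, False]})
    print(entry)  # {'function': ['mod.f', 'mod.g'], 'raised': [True, False]}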
marrow/package
marrow/package/loader.py
https://github.com/marrow/package/blob/133d4bf67cc857d1b2423695938a00ff2dfa8af2/marrow/package/loader.py#L57-L104
def load(target:str, namespace:str=None, default=nodefault, executable:bool=False, separators:Sequence[str]=('.', ':'), protect:bool=True): """This helper function loads an object identified by a dotted-notation string. For example:: # Load class Foo from example.objects load('example.objects:Foo') # Load the result of the class method ``new`` of the Foo object load('example.objects:Foo.new', executable=True) If a plugin namespace is provided simple name references are allowed. For example:: # Load the plugin named 'routing' from the 'web.dispatch' namespace load('routing', 'web.dispatch') The ``executable``, ``protect``, and first tuple element of ``separators`` are passed to the traverse function. Providing a namespace does not prevent full object lookup (dot-colon notation) from working. """ assert check_argument_types() if namespace and ':' not in target: allowable = dict((i.name, i) for i in iter_entry_points(namespace)) if target not in allowable: raise LookupError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable)) return allowable[target].load() parts, _, target = target.partition(separators[1]) try: obj = __import__(parts) except ImportError: if default is not nodefault: return default raise return traverse( obj, separators[0].join(parts.split(separators[0])[1:] + target.split(separators[0])), default = default, executable = executable, protect = protect ) if target else obj
[ "def", "load", "(", "target", ":", "str", ",", "namespace", ":", "str", "=", "None", ",", "default", "=", "nodefault", ",", "executable", ":", "bool", "=", "False", ",", "separators", ":", "Sequence", "[", "str", "]", "=", "(", "'.'", ",", "':'", ")", ",", "protect", ":", "bool", "=", "True", ")", ":", "assert", "check_argument_types", "(", ")", "if", "namespace", "and", "':'", "not", "in", "target", ":", "allowable", "=", "dict", "(", "(", "i", ".", "name", ",", "i", ")", "for", "i", "in", "iter_entry_points", "(", "namespace", ")", ")", "if", "target", "not", "in", "allowable", ":", "raise", "LookupError", "(", "'Unknown plugin \"'", "+", "target", "+", "'\"; found: '", "+", "', '", ".", "join", "(", "allowable", ")", ")", "return", "allowable", "[", "target", "]", ".", "load", "(", ")", "parts", ",", "_", ",", "target", "=", "target", ".", "partition", "(", "separators", "[", "1", "]", ")", "try", ":", "obj", "=", "__import__", "(", "parts", ")", "except", "ImportError", ":", "if", "default", "is", "not", "nodefault", ":", "return", "default", "raise", "return", "traverse", "(", "obj", ",", "separators", "[", "0", "]", ".", "join", "(", "parts", ".", "split", "(", "separators", "[", "0", "]", ")", "[", "1", ":", "]", "+", "target", ".", "split", "(", "separators", "[", "0", "]", ")", ")", ",", "default", "=", "default", ",", "executable", "=", "executable", ",", "protect", "=", "protect", ")", "if", "target", "else", "obj" ]
This helper function loads an object identified by a dotted-notation string. For example:: # Load class Foo from example.objects load('example.objects:Foo') # Load the result of the class method ``new`` of the Foo object load('example.objects:Foo.new', executable=True) If a plugin namespace is provided simple name references are allowed. For example:: # Load the plugin named 'routing' from the 'web.dispatch' namespace load('routing', 'web.dispatch') The ``executable``, ``protect``, and first tuple element of ``separators`` are passed to the traverse function. Providing a namespace does not prevent full object lookup (dot-colon notation) from working.
[ "This", "helper", "function", "loads", "an", "object", "identified", "by", "a", "dotted", "-", "notation", "string", ".", "For", "example", "::", "#", "Load", "class", "Foo", "from", "example", ".", "objects", "load", "(", "example", ".", "objects", ":", "Foo", ")", "#", "Load", "the", "result", "of", "the", "class", "method", "new", "of", "the", "Foo", "object", "load", "(", "example", ".", "objects", ":", "Foo", ".", "new", "executable", "=", "True", ")", "If", "a", "plugin", "namespace", "is", "provided", "simple", "name", "references", "are", "allowed", ".", "For", "example", "::", "#", "Load", "the", "plugin", "named", "routing", "from", "the", "web", ".", "dispatch", "namespace", "load", "(", "routing", "web", ".", "dispatch", ")", "The", "executable", "protect", "and", "first", "tuple", "element", "of", "separators", "are", "passed", "to", "the", "traverse", "function", ".", "Providing", "a", "namespace", "does", "not", "prevent", "full", "object", "lookup", "(", "dot", "-", "colon", "notation", ")", "from", "working", "." ]
python
test
30.604167
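The core of load() above without the plugin and default branches, as a simplified sketch: split on ':', import the module part, then chain getattr over the remaining dotted path (the real traverse() also handles executables and attribute protection).

    from functools import reduce

    def simple_load(target):
        module, _, attrs = target.partition(':')
        obj = __import__(module)  # returns the top-level package
        parts = module.split('.')[1:] + (attrs.split('.') if attrs else [])
        return reduce(getattr, parts, obj)

    print(simple_load('os.path:join'))  # <function join at 0x...>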
vmlaker/mpipe
src/UnorderedWorker.py
https://github.com/vmlaker/mpipe/blob/5a1804cf64271931f0cd3e4fff3e2b38291212dd/src/UnorderedWorker.py#L42-L73
def assemble( cls, args, input_tube, output_tubes, size, disable_result, do_stop_task, ): """Create, assemble and start workers. Workers are created of class *cls*, initialized with *args*, and given task/result communication channels *input_tube* and *output_tubes*. The number of workers created is according to *size* parameter. *do_stop_task* indicates whether doTask() will be called for "stop" request. """ # Create the workers. workers = [] for ii in range(size): worker = cls(**args) worker.init2( input_tube, output_tubes, size, disable_result, do_stop_task, ) workers.append(worker) # Start the workers. for worker in workers: worker.start()
[ "def", "assemble", "(", "cls", ",", "args", ",", "input_tube", ",", "output_tubes", ",", "size", ",", "disable_result", ",", "do_stop_task", ",", ")", ":", "# Create the workers.", "workers", "=", "[", "]", "for", "ii", "in", "range", "(", "size", ")", ":", "worker", "=", "cls", "(", "*", "*", "args", ")", "worker", ".", "init2", "(", "input_tube", ",", "output_tubes", ",", "size", ",", "disable_result", ",", "do_stop_task", ",", ")", "workers", ".", "append", "(", "worker", ")", "# Start the workers.", "for", "worker", "in", "workers", ":", "worker", ".", "start", "(", ")" ]
Create, assemble and start workers. Workers are created of class *cls*, initialized with *args*, and given task/result communication channels *input_tube* and *output_tubes*. The number of workers created is according to *size* parameter. *do_stop_task* indicates whether doTask() will be called for "stop" request.
[ "Create", "assemble", "and", "start", "workers", ".", "Workers", "are", "created", "of", "class", "*", "cls", "*", "initialized", "with", "*", "args", "*", "and", "given", "task", "/", "result", "communication", "channels", "*", "input_tube", "*", "and", "*", "output_tubes", "*", ".", "The", "number", "of", "workers", "created", "is", "according", "to", "*", "size", "*", "parameter", ".", "*", "do_stop_task", "*", "indicates", "whether", "doTask", "()", "will", "be", "called", "for", "stop", "request", "." ]
python
train
28.84375
coursera-dl/coursera-dl
coursera/api.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/api.py#L935-L967
def _extract_links_from_lecture_assets(self, asset_ids): """ Extract links to files of the asset ids. @param asset_ids: List of asset ids. @type asset_ids: [str] @return: @see CourseraOnDemand._extract_links_from_text """ links = {} def _add_asset(name, url, destination): filename, extension = os.path.splitext(clean_url(name)) if extension == '': return extension = clean_filename( extension.lower().strip('.').strip(), self._unrestricted_filenames) basename = clean_filename( os.path.basename(filename), self._unrestricted_filenames) url = url.strip() if extension not in destination: destination[extension] = [] destination[extension].append((url, basename)) for asset_id in asset_ids: for asset in self._get_asset_urls(asset_id): _add_asset(asset['name'], asset['url'], links) return links
[ "def", "_extract_links_from_lecture_assets", "(", "self", ",", "asset_ids", ")", ":", "links", "=", "{", "}", "def", "_add_asset", "(", "name", ",", "url", ",", "destination", ")", ":", "filename", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "clean_url", "(", "name", ")", ")", "if", "extension", "is", "''", ":", "return", "extension", "=", "clean_filename", "(", "extension", ".", "lower", "(", ")", ".", "strip", "(", "'.'", ")", ".", "strip", "(", ")", ",", "self", ".", "_unrestricted_filenames", ")", "basename", "=", "clean_filename", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "self", ".", "_unrestricted_filenames", ")", "url", "=", "url", ".", "strip", "(", ")", "if", "extension", "not", "in", "destination", ":", "destination", "[", "extension", "]", "=", "[", "]", "destination", "[", "extension", "]", ".", "append", "(", "(", "url", ",", "basename", ")", ")", "for", "asset_id", "in", "asset_ids", ":", "for", "asset", "in", "self", ".", "_get_asset_urls", "(", "asset_id", ")", ":", "_add_asset", "(", "asset", "[", "'name'", "]", ",", "asset", "[", "'url'", "]", ",", "links", ")", "return", "links" ]
Extract links to files of the asset ids. @param asset_ids: List of asset ids. @type asset_ids: [str] @return: @see CourseraOnDemand._extract_links_from_text
[ "Extract", "links", "to", "files", "of", "the", "asset", "ids", "." ]
python
train
32.030303
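The grouping step performed by _add_asset above, in isolation: links are keyed by the lower-cased file extension. clean_url and clean_filename from the original module are omitted, and the asset names and URLs are hypothetical.

    import os

    links = {}
    for name, url in [('intro.mp4', 'http://x/1'), ('notes.PDF', 'http://x/2')]:
        base, ext = os.path.splitext(name)
        ext = ext.lower().strip('.')
        if ext:
            links.setdefault(ext, []).append((url.strip(), os.path.basename(base)))
    print(links)  # {'mp4': [...], 'pdf': [...]}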
EventTeam/beliefs
src/beliefs/beliefstate.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/beliefstate.py#L119-L132
def iter_breadth_first(self, root=None): """ Traverses the belief state's structure breadth-first """ if root == None: root = self yield root last = root for node in self.iter_breadth_first(root): if isinstance(node, DictCell): # recurse for subpart in node: yield subpart last = subpart if last == node: return
[ "def", "iter_breadth_first", "(", "self", ",", "root", "=", "None", ")", ":", "if", "root", "==", "None", ":", "root", "=", "self", "yield", "root", "last", "=", "root", "for", "node", "in", "self", ".", "iter_breadth_first", "(", "root", ")", ":", "if", "isinstance", "(", "node", ",", "DictCell", ")", ":", "# recurse", "for", "subpart", "in", "node", ":", "yield", "subpart", "last", "=", "subpart", "if", "last", "==", "node", ":", "return" ]
Traverses the belief state's structure breadth-first
[ "Traverses", "the", "belief", "state", "s", "structure", "breadth", "-", "first" ]
python
train
33.071429
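For comparison with the self-recursive generator trick above, a conventional breadth-first traversal with a deque; children() is a hypothetical accessor for a node's sub-parts, not part of the beliefs API.

    from collections import deque

    def bfs(root, children):
        queue = deque([root])
        while queue:
            node = queue.popleft()
            yield node
            queue.extend(children(node))

    tree = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}
    print(list(bfs('a', lambda n: tree[n])))  # ['a', 'b', 'c', 'd']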
TimBest/django-multi-form-view
multi_form_view/base.py
https://github.com/TimBest/django-multi-form-view/blob/d7f0a341881a5a36e4d567ca9bc29d233de01720/multi_form_view/base.py#L65-L79
def get_form_kwargs(self): """ Build the keyword arguments required to instantiate the form. """ kwargs = {} for key in six.iterkeys(self.form_classes): if self.request.method in ('POST', 'PUT'): kwargs[key] = { 'data': self.request.POST, 'files': self.request.FILES, } else: kwargs[key] = {} return kwargs
[ "def", "get_form_kwargs", "(", "self", ")", ":", "kwargs", "=", "{", "}", "for", "key", "in", "six", ".", "iterkeys", "(", "self", ".", "form_classes", ")", ":", "if", "self", ".", "request", ".", "method", "in", "(", "'POST'", ",", "'PUT'", ")", ":", "kwargs", "[", "key", "]", "=", "{", "'data'", ":", "self", ".", "request", ".", "POST", ",", "'files'", ":", "self", ".", "request", ".", "FILES", ",", "}", "else", ":", "kwargs", "[", "key", "]", "=", "{", "}", "return", "kwargs" ]
Build the keyword arguments required to instantiate the form.
[ "Build", "the", "keyword", "arguments", "required", "to", "instantiate", "the", "form", "." ]
python
train
30.2
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile_ext.py#L87-L99
def get_port_profile_for_intf_output_interface_interface_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_port_profile_for_intf = ET.Element("get_port_profile_for_intf") config = get_port_profile_for_intf output = ET.SubElement(get_port_profile_for_intf, "output") interface = ET.SubElement(output, "interface") interface_type = ET.SubElement(interface, "interface-type") interface_type.text = kwargs.pop('interface_type') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_port_profile_for_intf_output_interface_interface_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_port_profile_for_intf", "=", "ET", ".", "Element", "(", "\"get_port_profile_for_intf\"", ")", "config", "=", "get_port_profile_for_intf", "output", "=", "ET", ".", "SubElement", "(", "get_port_profile_for_intf", ",", "\"output\"", ")", "interface", "=", "ET", ".", "SubElement", "(", "output", ",", "\"interface\"", ")", "interface_type", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"interface-type\"", ")", "interface_type", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_type'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
46.692308
genomoncology/related
src/related/fields.py
https://github.com/genomoncology/related/blob/be47c0081e60fc60afcde3a25f00ebcad5d18510/src/related/fields.py#L51-L68
def DateField(formatter=types.DEFAULT_DATE_FORMAT, default=NOTHING, required=True, repr=True, cmp=True, key=None): """ Create new date field on a model. :param formatter: date formatter string (default: "%Y-%m-%d") :param default: any date or string that can be converted to a date value :param bool required: whether or not the object is invalid if not provided. :param bool repr: whether this field should appear in the object's repr. :param bool cmp: include this field in generated comparison. :param string key: override name of the value when converted to dict. """ default = _init_fields.init_default(required, default, None) validator = _init_fields.init_validator(required, date) converter = converters.to_date_field(formatter) return attrib(default=default, converter=converter, validator=validator, repr=repr, cmp=cmp, metadata=dict(formatter=formatter, key=key))
[ "def", "DateField", "(", "formatter", "=", "types", ".", "DEFAULT_DATE_FORMAT", ",", "default", "=", "NOTHING", ",", "required", "=", "True", ",", "repr", "=", "True", ",", "cmp", "=", "True", ",", "key", "=", "None", ")", ":", "default", "=", "_init_fields", ".", "init_default", "(", "required", ",", "default", ",", "None", ")", "validator", "=", "_init_fields", ".", "init_validator", "(", "required", ",", "date", ")", "converter", "=", "converters", ".", "to_date_field", "(", "formatter", ")", "return", "attrib", "(", "default", "=", "default", ",", "converter", "=", "converter", ",", "validator", "=", "validator", ",", "repr", "=", "repr", ",", "cmp", "=", "cmp", ",", "metadata", "=", "dict", "(", "formatter", "=", "formatter", ",", "key", "=", "key", ")", ")" ]
Create new date field on a model. :param formatter: date formatter string (default: "%Y-%m-%d") :param default: any date or string that can be converted to a date value :param bool required: whether or not the object is invalid if not provided. :param bool repr: whether this field should appear in the object's repr. :param bool cmp: include this field in generated comparison. :param string key: override name of the value when converted to dict.
[ "Create", "new", "date", "field", "on", "a", "model", "." ]
python
train
53.111111
ella/ella
ella/photos/models.py
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/models.py#L375-L400
def generate(self, save=True): """ Generates photo file in current format. If ``save`` is ``True``, file is saved too. """ stretched_photo, crop_box = self._generate_img() # set crop_box to (0,0,0,0) if photo not cropped if not crop_box: crop_box = 0, 0, 0, 0 self.crop_left, self.crop_top, right, bottom = crop_box self.crop_width = right - self.crop_left self.crop_height = bottom - self.crop_top self.width, self.height = stretched_photo.size f = StringIO() imgf = (self.photo._get_image().format or Image.EXTENSION[path.splitext(self.photo.image.name)[1]]) stretched_photo.save(f, format=imgf, quality=self.format.resample_quality) f.seek(0) self.image.save(self.file(), ContentFile(f.read()), save)
[ "def", "generate", "(", "self", ",", "save", "=", "True", ")", ":", "stretched_photo", ",", "crop_box", "=", "self", ".", "_generate_img", "(", ")", "# set crop_box to (0,0,0,0) if photo not cropped", "if", "not", "crop_box", ":", "crop_box", "=", "0", ",", "0", ",", "0", ",", "0", "self", ".", "crop_left", ",", "self", ".", "crop_top", ",", "right", ",", "bottom", "=", "crop_box", "self", ".", "crop_width", "=", "right", "-", "self", ".", "crop_left", "self", ".", "crop_height", "=", "bottom", "-", "self", ".", "crop_top", "self", ".", "width", ",", "self", ".", "height", "=", "stretched_photo", ".", "size", "f", "=", "StringIO", "(", ")", "imgf", "=", "(", "self", ".", "photo", ".", "_get_image", "(", ")", ".", "format", "or", "Image", ".", "EXTENSION", "[", "path", ".", "splitext", "(", "self", ".", "photo", ".", "image", ".", "name", ")", "[", "1", "]", "]", ")", "stretched_photo", ".", "save", "(", "f", ",", "format", "=", "imgf", ",", "quality", "=", "self", ".", "format", ".", "resample_quality", ")", "f", ".", "seek", "(", "0", ")", "self", ".", "image", ".", "save", "(", "self", ".", "file", "(", ")", ",", "ContentFile", "(", "f", ".", "read", "(", ")", ")", ",", "save", ")" ]
Generates photo file in current format. If ``save`` is ``True``, file is saved too.
[ "Generates", "photo", "file", "in", "current", "format", "." ]
python
train
32.346154
wonambi-python/wonambi
wonambi/widgets/info.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/info.py#L174-L248
def open_dataset(self, recent=None, debug_filename=None, bids=False): """Open a new dataset. Parameters ---------- recent : path to file one of the recent datasets to read """ if recent: filename = recent elif debug_filename is not None: filename = debug_filename else: try: dir_name = dirname(self.filename) except (AttributeError, TypeError): dir_name = self.parent.value('recording_dir') file_or_dir = choose_file_or_dir() if file_or_dir == 'dir': filename = QFileDialog.getExistingDirectory(self, 'Open directory', dir_name) elif file_or_dir == 'file': filename, _ = QFileDialog.getOpenFileName(self, 'Open file', dir_name) elif file_or_dir == 'abort': return if filename == '': return # clear previous dataset once the user opens another dataset if self.dataset is not None: self.parent.reset() self.parent.statusBar().showMessage('Reading dataset: ' + basename(filename)) lg.info('Reading dataset: ' + str(filename)) self.filename = filename # temp self.dataset = Dataset(filename) #temp #============================================================================== # try: # self.filename = filename # self.dataset = Dataset(filename) # except FileNotFoundError: # msg = 'File ' + basename(filename) + ' cannot be read' # self.parent.statusBar().showMessage(msg) # lg.info(msg) # error_dialog = QErrorMessage() # error_dialog.setWindowTitle('Error opening dataset') # error_dialog.showMessage(msg) # if debug_filename is None: # error_dialog.exec() # return # # except BaseException as err: # self.parent.statusBar().showMessage(str(err)) # lg.info('Error ' + str(err)) # error_dialog = QErrorMessage() # error_dialog.setWindowTitle('Error opening dataset') # error_dialog.showMessage(str(err)) # if debug_filename is None: # error_dialog.exec() # return #============================================================================== self.action['export'].setEnabled(True) self.parent.statusBar().showMessage('') self.parent.update()
[ "def", "open_dataset", "(", "self", ",", "recent", "=", "None", ",", "debug_filename", "=", "None", ",", "bids", "=", "False", ")", ":", "if", "recent", ":", "filename", "=", "recent", "elif", "debug_filename", "is", "not", "None", ":", "filename", "=", "debug_filename", "else", ":", "try", ":", "dir_name", "=", "dirname", "(", "self", ".", "filename", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "dir_name", "=", "self", ".", "parent", ".", "value", "(", "'recording_dir'", ")", "file_or_dir", "=", "choose_file_or_dir", "(", ")", "if", "file_or_dir", "==", "'dir'", ":", "filename", "=", "QFileDialog", ".", "getExistingDirectory", "(", "self", ",", "'Open directory'", ",", "dir_name", ")", "elif", "file_or_dir", "==", "'file'", ":", "filename", ",", "_", "=", "QFileDialog", ".", "getOpenFileName", "(", "self", ",", "'Open file'", ",", "dir_name", ")", "elif", "file_or_dir", "==", "'abort'", ":", "return", "if", "filename", "==", "''", ":", "return", "# clear previous dataset once the user opens another dataset", "if", "self", ".", "dataset", "is", "not", "None", ":", "self", ".", "parent", ".", "reset", "(", ")", "self", ".", "parent", ".", "statusBar", "(", ")", ".", "showMessage", "(", "'Reading dataset: '", "+", "basename", "(", "filename", ")", ")", "lg", ".", "info", "(", "'Reading dataset: '", "+", "str", "(", "filename", ")", ")", "self", ".", "filename", "=", "filename", "# temp", "self", ".", "dataset", "=", "Dataset", "(", "filename", ")", "#temp", "#==============================================================================", "# try:", "# self.filename = filename", "# self.dataset = Dataset(filename)", "# except FileNotFoundError:", "# msg = 'File ' + basename(filename) + ' cannot be read'", "# self.parent.statusBar().showMessage(msg)", "# lg.info(msg)", "# error_dialog = QErrorMessage()", "# error_dialog.setWindowTitle('Error opening dataset')", "# error_dialog.showMessage(msg)", "# if debug_filename is None:", "# error_dialog.exec()", "# return", "#", "# except BaseException as err:", "# self.parent.statusBar().showMessage(str(err))", "# lg.info('Error ' + str(err))", "# error_dialog = QErrorMessage()", "# error_dialog.setWindowTitle('Error opening dataset')", "# error_dialog.showMessage(str(err))", "# if debug_filename is None:", "# error_dialog.exec()", "# return", "#==============================================================================", "self", ".", "action", "[", "'export'", "]", ".", "setEnabled", "(", "True", ")", "self", ".", "parent", ".", "statusBar", "(", ")", ".", "showMessage", "(", "''", ")", "self", ".", "parent", ".", "update", "(", ")" ]
Open a new dataset. Parameters ---------- recent : path to file one of the recent datasets to read
[ "Open", "a", "new", "dataset", "." ]
python
train
36.226667
googledatalab/pydatalab
datalab/bigquery/_api.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L281-L298
def datasets_delete(self, dataset_name, delete_contents): """Issues a request to delete a dataset. Args: dataset_name: the name of the dataset to delete. delete_contents: if True, any tables in the dataset will be deleted. If False and the dataset is non-empty an exception will be raised. Returns: A parsed result object. Raises: Exception if there is an error performing the operation. """ url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name) args = {} if delete_contents: args['deleteContents'] = True return datalab.utils.Http.request(url, method='DELETE', args=args, credentials=self._credentials, raw_response=True)
[ "def", "datasets_delete", "(", "self", ",", "dataset_name", ",", "delete_contents", ")", ":", "url", "=", "Api", ".", "_ENDPOINT", "+", "(", "Api", ".", "_DATASETS_PATH", "%", "dataset_name", ")", "args", "=", "{", "}", "if", "delete_contents", ":", "args", "[", "'deleteContents'", "]", "=", "True", "return", "datalab", ".", "utils", ".", "Http", ".", "request", "(", "url", ",", "method", "=", "'DELETE'", ",", "args", "=", "args", ",", "credentials", "=", "self", ".", "_credentials", ",", "raw_response", "=", "True", ")" ]
Issues a request to delete a dataset. Args: dataset_name: the name of the dataset to delete. delete_contents: if True, any tables in the dataset will be deleted. If False and the dataset is non-empty an exception will be raised. Returns: A parsed result object. Raises: Exception if there is an error performing the operation.
[ "Issues", "a", "request", "to", "delete", "a", "dataset", "." ]
python
train
40.222222
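What the delete_contents flag in datasets_delete above amounts to at the HTTP level: a DELETE on the dataset URL with deleteContents=true in the query string. A hedged sketch using the requests library; URL construction, credentials and error handling are the wrapper's concern and are simplified away here.

    import requests

    def delete_dataset(url, delete_contents, session=requests):
        # Only attach the flag when contents should be removed too.
        params = {'deleteContents': 'true'} if delete_contents else {}
        return session.delete(url, params=params)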
sbg/sevenbridges-python
sevenbridges/meta/transformer.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/meta/transformer.py#L227-L237
def to_location(location): """Serializes location to string :param location: object to serialize :return: string """ if not location: raise SbgError('Location is required!') if isinstance(location, six.string_types): return location else: raise SbgError('Invalid location parameter!')
[ "def", "to_location", "(", "location", ")", ":", "if", "not", "location", ":", "raise", "SbgError", "(", "'Location is required!'", ")", "if", "isinstance", "(", "location", ",", "six", ".", "string_types", ")", ":", "return", "location", "else", ":", "raise", "SbgError", "(", "'Invalid location parameter!'", ")" ]
Serializes location to string :param location: object to serialize :return: string
[ "Serializes", "location", "to", "string", ":", "param", "location", ":", "object", "to", "serialize", ":", "return", ":", "string" ]
python
train
33.272727
razorpay/razorpay-python
razorpay/resources/invoice.py
https://github.com/razorpay/razorpay-python/blob/5bc63fd8452165a4b54556888492e555222c8afe/razorpay/resources/invoice.py#L15-L22
def all(self, data={}, **kwargs): """ Fetch all Invoice entities Returns: Dictionary of Invoice data """ return super(Invoice, self).all(data, **kwargs)
[ "def", "all", "(", "self", ",", "data", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "Invoice", ",", "self", ")", ".", "all", "(", "data", ",", "*", "*", "kwargs", ")" ]
Fetch all Invoice entities Returns: Dictionary of Invoice data
[ "Fetch", "all", "Invoice", "entities" ]
python
train
24.875
federico123579/Trading212-API
tradingAPI/low_level.py
https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/low_level.py#L131-L166
def login(self, username, password, mode="demo"): """login function""" url = "https://trading212.com/it/login" try: logger.debug(f"visiting %s" % url) self.browser.visit(url) logger.debug(f"connected to %s" % url) except selenium.common.exceptions.WebDriverException: logger.critical("connection timed out") raise try: self.search_name("login[username]").fill(username) self.search_name("login[password]").fill(password) self.css1(path['log']).click() # define a timeout for logging in timeout = time.time() + 30 while not self.elCss(path['logo']): if time.time() > timeout: logger.critical("login failed") raise CredentialsException(username) time.sleep(1) logger.info(f"logged in as {username}") # check if it's a weekend if mode == "demo" and datetime.now().isoweekday() in range(5, 8): timeout = time.time() + 10 while not self.elCss(path['alert-box']): if time.time() > timeout: logger.warning("weekend trading alert-box not closed") break if self.elCss(path['alert-box']): self.css1(path['alert-box']).click() logger.debug("weekend trading alert-box closed") except Exception as e: logger.critical("login failed") raise exceptions.BaseExc(e) return True
[ "def", "login", "(", "self", ",", "username", ",", "password", ",", "mode", "=", "\"demo\"", ")", ":", "url", "=", "\"https://trading212.com/it/login\"", "try", ":", "logger", ".", "debug", "(", "f\"visiting %s\"", "%", "url", ")", "self", ".", "browser", ".", "visit", "(", "url", ")", "logger", ".", "debug", "(", "f\"connected to %s\"", "%", "url", ")", "except", "selenium", ".", "common", ".", "exceptions", ".", "WebDriverException", ":", "logger", ".", "critical", "(", "\"connection timed out\"", ")", "raise", "try", ":", "self", ".", "search_name", "(", "\"login[username]\"", ")", ".", "fill", "(", "username", ")", "self", ".", "search_name", "(", "\"login[password]\"", ")", ".", "fill", "(", "password", ")", "self", ".", "css1", "(", "path", "[", "'log'", "]", ")", ".", "click", "(", ")", "# define a timeout for logging in", "timeout", "=", "time", ".", "time", "(", ")", "+", "30", "while", "not", "self", ".", "elCss", "(", "path", "[", "'logo'", "]", ")", ":", "if", "time", ".", "time", "(", ")", ">", "timeout", ":", "logger", ".", "critical", "(", "\"login failed\"", ")", "raise", "CredentialsException", "(", "username", ")", "time", ".", "sleep", "(", "1", ")", "logger", ".", "info", "(", "f\"logged in as {username}\"", ")", "# check if it's a weekend", "if", "mode", "==", "\"demo\"", "and", "datetime", ".", "now", "(", ")", ".", "isoweekday", "(", ")", "in", "range", "(", "5", ",", "8", ")", ":", "timeout", "=", "time", ".", "time", "(", ")", "+", "10", "while", "not", "self", ".", "elCss", "(", "path", "[", "'alert-box'", "]", ")", ":", "if", "time", ".", "time", "(", ")", ">", "timeout", ":", "logger", ".", "warning", "(", "\"weekend trading alert-box not closed\"", ")", "break", "if", "self", ".", "elCss", "(", "path", "[", "'alert-box'", "]", ")", ":", "self", ".", "css1", "(", "path", "[", "'alert-box'", "]", ")", ".", "click", "(", ")", "logger", ".", "debug", "(", "\"weekend trading alert-box closed\"", ")", "except", "Exception", "as", "e", ":", "logger", ".", "critical", "(", "\"login failed\"", ")", "raise", "exceptions", ".", "BaseExc", "(", "e", ")", "return", "True" ]
login function
[ "login", "function" ]
python
train
44.333333
PGower/PyCanvas
pycanvas/apis/notification_preferences.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/notification_preferences.py#L111-L138
def get_preference_type(self, type, user_id, address, notification):
    """
    Get a preference.
    Fetch the preference for the given notification for the given communication channel
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - user_id
    """ID"""
    path["user_id"] = user_id

    # REQUIRED - PATH - type
    """ID"""
    path["type"] = type

    # REQUIRED - PATH - address
    """ID"""
    path["address"] = address

    # REQUIRED - PATH - notification
    """ID"""
    path["notification"] = notification

    self.logger.debug("GET /api/v1/users/{user_id}/communication_channels/{type}/{address}/notification_preferences/{notification} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/users/{user_id}/communication_channels/{type}/{address}/notification_preferences/{notification}".format(**path), data=data, params=params, single_item=True)
[ "def", "get_preference_type", "(", "self", ",", "type", ",", "user_id", ",", "address", ",", "notification", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - user_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"user_id\"", "]", "=", "user_id", "# REQUIRED - PATH - type\r", "\"\"\"ID\"\"\"", "path", "[", "\"type\"", "]", "=", "type", "# REQUIRED - PATH - address\r", "\"\"\"ID\"\"\"", "path", "[", "\"address\"", "]", "=", "address", "# REQUIRED - PATH - notification\r", "\"\"\"ID\"\"\"", "path", "[", "\"notification\"", "]", "=", "notification", "self", ".", "logger", ".", "debug", "(", "\"GET /api/v1/users/{user_id}/communication_channels/{type}/{address}/notification_preferences/{notification} with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"GET\"", ",", "\"/api/v1/users/{user_id}/communication_channels/{type}/{address}/notification_preferences/{notification}\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "single_item", "=", "True", ")" ]
Get a preference.
Fetch the preference for the given notification for the given communication channel
[ "Get", "a", "preference", ".", "Fetch", "the", "preference", "for", "the", "given", "notification", "for", "the", "given", "communicaiton", "channel" ]
python
train
37.964286
rosenbrockc/fortpy
fortpy/scripts/analyze.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/analyze.py#L765-L776
def do_rmpostfix(self, arg):
    """Removes a postfix function from a variable. See 'postfix'."""
    altered = False
    if arg in self.curargs["functions"]:
        del self.curargs["functions"][arg]
        altered = True
    elif arg == "*":
        for varname in list(self.curargs["functions"].keys()):
            del self.curargs["functions"][varname]
            altered = True
    if altered:
        self.do_postfix("list")
[ "def", "do_rmpostfix", "(", "self", ",", "arg", ")", ":", "altered", "=", "False", "if", "arg", "in", "self", ".", "curargs", "[", "\"functions\"", "]", ":", "del", "self", ".", "curargs", "[", "\"functions\"", "]", "[", "arg", "]", "altered", "=", "True", "elif", "arg", "==", "\"*\"", ":", "for", "varname", "in", "list", "(", "self", ".", "curargs", "[", "\"functions\"", "]", ".", "keys", "(", ")", ")", ":", "del", "self", ".", "curargs", "[", "\"functions\"", "]", "[", "varname", "]", "altered", "=", "True", "if", "altered", ":", "self", ".", "do_postfix", "(", "\"list\"", ")" ]
Removes a postfix function from a variable. See 'postfix'.
[ "Removes", "a", "postfix", "function", "from", "a", "variable", ".", "See", "postfix", "." ]
python
train
38.583333
andreikop/qutepart
qutepart/__init__.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/__init__.py#L862-L879
def setExtraSelections(self, selections):
    """Set list of extra selections.
    Selections are list of tuples ``(startAbsolutePosition, length)``.
    Extra selections are reset on any text modification.

    This is a reimplemented method of QPlainTextEdit; it has a different signature. Do not use the QPlainTextEdit method.
    """
    def _makeQtExtraSelection(startAbsolutePosition, length):
        selection = QTextEdit.ExtraSelection()
        cursor = QTextCursor(self.document())
        cursor.setPosition(startAbsolutePosition)
        cursor.setPosition(startAbsolutePosition + length, QTextCursor.KeepAnchor)
        selection.cursor = cursor
        selection.format = self._userExtraSelectionFormat
        return selection

    self._userExtraSelections = [_makeQtExtraSelection(*item) for item in selections]
    self._updateExtraSelections()
[ "def", "setExtraSelections", "(", "self", ",", "selections", ")", ":", "def", "_makeQtExtraSelection", "(", "startAbsolutePosition", ",", "length", ")", ":", "selection", "=", "QTextEdit", ".", "ExtraSelection", "(", ")", "cursor", "=", "QTextCursor", "(", "self", ".", "document", "(", ")", ")", "cursor", ".", "setPosition", "(", "startAbsolutePosition", ")", "cursor", ".", "setPosition", "(", "startAbsolutePosition", "+", "length", ",", "QTextCursor", ".", "KeepAnchor", ")", "selection", ".", "cursor", "=", "cursor", "selection", ".", "format", "=", "self", ".", "_userExtraSelectionFormat", "return", "selection", "self", ".", "_userExtraSelections", "=", "[", "_makeQtExtraSelection", "(", "*", "item", ")", "for", "item", "in", "selections", "]", "self", ".", "_updateExtraSelections", "(", ")" ]
Set list of extra selections.
Selections are list of tuples ``(startAbsolutePosition, length)``.
Extra selections are reset on any text modification.

This is a reimplemented method of QPlainTextEdit; it has a different signature. Do not use the QPlainTextEdit method.
[ "Set", "list", "of", "extra", "selections", ".", "Selections", "are", "list", "of", "tuples", "(", "startAbsolutePosition", "length", ")", ".", "Extra", "selections", "are", "reset", "on", "any", "text", "modification", "." ]
python
train
49.833333
bitesofcode/projexui
projexui/menus/xrecentfilesmenu.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/menus/xrecentfilesmenu.py#L114-L132
def setFilenames( self, filenames ):
    """
    Sets the list of filenames that will be used for this menu to the \
    inputted list.

    :param      filenames | [<str>, ..]
    """
    mapped = []
    for filename in filenames:
        filename = nativestring(filename)
        if ( not filename ):
            continue

        mapped.append(filename)
        if ( len(mapped) == self.maximumLength() ):
            break

    self._filenames = mapped
    self.refresh()
[ "def", "setFilenames", "(", "self", ",", "filenames", ")", ":", "mapped", "=", "[", "]", "for", "filename", "in", "filenames", ":", "filename", "=", "nativestring", "(", "filename", ")", "if", "(", "not", "filename", ")", ":", "continue", "mapped", ".", "append", "(", "filename", ")", "if", "(", "len", "(", "mapped", ")", "==", "self", ".", "maximumLength", "(", ")", ")", ":", "break", "self", ".", "_filenames", "=", "mapped", "self", ".", "refresh", "(", ")" ]
Sets the list of filenames that will be used for this menu to the \
inputted list.

:param      filenames | [<str>, ..]
[ "Sets", "the", "list", "of", "filenames", "that", "will", "be", "used", "for", "this", "menu", "to", "the", "\\", "inputed", "list", ".", ":", "param", "filenames", "|", "[", "<str", ">", "..", "]" ]
python
train
28.631579
pymc-devs/pymc
pymc/Model.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L516-L579
def _assign_database_backend(self, db):
    """Assign Trace instance to stochastics and deterministics and Database instance
    to self.

    :Parameters:
      - `db` : string, Database instance
        The name of the database module (see below), or a Database instance.

    Available databases:
      - `no_trace` : Traces are not stored at all.
      - `ram` : Traces stored in memory.
      - `txt` : Traces stored in memory and saved in txt files at end of sampling.
      - `sqlite` : Traces stored in sqlite database.
      - `hdf5` : Traces stored in an HDF5 file.
    """
    # Objects that are not to be tallied are assigned a no_trace.Trace
    # Tallyable objects are listed in the _nodes_to_tally set.
    no_trace = getattr(database, 'no_trace')
    self._variables_to_tally = set()
    for object in self.stochastics | self.deterministics:

        if object.keep_trace:
            self._variables_to_tally.add(object)
            try:
                if object.mask is None:
                    # Standard stochastic
                    self._funs_to_tally[object.__name__] = object.get_value
                else:
                    # Has missing values, so only fetch stochastic elements
                    # using mask
                    self._funs_to_tally[
                        object.__name__] = object.get_stoch_value
            except AttributeError:
                # Not a stochastic object, so no mask
                self._funs_to_tally[object.__name__] = object.get_value
        else:
            object.trace = no_trace.Trace(object.__name__)

    check_valid_object_name(self._variables_to_tally)

    # If not already done, load the trace backend from the database
    # module, and assign a database instance to Model.
    if isinstance(db, str):
        if db in dir(database):
            module = getattr(database, db)

            # Assign a default name for the database output file.
            if self._db_args.get('dbname') is None:
                self._db_args['dbname'] = self.__name__ + '.' + db

            self.db = module.Database(**self._db_args)
        elif db in database.__modules__:
            raise ImportError(
                'Database backend `%s` is not properly installed. Please see the documentation for instructions.' % db)
        else:
            raise AttributeError(
                'Database backend `%s` is not defined in pymc.database.' % db)
    elif isinstance(db, database.base.Database):
        self.db = db
        self.restore_sampler_state()
    else:
        # What is this for? DH.
        self.db = db.Database(**self._db_args)
[ "def", "_assign_database_backend", "(", "self", ",", "db", ")", ":", "# Objects that are not to be tallied are assigned a no_trace.Trace", "# Tallyable objects are listed in the _nodes_to_tally set.", "no_trace", "=", "getattr", "(", "database", ",", "'no_trace'", ")", "self", ".", "_variables_to_tally", "=", "set", "(", ")", "for", "object", "in", "self", ".", "stochastics", "|", "self", ".", "deterministics", ":", "if", "object", ".", "keep_trace", ":", "self", ".", "_variables_to_tally", ".", "add", "(", "object", ")", "try", ":", "if", "object", ".", "mask", "is", "None", ":", "# Standard stochastic", "self", ".", "_funs_to_tally", "[", "object", ".", "__name__", "]", "=", "object", ".", "get_value", "else", ":", "# Has missing values, so only fetch stochastic elements", "# using mask", "self", ".", "_funs_to_tally", "[", "object", ".", "__name__", "]", "=", "object", ".", "get_stoch_value", "except", "AttributeError", ":", "# Not a stochastic object, so no mask", "self", ".", "_funs_to_tally", "[", "object", ".", "__name__", "]", "=", "object", ".", "get_value", "else", ":", "object", ".", "trace", "=", "no_trace", ".", "Trace", "(", "object", ".", "__name__", ")", "check_valid_object_name", "(", "self", ".", "_variables_to_tally", ")", "# If not already done, load the trace backend from the database", "# module, and assign a database instance to Model.", "if", "isinstance", "(", "db", ",", "str", ")", ":", "if", "db", "in", "dir", "(", "database", ")", ":", "module", "=", "getattr", "(", "database", ",", "db", ")", "# Assign a default name for the database output file.", "if", "self", ".", "_db_args", ".", "get", "(", "'dbname'", ")", "is", "None", ":", "self", ".", "_db_args", "[", "'dbname'", "]", "=", "self", ".", "__name__", "+", "'.'", "+", "db", "self", ".", "db", "=", "module", ".", "Database", "(", "*", "*", "self", ".", "_db_args", ")", "elif", "db", "in", "database", ".", "__modules__", ":", "raise", "ImportError", "(", "'Database backend `%s` is not properly installed. Please see the documentation for instructions.'", "%", "db", ")", "else", ":", "raise", "AttributeError", "(", "'Database backend `%s` is not defined in pymc.database.'", "%", "db", ")", "elif", "isinstance", "(", "db", ",", "database", ".", "base", ".", "Database", ")", ":", "self", ".", "db", "=", "db", "self", ".", "restore_sampler_state", "(", ")", "else", ":", "# What is this for? DH.", "self", ".", "db", "=", "db", ".", "Database", "(", "*", "*", "self", ".", "_db_args", ")" ]
Assign Trace instance to stochastics and deterministics and Database instance
to self.

:Parameters:
  - `db` : string, Database instance
    The name of the database module (see below), or a Database instance.

Available databases:
  - `no_trace` : Traces are not stored at all.
  - `ram` : Traces stored in memory.
  - `txt` : Traces stored in memory and saved in txt files at end of sampling.
  - `sqlite` : Traces stored in sqlite database.
  - `hdf5` : Traces stored in an HDF5 file.
[ "Assign", "Trace", "instance", "to", "stochastics", "and", "deterministics", "and", "Database", "instance", "to", "self", "." ]
python
train
43.546875
faulkner/sphero
sphero/core.py
https://github.com/faulkner/sphero/blob/a7663df1804ae758e650a04770d50f607be486ae/sphero/core.py#L146-L152
def set_rotation_rate(self, val):
    """value can be between 0x00 and 0xFF:
    value is multiplied by 0.784 degrees/s except for:
        0   --> 1 degrees/s
        255 --> jumps to 400 degrees/s
    """
    return self.write(request.SetRotationRate(self.seq, val))
[ "def", "set_rotation_rate", "(", "self", ",", "val", ")", ":", "return", "self", ".", "write", "(", "request", ".", "SetRotationRate", "(", "self", ".", "seq", ",", "val", ")", ")" ]
value can be between 0x00 and 0xFF:
value is multiplied by 0.784 degrees/s except for:
    0   --> 1 degrees/s
    255 --> jumps to 400 degrees/s
[ "value", "ca", "be", "between", "0x00", "and", "0xFF", ":", "value", "is", "a", "multiplied", "with", "0", ".", "784", "degrees", "/", "s", "except", "for", ":", "0", "--", ">", "1", "degrees", "/", "s", "255", "--", ">", "jumps", "to", "400", "degrees", "/", "s" ]
python
train
41.857143
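An illustrative usage sketch for set_rotation_rate (not from the sphero source; the helper name rotation_rate_value and the clamping policy are assumptions) showing how a desired angular speed maps onto the documented 0x00-0xFF register value:

def rotation_rate_value(degrees_per_s):
    # Inverse of the documented mapping: each register step is ~0.784 deg/s.
    # 0 and 255 are special-cased by the firmware (1 deg/s and 400 deg/s).
    val = int(round(degrees_per_s / 0.784))
    return max(0, min(val, 255))

# e.g. request roughly 100 deg/s:
# sphero.set_rotation_rate(rotation_rate_value(100))  # -> value 128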
openego/ding0
ding0/core/network/__init__.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/core/network/__init__.py#L228-L237
def graph_nodes_sorted(self):
    """ Returns an (ascending) sorted list of graph's nodes (name is used as key).

    Returns
    -------
    :any:`list`
        Description #TODO check
    """
    return sorted(self._graph.nodes(), key=lambda _: repr(_))
[ "def", "graph_nodes_sorted", "(", "self", ")", ":", "return", "sorted", "(", "self", ".", "_graph", ".", "nodes", "(", ")", ",", "key", "=", "lambda", "_", ":", "repr", "(", "_", ")", ")" ]
Returns an (ascending) sorted list of graph's nodes (name is used as key).

Returns
-------
:any:`list`
    Description #TODO check
[ "Returns", "an", "(", "ascending", ")", "sorted", "list", "of", "graph", "s", "nodes", "(", "name", "is", "used", "as", "key", ")", ".", "Returns", "-------", ":", "any", ":", "list", "Description", "#TODO", "check" ]
python
train
28.3
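A standalone sketch (plain Python, no ding0 objects) of the design choice above: keying the sort on repr() gives a stable, deterministic order even when nodes are heterogeneous objects, at the cost of lexicographic quirks:

nodes = [3, 'station_2', (1, 4), 'station_10']
print(sorted(nodes, key=lambda _: repr(_)))
# ['station_10', 'station_2', (1, 4), 3]  -- textual order, so 10 sorts before 2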
UCL-INGI/INGInious
inginious/frontend/user_manager.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/user_manager.py#L723-L752
def course_is_open_to_user(self, course, username=None, lti=None):
    """ Checks if a user can access a course
    :param course: a Course object
    :param username: The username of the user that we want to check. If None, uses self.session_username()
    :param lti: indicates if the user is currently in a LTI session or not.
        - None to ignore the check
        - True to indicate the user is in a LTI session
        - False to indicate the user is not in a LTI session
        - "auto" to enable the check and take the information from the current session
    :return: True if the user can access the course, False otherwise
    """
    if username is None:
        username = self.session_username()
    if lti == "auto":
        lti = self.session_lti_info() is not None

    if self.has_staff_rights_on_course(course, username):
        return True

    if not course.get_accessibility().is_open() or (not self.course_is_user_registered(course, username) and not course.allow_preview()):
        return False

    if lti and course.is_lti() != lti:
        return False

    if lti is False and course.is_lti():
        return not course.lti_send_back_grade()

    return True
[ "def", "course_is_open_to_user", "(", "self", ",", "course", ",", "username", "=", "None", ",", "lti", "=", "None", ")", ":", "if", "username", "is", "None", ":", "username", "=", "self", ".", "session_username", "(", ")", "if", "lti", "==", "\"auto\"", ":", "lti", "=", "self", ".", "session_lti_info", "(", ")", "is", "not", "None", "if", "self", ".", "has_staff_rights_on_course", "(", "course", ",", "username", ")", ":", "return", "True", "if", "not", "course", ".", "get_accessibility", "(", ")", ".", "is_open", "(", ")", "or", "(", "not", "self", ".", "course_is_user_registered", "(", "course", ",", "username", ")", "and", "not", "course", ".", "allow_preview", "(", ")", ")", ":", "return", "False", "if", "lti", "and", "course", ".", "is_lti", "(", ")", "!=", "lti", ":", "return", "False", "if", "lti", "is", "False", "and", "course", ".", "is_lti", "(", ")", ":", "return", "not", "course", ".", "lti_send_back_grade", "(", ")", "return", "True" ]
Checks if a user can access a course
:param course: a Course object
:param username: The username of the user that we want to check. If None, uses self.session_username()
:param lti: indicates if the user is currently in a LTI session or not.
    - None to ignore the check
    - True to indicate the user is in a LTI session
    - False to indicate the user is not in a LTI session
    - "auto" to enable the check and take the information from the current session
:return: True if the user can access the course, False otherwise
[ "Checks", "if", "a", "user", "is", "can", "access", "a", "course", ":", "param", "course", ":", "a", "Course", "object", ":", "param", "username", ":", "The", "username", "of", "the", "user", "that", "we", "want", "to", "check", ".", "If", "None", "uses", "self", ".", "session_username", "()", ":", "param", "lti", ":", "indicates", "if", "the", "user", "is", "currently", "in", "a", "LTI", "session", "or", "not", ".", "-", "None", "to", "ignore", "the", "check", "-", "True", "to", "indicate", "the", "user", "is", "in", "a", "LTI", "session", "-", "False", "to", "indicate", "the", "user", "is", "not", "in", "a", "LTI", "session", "-", "auto", "to", "enable", "the", "check", "and", "take", "the", "information", "from", "the", "current", "session", ":", "return", ":", "True", "if", "the", "user", "can", "access", "the", "course", "False", "else" ]
python
train
42.033333
chrislit/abydos
abydos/distance/_editex.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/distance/_editex.py#L229-L263
def editex(src, tar, cost=(0, 1, 2), local=False):
    """Return the Editex distance between two strings.

    This is a wrapper for :py:meth:`Editex.dist_abs`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    cost : tuple
        A 3-tuple representing the cost of the four possible edits: match,
        same-group, and mismatch respectively (by default: (0, 1, 2))
    local : bool
        If True, the local variant of Editex is used

    Returns
    -------
    int
        Editex distance

    Examples
    --------
    >>> editex('cat', 'hat')
    2
    >>> editex('Niall', 'Neil')
    2
    >>> editex('aluminum', 'Catalan')
    12
    >>> editex('ATCG', 'TAGC')
    6

    """
    return Editex().dist_abs(src, tar, cost, local)
[ "def", "editex", "(", "src", ",", "tar", ",", "cost", "=", "(", "0", ",", "1", ",", "2", ")", ",", "local", "=", "False", ")", ":", "return", "Editex", "(", ")", ".", "dist_abs", "(", "src", ",", "tar", ",", "cost", ",", "local", ")" ]
Return the Editex distance between two strings.

This is a wrapper for :py:meth:`Editex.dist_abs`.

Parameters
----------
src : str
    Source string for comparison
tar : str
    Target string for comparison
cost : tuple
    A 3-tuple representing the cost of the four possible edits: match,
    same-group, and mismatch respectively (by default: (0, 1, 2))
local : bool
    If True, the local variant of Editex is used

Returns
-------
int
    Editex distance

Examples
--------
>>> editex('cat', 'hat')
2
>>> editex('Niall', 'Neil')
2
>>> editex('aluminum', 'Catalan')
12
>>> editex('ATCG', 'TAGC')
6
[ "Return", "the", "Editex", "distance", "between", "two", "strings", "." ]
python
valid
22.6
eddiejessup/spatious
spatious/geom.py
https://github.com/eddiejessup/spatious/blob/b7ae91bec029e85a45a7f303ee184076433723cd/spatious/geom.py#L260-L289
def point_seg_sep(ar, br1, br2):
    """Return the minimum separation vector between a point and a line segment,
    in 3 dimensions.

    Parameters
    ----------
    ar: array-like, shape (3,)
        Coordinates of a point.
    br1, br2: array-like, shape (3,)
        Coordinates for the points of a line segment

    Returns
    -------
    sep: float array, shape (3,)
        Separation vector between point and line segment.
    """
    v = br2 - br1
    w = ar - br1

    c1 = np.dot(w, v)
    if c1 <= 0.0:
        return ar - br1

    c2 = np.sum(np.square(v))
    if c2 <= c1:
        return ar - br2

    b = c1 / c2
    bc = br1 + b * v
    return ar - bc
[ "def", "point_seg_sep", "(", "ar", ",", "br1", ",", "br2", ")", ":", "v", "=", "br2", "-", "br1", "w", "=", "ar", "-", "br1", "c1", "=", "np", ".", "dot", "(", "w", ",", "v", ")", "if", "c1", "<=", "0.0", ":", "return", "ar", "-", "br1", "c2", "=", "np", ".", "sum", "(", "np", ".", "square", "(", "v", ")", ")", "if", "c2", "<=", "c1", ":", "return", "ar", "-", "br2", "b", "=", "c1", "/", "c2", "bc", "=", "br1", "+", "b", "*", "v", "return", "ar", "-", "bc" ]
Return the minimum separation vector between a point and a line segment,
in 3 dimensions.

Parameters
----------
ar: array-like, shape (3,)
    Coordinates of a point.
br1, br2: array-like, shape (3,)
    Coordinates for the points of a line segment

Returns
-------
sep: float array, shape (3,)
    Separation vector between point and line segment.
[ "Return", "the", "minimum", "separation", "vector", "between", "a", "point", "and", "a", "line", "segment", "in", "3", "dimensions", "." ]
python
train
21.366667
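A worked NumPy example of the projection logic in point_seg_sep (values made up for illustration); it traces the same c1/c2 tests the function performs:

import numpy as np

ar = np.array([1.0, 1.0, 0.0])           # query point
br1 = np.array([0.0, 0.0, 0.0])          # segment start
br2 = np.array([2.0, 0.0, 0.0])          # segment end

v = br2 - br1                            # segment direction, (2, 0, 0)
w = ar - br1                             # start -> point, (1, 1, 0)
c1 = np.dot(w, v)                        # 2.0 > 0, so not before the start
c2 = np.sum(np.square(v))                # 4.0 > c1, so not past the end
b = c1 / c2                              # 0.5: closest point is the midpoint
bc = br1 + b * v                         # (1, 0, 0)
print(ar - bc)                           # [0. 1. 0.] -- minimum separation vector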
fabaff/python-glances-api
example.py
https://github.com/fabaff/python-glances-api/blob/7ed8a688617d0d0b1c8d5b107559fc4afcdbaaac/example.py#L12-L27
async def main():
    """The main part of the example script."""
    async with aiohttp.ClientSession() as session:
        data = Glances(loop, session, version=VERSION)

        # Get the metrics for the memory
        await data.get_metrics('mem')

        # Print the values
        print("Memory values:", data.values)

        # Get the metrics about the disks
        await data.get_metrics('diskio')

        # Print the values
        print("Disk values:", data.values)
[ "async", "def", "main", "(", ")", ":", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "session", ":", "data", "=", "Glances", "(", "loop", ",", "session", ",", "version", "=", "VERSION", ")", "# Get the metrics for the memory", "await", "data", ".", "get_metrics", "(", "'mem'", ")", "# Print the values", "print", "(", "\"Memory values:\"", ",", "data", ".", "values", ")", "# Get the metrics about the disks", "await", "data", ".", "get_metrics", "(", "'diskio'", ")", "# Print the values", "print", "(", "\"Disk values:\"", ",", "data", ".", "values", ")" ]
The main part of the example script.
[ "The", "main", "part", "of", "the", "example", "script", "." ]
python
train
28.9375
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/collectionseditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/collectionseditor.py#L325-L355
def data(self, index, role=Qt.DisplayRole):
    """Cell content"""
    if not index.isValid():
        return to_qvariant()
    value = self.get_value(index)
    if index.column() == 3 and self.remote:
        value = value['view']
    if index.column() == 3:
        display = value_to_display(value, minmax=self.minmax)
    else:
        if is_type_text_string(value):
            display = to_text_string(value, encoding="utf-8")
        else:
            display = to_text_string(value)
    if role == Qt.DisplayRole:
        return to_qvariant(display)
    elif role == Qt.EditRole:
        return to_qvariant(value_to_display(value))
    elif role == Qt.TextAlignmentRole:
        if index.column() == 3:
            if len(display.splitlines()) < 3:
                return to_qvariant(int(Qt.AlignLeft|Qt.AlignVCenter))
            else:
                return to_qvariant(int(Qt.AlignLeft|Qt.AlignTop))
        else:
            return to_qvariant(int(Qt.AlignLeft|Qt.AlignVCenter))
    elif role == Qt.BackgroundColorRole:
        return to_qvariant( self.get_bgcolor(index) )
    elif role == Qt.FontRole:
        return to_qvariant(get_font(font_size_delta=DEFAULT_SMALL_DELTA))
    return to_qvariant()
[ "def", "data", "(", "self", ",", "index", ",", "role", "=", "Qt", ".", "DisplayRole", ")", ":", "if", "not", "index", ".", "isValid", "(", ")", ":", "return", "to_qvariant", "(", ")", "value", "=", "self", ".", "get_value", "(", "index", ")", "if", "index", ".", "column", "(", ")", "==", "3", "and", "self", ".", "remote", ":", "value", "=", "value", "[", "'view'", "]", "if", "index", ".", "column", "(", ")", "==", "3", ":", "display", "=", "value_to_display", "(", "value", ",", "minmax", "=", "self", ".", "minmax", ")", "else", ":", "if", "is_type_text_string", "(", "value", ")", ":", "display", "=", "to_text_string", "(", "value", ",", "encoding", "=", "\"utf-8\"", ")", "else", ":", "display", "=", "to_text_string", "(", "value", ")", "if", "role", "==", "Qt", ".", "DisplayRole", ":", "return", "to_qvariant", "(", "display", ")", "elif", "role", "==", "Qt", ".", "EditRole", ":", "return", "to_qvariant", "(", "value_to_display", "(", "value", ")", ")", "elif", "role", "==", "Qt", ".", "TextAlignmentRole", ":", "if", "index", ".", "column", "(", ")", "==", "3", ":", "if", "len", "(", "display", ".", "splitlines", "(", ")", ")", "<", "3", ":", "return", "to_qvariant", "(", "int", "(", "Qt", ".", "AlignLeft", "|", "Qt", ".", "AlignVCenter", ")", ")", "else", ":", "return", "to_qvariant", "(", "int", "(", "Qt", ".", "AlignLeft", "|", "Qt", ".", "AlignTop", ")", ")", "else", ":", "return", "to_qvariant", "(", "int", "(", "Qt", ".", "AlignLeft", "|", "Qt", ".", "AlignVCenter", ")", ")", "elif", "role", "==", "Qt", ".", "BackgroundColorRole", ":", "return", "to_qvariant", "(", "self", ".", "get_bgcolor", "(", "index", ")", ")", "elif", "role", "==", "Qt", ".", "FontRole", ":", "return", "to_qvariant", "(", "get_font", "(", "font_size_delta", "=", "DEFAULT_SMALL_DELTA", ")", ")", "return", "to_qvariant", "(", ")" ]
Cell content
[ "Cell", "content" ]
python
train
43.032258
blockstack/virtualchain
virtualchain/lib/indexer.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/indexer.py#L647-L652
def get_state_paths(cls, impl, working_dir):
    """
    Get the set of state paths that point to the current chain and state info.
    Returns a list of paths.
    """
    return [config.get_db_filename(impl, working_dir), config.get_snapshots_filename(impl, working_dir)]
[ "def", "get_state_paths", "(", "cls", ",", "impl", ",", "working_dir", ")", ":", "return", "[", "config", ".", "get_db_filename", "(", "impl", ",", "working_dir", ")", ",", "config", ".", "get_snapshots_filename", "(", "impl", ",", "working_dir", ")", "]" ]
Get the set of state paths that point to the current chain and state info. Returns a list of paths.
[ "Get", "the", "set", "of", "state", "paths", "that", "point", "to", "the", "current", "chain", "and", "state", "info", ".", "Returns", "a", "list", "of", "paths", "." ]
python
train
48
elifiner/termenu
termenu/menu.py
https://github.com/elifiner/termenu/blob/a7a57a1b07d8451003ee750704cdf0d904e9e272/termenu/menu.py#L562-L574
def redirect_std():
    """
    Connect stdin/stdout to controlling terminal even if the script's input and output
    were redirected. This is useful in utilities based on termenu.
    """
    stdin = sys.stdin
    stdout = sys.stdout
    if not sys.stdin.isatty():
        sys.stdin = open_raw("/dev/tty", "r", 0)
    if not sys.stdout.isatty():
        sys.stdout = open_raw("/dev/tty", "w", 0)
    return stdin, stdout
[ "def", "redirect_std", "(", ")", ":", "stdin", "=", "sys", ".", "stdin", "stdout", "=", "sys", ".", "stdout", "if", "not", "sys", ".", "stdin", ".", "isatty", "(", ")", ":", "sys", ".", "stdin", "=", "open_raw", "(", "\"/dev/tty\"", ",", "\"r\"", ",", "0", ")", "if", "not", "sys", ".", "stdout", ".", "isatty", "(", ")", ":", "sys", ".", "stdout", "=", "open_raw", "(", "\"/dev/tty\"", ",", "\"w\"", ",", "0", ")", "return", "stdin", ",", "stdout" ]
Connect stdin/stdout to controlling terminal even if the script's input and output
were redirected. This is useful in utilities based on termenu.
[ "Connect", "stdin", "/", "stdout", "to", "controlling", "terminal", "even", "if", "the", "scripts", "input", "and", "output", "were", "redirected", ".", "This", "is", "useful", "in", "utilities", "based", "on", "termenu", "." ]
python
train
31.538462
mesbahamin/chronophore
chronophore/qtview.py
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/qtview.py#L278-L286
def update_user_type(self):
    """Return either 'tutor' or 'student' based on which radio
    button is selected.
    """
    if self.rb_tutor.isChecked():
        self.user_type = 'tutor'
    elif self.rb_student.isChecked():
        self.user_type = 'student'
    self.accept()
[ "def", "update_user_type", "(", "self", ")", ":", "if", "self", ".", "rb_tutor", ".", "isChecked", "(", ")", ":", "self", ".", "user_type", "=", "'tutor'", "elif", "self", ".", "rb_student", ".", "isChecked", "(", ")", ":", "self", ".", "user_type", "=", "'student'", "self", ".", "accept", "(", ")" ]
Return either 'tutor' or 'student' based on which radio button is selected.
[ "Return", "either", "tutor", "or", "student", "based", "on", "which", "radio", "button", "is", "selected", "." ]
python
train
33.777778
ninuxorg/nodeshot
nodeshot/core/websockets/registrars/nodes.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/websockets/registrars/nodes.py#L42-L46
def disconnect():
    """ disconnect signals """
    post_save.disconnect(node_created_handler, sender=Node)
    node_status_changed.disconnect(node_status_changed_handler)
    pre_delete.disconnect(node_deleted_handler, sender=Node)
[ "def", "disconnect", "(", ")", ":", "post_save", ".", "disconnect", "(", "node_created_handler", ",", "sender", "=", "Node", ")", "node_status_changed", ".", "disconnect", "(", "node_status_changed_handler", ")", "pre_delete", ".", "disconnect", "(", "node_deleted_handler", ",", "sender", "=", "Node", ")" ]
disconnect signals
[ "disconnect", "signals" ]
python
train
45.8
aiogram/aiogram
aiogram/types/message.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/types/message.py#L824-L885
async def send_animation(self,
                         animation: typing.Union[base.InputFile, base.String],
                         duration: typing.Union[base.Integer, None] = None,
                         width: typing.Union[base.Integer, None] = None,
                         height: typing.Union[base.Integer, None] = None,
                         thumb: typing.Union[typing.Union[base.InputFile, base.String], None] = None,
                         caption: typing.Union[base.String, None] = None,
                         parse_mode: typing.Union[base.String, None] = None,
                         disable_notification: typing.Union[base.Boolean, None] = None,
                         reply_markup=None,
                         reply=True) -> Message:
    """
    Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound).

    On success, the sent Message is returned.
    Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future.

    Source https://core.telegram.org/bots/api#sendanimation

    :param animation: Animation to send. Pass a file_id as String to send an animation that exists
        on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an
        animation from the Internet, or upload a new animation using multipart/form-data
    :type animation: :obj:`typing.Union[base.InputFile, base.String]`
    :param duration: Duration of sent animation in seconds
    :type duration: :obj:`typing.Union[base.Integer, None]`
    :param width: Animation width
    :type width: :obj:`typing.Union[base.Integer, None]`
    :param height: Animation height
    :type height: :obj:`typing.Union[base.Integer, None]`
    :param thumb: Thumbnail of the file sent. The thumbnail should be in JPEG format and less than
        200 kB in size. A thumbnail's width and height should not exceed 90.
    :type thumb: :obj:`typing.Union[typing.Union[base.InputFile, base.String], None]`
    :param caption: Animation caption (may also be used when resending animation by file_id), 0-1024 characters
    :type caption: :obj:`typing.Union[base.String, None]`
    :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,
        fixed-width text or inline URLs in the media caption
    :type parse_mode: :obj:`typing.Union[base.String, None]`
    :param disable_notification: Sends the message silently. Users will receive a notification with no sound
    :type disable_notification: :obj:`typing.Union[base.Boolean, None]`
    :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard,
        custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user
    :type reply_markup: :obj:`typing.Union[typing.Union[types.InlineKeyboardMarkup,
        types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply], None]`
    :param reply: fill 'reply_to_message_id'
    :return: On success, the sent Message is returned
    :rtype: :obj:`types.Message`
    """
    warn_deprecated('"Message.send_animation" method will be removed in 2.2 version.\n'
                    'Use "Message.reply_animation" instead.',
                    stacklevel=8)

    return await self.bot.send_animation(self.chat.id,
                                         animation=animation,
                                         duration=duration,
                                         width=width,
                                         height=height,
                                         thumb=thumb,
                                         caption=caption,
                                         parse_mode=parse_mode,
                                         disable_notification=disable_notification,
                                         reply_to_message_id=self.message_id if reply else None,
                                         reply_markup=reply_markup)
[ "async", "def", "send_animation", "(", "self", ",", "animation", ":", "typing", ".", "Union", "[", "base", ".", "InputFile", ",", "base", ".", "String", "]", ",", "duration", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "None", "]", "=", "None", ",", "width", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "None", "]", "=", "None", ",", "height", ":", "typing", ".", "Union", "[", "base", ".", "Integer", ",", "None", "]", "=", "None", ",", "thumb", ":", "typing", ".", "Union", "[", "typing", ".", "Union", "[", "base", ".", "InputFile", ",", "base", ".", "String", "]", ",", "None", "]", "=", "None", ",", "caption", ":", "typing", ".", "Union", "[", "base", ".", "String", ",", "None", "]", "=", "None", ",", "parse_mode", ":", "typing", ".", "Union", "[", "base", ".", "String", ",", "None", "]", "=", "None", ",", "disable_notification", ":", "typing", ".", "Union", "[", "base", ".", "Boolean", ",", "None", "]", "=", "None", ",", "reply_markup", "=", "None", ",", "reply", "=", "True", ")", "->", "Message", ":", "warn_deprecated", "(", "'\"Message.send_animation\" method will be removed in 2.2 version.\\n'", "'Use \"Message.reply_animation\" instead.'", ",", "stacklevel", "=", "8", ")", "return", "await", "self", ".", "bot", ".", "send_animation", "(", "self", ".", "chat", ".", "id", ",", "animation", "=", "animation", ",", "duration", "=", "duration", ",", "width", "=", "width", ",", "height", "=", "height", ",", "thumb", "=", "thumb", ",", "caption", "=", "caption", ",", "parse_mode", "=", "parse_mode", ",", "disable_notification", "=", "disable_notification", ",", "reply_to_message_id", "=", "self", ".", "message_id", "if", "reply", "else", "None", ",", "reply_markup", "=", "reply_markup", ")" ]
Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound).

On success, the sent Message is returned.
Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future.

Source https://core.telegram.org/bots/api#sendanimation

:param animation: Animation to send. Pass a file_id as String to send an animation that exists
    on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an
    animation from the Internet, or upload a new animation using multipart/form-data
:type animation: :obj:`typing.Union[base.InputFile, base.String]`
:param duration: Duration of sent animation in seconds
:type duration: :obj:`typing.Union[base.Integer, None]`
:param width: Animation width
:type width: :obj:`typing.Union[base.Integer, None]`
:param height: Animation height
:type height: :obj:`typing.Union[base.Integer, None]`
:param thumb: Thumbnail of the file sent. The thumbnail should be in JPEG format and less than
    200 kB in size. A thumbnail's width and height should not exceed 90.
:type thumb: :obj:`typing.Union[typing.Union[base.InputFile, base.String], None]`
:param caption: Animation caption (may also be used when resending animation by file_id), 0-1024 characters
:type caption: :obj:`typing.Union[base.String, None]`
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,
    fixed-width text or inline URLs in the media caption
:type parse_mode: :obj:`typing.Union[base.String, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard,
    custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user
:type reply_markup: :obj:`typing.Union[typing.Union[types.InlineKeyboardMarkup,
    types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply], None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned
:rtype: :obj:`types.Message`
[ "Use", "this", "method", "to", "send", "animation", "files", "(", "GIF", "or", "H", ".", "264", "/", "MPEG", "-", "4", "AVC", "video", "without", "sound", ")", "." ]
python
train
67.048387
nicolargo/glances
glances/plugins/glances_plugin.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_plugin.py#L162-L167
def reset_stats_history(self):
    """Reset the stats history (dict of GlancesAttribute)."""
    if self.history_enable():
        reset_list = [a['name'] for a in self.get_items_history_list()]
        logger.debug("Reset history for plugin {} (items: {})".format(self.plugin_name, reset_list))
        self.stats_history.reset()
[ "def", "reset_stats_history", "(", "self", ")", ":", "if", "self", ".", "history_enable", "(", ")", ":", "reset_list", "=", "[", "a", "[", "'name'", "]", "for", "a", "in", "self", ".", "get_items_history_list", "(", ")", "]", "logger", ".", "debug", "(", "\"Reset history for plugin {} (items: {})\"", ".", "format", "(", "self", ".", "plugin_name", ",", "reset_list", ")", ")", "self", ".", "stats_history", ".", "reset", "(", ")" ]
Reset the stats history (dict of GlancesAttribute).
[ "Reset", "the", "stats", "history", "(", "dict", "of", "GlancesAttribute", ")", "." ]
python
train
57.5
buriburisuri/sugartensor
sugartensor/sg_transform.py
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L533-L547
def sg_lookup(tensor, opt):
    r"""Looks up the `tensor`, which is the embedding matrix.

    Args:
      tensor: A tensor ( automatically given by chain )
      opt:
        emb: A 2-D `Tensor`. An embedding matrix.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.

    """
    assert opt.emb is not None, 'emb is mandatory.'
    return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name)
[ "def", "sg_lookup", "(", "tensor", ",", "opt", ")", ":", "assert", "opt", ".", "emb", "is", "not", "None", ",", "'emb is mandatory.'", "return", "tf", ".", "nn", ".", "embedding_lookup", "(", "opt", ".", "emb", ",", "tensor", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Looks up the `tensor`, which is the embedding matrix. Args: tensor: A tensor ( automatically given by chain ) opt: emb: A 2-D `Tensor`. An embedding matrix. name: If provided, replace current tensor's name. Returns: A `Tensor`.
[ "r", "Looks", "up", "the", "tensor", "which", "is", "the", "embedding", "matrix", "." ]
python
train
28.666667
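A minimal sketch of the embedding lookup sg_lookup wraps, in plain TF 1.x style (the shapes and variable names here are made up; the sugartensor chained call in the last comment is an assumption about how the wrapper is invoked):

import tensorflow as tf  # TF 1.x, as targeted by sugartensor

# Hypothetical shapes: a vocabulary of 5 symbols embedded in 3 dimensions.
emb = tf.get_variable('emb', shape=[5, 3])     # the embedding matrix
ids = tf.constant([[0, 2], [4, 1]])            # batch of token ids

looked_up = tf.nn.embedding_lookup(emb, ids)   # result shape (2, 2, 3)
# In sugartensor chain notation this would read: ids.sg_lookup(emb=emb)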
SHDShim/pytheos
pytheos/eqn_vinet.py
https://github.com/SHDShim/pytheos/blob/be079624405e92fbec60c5ead253eb5917e55237/pytheos/eqn_vinet.py#L139-L151
def vinet_k_num(v, v0, k0, k0p, precision=1.e-5):
    """
    calculate bulk modulus numerically from volume, not pressure
    according to test, this differs from analytical result by 1.e-5

    :param v: unit-cell volume in A^3
    :param v0: unit-cell volume in A^3 at 1 bar
    :param k0: bulk modulus at reference conditions
    :param k0p: pressure derivative of bulk modulus at reference conditions
    :param precision: precision for numerical calc (default = 1.e-5 * v0)
    :return: bulk modulus (= -V * dP/dV)
    """
    return -1. * v * vinet_dPdV(v, v0, k0, k0p, precision=precision)
[ "def", "vinet_k_num", "(", "v", ",", "v0", ",", "k0", ",", "k0p", ",", "precision", "=", "1.e-5", ")", ":", "return", "-", "1.", "*", "v", "*", "vinet_dPdV", "(", "v", ",", "v0", ",", "k0", ",", "k0p", ",", "precision", "=", "precision", ")" ]
calculate bulk modulus numerically from volume, not pressure
according to test, this differs from analytical result by 1.e-5

:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param k0: bulk modulus at reference conditions
:param k0p: pressure derivative of bulk modulus at reference conditions
:param precision: precision for numerical calc (default = 1.e-5 * v0)
:return: bulk modulus (= -V * dP/dV)
[ "calculate", "bulk", "modulus", "numerically", "from", "volume", "not", "pressure", "according", "to", "test", "this", "differs", "from", "analytical", "result", "by", "1", ".", "e", "-", "5" ]
python
train
43.230769
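A self-contained sketch of what a numerical bulk modulus like vinet_k_num computes, assuming the standard Vinet pressure form and a simple central difference (pytheos' own vinet_dPdV may use a different scheme; names and values here are illustrative):

import numpy as np

def vinet_p(v, v0, k0, k0p):
    # Standard Vinet equation of state, stated here as an assumption.
    x = (v / v0) ** (1. / 3.)
    return 3. * k0 * (1. - x) / x ** 2 * np.exp(1.5 * (k0p - 1.) * (1. - x))

def bulk_modulus_num(v, v0, k0, k0p, eps=1.e-5):
    # K = -V dP/dV, with dP/dV from a central difference of width eps * v0.
    h = eps * v0
    dpdv = (vinet_p(v + h, v0, k0, k0p) - vinet_p(v - h, v0, k0, k0p)) / (2. * h)
    return -v * dpdv

# Sanity check: at v == v0 the numerical bulk modulus should recover k0.
print(bulk_modulus_num(162.4, 162.4, 260., 4.))  # ~260, to numerical precision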
idlesign/django-sitecats
sitecats/utils.py
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/utils.py#L150-L158
def get_child_ids(self, parent_alias):
    """Returns child IDs of the given parent category

    :param str parent_alias: Parent category alias
    :rtype: list
    :return: a list of child IDs
    """
    self._cache_init()
    return self._cache_get_entry(self.CACHE_NAME_PARENTS, parent_alias, [])
[ "def", "get_child_ids", "(", "self", ",", "parent_alias", ")", ":", "self", ".", "_cache_init", "(", ")", "return", "self", ".", "_cache_get_entry", "(", "self", ".", "CACHE_NAME_PARENTS", ",", "parent_alias", ",", "[", "]", ")" ]
Returns child IDs of the given parent category

:param str parent_alias: Parent category alias
:rtype: list
:return: a list of child IDs
[ "Returns", "child", "IDs", "of", "the", "given", "parent", "category" ]
python
train
35.666667
cjdrake/pyeda
pyeda/parsing/boolexpr.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/parsing/boolexpr.py#L605-L619
def _variable(lexer):
    """Return a variable expression."""
    names = _names(lexer)

    tok = next(lexer)
    # NAMES '[' ... ']'
    if isinstance(tok, LBRACK):
        indices = _indices(lexer)
        _expect_token(lexer, {RBRACK})
    # NAMES
    else:
        lexer.unpop_token(tok)
        indices = tuple()

    return ('var', names, indices)
[ "def", "_variable", "(", "lexer", ")", ":", "names", "=", "_names", "(", "lexer", ")", "tok", "=", "next", "(", "lexer", ")", "# NAMES '[' ... ']'", "if", "isinstance", "(", "tok", ",", "LBRACK", ")", ":", "indices", "=", "_indices", "(", "lexer", ")", "_expect_token", "(", "lexer", ",", "{", "RBRACK", "}", ")", "# NAMES", "else", ":", "lexer", ".", "unpop_token", "(", "tok", ")", "indices", "=", "tuple", "(", ")", "return", "(", "'var'", ",", "names", ",", "indices", ")" ]
Return a variable expression.
[ "Return", "a", "variable", "expression", "." ]
python
train
22.666667
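A minimal illustration of the one-token pushback pattern that unpop_token relies on above (a hypothetical lexer class; pyeda's real lexer has more machinery):

class Lexer:
    def __init__(self, tokens):
        self._tokens = list(tokens)
        self._pushed = []

    def __next__(self):
        # Raises IndexError when exhausted -- fine for a sketch.
        return self._pushed.pop() if self._pushed else self._tokens.pop(0)

    def unpop_token(self, tok):
        # Put a token back so the next call to next() returns it again.
        self._pushed.append(tok)

lexer = Lexer(['a', '+', 'b'])
tok = next(lexer)        # 'a'
lexer.unpop_token(tok)   # peeked, not consumed
print(next(lexer))       # 'a' again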
DataDog/integrations-core
kubelet/datadog_checks/kubelet/prometheus.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/kubelet/datadog_checks/kubelet/prometheus.py#L267-L298
def _process_container_metric(self, type, metric_name, metric, scraper_config):
    """
    Takes a simple metric about a container, reports it as a rate or gauge.
    If several series are found for a given container, values are summed before submission.
    """
    if metric.type not in METRIC_TYPES:
        self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name))
        return

    samples = self._sum_values_by_context(metric, self._get_entity_id_if_container_metric)
    for c_id, sample in iteritems(samples):
        pod_uid = self._get_pod_uid(sample[self.SAMPLE_LABELS])
        if self.pod_list_utils.is_excluded(c_id, pod_uid):
            continue

        tags = tagger.tag(c_id, tagger.HIGH)
        tags += scraper_config['custom_tags']

        # FIXME we are forced to do that because the Kubelet PodList isn't updated
        # for static pods, see https://github.com/kubernetes/kubernetes/pull/59948
        pod = self._get_pod_by_metric_label(sample[self.SAMPLE_LABELS])
        if pod is not None and is_static_pending_pod(pod):
            tags += tagger.tag('kubernetes_pod://%s' % pod["metadata"]["uid"], tagger.HIGH)

        tags += self._get_kube_container_name(sample[self.SAMPLE_LABELS])
        tags = list(set(tags))

        val = sample[self.SAMPLE_VALUE]

        if "rate" == type:
            self.rate(metric_name, val, tags)
        elif "gauge" == type:
            self.gauge(metric_name, val, tags)
[ "def", "_process_container_metric", "(", "self", ",", "type", ",", "metric_name", ",", "metric", ",", "scraper_config", ")", ":", "if", "metric", ".", "type", "not", "in", "METRIC_TYPES", ":", "self", ".", "log", ".", "error", "(", "\"Metric type %s unsupported for metric %s\"", "%", "(", "metric", ".", "type", ",", "metric", ".", "name", ")", ")", "return", "samples", "=", "self", ".", "_sum_values_by_context", "(", "metric", ",", "self", ".", "_get_entity_id_if_container_metric", ")", "for", "c_id", ",", "sample", "in", "iteritems", "(", "samples", ")", ":", "pod_uid", "=", "self", ".", "_get_pod_uid", "(", "sample", "[", "self", ".", "SAMPLE_LABELS", "]", ")", "if", "self", ".", "pod_list_utils", ".", "is_excluded", "(", "c_id", ",", "pod_uid", ")", ":", "continue", "tags", "=", "tagger", ".", "tag", "(", "c_id", ",", "tagger", ".", "HIGH", ")", "tags", "+=", "scraper_config", "[", "'custom_tags'", "]", "# FIXME we are forced to do that because the Kubelet PodList isn't updated", "# for static pods, see https://github.com/kubernetes/kubernetes/pull/59948", "pod", "=", "self", ".", "_get_pod_by_metric_label", "(", "sample", "[", "self", ".", "SAMPLE_LABELS", "]", ")", "if", "pod", "is", "not", "None", "and", "is_static_pending_pod", "(", "pod", ")", ":", "tags", "+=", "tagger", ".", "tag", "(", "'kubernetes_pod://%s'", "%", "pod", "[", "\"metadata\"", "]", "[", "\"uid\"", "]", ",", "tagger", ".", "HIGH", ")", "tags", "+=", "self", ".", "_get_kube_container_name", "(", "sample", "[", "self", ".", "SAMPLE_LABELS", "]", ")", "tags", "=", "list", "(", "set", "(", "tags", ")", ")", "val", "=", "sample", "[", "self", ".", "SAMPLE_VALUE", "]", "if", "\"rate\"", "==", "type", ":", "self", ".", "rate", "(", "metric_name", ",", "val", ",", "tags", ")", "elif", "\"gauge\"", "==", "type", ":", "self", ".", "gauge", "(", "metric_name", ",", "val", ",", "tags", ")" ]
Takes a simple metric about a container, reports it as a rate or gauge. If several series are found for a given container, values are summed before submission.
[ "Takes", "a", "simple", "metric", "about", "a", "container", "reports", "it", "as", "a", "rate", "or", "gauge", ".", "If", "several", "series", "are", "found", "for", "a", "given", "container", "values", "are", "summed", "before", "submission", "." ]
python
train
48.5625
inasafe/inasafe
safe/gui/tools/print_report_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/print_report_dialog.py#L368-L409
def open_in_composer(self):
    """Open in layout designer a given MapReport instance.

    .. versionadded: 4.3.0
    """
    impact_layer = self.impact_function.analysis_impacted
    report_path = dirname(impact_layer.source())
    impact_report = self.impact_function.impact_report
    custom_map_report_metadata = impact_report.metadata

    custom_map_report_product = (
        custom_map_report_metadata.component_by_tags(
            [final_product_tag, pdf_product_tag]))

    for template_path in self.retrieve_paths(
            custom_map_report_product, report_path=report_path,
            suffix='.qpt'):
        layout = QgsPrintLayout(QgsProject.instance())

        with open(template_path) as template_file:
            template_content = template_file.read()

        document = QtXml.QDomDocument()
        document.setContent(template_content)

        # load layout object
        rwcontext = QgsReadWriteContext()
        load_status = layout.loadFromTemplate(document, rwcontext)

        if not load_status:
            # noinspection PyCallByClass,PyTypeChecker
            QtWidgets.QMessageBox.warning(
                self,
                tr('InaSAFE'),
                tr('Error loading template: %s') % template_path)
            return

        QgsProject.instance().layoutManager().addLayout(layout)
        self.iface.openLayoutDesigner(layout)
[ "def", "open_in_composer", "(", "self", ")", ":", "impact_layer", "=", "self", ".", "impact_function", ".", "analysis_impacted", "report_path", "=", "dirname", "(", "impact_layer", ".", "source", "(", ")", ")", "impact_report", "=", "self", ".", "impact_function", ".", "impact_report", "custom_map_report_metadata", "=", "impact_report", ".", "metadata", "custom_map_report_product", "=", "(", "custom_map_report_metadata", ".", "component_by_tags", "(", "[", "final_product_tag", ",", "pdf_product_tag", "]", ")", ")", "for", "template_path", "in", "self", ".", "retrieve_paths", "(", "custom_map_report_product", ",", "report_path", "=", "report_path", ",", "suffix", "=", "'.qpt'", ")", ":", "layout", "=", "QgsPrintLayout", "(", "QgsProject", ".", "instance", "(", ")", ")", "with", "open", "(", "template_path", ")", "as", "template_file", ":", "template_content", "=", "template_file", ".", "read", "(", ")", "document", "=", "QtXml", ".", "QDomDocument", "(", ")", "document", ".", "setContent", "(", "template_content", ")", "# load layout object", "rwcontext", "=", "QgsReadWriteContext", "(", ")", "load_status", "=", "layout", ".", "loadFromTemplate", "(", "document", ",", "rwcontext", ")", "if", "not", "load_status", ":", "# noinspection PyCallByClass,PyTypeChecker", "QtWidgets", ".", "QMessageBox", ".", "warning", "(", "self", ",", "tr", "(", "'InaSAFE'", ")", ",", "tr", "(", "'Error loading template: %s'", ")", "%", "template_path", ")", "return", "QgsProject", ".", "instance", "(", ")", ".", "layoutManager", "(", ")", ".", "addLayout", "(", "layout", ")", "self", ".", "iface", ".", "openLayoutDesigner", "(", "layout", ")" ]
Open in layout designer a given MapReport instance. .. versionadded: 4.3.0
[ "Open", "in", "layout", "designer", "a", "given", "MapReport", "instance", "." ]
python
train
35.166667
hobson/aima
aima/learning.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/learning.py#L478-L483
def replicated_dataset(dataset, weights, n=None):
    "Copy dataset, replicating each example in proportion to its weight."
    n = n or len(dataset.examples)
    result = copy.copy(dataset)
    result.examples = weighted_replicate(dataset.examples, weights, n)
    return result
[ "def", "replicated_dataset", "(", "dataset", ",", "weights", ",", "n", "=", "None", ")", ":", "n", "=", "n", "or", "len", "(", "dataset", ".", "examples", ")", "result", "=", "copy", ".", "copy", "(", "dataset", ")", "result", ".", "examples", "=", "weighted_replicate", "(", "dataset", ".", "examples", ",", "weights", ",", "n", ")", "return", "result" ]
Copy dataset, replicating each example in proportion to its weight.
[ "Copy", "dataset", "replicating", "each", "example", "in", "proportion", "to", "its", "weight", "." ]
python
valid
45.666667
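A simplified sketch of the weighted_replicate helper this function delegates to (AIMA's real implementation also distributes the fractional remainders; this version just rounds, so the total can differ slightly from n):

def weighted_replicate_sketch(examples, weights, n):
    # Copy each example roughly in proportion to its normalized weight.
    total = sum(weights)
    counts = [int(round(w / total * n)) for w in weights]
    result = []
    for example, count in zip(examples, counts):
        result.extend([example] * count)
    return result

print(weighted_replicate_sketch(['a', 'b', 'c'], [0.5, 0.25, 0.25], 8))
# ['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c']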
python-diamond/Diamond
src/collectors/mountstats/mountstats.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/mountstats/mountstats.py#L108-L195
def collect(self):
    """Collect statistics from /proc/self/mountstats.

    Currently, we do fairly naive parsing and do not actually check
    the statvers value returned by mountstats.
    """
    if str_to_bool(self.config['use_sudo']):
        if not os.access(self.config['sudo_cmd'], os.X_OK):
            self.log.error("Cannot find or exec %s"
                           % self.config['sudo_cmd'])
            return None

        command = [self.config['sudo_cmd'], '/bin/cat', self.MOUNTSTATS]
        p = subprocess.Popen(command,
                             stdout=subprocess.PIPE).communicate()[0][:-1]
        lines = p.split("\n")
    else:
        if not os.access(self.MOUNTSTATS, os.R_OK):
            self.log.error("Cannot read path %s" % self.MOUNTSTATS)
            return None

        f = open(self.MOUNTSTATS)
        lines = f.readlines()
        f.close()

    path = None
    for line in lines:
        tokens = line.split()
        if len(tokens) == 0:
            continue

        if tokens[0] == 'device':
            path = tokens[4]

            skip = False
            if self.exclude_reg:
                skip = self.exclude_reg.match(path)
            if self.include_reg:
                skip = not self.include_reg.match(path)

            if skip:
                self.log.debug("Ignoring %s", path)
            else:
                self.log.debug("Keeping %s", path)

            path = path.replace('.', '_')
            path = path.replace('/', '_')
        elif skip:
            # If we are in a skip state, don't pay any attention to
            # anything that isn't the next device line
            continue
        elif tokens[0] == 'events:':
            for i in range(0, len(self.EVENTS_MAP)):
                metric_name = "%s.events.%s" % (path, self.EVENTS_MAP[i])
                metric_value = long(tokens[i + 1])
                self.publish_counter(metric_name, metric_value)
        elif tokens[0] == 'bytes:':
            for i in range(0, len(self.BYTES_MAP)):
                metric_name = "%s.bytes.%s" % (path, self.BYTES_MAP[i])
                metric_value = long(tokens[i + 1])
                self.publish_counter(metric_name, metric_value)
        elif tokens[0] == 'xprt:':
            proto = tokens[1]
            if not self.XPRT_MAP[proto]:
                self.log.error("Unknown protocol %s", proto)
                continue

            for i in range(0, len(self.XPRT_MAP[proto])):
                metric_name = "%s.xprt.%s.%s" % (path, proto,
                                                 self.XPRT_MAP[proto][i])
                metric_value = long(tokens[i + 2])
                self.publish_counter(metric_name, metric_value)
        elif tokens[0][:-1] in self.RPCS_MAP:
            rpc = tokens[0][:-1]
            ops = long(tokens[1])
            rtt = long(tokens[7])
            exe = long(tokens[8])

            metric_fmt = "%s.rpc.%s.%s"
            ops_name = metric_fmt % (path, rpc.lower(), 'ops')
            rtt_name = metric_fmt % (path, rpc.lower(), 'rtt')
            exe_name = metric_fmt % (path, rpc.lower(), 'exe')

            self.publish_counter(ops_name, ops)
            self.publish_counter(rtt_name, rtt)
            self.publish_counter(exe_name, exe)
[ "def", "collect", "(", "self", ")", ":", "if", "str_to_bool", "(", "self", ".", "config", "[", "'use_sudo'", "]", ")", ":", "if", "not", "os", ".", "access", "(", "self", ".", "config", "[", "'sudo_cmd'", "]", ",", "os", ".", "X_OK", ")", ":", "self", ".", "log", ".", "error", "(", "\"Cannot find or exec %s\"", "%", "self", ".", "config", "[", "'sudo_cmd'", "]", ")", "return", "None", "command", "=", "[", "self", ".", "config", "[", "'sudo_cmd'", "]", ",", "'/bin/cat'", ",", "self", ".", "MOUNTSTATS", "]", "p", "=", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", ")", "[", "0", "]", "[", ":", "-", "1", "]", "lines", "=", "p", ".", "split", "(", "\"\\n\"", ")", "else", ":", "if", "not", "os", ".", "access", "(", "self", ".", "MOUNTSTATS", ",", "os", ".", "R_OK", ")", ":", "self", ".", "log", ".", "error", "(", "\"Cannot read path %s\"", "%", "self", ".", "MOUNTSTATS", ")", "return", "None", "f", "=", "open", "(", "self", ".", "MOUNTSTATS", ")", "lines", "=", "f", ".", "readlines", "(", ")", "f", ".", "close", "(", ")", "path", "=", "None", "for", "line", "in", "lines", ":", "tokens", "=", "line", ".", "split", "(", ")", "if", "len", "(", "tokens", ")", "==", "0", ":", "continue", "if", "tokens", "[", "0", "]", "==", "'device'", ":", "path", "=", "tokens", "[", "4", "]", "skip", "=", "False", "if", "self", ".", "exclude_reg", ":", "skip", "=", "self", ".", "exclude_reg", ".", "match", "(", "path", ")", "if", "self", ".", "include_reg", ":", "skip", "=", "not", "self", ".", "include_reg", ".", "match", "(", "path", ")", "if", "skip", ":", "self", ".", "log", ".", "debug", "(", "\"Ignoring %s\"", ",", "path", ")", "else", ":", "self", ".", "log", ".", "debug", "(", "\"Keeping %s\"", ",", "path", ")", "path", "=", "path", ".", "replace", "(", "'.'", ",", "'_'", ")", "path", "=", "path", ".", "replace", "(", "'/'", ",", "'_'", ")", "elif", "skip", ":", "# If we are in a skip state, don't pay any attention to", "# anything that isn't the next device line", "continue", "elif", "tokens", "[", "0", "]", "==", "'events:'", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "self", ".", "EVENTS_MAP", ")", ")", ":", "metric_name", "=", "\"%s.events.%s\"", "%", "(", "path", ",", "self", ".", "EVENTS_MAP", "[", "i", "]", ")", "metric_value", "=", "long", "(", "tokens", "[", "i", "+", "1", "]", ")", "self", ".", "publish_counter", "(", "metric_name", ",", "metric_value", ")", "elif", "tokens", "[", "0", "]", "==", "'bytes:'", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "self", ".", "BYTES_MAP", ")", ")", ":", "metric_name", "=", "\"%s.bytes.%s\"", "%", "(", "path", ",", "self", ".", "BYTES_MAP", "[", "i", "]", ")", "metric_value", "=", "long", "(", "tokens", "[", "i", "+", "1", "]", ")", "self", ".", "publish_counter", "(", "metric_name", ",", "metric_value", ")", "elif", "tokens", "[", "0", "]", "==", "'xprt:'", ":", "proto", "=", "tokens", "[", "1", "]", "if", "not", "self", ".", "XPRT_MAP", "[", "proto", "]", ":", "self", ".", "log", ".", "error", "(", "\"Unknown protocol %s\"", ",", "proto", ")", "continue", "for", "i", "in", "range", "(", "0", ",", "len", "(", "self", ".", "XPRT_MAP", "[", "proto", "]", ")", ")", ":", "metric_name", "=", "\"%s.xprt.%s.%s\"", "%", "(", "path", ",", "proto", ",", "self", ".", "XPRT_MAP", "[", "proto", "]", "[", "i", "]", ")", "metric_value", "=", "long", "(", "tokens", "[", "i", "+", "2", "]", ")", "self", ".", "publish_counter", "(", "metric_name", ",", "metric_value", 
")", "elif", "tokens", "[", "0", "]", "[", ":", "-", "1", "]", "in", "self", ".", "RPCS_MAP", ":", "rpc", "=", "tokens", "[", "0", "]", "[", ":", "-", "1", "]", "ops", "=", "long", "(", "tokens", "[", "1", "]", ")", "rtt", "=", "long", "(", "tokens", "[", "7", "]", ")", "exe", "=", "long", "(", "tokens", "[", "8", "]", ")", "metric_fmt", "=", "\"%s.rpc.%s.%s\"", "ops_name", "=", "metric_fmt", "%", "(", "path", ",", "rpc", ".", "lower", "(", ")", ",", "'ops'", ")", "rtt_name", "=", "metric_fmt", "%", "(", "path", ",", "rpc", ".", "lower", "(", ")", ",", "'rtt'", ")", "exe_name", "=", "metric_fmt", "%", "(", "path", ",", "rpc", ".", "lower", "(", ")", ",", "'exe'", ")", "self", ".", "publish_counter", "(", "ops_name", ",", "ops", ")", "self", ".", "publish_counter", "(", "rtt_name", ",", "rtt", ")", "self", ".", "publish_counter", "(", "exe_name", ",", "exe", ")" ]
Collect statistics from /proc/self/mountstats. Currently, we do fairly naive parsing and do not actually check the statvers value returned by mountstats.
[ "Collect", "statistics", "from", "/", "proc", "/", "self", "/", "mountstats", "." ]
python
train
39.579545
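Note: the per-RPC branch of the collect method above reads tokens[1], tokens[7], and tokens[8] as the operation count, cumulative round-trip time, and cumulative execution time of each RPC statistics line in /proc/self/mountstats. The following is a minimal, self-contained sketch of just that parsing step; the sample line, the RPCS set, and the publish() stub are hypothetical stand-ins (they are not part of the original collector), and Python 3's int() replaces the Python 2 long() used above.

# Minimal sketch: parse one per-RPC statistics line from /proc/self/mountstats.
# The sample line, RPCS set, and publish() stub are hypothetical illustrations;
# int() stands in for the Python 2 long() used in the original collector.
RPCS = {"READ", "WRITE", "GETATTR"}  # assumed subset of the collector's RPCS_MAP

def publish(name, value):
    print(name, value)

line = "READ: 120 0 0 61440 491520 5 340 360"  # made-up sample values
tokens = line.split()
op = tokens[0][:-1]  # strip the trailing ':' as the collector does
if op in RPCS:
    ops = int(tokens[1])  # operation count
    rtt = int(tokens[7])  # cumulative round-trip time
    exe = int(tokens[8])  # cumulative execution time
    for suffix, value in (("ops", ops), ("rtt", rtt), ("exe", exe)):
        publish("nfs.rpc.%s.%s" % (op.lower(), suffix), value)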
bpsmith/tia
tia/analysis/talib_wrapper.py
https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/talib_wrapper.py#L475-L477
def MFI(frame, n=14, high_col='high', low_col='low', close_col='close', vol_col='Volume'): """money flow index""" return _frame_to_series(frame, [high_col, low_col, close_col, vol_col], talib.MFI, n)
[ "def", "MFI", "(", "frame", ",", "n", "=", "14", ",", "high_col", "=", "'high'", ",", "low_col", "=", "'low'", ",", "close_col", "=", "'close'", ",", "vol_col", "=", "'Volume'", ")", ":", "return", "_frame_to_series", "(", "frame", ",", "[", "high_col", ",", "low_col", ",", "close_col", ",", "vol_col", "]", ",", "talib", ".", "MFI", ",", "n", ")" ]
money flow index
[ "money", "flow", "index" ]
python
train
68.333333
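A hedged usage sketch for the MFI wrapper above, assuming TA-Lib and pandas are installed and that the frame uses the wrapper's default column names; the OHLCV values are randomly generated for illustration, and the direct talib.MFI call shown is roughly what _frame_to_series delegates to.

import numpy as np
import pandas as pd
import talib

# Hypothetical OHLCV frame matching the wrapper's default column names.
rng = np.random.default_rng(0)
frame = pd.DataFrame({
    "high": rng.uniform(101, 105, 50),
    "low": rng.uniform(95, 99, 50),
    "close": rng.uniform(99, 101, 50),
    "Volume": rng.uniform(1e5, 1e6, 50),
})

# Roughly what the wrapper reduces to: a direct 14-period TA-Lib call.
mfi = talib.MFI(frame["high"].values, frame["low"].values,
                frame["close"].values, frame["Volume"].values, timeperiod=14)
print(mfi[-5:])  # the first n values are NaN while the lookback window fills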
Dallinger/Dallinger
demos/dlgr/demos/bartlett1932/experiment.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/demos/dlgr/demos/bartlett1932/experiment.py#L84-L111
def participate(self): """Finish reading and send text""" try: logger.info("Entering participate method") ready = WebDriverWait(self.driver, 10).until( EC.element_to_be_clickable((By.ID, "finish-reading")) ) stimulus = self.driver.find_element_by_id("stimulus") story = stimulus.find_element_by_id("story") story_text = story.text logger.info("Stimulus text:") logger.info(story_text) ready.click() submit = WebDriverWait(self.driver, 10).until( EC.element_to_be_clickable((By.ID, "submit-response")) ) textarea = WebDriverWait(self.driver, 10).until( EC.element_to_be_clickable((By.ID, "reproduction")) ) textarea.clear() text = self.transform_text(story_text) logger.info("Transformed text:") logger.info(text) textarea.send_keys(text) submit.click() return True except TimeoutException: return False
[ "def", "participate", "(", "self", ")", ":", "try", ":", "logger", ".", "info", "(", "\"Entering participate method\"", ")", "ready", "=", "WebDriverWait", "(", "self", ".", "driver", ",", "10", ")", ".", "until", "(", "EC", ".", "element_to_be_clickable", "(", "(", "By", ".", "ID", ",", "\"finish-reading\"", ")", ")", ")", "stimulus", "=", "self", ".", "driver", ".", "find_element_by_id", "(", "\"stimulus\"", ")", "story", "=", "stimulus", ".", "find_element_by_id", "(", "\"story\"", ")", "story_text", "=", "story", ".", "text", "logger", ".", "info", "(", "\"Stimulus text:\"", ")", "logger", ".", "info", "(", "story_text", ")", "ready", ".", "click", "(", ")", "submit", "=", "WebDriverWait", "(", "self", ".", "driver", ",", "10", ")", ".", "until", "(", "EC", ".", "element_to_be_clickable", "(", "(", "By", ".", "ID", ",", "\"submit-response\"", ")", ")", ")", "textarea", "=", "WebDriverWait", "(", "self", ".", "driver", ",", "10", ")", ".", "until", "(", "EC", ".", "element_to_be_clickable", "(", "(", "By", ".", "ID", ",", "\"reproduction\"", ")", ")", ")", "textarea", ".", "clear", "(", ")", "text", "=", "self", ".", "transform_text", "(", "story_text", ")", "logger", ".", "info", "(", "\"Transformed text:\"", ")", "logger", ".", "info", "(", "text", ")", "textarea", ".", "send_keys", "(", "text", ")", "submit", ".", "click", "(", ")", "return", "True", "except", "TimeoutException", ":", "return", "False" ]
Finish reading and send text
[ "Finish", "reading", "and", "send", "text" ]
python
train
39.25
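The participate method above leans on Selenium's explicit-wait idiom: WebDriverWait(driver, 10).until(EC.element_to_be_clickable(...)) polls for up to 10 seconds, returns the element once it is clickable, and raises TimeoutException otherwise, which the method converts into a False return. Below is a minimal standalone sketch of that pattern; the URL is hypothetical, while the element id is taken from the code above.

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

driver = webdriver.Chrome()
driver.get("http://localhost:5000/exp")  # hypothetical experiment URL
try:
    # Poll for up to 10 seconds until the button is clickable, then click it;
    # on timeout, report failure the same way participate() does.
    ready = WebDriverWait(driver, 10).until(
        EC.element_to_be_clickable((By.ID, "finish-reading"))
    )
    ready.click()
    succeeded = True
except TimeoutException:
    succeeded = False
finally:
    driver.quit()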