code (string, 4–4.48k chars) | docstring (string, 1–6.45k chars) | _id (string, 24 chars) |
---|---|---|
class Attack: <NEW_LINE> <INDENT> def __init__(self, value, element=None): <NEW_LINE> <INDENT> if element is None: <NEW_LINE> <INDENT> element = Element.physical if isinstance(value, int) else Element.summoner <NEW_LINE> <DEDENT> assert isinstance(element, Element) <NEW_LINE> assert isinstance(value, int) or (element == Element.summoner and isinstance(value, EnemyCategory)) <NEW_LINE> self.element = element <NEW_LINE> self.value = value | An enemy attack. It consists of an element and a value. The value is either the amount of damage
or -- if element == Element.summoner -- the EnemyCategory of the summoned enemy. *element* defaults to
physical or summoner attack, depending on *value*. | 6259903d45492302aabfd700 |
class Quant(Model): <NEW_LINE> <INDENT> __tablename__ = 'storm_stock_quant' <NEW_LINE> __table_args__ = ( sa.UniqueConstraint('location_id', 'lot_id'), ) <NEW_LINE> id = sa.Column(sa.Integer, primary_key=True) <NEW_LINE> location_id = sa.Column(sa.Integer, sa.ForeignKey(Location.id), nullable=False) <NEW_LINE> lot_id = sa.Column(sa.Integer, sa.ForeignKey(Lot.id), nullable=True) <NEW_LINE> quantity = sa.Column(sa.Numeric(18, 6), nullable=False) <NEW_LINE> location = relationship(Location, backref=backref('quants')) <NEW_LINE> lot = relationship(Lot, backref=backref('quants')) <NEW_LINE> stock_unit = association_proxy('lot', 'stock_unit') | The amount of products belonging to a certain lot in a given location. | 6259903db830903b9686ed8d |
class KikIOSMessageEventData(events.EventData): <NEW_LINE> <INDENT> DATA_TYPE = 'ios:kik:messaging' <NEW_LINE> def __init__(self): <NEW_LINE> <INDENT> super(KikIOSMessageEventData, self).__init__(data_type=self.DATA_TYPE) <NEW_LINE> self.body = None <NEW_LINE> self.displayname = None <NEW_LINE> self.message_status = None <NEW_LINE> self.message_type = None <NEW_LINE> self.offset = None <NEW_LINE> self.query = None <NEW_LINE> self.username = None | Kik message event data.
Attributes:
body (str): content of the message.
message_status (str): message status, such as:
read, unread, not sent, delivered, etc.
message_type (str): message type, either Sent or Received.
offset (str): identifier of the row, from which the event data was
extracted.
query (str): SQL query that was used to obtain the event data.
username (str): unique username of the sender or receiver. | 6259903d5e10d32532ce4216 |
class CloudSqlHook(CloudSQLHook): <NEW_LINE> <INDENT> def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> warnings.warn(self.__doc__, DeprecationWarning, stacklevel=2) <NEW_LINE> super().__init__(*args, **kwargs) | This class is deprecated. Please use `airflow.providers.google.cloud.hooks.sql.CloudSQLHook`. | 6259903d71ff763f4b5e89c4 |
class VictimViewSet(viewsets.ModelViewSet): <NEW_LINE> <INDENT> queryset = Victim.objects.all() <NEW_LINE> serializer_class = VictimSerializer <NEW_LINE> def perform_create(self, serializer): <NEW_LINE> <INDENT> serializer.save(admitter=self.request.user) | This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions.
Additionally we also provide an extra `highlight` action. | 6259903d3eb6a72ae038b892 |
class OverrideSettings(object): <NEW_LINE> <INDENT> @classmethod <NEW_LINE> def from_crawler(cls, crawler): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> op_file = crawler.settings.attributes['FEED_URI'].value <NEW_LINE> if op_file and os.path.exists(op_file): <NEW_LINE> <INDENT> os.remove(op_file) <NEW_LINE> log.msg("Delete the output file %s : Successful" %(op_file),level=log.INFO) <NEW_LINE> <DEDENT> <DEDENT> except OSError: <NEW_LINE> <INDENT> log.msg("Delete the output file %s : Failed" %(op_file),level=log.INFO) <NEW_LINE> pass <NEW_LINE> <DEDENT> spider = crawler._spider <NEW_LINE> if hasattr(spider, 'config'): <NEW_LINE> <INDENT> settings = spider.config.settings <NEW_LINE> spider_fields = ['handle_httpstatus_list'] <NEW_LINE> for k, v in settings.iteritems(): <NEW_LINE> <INDENT> if k in spider_fields: <NEW_LINE> <INDENT> setattr(spider, k, v) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> current_value = crawler.settings.get(k) <NEW_LINE> if isinstance(current_value,dict): <NEW_LINE> <INDENT> crawler.settings.get(k).update(v) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> crawler.settings.set(k, v) | all key and value in the settings object below will be read and update to spider settings (settings.py)
Here is an example in a config file:
"settings": {
"DOWNLOADER_MIDDLEWARES": {
"scrapy_balloons.useragent.RandomUserAgentMiddleware":500
},
"DOWNLOAD_DELAY": 5
} | 6259903d23e79379d538d727 |
class EmptyQueueException(Exception): <NEW_LINE> <INDENT> pass | Raised when trying to access element of empty Queue. | 6259903d8c3a8732951f7780 |
class Part(UserRelation): <NEW_LINE> <INDENT> _connection = None <NEW_LINE> _context = None <NEW_LINE> _heading = None <NEW_LINE> _master = None <NEW_LINE> tier_regexp = r'(?P<master>' + '|'.join( [c.tier_regexp for c in (Manual, Lookup, Imported, Computed)] ) + r'){1,1}' + '__' + r'(?P<part>' + _base_regexp + ')' <NEW_LINE> @ClassProperty <NEW_LINE> def connection(cls): <NEW_LINE> <INDENT> return cls._connection <NEW_LINE> <DEDENT> @ClassProperty <NEW_LINE> def full_table_name(cls): <NEW_LINE> <INDENT> return None if cls.database is None or cls.table_name is None else r"`{0:s}`.`{1:s}`".format( cls.database, cls.table_name) <NEW_LINE> <DEDENT> @ClassProperty <NEW_LINE> def master(cls): <NEW_LINE> <INDENT> return cls._master <NEW_LINE> <DEDENT> @ClassProperty <NEW_LINE> def table_name(cls): <NEW_LINE> <INDENT> return None if cls.master is None else cls.master.table_name + '__' + from_camel_case(cls.__name__) <NEW_LINE> <DEDENT> def delete(self, force=False): <NEW_LINE> <INDENT> if force: <NEW_LINE> <INDENT> super().delete() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise DataJointError('Cannot delete from a Part directly. Delete from master instead') <NEW_LINE> <DEDENT> <DEDENT> def drop(self, force=False): <NEW_LINE> <INDENT> if force: <NEW_LINE> <INDENT> super().drop() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise DataJointError('Cannot drop a Part directly. Delete from master instead') | Inherit from this class if the table's values are details of an entry in another relation
and if this table is populated by this relation. For example, the entries inheriting from
dj.Part could be single entries of a matrix, while the parent table refers to the entire matrix.
Part relations are implemented as classes inside classes. | 6259903d10dbd63aa1c71dfe |
class class_setup_deformable_bones(bpy.types.Operator): <NEW_LINE> <INDENT> bl_idname = "bear.setup_deformable_bones" <NEW_LINE> bl_label = "Toggle Non-mecanim Deform" <NEW_LINE> @classmethod <NEW_LINE> def poll(cls, context): <NEW_LINE> <INDENT> return len(context.selected_objects) is not 0 <NEW_LINE> <DEDENT> def execute(self, context): <NEW_LINE> <INDENT> setup_deformable_bones(context) <NEW_LINE> return {'FINISHED'} | Toggle deform status for bones not required by mecanim | 6259903de76e3b2f99fd9c34 |
class ClericalWordFeed(Feed): <NEW_LINE> <INDENT> title = ugettext_lazy("Geistliches Wort aus der Dreifaltigkeitskirchgemeinde") <NEW_LINE> description = ugettext_lazy("Geistliches Wort aus der Dreifaltigkeitskirchgemeinde") <NEW_LINE> feed_copyright = ugettext_lazy( "Copyright Ev.-Luth. Dreifaltigkeitskirchgemeinde Leipzig. Alle " "Rechte vorbehalten." ) <NEW_LINE> def get_object(self, request, *args, **kwargs): <NEW_LINE> <INDENT> self.request = request <NEW_LINE> return super().get_object(request, *args, **kwargs) <NEW_LINE> <DEDENT> def link(self): <NEW_LINE> <INDENT> return reverse("clerical_word") <NEW_LINE> <DEDENT> def items(self): <NEW_LINE> <INDENT> return ClericalWordAudioFile.objects.all().exclude(hidden=True) <NEW_LINE> <DEDENT> def item_title(self, item): <NEW_LINE> <INDENT> return item.title <NEW_LINE> <DEDENT> def item_description(self, item): <NEW_LINE> <INDENT> return item.description <NEW_LINE> <DEDENT> def item_link(self, item): <NEW_LINE> <INDENT> return self.item_enclosure_url(item) <NEW_LINE> <DEDENT> def item_enclosure_url(self, item): <NEW_LINE> <INDENT> current_site = get_current_site(self.request) <NEW_LINE> url = item.file.url <NEW_LINE> return add_domain(current_site.domain, url, self.request.is_secure()) <NEW_LINE> <DEDENT> def item_enclosure_length(self, item): <NEW_LINE> <INDENT> return item.file.size <NEW_LINE> <DEDENT> def item_enclosure_mime_type(self, item): <NEW_LINE> <INDENT> return item.mime_type <NEW_LINE> <DEDENT> def item_pubdate(self, item): <NEW_LINE> <INDENT> return item.pubdate <NEW_LINE> <DEDENT> def item_guid(self, item): <NEW_LINE> <INDENT> return item.uuid <NEW_LINE> <DEDENT> item_guid_is_permalink = False | Clerical word for services site. | 6259903d21bff66bcd723e92 |
class LeakyRectify(object): <NEW_LINE> <INDENT> def __init__(self, leakiness=0.01): <NEW_LINE> <INDENT> self.leakiness = leakiness <NEW_LINE> <DEDENT> def __call__(self, x): <NEW_LINE> <INDENT> return tensor.nnet.relu(x, self.leakiness) | Leaky rectifier :math:`\varphi(x) = (x > 0)? x : \alpha \cdot x`
The leaky rectifier was introduced in [1]_. Compared to the standard
rectifier :func:`rectify`, it has a nonzero gradient for negative input,
which often helps convergence.
Parameters
----------
leakiness : float
Slope for negative input, usually between 0 and 1.
A leakiness of 0 will lead to the standard rectifier,
a leakiness of 1 will lead to a linear activation function,
and any value in between will give a leaky rectifier.
Methods
-------
__call__(x)
Apply the leaky rectify function to the activation `x`.
Examples
--------
In contrast to other activation functions in this module, this is
a class that needs to be instantiated to obtain a callable:
>>> from lasagne.layers import InputLayer, DenseLayer
>>> l_in = InputLayer((None, 100))
>>> from lasagne.nonlinearities import LeakyRectify
>>> custom_rectify = LeakyRectify(0.1)
>>> l1 = DenseLayer(l_in, num_units=200, nonlinearity=custom_rectify)
Alternatively, you can use the provided instance for leakiness=0.01:
>>> from lasagne.nonlinearities import leaky_rectify
>>> l2 = DenseLayer(l_in, num_units=200, nonlinearity=leaky_rectify)
Or the one for a high leakiness of 1/3:
>>> from lasagne.nonlinearities import very_leaky_rectify
>>> l3 = DenseLayer(l_in, num_units=200, nonlinearity=very_leaky_rectify)
See Also
--------
leaky_rectify: Instance with default leakiness of 0.01, as in [1]_.
very_leaky_rectify: Instance with high leakiness of 1/3, as in [2]_.
References
----------
.. [1] Maas et al. (2013):
Rectifier Nonlinearities Improve Neural Network Acoustic Models,
http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf
.. [2] Graham, Benjamin (2014):
Spatially-sparse convolutional neural networks,
http://arxiv.org/abs/1409.6070 | 6259903d63b5f9789fe86395 |
class Author(BaseModel): <NEW_LINE> <INDENT> id = PrimaryKeyField() <NEW_LINE> name = CharField(max_length=256) <NEW_LINE> biography = TextField() <NEW_LINE> age = SmallIntegerField() <NEW_LINE> @staticmethod <NEW_LINE> def add_author(name, biography, age): <NEW_LINE> <INDENT> if len(name) <= 256: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return Author.create(name=name, biography=biography, age=age) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> return ValueError <NEW_LINE> <DEDENT> <DEDENT> return None <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def select_all(): <NEW_LINE> <INDENT> authors = Author.select() <NEW_LINE> if authors: <NEW_LINE> <INDENT> return authors <NEW_LINE> <DEDENT> return None <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def update_selected(author_id, name=None, biography=None, age=None): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> author = Author.get(Author.id == author_id) <NEW_LINE> if name and len(name) <= 256: <NEW_LINE> <INDENT> author.name = name <NEW_LINE> <DEDENT> if biography: <NEW_LINE> <INDENT> author.biography = biography <NEW_LINE> <DEDENT> if isinstance(age, int): <NEW_LINE> <INDENT> author.age = age <NEW_LINE> <DEDENT> author.save() <NEW_LINE> return author <NEW_LINE> <DEDENT> except Author.DoesNotExist: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return False <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def delete_selected(author_id): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> Author.get(Author.id == author_id).delete_instance() <NEW_LINE> return True <NEW_LINE> <DEDENT> except DoesNotExist: <NEW_LINE> <INDENT> return False | Author model.
id - primary key of author
name - name of the author
biography - a string of text about the author
age - age of the author | 6259903d8a349b6b4368746e |
class NewTaskForm(QtGui.QWidget): <NEW_LINE> <INDENT> @property <NEW_LINE> def exit_code(self): <NEW_LINE> <INDENT> return self._exit_code <NEW_LINE> <DEDENT> @property <NEW_LINE> def hide_tk_title_bar(self): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> def __init__(self, entity, step, user, parent): <NEW_LINE> <INDENT> QtGui.QWidget.__init__(self, parent) <NEW_LINE> self._app = sgtk.platform.current_bundle() <NEW_LINE> self._entity = entity <NEW_LINE> self._user = user <NEW_LINE> self._exit_code = QtGui.QDialog.Rejected <NEW_LINE> from .ui.new_task_form import Ui_NewTaskForm <NEW_LINE> self._ui = Ui_NewTaskForm() <NEW_LINE> self._ui.setupUi(self) <NEW_LINE> entity_name = "%s %s" % (self._entity["type"], self._entity.get("code") or entity.get("name")) <NEW_LINE> self._ui.entity.setText(entity_name) <NEW_LINE> username = self._user.get("name") if self._user else None <NEW_LINE> self._ui.assigned_to.setText(username or "<unassigned>") <NEW_LINE> sg_result = self._app.shotgun.find("Step", [["entity_type", "is", self._entity["type"]]], ["code", "id"]) <NEW_LINE> self._pipeline_step_dict = {} <NEW_LINE> for item in sg_result: <NEW_LINE> <INDENT> step_name = item.get("code") <NEW_LINE> if step_name is None: <NEW_LINE> <INDENT> step_name = "Unnamed Step" <NEW_LINE> <DEDENT> self._ui.pipeline_step.addItem(step_name, item["id"]) <NEW_LINE> self._pipeline_step_dict[item["id"]] = item <NEW_LINE> <DEDENT> if step: <NEW_LINE> <INDENT> step_id = step["id"] <NEW_LINE> idx = self._ui.pipeline_step.findData(step_id) <NEW_LINE> if idx != -1: <NEW_LINE> <INDENT> self._ui.pipeline_step.setCurrentIndex(idx) <NEW_LINE> <DEDENT> step_name = self._pipeline_step_dict.get(step_id, {}).get("code", "") <NEW_LINE> self._ui.task_name.setText(step_name) <NEW_LINE> <DEDENT> self._ui.task_name.setFocus() <NEW_LINE> self._ui.task_name.selectAll() <NEW_LINE> self._ui.create_btn.clicked.connect(self._on_create_btn_clicked) <NEW_LINE> self._ui.break_line.setFrameShadow(QtGui.QFrame.Plain) <NEW_LINE> clr = QtGui.QApplication.palette().text().color() <NEW_LINE> self._ui.break_line.setStyleSheet("#break_line{color: rgb(%d,%d,%d);}" % (clr.red() * 0.75, clr.green() * 0.75, clr.blue() * 0.75)) <NEW_LINE> <DEDENT> @property <NEW_LINE> def entity(self): <NEW_LINE> <INDENT> return self._entity <NEW_LINE> <DEDENT> @property <NEW_LINE> def assigned_to(self): <NEW_LINE> <INDENT> return self._user <NEW_LINE> <DEDENT> @property <NEW_LINE> def pipeline_step(self): <NEW_LINE> <INDENT> step_id = self._ui.pipeline_step.itemData(self._ui.pipeline_step.currentIndex()) <NEW_LINE> if hasattr(QtCore, "QVariant") and isinstance(step_id, QtCore.QVariant): <NEW_LINE> <INDENT> step_id = step_id.toPyObject() <NEW_LINE> <DEDENT> return self._pipeline_step_dict[step_id] <NEW_LINE> <DEDENT> @property <NEW_LINE> def task_name(self): <NEW_LINE> <INDENT> return value_to_str(self._ui.task_name.text()) <NEW_LINE> <DEDENT> def _on_create_btn_clicked(self): <NEW_LINE> <INDENT> self._exit_code = QtGui.QDialog.Accepted <NEW_LINE> self.close() | Form for requesting details needed to create
a new Shotgun task | 6259903d6fece00bbacccbd8 |
class NamedDistribution(object): <NEW_LINE> <INDENT> def __init__(self, name, distribution, required_gpus=None, required_tpu=False): <NEW_LINE> <INDENT> self._distribution = distribution <NEW_LINE> self._name = name <NEW_LINE> self._required_gpus = required_gpus <NEW_LINE> self._required_tpu = required_tpu <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return self._name <NEW_LINE> <DEDENT> @property <NEW_LINE> def strategy(self): <NEW_LINE> <INDENT> return self._distribution <NEW_LINE> <DEDENT> @property <NEW_LINE> def required_gpus(self): <NEW_LINE> <INDENT> return self._required_gpus <NEW_LINE> <DEDENT> @property <NEW_LINE> def required_tpu(self): <NEW_LINE> <INDENT> return self._required_tpu | Translates DistributionStrategy and its data into a good name. | 6259903d15baa723494631ba |
class LogSummary(models.Model): <NEW_LINE> <INDENT> checksum = models.CharField(max_length=32, primary_key=True) <NEW_LINE> level = models.PositiveIntegerField(choices=LEVEL_CHOICES, default=logbook.ERROR, blank=True, db_index=True) <NEW_LINE> source = models.CharField(max_length=128, blank=True, db_index=True) <NEW_LINE> host = models.CharField(max_length=200, blank=True, null=True, db_index=True) <NEW_LINE> earliest = models.DateTimeField(default=datetime.datetime.now, db_index=True) <NEW_LINE> latest = models.DateTimeField(default=datetime.datetime.now, db_index=True) <NEW_LINE> hits = models.IntegerField(default=0, null=False) <NEW_LINE> headline = models.CharField(max_length=HEADLINE_LENGTH, default='', blank=True) <NEW_LINE> latest_msg = models.TextField() <NEW_LINE> summary_only = models.BooleanField(default=False) <NEW_LINE> class Meta: <NEW_LINE> <INDENT> verbose_name = 'Log Summary' <NEW_LINE> verbose_name_plural = 'Log Summaries' <NEW_LINE> ordering = ['-latest'] <NEW_LINE> app_label = 'django_logbook' <NEW_LINE> <DEDENT> def __unicode__(self): <NEW_LINE> <INDENT> return u"<LOGSUMMARY {0} {1} {2} {3}>".format( LEVEL_CHOICES_DICT.get(self.level, 'UNKNOWN'), self.host, self.source, self.headline) <NEW_LINE> <DEDENT> def abbrev_msg(self, maxlen=500): <NEW_LINE> <INDENT> if len(self.latest_msg) > maxlen: <NEW_LINE> <INDENT> return u'%s ...' % self.latest_msg[:maxlen] <NEW_LINE> <DEDENT> return self.latest_msg <NEW_LINE> <DEDENT> abbrev_msg.short_description = u'Most recent msg' <NEW_LINE> def latest_fmt(self): <NEW_LINE> <INDENT> return self.latest.strftime('%Y%m%d\nT%H%m') <NEW_LINE> <DEDENT> latest_fmt.short_description = 'Latest' <NEW_LINE> def earliest_fmt(self): <NEW_LINE> <INDENT> return self.earliest.strftime('%Y%m%d\nT%H%m') <NEW_LINE> <DEDENT> earliest_fmt.short_description = 'Earliest' | A summary of the log messages | 6259903d07d97122c4217ec7 |
class UniquenessMixin(object): <NEW_LINE> <INDENT> def _perform_unique_checks(self, unique_checks): <NEW_LINE> <INDENT> errors = {} <NEW_LINE> for model_class, unique_check in unique_checks: <NEW_LINE> <INDENT> lookup_kwargs = {} <NEW_LINE> for field_name in unique_check: <NEW_LINE> <INDENT> f = self._meta.get_field(field_name) <NEW_LINE> lookup_value = getattr(self, f.attname) <NEW_LINE> if lookup_value is None: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if f.primary_key and not self._state.adding: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> lookup = str(field_name) <NEW_LINE> if isinstance(lookup_value, (list, set, tuple)): <NEW_LINE> <INDENT> lookup = "%s__overlap" % lookup <NEW_LINE> <DEDENT> lookup_kwargs[lookup] = lookup_value <NEW_LINE> <DEDENT> if len(unique_check) != len(lookup_kwargs): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if len([x for x in lookup_kwargs if x.endswith("__in")]) > 1: <NEW_LINE> <INDENT> raise NotSupportedError("You cannot currently have two list fields in a unique combination") <NEW_LINE> <DEDENT> lookups = [] <NEW_LINE> for k, v in lookup_kwargs.iteritems(): <NEW_LINE> <INDENT> if (k.endswith("__in") or k.endswith("__overlap")) and len(v) > MAX_ALLOWABLE_QUERIES: <NEW_LINE> <INDENT> v = list(v) <NEW_LINE> while v: <NEW_LINE> <INDENT> new_lookup = lookup_kwargs.copy() <NEW_LINE> new_lookup[k] = v[:30] <NEW_LINE> v = v[30:] <NEW_LINE> lookups.append(new_lookup) <NEW_LINE> <DEDENT> break <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> lookups = [lookup_kwargs] <NEW_LINE> <DEDENT> for lookup_kwargs in lookups: <NEW_LINE> <INDENT> qs = model_class._default_manager.filter(**lookup_kwargs).values_list("pk", flat=True) <NEW_LINE> model_class_pk = self._get_pk_val(model_class._meta) <NEW_LINE> result = list(qs) <NEW_LINE> if not self._state.adding and model_class_pk is not None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> result.remove(model_class_pk) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> if result: <NEW_LINE> <INDENT> if len(unique_check) == 1: <NEW_LINE> <INDENT> key = unique_check[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> key = NON_FIELD_ERRORS <NEW_LINE> <DEDENT> errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check)) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return errors | Mixin overriding the methods checking value uniqueness.
For models defining unique constraints this mixin should be inherited from.
When iterable (list or set) fields are marked as unique it must be used.
This is a copy of Django's implementation, save for the part marked by the comment. | 6259903d50485f2cf55dc1ac |
class BanditNArmedRandom(BanditEnv): <NEW_LINE> <INDENT> def __init__( self, nb_bandits: int = 10, nb_prices_per_bandit: int = 100, stdev: int = 1, seed: int = 42, ): <NEW_LINE> <INDENT> np.random.seed(seed) <NEW_LINE> reward_params = [] <NEW_LINE> for _i in range(nb_bandits): <NEW_LINE> <INDENT> mean = np.random.uniform(0, nb_prices_per_bandit) <NEW_LINE> reward_params.append((mean, stdev)) <NEW_LINE> <DEDENT> BanditEnv.__init__( self, nb_bandits=nb_bandits, nb_prices_per_bandit=nb_prices_per_bandit, reward_params=reward_params, ) | N-armed bandit randomly initialized. | 6259903dd10714528d69efa0 |
class RDMCPStorageObject(StorageObject): <NEW_LINE> <INDENT> def __init__(self, name, size=None, wwn=None, nullio=False): <NEW_LINE> <INDENT> if size is not None: <NEW_LINE> <INDENT> super(RDMCPStorageObject, self).__init__(name, 'create') <NEW_LINE> try: <NEW_LINE> <INDENT> self._configure(size, wwn, nullio) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> self.delete() <NEW_LINE> raise <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> super(RDMCPStorageObject, self).__init__(name, 'lookup') <NEW_LINE> <DEDENT> <DEDENT> def _configure(self, size, wwn, nullio): <NEW_LINE> <INDENT> self._check_self() <NEW_LINE> size = round(float(size)/resource.getpagesize()) <NEW_LINE> if size == 0: <NEW_LINE> <INDENT> size = 1 <NEW_LINE> <DEDENT> self._control("rd_pages=%d" % size) <NEW_LINE> if nullio: <NEW_LINE> <INDENT> self._control("rd_nullio=1") <NEW_LINE> <DEDENT> self._enable() <NEW_LINE> super(RDMCPStorageObject, self)._configure(wwn) <NEW_LINE> <DEDENT> def _get_page_size(self): <NEW_LINE> <INDENT> self._check_self() <NEW_LINE> return int(self._parse_info("PAGES/PAGE_SIZE").split('*')[1]) <NEW_LINE> <DEDENT> def _get_pages(self): <NEW_LINE> <INDENT> self._check_self() <NEW_LINE> return int(self._parse_info("PAGES/PAGE_SIZE").split('*')[0]) <NEW_LINE> <DEDENT> def _get_size(self): <NEW_LINE> <INDENT> self._check_self() <NEW_LINE> size = self._get_page_size() * self._get_pages() <NEW_LINE> return size <NEW_LINE> <DEDENT> def _get_nullio(self): <NEW_LINE> <INDENT> self._check_self() <NEW_LINE> try: <NEW_LINE> <INDENT> return bool(int(self._parse_info('nullio'))) <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> page_size = property(_get_page_size, doc="Get the ramdisk page size.") <NEW_LINE> pages = property(_get_pages, doc="Get the ramdisk number of pages.") <NEW_LINE> size = property(_get_size, doc="Get the ramdisk size in bytes.") <NEW_LINE> nullio = property(_get_nullio, doc="Get the nullio status.") <NEW_LINE> def dump(self): <NEW_LINE> <INDENT> d = super(RDMCPStorageObject, self).dump() <NEW_LINE> d['wwn'] = self.wwn <NEW_LINE> d['size'] = self.size <NEW_LINE> if self.nullio: <NEW_LINE> <INDENT> d['nullio'] = True <NEW_LINE> <DEDENT> return d | An interface to configFS storage objects for rd_mcp backstore. | 6259903db830903b9686ed8e |
class WideHelpFormatter(argparse.HelpFormatter): <NEW_LINE> <INDENT> def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> kwargs["max_help_position"] = 40 <NEW_LINE> super().__init__(*args, **kwargs) | Command-line help text formatter with wider help text. | 6259903d23e79379d538d729 |
class Perceptron(LinearClassifier): <NEW_LINE> <INDENT> def findE_in(self, X, Y, do_transform=True): <NEW_LINE> <INDENT> errs = self.err(X, Y, do_transform) <NEW_LINE> self._badInds = np.where(errs)[0] <NEW_LINE> return np.mean(errs) <NEW_LINE> <DEDENT> def fit(self, X, Y, **conditions): <NEW_LINE> <INDENT> X = X if not self.transform else self.transform(X) <NEW_LINE> X = self.check_input_dim(X) <NEW_LINE> maxIters = conditions.get('maxIters', None) <NEW_LINE> errBound = conditions.get('errBound', 0.0) <NEW_LINE> wDiffBound = conditions.get('wDiffBound', None) <NEW_LINE> it = 0 <NEW_LINE> w_old = self._weights.copy() <NEW_LINE> E_ins = [self.findE_in(X, Y, do_transform=False)] <NEW_LINE> if errBound is not None and E_ins[-1] <= errBound: <NEW_LINE> <INDENT> return it, np.array(E_ins) <NEW_LINE> <DEDENT> while True: <NEW_LINE> <INDENT> w_old = self._weights.copy() <NEW_LINE> ind = np.random.choice(self._badInds) <NEW_LINE> self._weights += Y[ind] * X[ind] <NEW_LINE> it += 1 <NEW_LINE> E_ins.append(self.findE_in(X, Y, do_transform=False)) <NEW_LINE> if self.isDone(it, w_old, E_ins[-1], maxIters, wDiffBound, errBound): <NEW_LINE> <INDENT> return it, np.array(E_ins) | The Perceptron Model (a binary classifier), uses PLA as the learning
algorithm | 6259903d379a373c97d9a252 |
class Stub(Service): <NEW_LINE> <INDENT> do_not_discover = True <NEW_LINE> def print_error(self, *errors): <NEW_LINE> <INDENT> for x in errors: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> report('error_handler for generic service', str(errors) ) <NEW_LINE> <DEDENT> def stop(self): <NEW_LINE> <INDENT> Service.stop(self) <NEW_LINE> report('Custom stop for', self) <NEW_LINE> <DEDENT> @constraint(boot_first='postoffice') <NEW_LINE> def start(self): <NEW_LINE> <INDENT> Service.start(self) <NEW_LINE> report('Custom start for', self) <NEW_LINE> <DEDENT> def iterate(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def play(self): <NEW_LINE> <INDENT> return Service.play(self) | Stub Service:
start: brief description of service start-up here
stop: brief description of service shutdown here | 6259903d3c8af77a43b68851 |
class _patch_dict(object): <NEW_LINE> <INDENT> def __init__(self, in_dict, values=(), clear=False, **kwargs): <NEW_LINE> <INDENT> self.in_dict = in_dict <NEW_LINE> self.values = dict(values) <NEW_LINE> self.values.update(kwargs) <NEW_LINE> self.clear = clear <NEW_LINE> self._original = None <NEW_LINE> <DEDENT> def __call__(self, f): <NEW_LINE> <INDENT> if isinstance(f, ClassTypes): <NEW_LINE> <INDENT> return self.decorate_class(f) <NEW_LINE> <DEDENT> @wraps(f) <NEW_LINE> def _inner(*args, **kw): <NEW_LINE> <INDENT> self._patch_dict() <NEW_LINE> try: <NEW_LINE> <INDENT> return f(*args, **kw) <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> self._unpatch_dict() <NEW_LINE> <DEDENT> <DEDENT> return _inner <NEW_LINE> <DEDENT> def decorate_class(self, klass): <NEW_LINE> <INDENT> for attr in dir(klass): <NEW_LINE> <INDENT> attr_value = getattr(klass, attr) <NEW_LINE> if (attr.startswith(patch.TEST_PREFIX) and hasattr(attr_value, "__call__")): <NEW_LINE> <INDENT> decorator = _patch_dict(self.in_dict, self.values, self.clear) <NEW_LINE> decorated = decorator(attr_value) <NEW_LINE> setattr(klass, attr, decorated) <NEW_LINE> <DEDENT> <DEDENT> return klass <NEW_LINE> <DEDENT> def __enter__(self): <NEW_LINE> <INDENT> self._patch_dict() <NEW_LINE> <DEDENT> def _patch_dict(self): <NEW_LINE> <INDENT> values = self.values <NEW_LINE> if isinstance(self.in_dict, basestring): <NEW_LINE> <INDENT> self.in_dict = _importer(self.in_dict) <NEW_LINE> <DEDENT> in_dict = self.in_dict <NEW_LINE> clear = self.clear <NEW_LINE> try: <NEW_LINE> <INDENT> original = in_dict.copy() <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> original = {} <NEW_LINE> for key in in_dict: <NEW_LINE> <INDENT> original[key] = in_dict[key] <NEW_LINE> <DEDENT> <DEDENT> self._original = original <NEW_LINE> if clear: <NEW_LINE> <INDENT> _clear_dict(in_dict) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> in_dict.update(values) <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> for key in values: <NEW_LINE> <INDENT> in_dict[key] = values[key] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def _unpatch_dict(self): <NEW_LINE> <INDENT> in_dict = self.in_dict <NEW_LINE> original = self._original <NEW_LINE> _clear_dict(in_dict) <NEW_LINE> try: <NEW_LINE> <INDENT> in_dict.update(original) <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> for key in original: <NEW_LINE> <INDENT> in_dict[key] = original[key] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def __exit__(self, *args): <NEW_LINE> <INDENT> self._unpatch_dict() <NEW_LINE> return False <NEW_LINE> <DEDENT> start = __enter__ <NEW_LINE> stop = __exit__ | Patch a dictionary, or dictionary like object, and restore the dictionary
to its original state after the test.
`in_dict` can be a dictionary or a mapping like container. If it is a
mapping then it must at least support getting, setting and deleting items
plus iterating over keys.
`in_dict` can also be a string specifying the name of the dictionary, which
will then be fetched by importing it.
`values` can be a dictionary of values to set in the dictionary. `values`
can also be an iterable of `(key, value)` pairs.
If `clear` is True then the dictionary will be cleared before the new
values are set.
`patch.dict` can also be called with arbitrary keyword arguments to set
values in the dictionary::
with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
...
`patch.dict` can be used as a context manager, decorator or class
decorator. When used as a class decorator `patch.dict` honours
`patch.TEST_PREFIX` for choosing which methods to wrap. | 6259903da79ad1619776b2a9 |
@classify("requests", "class") <NEW_LINE> class ResponseList(NotEmptyList, CommonAttributeList): <NEW_LINE> <INDENT> def set(self, resp): <NEW_LINE> <INDENT> self[:] = list_from(resp) <NEW_LINE> <DEDENT> @property <NEW_LINE> def single_item(self): <NEW_LINE> <INDENT> return only_item_of(self) <NEW_LINE> <DEDENT> def build_and_set(self, *args, **kwargs): <NEW_LINE> <INDENT> self.set(ResponseInfo(*args, **kwargs)) <NEW_LINE> <DEDENT> def run_response_callbacks(self): <NEW_LINE> <INDENT> for resp_info in self: <NEW_LINE> <INDENT> resp_info.run_response_callback() | A list specialized for testing, w/ResponseInfo object items.
To best understand this class, it is important to have
a strong understanding of :py:class:`CommonAttributeList` and
:py:class:`ResponseInfo`.
The common workflow for ``ResponseList`` relies greatly on those other connected
pieces.
For example, you might utilize this class in a ways such as this::
>>> responses = ResponseList()
>>> responses.build_and_set(response=client.get_thing())
>>>
>>> # All the `.response` fields, see `CommonAttributeList`
>>> for response in responses.response:
... assert response.json()["thing"]
>>>
>>>
>>> responses.set(
... ResponseInfo(response=client.get_thing(param),
description=f"Getting {param}...")
... for param in my_params
... ) | 6259903db5575c28eb7135de |
class FixedFilterAction(FilterAction): <NEW_LINE> <INDENT> filter_type = 'fixed' <NEW_LINE> needs_preloading = True <NEW_LINE> def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> super(FixedFilterAction, self).__init__(args, kwargs) <NEW_LINE> self.fixed_buttons = self.get_fixed_buttons() <NEW_LINE> self.filter_string = '' <NEW_LINE> <DEDENT> def filter(self, table, images, filter_string): <NEW_LINE> <INDENT> self.filter_string = filter_string <NEW_LINE> categories = self.categorize(table, images) <NEW_LINE> self.categories = defaultdict(list, categories) <NEW_LINE> for button in self.fixed_buttons: <NEW_LINE> <INDENT> button['count'] = len(self.categories[button['value']]) <NEW_LINE> <DEDENT> if not filter_string: <NEW_LINE> <INDENT> return images <NEW_LINE> <DEDENT> return self.categories[filter_string] <NEW_LINE> <DEDENT> def get_fixed_buttons(self): <NEW_LINE> <INDENT> raise NotImplementedError("The get_fixed_buttons method has " "not been implemented by %s." % self.__class__) <NEW_LINE> <DEDENT> def categorize(self, table, images): <NEW_LINE> <INDENT> raise NotImplementedError("The categorize method has not been " "implemented by %s." % self.__class__) | A filter action with fixed buttons. | 6259903d21bff66bcd723e94 |
class CAP_PieExportObject(Menu): <NEW_LINE> <INDENT> bl_idname = "pie.export_object" <NEW_LINE> bl_label = "Select Location" <NEW_LINE> def draw(self, context): <NEW_LINE> <INDENT> layout = self.layout <NEW_LINE> pie = layout.menu_pie() <NEW_LINE> obj = context.object.CAPObj <NEW_LINE> user_preferences = context.user_preferences <NEW_LINE> addon_prefs = user_preferences.addons[__package__].preferences <NEW_LINE> exp = bpy.data.objects[addon_prefs.default_datablock].CAPExp <NEW_LINE> i = 0 <NEW_LINE> for loc in exp.file_presets: <NEW_LINE> <INDENT> pie.operator("capsule.export_select_object", text=exp.file_presets[i].name, icon="SCRIPTWIN").loc = i <NEW_LINE> i += 1 | Displays the export default options for objects. | 6259903d94891a1f408ba00c |
class ExpressRoutePortsLocation(Resource): <NEW_LINE> <INDENT> _validation = { 'name': {'readonly': True}, 'type': {'readonly': True}, 'address': {'readonly': True}, 'contact': {'readonly': True}, 'provisioning_state': {'readonly': True}, } <NEW_LINE> _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'address': {'key': 'properties.address', 'type': 'str'}, 'contact': {'key': 'properties.contact', 'type': 'str'}, 'available_bandwidths': {'key': 'properties.availableBandwidths', 'type': '[ExpressRoutePortsLocationBandwidths]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } <NEW_LINE> def __init__( self, **kwargs ): <NEW_LINE> <INDENT> super(ExpressRoutePortsLocation, self).__init__(**kwargs) <NEW_LINE> self.address = None <NEW_LINE> self.contact = None <NEW_LINE> self.available_bandwidths = kwargs.get('available_bandwidths', None) <NEW_LINE> self.provisioning_state = None | Definition of the ExpressRoutePorts peering location resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar address: Address of peering location.
:vartype address: str
:ivar contact: Contact details of peering locations.
:vartype contact: str
:param available_bandwidths: The inventory of available ExpressRoutePort bandwidths.
:type available_bandwidths:
list[~azure.mgmt.network.v2021_05_01.models.ExpressRoutePortsLocationBandwidths]
:ivar provisioning_state: The provisioning state of the express route port location resource.
Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
:vartype provisioning_state: str or ~azure.mgmt.network.v2021_05_01.models.ProvisioningState | 6259903d16aa5153ce401717 |
class Part(object): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.traces = [] <NEW_LINE> return <NEW_LINE> <DEDENT> def close(self): <NEW_LINE> <INDENT> for trace in self.traces: <NEW_LINE> <INDENT> trace.close() <NEW_LINE> <DEDENT> return <NEW_LINE> <DEDENT> def applykerf(self, kerf): <NEW_LINE> <INDENT> for trace in self.traces: <NEW_LINE> <INDENT> trace.applykerf(kerf) <NEW_LINE> <DEDENT> return <NEW_LINE> <DEDENT> def svg(self): <NEW_LINE> <INDENT> return " ".join([trace.svg() for trace in self.traces]) <NEW_LINE> <DEDENT> def bbox(self): <NEW_LINE> <INDENT> bboxes = [trace.bbox() for trace in self.traces] <NEW_LINE> x = [p1.x for p1, p2 in bboxes] + [p2.x for p1, p2 in bboxes] <NEW_LINE> y = [p1.y for p1, p2 in bboxes] + [p2.y for p1, p2 in bboxes] <NEW_LINE> return Rectangle(Point(min(x), min(y)), Point(max(x), max(y))) <NEW_LINE> <DEDENT> def area(self): <NEW_LINE> <INDENT> bbox = self.bbox() <NEW_LINE> return bbox.area() <NEW_LINE> <DEDENT> def size(self): <NEW_LINE> <INDENT> bbox = self.bbox() <NEW_LINE> return bbox.size() <NEW_LINE> <DEDENT> def __add__(self, other): <NEW_LINE> <INDENT> p = Part() <NEW_LINE> if isinstance(other, Part): <NEW_LINE> <INDENT> p.traces = self.traces + deepcopy(other.traces) <NEW_LINE> <DEDENT> elif isinstance(other, Trace): <NEW_LINE> <INDENT> p.traces = deepcopy(self.traces) <NEW_LINE> p.traces.append(other) <NEW_LINE> <DEDENT> elif isinstance(other, Point): <NEW_LINE> <INDENT> p.traces = self.traces <NEW_LINE> for trace in p.traces: <NEW_LINE> <INDENT> trace.offset(other) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise RuntimeError("Can only add a Part, Trace or Point to an existing Part.") <NEW_LINE> <DEDENT> return p <NEW_LINE> <DEDENT> def __iadd__(self, other): <NEW_LINE> <INDENT> if isinstance(other, Part): <NEW_LINE> <INDENT> self.traces.extend(other.traces) <NEW_LINE> <DEDENT> elif isinstance(other, Trace): <NEW_LINE> <INDENT> self.traces.append(deepcopy(other)) <NEW_LINE> <DEDENT> elif isinstance(other, Point): <NEW_LINE> <INDENT> for trace in self.traces: <NEW_LINE> <INDENT> trace.offset(other) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise RuntimeError("Can only add a Part, Trace or Point to an existing Part.") <NEW_LINE> <DEDENT> return self <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return "Part" + str(self) <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> l = len(self.traces) <NEW_LINE> return "(" + str(l) + " trace" + ("s" if l>1 else "") + ")" <NEW_LINE> <DEDENT> def __lt__(self, other): <NEW_LINE> <INDENT> return self.area() < other.area() | List of traces that make up a part. | 6259903d50485f2cf55dc1ae |
class Form(forms.ModelForm): <NEW_LINE> <INDENT> class Meta: <NEW_LINE> <INDENT> model = models.TextFragment <NEW_LINE> fields = ['lang', 'text'] <NEW_LINE> <DEDENT> def save(self, commit=True): <NEW_LINE> <INDENT> self.instance.type = "ann" <NEW_LINE> return super().save(commit) | Override form to set default type | 6259903d26238365f5fadd82 |
class FixedBoundedFloatStrategy(SearchStrategy): <NEW_LINE> <INDENT> def __init__(self, lower_bound, upper_bound, width): <NEW_LINE> <INDENT> SearchStrategy.__init__(self) <NEW_LINE> assert isinstance(lower_bound, float) <NEW_LINE> assert isinstance(upper_bound, float) <NEW_LINE> assert 0 <= lower_bound < upper_bound <NEW_LINE> assert math.copysign(1, lower_bound) == 1, "lower bound may not be -0.0" <NEW_LINE> assert width in (16, 32, 64) <NEW_LINE> self.lower_bound = lower_bound <NEW_LINE> self.upper_bound = upper_bound <NEW_LINE> self.width = width <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return "FixedBoundedFloatStrategy(%s, %s, %s)" % ( self.lower_bound, self.upper_bound, self.width, ) <NEW_LINE> <DEDENT> def do_draw(self, data): <NEW_LINE> <INDENT> f = self.lower_bound + ( self.upper_bound - self.lower_bound ) * d.fractional_float(data) <NEW_LINE> if self.width < 64: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> f = float_of(f, self.width) <NEW_LINE> <DEDENT> except OverflowError: <NEW_LINE> <INDENT> reject() <NEW_LINE> <DEDENT> <DEDENT> assume(self.lower_bound <= f <= self.upper_bound) <NEW_LINE> return f | A strategy for floats distributed between two endpoints.
The conditional distribution tries to produce values clustered
closer to one of the ends. | 6259903dd99f1b3c44d068d2 |
class TwoDimPolyN: <NEW_LINE> <INDENT> def __str__(self): <NEW_LINE> <INDENT> rstring = str(self.norder) <NEW_LINE> for key in self.twodkeys: <NEW_LINE> <INDENT> rstring += str(key) <NEW_LINE> <DEDENT> return rstring <NEW_LINE> <DEDENT> def __getitem__(self, index): <NEW_LINE> <INDENT> if index > len(self.twodkeys)-1: <NEW_LINE> <INDENT> err_msg = "Index: {0:s} does not exist!".format(str(index)) <NEW_LINE> raise aXeError(err_msg) <NEW_LINE> <DEDENT> return self.twodkeys[index] <NEW_LINE> <DEDENT> def __setitem__(self, index, obj): <NEW_LINE> <INDENT> if (index > (len(self.twodkeys))-1): <NEW_LINE> <INDENT> err_msg = 'Index ' + str(index) + ' does not exist!' <NEW_LINE> raise aXeError(err_msg) <NEW_LINE> <DEDENT> elif (not isinstance(type(self[0]), obj)): <NEW_LINE> <INDENT> err_msg = ("Object: {0:s} has wrong type: {1:s}!" .format(str(obj), str(type(obj)))) <NEW_LINE> raise aXeError(err_msg) <NEW_LINE> <DEDENT> self.twodkeys[index] = obj <NEW_LINE> <DEDENT> def _find_order(self, prefix, ident, keylist): <NEW_LINE> <INDENT> order_key = prefix + 'ORDER_' + ident <NEW_LINE> return self._find_key(order_key, keylist) <NEW_LINE> <DEDENT> def _find_twodkeys(self, prefix, ident, keylist): <NEW_LINE> <INDENT> twodkeys = [] <NEW_LINE> for ii in range(int(self.norder.keyvalue)+1): <NEW_LINE> <INDENT> twodkey = prefix + ident + '_' + str(ii) <NEW_LINE> newkey = self._find_key(twodkey, keylist, 1) <NEW_LINE> if self._check_twodkey(newkey): <NEW_LINE> <INDENT> twodkeys.append(newkey) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise CKeyLengthWrong(ident, twodkey) <NEW_LINE> <DEDENT> <DEDENT> return twodkeys <NEW_LINE> <DEDENT> def _find_key(self, keyword, keylist, lkey=0): <NEW_LINE> <INDENT> iindex = 0 <NEW_LINE> found = -1 <NEW_LINE> for key in keylist: <NEW_LINE> <INDENT> if key.keyword == keyword: <NEW_LINE> <INDENT> if lkey: <NEW_LINE> <INDENT> nkey = ConfListKey(key.keyword, key.keyvalue, key.comment) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> nkey = ConfKey(key.keyword, key.keyvalue, key.comment) <NEW_LINE> <DEDENT> found = iindex <NEW_LINE> <DEDENT> iindex += 1 <NEW_LINE> <DEDENT> if found < 0: <NEW_LINE> <INDENT> raise CKeyNotFound(keyword) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> del keylist[found] <NEW_LINE> <DEDENT> return nkey <NEW_LINE> <DEDENT> def _check_twodkey(self, inkey): <NEW_LINE> <INDENT> n = float(len(inkey.kvallist)) <NEW_LINE> m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0 <NEW_LINE> if math.fabs(m-int(m)) > 1.0e-16: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> return 1 <NEW_LINE> <DEDENT> def str_header(self, description): <NEW_LINE> <INDENT> rstring = '\n#\n# ' <NEW_LINE> rstring += description <NEW_LINE> rstring += ':\n#\n' <NEW_LINE> return rstring | Object for a polynomial with 2D variance | 6259903da4f1c619b294f79d |
class ColorWidget(QtGui.QWidget): <NEW_LINE> <INDENT> colorChanged = QtCore.Signal(QtGui.QColor) <NEW_LINE> def __init__(self, parent=None): <NEW_LINE> <INDENT> super(ColorWidget, self).__init__(parent) <NEW_LINE> self.setContentsMargins(0,0,0,0) <NEW_LINE> self._color = QtGui.QColor() <NEW_LINE> <DEDENT> def color(self): <NEW_LINE> <INDENT> return QtGui.QColor(self._color) <NEW_LINE> <DEDENT> def updateColor(self, color): <NEW_LINE> <INDENT> self._color = QtGui.QColor(color) <NEW_LINE> self.repaint() <NEW_LINE> <DEDENT> def setColor(self, color): <NEW_LINE> <INDENT> self.updateColor(color) <NEW_LINE> self.colorChanged.emit(self.color()) | Base class for widgets manipulating colors. | 6259903dec188e330fdf9ac5 |
class BaseClass(metaclass=MetaBase): <NEW_LINE> <INDENT> def __repr__(self) -> str: <NEW_LINE> <INDENT> def quote(item: tuple) -> tuple: <NEW_LINE> <INDENT> (key, value) = item <NEW_LINE> value = f"'{value}'" if isinstance(value, str) else value <NEW_LINE> return (key, value) <NEW_LINE> <DEDENT> values = ",".join("{}={}".format(*item) for item in map(quote, vars(self).items())) <NEW_LINE> return f"{type(self).__name__}({values})" | Mercury BaseClass.
This class is used as the main base class for all Mercury classes
and provides default methods and properties across the whole library. | 6259903d3eb6a72ae038b896 |
class TimestamperInterface(object): <NEW_LINE> <INDENT> def time(self): <NEW_LINE> <INDENT> return time.time() | This is the only source for current time in the application.
Override this to generate the unix timestamp in a different way. | 6259903d287bf620b6272e17 |
class MishPulse(nn.Module): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> <DEDENT> def forward(self, x_input): <NEW_LINE> <INDENT> return fmishpulse(x_input) | Applies the mishpulse function element-wise:
mishpulse(x) = -sign(x) * mish(-abs(x) + 0.6361099463262276) + step(x)
Shape:
- Input: (N, *) where * means, any number of additional
dimensions
- Output: (N, *), same shape as the input
Examples:
>>> m = MishPulse()
>>> x_input = torch.randn(2)
>>> output = m(input) | 6259903d507cdc57c63a5fc7 |
class TestInvitationTicket(unittest.TestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def tearDown(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def testInvitationTicket(self): <NEW_LINE> <INDENT> model = TweakApi.models.invitation_ticket.InvitationTicket() | InvitationTicket unit test stubs | 6259903d24f1403a926861e3 |
class Quote(): <NEW_LINE> <INDENT> def __init__(self, symbol): <NEW_LINE> <INDENT> self._symbol = symbol <NEW_LINE> self.prev_bid = 0 <NEW_LINE> self.prev_ask = 0 <NEW_LINE> self.prev_spread = 0 <NEW_LINE> self.bid = 0 <NEW_LINE> self.ask = 0 <NEW_LINE> self.bid_size = 0 <NEW_LINE> self.ask_size = 0 <NEW_LINE> self.spread = 0 <NEW_LINE> self.traded = True <NEW_LINE> self.level_ct = 1 <NEW_LINE> self.time = 0 <NEW_LINE> <DEDENT> def reset(self): <NEW_LINE> <INDENT> self.traded = False <NEW_LINE> self.level_ct += 1 <NEW_LINE> <DEDENT> def update(self, data): <NEW_LINE> <INDENT> self.bid_size = data.bidsize <NEW_LINE> self.ask_size = data.asksize <NEW_LINE> if ( self.bid != data.bidprice and self.ask != data.askprice and round(data.askprice - data.bidprice, 2) == .01 ): <NEW_LINE> <INDENT> self.prev_bid = self.bid <NEW_LINE> self.prev_ask = self.ask <NEW_LINE> self.bid = data.bidprice <NEW_LINE> self.ask = data.askprice <NEW_LINE> self.time = data.timestamp <NEW_LINE> self.prev_spread = round(self.prev_ask - self.prev_bid, 3) <NEW_LINE> self.spread = round(self.ask - self.bid, 3) <NEW_LINE> slog.msg('level change', s=self._symbol, prev_bid=self.prev_bid, prev_ask=self.prev_ask, prev_spread=self.prev_spread, bid=self.bid, ask=self.ask, spread=self.spread, ) <NEW_LINE> if self.prev_spread == 0.01: <NEW_LINE> <INDENT> self.reset() | We use Quote objects to represent the bid/ask spread. When we encounter a
'level change', a move of exactly 1 penny, we may attempt to make one
trade. Whether or not the trade is successfully filled, we do not submit
another trade until we see another level change.
Note: Only moves of 1 penny are considered eligible because larger moves
could potentially indicate some newsworthy event for the stock, which this
algorithm is not tuned to trade. | 6259903d82261d6c527307da |
class InsightsPage(Page): <NEW_LINE> <INDENT> def __init__(self, version, response, solution): <NEW_LINE> <INDENT> super(InsightsPage, self).__init__(version, response) <NEW_LINE> self._solution = solution <NEW_LINE> <DEDENT> def get_instance(self, payload): <NEW_LINE> <INDENT> return InsightsInstance(self._version, payload, business_sid=self._solution['business_sid'], ) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return '<Twilio.Preview.TrustedComms.InsightsPage>' | PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. | 6259903d16aa5153ce401719 |
class IndexCommodBanner(BaseModel): <NEW_LINE> <INDENT> sku = models.ForeignKey( 'CommodSKU', on_delete=models.CASCADE, verbose_name='商品') <NEW_LINE> image = models.ImageField(upload_to='banner', verbose_name='图片') <NEW_LINE> index = models.SmallIntegerField(default=0, verbose_name='展示顺序') <NEW_LINE> class Meta: <NEW_LINE> <INDENT> db_table = 'df_index_banner' <NEW_LINE> verbose_name = '首页轮播商品' <NEW_LINE> verbose_name_plural = verbose_name | Model class for displaying home page carousel products | 6259903d96565a6dacd2d8a1 |
class UnaryOpNode(OpNode): <NEW_LINE> <INDENT> __slots__ = ('rhs',) <NEW_LINE> ops = { '-': (operator.neg, 3), '+': (operator.pos, 3), } <NEW_LINE> def __init__(self, tok): <NEW_LINE> <INDENT> op, self.rhs = tok[0] <NEW_LINE> super(UnaryOpNode, self).__init__(op) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return "UnaryOp(%s%s)" % (self.op, self.rhs) <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return "%s%s" % (self.op, self.rhs) <NEW_LINE> <DEDENT> def eval(self, vars, state): <NEW_LINE> <INDENT> rhs, rd = self.rhs.coerce_numeric(vars, state) <NEW_LINE> if state.get('desc', False): <NEW_LINE> <INDENT> desc = "%s%s" % (self.op, rd) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> desc = '' <NEW_LINE> <DEDENT> return self.opfunc(rhs), desc | An EvalNode for an operator that takes only a single operand. | 6259903dd164cc61758221a3 |
class ExcelReader: <NEW_LINE> <INDENT> def __init__(self, xlsfile: BinaryIO): <NEW_LINE> <INDENT> book = load_workbook(xlsfile, read_only=True) <NEW_LINE> self.sheet = book.worksheets[0] <NEW_LINE> self.fieldnames = [n.value.strip() for n in self.sheet[1]] <NEW_LINE> self.line_num = 1 <NEW_LINE> self._rows = self.sheet.iter_rows(min_row=2) <NEW_LINE> <DEDENT> def __iter__(self): <NEW_LINE> <INDENT> return self <NEW_LINE> <DEDENT> def __next__(self): <NEW_LINE> <INDENT> self.line_num += 1 <NEW_LINE> row = next(self._rows) <NEW_LINE> row = [c.value for c in row] <NEW_LINE> return OrderedDict(zip(self.fieldnames, row)) | Like csv.DictReader, but read MS Excel file.
| 6259903d8a43f66fc4bf33bc |
class Polytope(cdd.CDDMatrix): <NEW_LINE> <INDENT> @staticmethod <NEW_LINE> def fromcdd(m): <NEW_LINE> <INDENT> x = Polytope([]) <NEW_LINE> x._m = m._m <NEW_LINE> return x <NEW_LINE> <DEDENT> def contains(self, x): <NEW_LINE> <INDENT> if isinstance(x, Polytope): <NEW_LINE> <INDENT> return not cdd.pempty(cdd.pinters(self, x)) <NEW_LINE> <DEDENT> elif isinstance(x, np.ndarray): <NEW_LINE> <INDENT> return not cdd.pempty( cdd.pinters(self, Polytope([np.insert(x, 0, 1)], False))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception("Not implemented") <NEW_LINE> <DEDENT> <DEDENT> @property <NEW_LINE> def n(self): <NEW_LINE> <INDENT> return self.col_size - 1 | A polytope. Mostly a convenience class | 6259903d50485f2cf55dc1b0 |
class BaseBackend(object): <NEW_LINE> <INDENT> def track(self, period, id=None, bucket=None, old_id=None, old_bucket=None, date=None): <NEW_LINE> <INDENT> raise NotImplementedError() <NEW_LINE> <DEDENT> def collapse(self, period, date=None, max_periods=1, buckets=None, aggregate_buckets=None): <NEW_LINE> <INDENT> raise NotImplementedError() <NEW_LINE> <DEDENT> def lookup(self, period, start=None, end=None, buckets=None): <NEW_LINE> <INDENT> raise NotImplementedError() | The base backend class.
All backends implement these methods and accept these arguments, though
some may also accept additional keyword arguments. | 6259903d26238365f5fadd84 |
class CollectionsBkLogin(): <NEW_LINE> <INDENT> def __init__(self, client): <NEW_LINE> <INDENT> self.client = client <NEW_LINE> self.get_all_user = ComponentAPI( client=self.client, method='GET', path='/api/c/compapi/bk_login/get_all_user/', description='获取所有用户信息', ) <NEW_LINE> self.get_batch_user = ComponentAPI( client=self.client, method='GET', path='/api/c/compapi/bk_login/get_batch_user/', description='获取多个用户信息', ) <NEW_LINE> self.get_user = ComponentAPI( client=self.client, method='GET', path='/api/c/compapi/bk_login/get_user/', description='获取用户信息', ) | Collections of BK_LOGIN APIS | 6259903d8da39b475be0441c |
class FileHandler(MethodHandler): <NEW_LINE> <INDENT> def __init__(self, request, root): <NEW_LINE> <INDENT> self.root = root <NEW_LINE> super(FileHandler, self).__init__(request) <NEW_LINE> <DEDENT> def head(self): <NEW_LINE> <INDENT> return self.get(skip_body=True) <NEW_LINE> <DEDENT> def get(self, skip_body=False): <NEW_LINE> <INDENT> route_args = self.route_args <NEW_LINE> path = route_args["path"] <NEW_LINE> assert path <NEW_LINE> abspath = os.path.abspath(os.path.join(self.root, path)) <NEW_LINE> if not abspath.startswith(self.root): <NEW_LINE> <INDENT> return forbidden() <NEW_LINE> <DEDENT> if not os.path.exists(abspath): <NEW_LINE> <INDENT> return not_found() <NEW_LINE> <DEDENT> if not os.path.isfile(abspath): <NEW_LINE> <INDENT> return forbidden() <NEW_LINE> <DEDENT> mime_type, encoding = mimetypes.guess_type(abspath) <NEW_LINE> response = HTTPResponse(mime_type or "plain/text", encoding) <NEW_LINE> if not skip_body: <NEW_LINE> <INDENT> response.headers.append(HTTP_HEADER_ACCEPT_RANGE_NONE) <NEW_LINE> file = open(abspath, "rb") <NEW_LINE> try: <NEW_LINE> <INDENT> response.write_bytes(file.read()) <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> file.close() <NEW_LINE> <DEDENT> <DEDENT> return response | Serves static files out of some directory. | 6259903dd99f1b3c44d068d4 |
class OperatorAPIStub(object): <NEW_LINE> <INDENT> def __init__(self, channel): <NEW_LINE> <INDENT> self.SetUserStatus = channel.unary_unary( '/api.OperatorAPI/SetUserStatus', request_serializer=proto__pb2.Report.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) | Missing associated documentation comment in .proto file. | 6259903dd6c5a102081e3353 |
class DualCamera(SensorInterface): <NEW_LINE> <INDENT> def __init__(self, racecar_name): <NEW_LINE> <INDENT> self.image_buffer_left = utils.DoubleBuffer() <NEW_LINE> self.image_buffer_right = utils.DoubleBuffer() <NEW_LINE> rospy.Subscriber('/{}/camera/zed/rgb/image_rect_color'.format(racecar_name), sensor_image, self._left_camera_cb_) <NEW_LINE> rospy.Subscriber('/{}/camera/zed_right/rgb/image_rect_color_right'.format(racecar_name), sensor_image, self._right_camera_cb_) <NEW_LINE> self.sensor_type = Input.STEREO.value <NEW_LINE> <DEDENT> def get_observation_space(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return get_observation_space(Input.STEREO.value) <NEW_LINE> <DEDENT> except GenericError as ex: <NEW_LINE> <INDENT> ex.log_except_and_exit(SIMAPP_SIMULATION_WORKER_EXCEPTION) <NEW_LINE> <DEDENT> except Exception as ex: <NEW_LINE> <INDENT> raise GenericRolloutException('{}'.format(ex)) <NEW_LINE> <DEDENT> <DEDENT> def get_state(self, block=True): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> image_data = self.image_buffer_left.get(block=block) <NEW_LINE> left_img = Image.frombytes('RGB', (image_data.width, image_data.height), image_data.data, 'raw', 'RGB', 0, 1) <NEW_LINE> left_img = left_img.resize(TRAINING_IMAGE_SIZE, resample=2).convert('L') <NEW_LINE> image_data = self.image_buffer_right.get(block=block) <NEW_LINE> right_img = Image.frombytes('RGB', (image_data.width, image_data.height), image_data.data, 'raw', 'RGB', 0, 1) <NEW_LINE> right_img = right_img.resize(TRAINING_IMAGE_SIZE, resample=2).convert('L') <NEW_LINE> return {Input.STEREO.value: np.array(np.stack((left_img, right_img), axis=2))} <NEW_LINE> <DEDENT> except utils.DoubleBuffer.Empty: <NEW_LINE> <INDENT> return {} <NEW_LINE> <DEDENT> except Exception as ex: <NEW_LINE> <INDENT> raise GenericRolloutException("Unable to set state: {}".format(ex)) <NEW_LINE> <DEDENT> <DEDENT> def reset(self): <NEW_LINE> <INDENT> self.image_buffer_left.clear() <NEW_LINE> self.image_buffer_right.clear() <NEW_LINE> <DEDENT> def get_input_embedders(self, network_type): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return get_stereo_camera_embedders(network_type) <NEW_LINE> <DEDENT> except GenericError as ex: <NEW_LINE> <INDENT> ex.log_except_and_exit(SIMAPP_SIMULATION_WORKER_EXCEPTION) <NEW_LINE> <DEDENT> except Exception as ex: <NEW_LINE> <INDENT> raise GenericRolloutException('{}'.format(ex)) <NEW_LINE> <DEDENT> <DEDENT> def _left_camera_cb_(self, data): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.image_buffer_left.put(data) <NEW_LINE> <DEDENT> except Exception as ex: <NEW_LINE> <INDENT> LOGGER.info("Unable to retrieve frame: %s", ex) <NEW_LINE> <DEDENT> <DEDENT> def _right_camera_cb_(self, data): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.image_buffer_right.put(data) <NEW_LINE> <DEDENT> except Exception as ex: <NEW_LINE> <INDENT> LOGGER.info("Unable to retrieve frame: %s", ex) | This class handles the data for dual cameras | 6259903dcad5886f8bdc5993 |
class Grammar(object): <NEW_LINE> <INDENT> __metaclass__ = GrammarBase <NEW_LINE> grammar = None <NEW_LINE> globals = None <NEW_LINE> def parse(self, source, ast, rule_name): <NEW_LINE> <INDENT> func_name = '%sRule' % rule_name <NEW_LINE> node.next_is(ast, ast) <NEW_LINE> dsl_parser.Parsing.oBaseParser.parsedStream(source) <NEW_LINE> if not hasattr(self, func_name): <NEW_LINE> <INDENT> raise Exception("First rule doesn't exist : %s" % func_name) <NEW_LINE> <DEDENT> result = getattr(self, func_name)(ast) <NEW_LINE> if not result: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> dsl_parser.Parsing.oBaseParser.readWs() <NEW_LINE> return dsl_parser.Parsing.oBaseParser.readEOF() | Base class for all grammars.
This class turns any class A that inherits from it into a grammar.
Given the description of the grammar as a parameter, it will add
everything that is needed for A to parse it. | 6259903d07f4c71912bb0660
class Word(): <NEW_LINE> <INDENT> def __init__(self,w,b_links=None,f_links=None): <NEW_LINE> <INDENT> self.w = w <NEW_LINE> self.bkwrd_links = b_links if b_links else set([]) <NEW_LINE> self.frwrd_links = f_links if f_links else set([]) <NEW_LINE> <DEDENT> def getw(self): <NEW_LINE> <INDENT> return self.w <NEW_LINE> <DEDENT> def setw(self,w): <NEW_LINE> <INDENT> self.w = w <NEW_LINE> <DEDENT> def getbkwrd_links(self): <NEW_LINE> <INDENT> return self.bkwrd_links <NEW_LINE> <DEDENT> def setbkwrd_links(self,b_links): <NEW_LINE> <INDENT> self.bkwrd_links = b_links <NEW_LINE> <DEDENT> def getfrwrd_links(self): <NEW_LINE> <INDENT> return self.frwrd_links <NEW_LINE> <DEDENT> def setfrwrd_links(self,f_links): <NEW_LINE> <INDENT> self.frwrd_links = f_links <NEW_LINE> <DEDENT> def addtobkwrd_links(self, links): <NEW_LINE> <INDENT> self.bkwrd_links.update(links) <NEW_LINE> <DEDENT> def addtofrwrd_links(self, links): <NEW_LINE> <INDENT> self.frwrd_links.update(links) | Word-Entity
Attributes
--
w : word itself
bkwrd_links : words which have this word as their relational word.
frwrd_links : words which are relational to this word. | 6259903d287bf620b6272e19
class UniqueClient(formencode.FancyValidator): <NEW_LINE> <INDENT> messages = { 'client_taken': 'Client already taken', } <NEW_LINE> def validate_python(self, value, state): <NEW_LINE> <INDENT> if state is not None and hasattr(state, 'session'): <NEW_LINE> <INDENT> clients =[] <NEW_LINE> cli = state.session.query(Clients).all() <NEW_LINE> for index in range(len(cli)): <NEW_LINE> <INDENT> clients.append(cli[index].name.lower()) <NEW_LINE> <DEDENT> if value.lower() in clients: <NEW_LINE> <INDENT> raise formencode.Invalid(self.message("client_taken", state), value, state) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("state object needs session attribute", value, state) | Validate that the value is a unique client Name (i.e. the client
does not already exist in the database).
Requires an object to be passed in as ``state`` that contains a
``session`` attribute pointing to a SQLAlchemy Session object.
The validator uses the Session object to access the database. | 6259903d24f1403a926861e4 |
@dataclass <NEW_LINE> class LongformerMaskedLMOutput(ModelOutput): <NEW_LINE> <INDENT> loss: Optional[torch.FloatTensor] = None <NEW_LINE> logits: torch.FloatTensor = None <NEW_LINE> hidden_states: Optional[Tuple[torch.FloatTensor]] = None <NEW_LINE> attentions: Optional[Tuple[torch.FloatTensor]] = None <NEW_LINE> global_attentions: Optional[Tuple[torch.FloatTensor]] = None | Base class for masked language models outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked language modeling (MLM) loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence. | 6259903d8e05c05ec3f6f772 |
class mfaddoutsidefile(Package): <NEW_LINE> <INDENT> def __init__(self, model, name, extension, unitnumber): <NEW_LINE> <INDENT> Package.__init__( self, model, extension, name, unitnumber, allowDuplicates=True ) <NEW_LINE> self.parent.add_package(self) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return "Outside Package class" <NEW_LINE> <DEDENT> def write_file(self): <NEW_LINE> <INDENT> pass | Add a file for which you have a MODFLOW input file | 6259903d16aa5153ce40171b |
class APIStub(object): <NEW_LINE> <INDENT> def __init__(self, channel): <NEW_LINE> <INDENT> self.Listen = channel.unary_stream( '/proxy.API/Listen', request_serializer=python__pachyderm_dot_proto_dot_v2_dot_proxy_dot_proxy__pb2.ListenRequest.SerializeToString, response_deserializer=python__pachyderm_dot_proto_dot_v2_dot_proxy_dot_proxy__pb2.ListenResponse.FromString, ) | Missing associated documentation comment in .proto file. | 6259903d91af0d3eaad3b064 |
class CrmHome(APIView): <NEW_LINE> <INDENT> view_name = 'crm_home' <NEW_LINE> template_name = 'crm/index.html' <NEW_LINE> @method_decorator(login_required) <NEW_LINE> def get(self, request): <NEW_LINE> <INDENT> context = dict() <NEW_LINE> try: <NEW_LINE> <INDENT> print(1 / 0) <NEW_LINE> <DEDENT> except Exception as error: <NEW_LINE> <INDENT> logger.info(error) <NEW_LINE> <DEDENT> context['user'] = request.xuser <NEW_LINE> return render(request, self.template_name, context) | Admin home page | 6259903d96565a6dacd2d8a2
class SACauchy(SimulatedAnnealingBase): <NEW_LINE> <INDENT> def __init__(self, func,lb,ub, x0, T_max=100, T_min=1e-7, L=300, max_stay_counter=150, **kwargs): <NEW_LINE> <INDENT> super().__init__(func,lb,ub, x0, T_max, T_min, L, max_stay_counter, **kwargs) <NEW_LINE> self.learn_rate = kwargs.get('m', 0.5) <NEW_LINE> <DEDENT> def get_new_x(self, x): <NEW_LINE> <INDENT> u = np.random.uniform(-np.pi / 2, np.pi / 2, size=self.n_dims) <NEW_LINE> xc = self.learn_rate * self.T * np.tan(u) <NEW_LINE> x_new = x + xc <NEW_LINE> x_new[np.where(x_new<self.lb)]=self.lb[np.where(x_new<self.lb)] <NEW_LINE> x_new[np.where(x_new > self.ub)] = self.ub[np.where(x_new > self.ub)] <NEW_LINE> return x_new <NEW_LINE> <DEDENT> def cool_down(self): <NEW_LINE> <INDENT> self.T = self.T_max / (1 + self.iter_cycle) | u ~ Uniform(-pi/2, pi/2, size=d)
xc = learn_rate * T * tan(u)
x_new = x_old + xc
T_new = T0 / (1 + k) | 6259903d76d4e153a661db8b |
@register('role') <NEW_LINE> class RoleCheck(Check): <NEW_LINE> <INDENT> def __call__(self, target, creds, enforcer, current_rule=None): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> match = self.match % target <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> if 'roles' in creds: <NEW_LINE> <INDENT> return match.lower() in [x.lower() for x in creds['roles']] <NEW_LINE> <DEDENT> return False | Check that there is a matching role in the ``creds`` dict. | 6259903d15baa723494631c0 |
class VdtValueError(ValidateError): <NEW_LINE> <INDENT> def __init__(self, value): <NEW_LINE> <INDENT> ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,)) | The value supplied was of the correct type, but was not an allowed value. | 6259903d0a366e3fb87ddc14 |
class Conference(ndb.Model): <NEW_LINE> <INDENT> name = ndb.StringProperty(required=True) <NEW_LINE> description = ndb.StringProperty() <NEW_LINE> organizerUserId = ndb.StringProperty() <NEW_LINE> topics = ndb.StringProperty(repeated=True) <NEW_LINE> city = ndb.StringProperty() <NEW_LINE> startDate = ndb.DateProperty() <NEW_LINE> month = ndb.IntegerProperty() <NEW_LINE> endDate = ndb.DateProperty() <NEW_LINE> maxAttendees = ndb.IntegerProperty() <NEW_LINE> seatsAvailable = ndb.IntegerProperty() | Conference Object | 6259903db57a9660fecd2caa
class ListBands(Command): <NEW_LINE> <INDENT> command_name = 'list_posts' <NEW_LINE> option_list = ( Option('--title', '-t', dest='title'), ) <NEW_LINE> def run(self, title=None): <NEW_LINE> <INDENT> bands = Band.objects <NEW_LINE> if title: <NEW_LINE> <INDENT> bands = bands(title=title) <NEW_LINE> <DEDENT> for band in bands: <NEW_LINE> <INDENT> print(band) | prints a list of bands | 6259903d23849d37ff8522e9 |
class event_type(models.Model): <NEW_LINE> <INDENT> _name = 'event.type' <NEW_LINE> _description = 'Event Type' <NEW_LINE> name = fields.Char(string='Event Type', required=True) <NEW_LINE> default_reply_to = fields.Char(string='Default Reply-To', help="The email address of the organizer which is put in the 'Reply-To' of all emails sent automatically at event or registrations confirmation. You can also put your email address of your mail gateway if you use one.") <NEW_LINE> default_email_event = fields.Many2one('email.template', string='Event Confirmation Email', help="It will select this default confirmation event mail value when you choose this event") <NEW_LINE> default_email_registration = fields.Many2one('email.template', string='Registration Confirmation Email', help="It will select this default confirmation registration mail value when you choose this event") <NEW_LINE> default_registration_min = fields.Integer(string='Default Minimum Registration', default=0, help="It will select this default minimum value when you choose this event") <NEW_LINE> default_registration_max = fields.Integer(string='Default Maximum Registration', default=0, help="It will select this default maximum value when you choose this event") | Event Type | 6259903d71ff763f4b5e89cc |
class EnvironmentBase(object): <NEW_LINE> <INDENT> @property <NEW_LINE> def name(self): <NEW_LINE> <INDENT> raise NotImplementedError() <NEW_LINE> <DEDENT> def get_executable(self): <NEW_LINE> <INDENT> raise NotImplementedError() | Base class for all environments | 6259903d8c3a8732951f7787 |
class _DesiredFunctionFound(BaseException): <NEW_LINE> <INDENT> pass | Exception to raise when expected function is found. | 6259903d63f4b57ef008668c |
class OnUpdateStatic(_OnUpdate): <NEW_LINE> <INDENT> update_type = 'static file' <NEW_LINE> def sync_pickup_file_in_ram(self, ctx): <NEW_LINE> <INDENT> self.server.static_config.read_file(ctx.file_path, ctx.file_name) | Updates a static resource in memory and file system.
| 6259903d8e05c05ec3f6f773 |
class Start: <NEW_LINE> <INDENT> def __get__(self, instance: Optional[CidLine], owner: Type[CidLine]) -> int: <NEW_LINE> <INDENT> return START_COLUMN.get(owner.__name__, 27) | The starting position for parsing a CID file line with a prefix. | 6259903d94891a1f408ba00f |
class BooleanValueField(Field): <NEW_LINE> <INDENT> widget = checkbox_button <NEW_LINE> def __init__(self, label=None, validators=None, **kwargs): <NEW_LINE> <INDENT> super(BooleanValueField, self).__init__(label, validators, **kwargs) <NEW_LINE> <DEDENT> def process_data(self, value): <NEW_LINE> <INDENT> self.data = bool(value) <NEW_LINE> <DEDENT> def process_formdata(self, valuelist): <NEW_LINE> <INDENT> self.data = valuelist[0] == u"1" <NEW_LINE> <DEDENT> def _value(self): <NEW_LINE> <INDENT> if self.raw_data: <NEW_LINE> <INDENT> return unicode(self.raw_data[0]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return u'y' | Represents an checkbox button | 6259903d15baa723494631c2 |
class Solution: <NEW_LINE> <INDENT> def numDecodings(self, s): <NEW_LINE> <INDENT> if s[0] == '0': <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> n = len(s) <NEW_LINE> DP = [0] * n <NEW_LINE> DP[0] = 1 <NEW_LINE> for i in range(1, n): <NEW_LINE> <INDENT> if s[i] != '0': <NEW_LINE> <INDENT> DP[i] = DP[i-1] <NEW_LINE> <DEDENT> if i<n and 10<=int(s[i-1:i+1])<=26: <NEW_LINE> <INDENT> if i == 1: <NEW_LINE> <INDENT> DP[i] += 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> DP[i] += DP[i-2] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return DP[n-1] | MY CODE VERSION
Thought:
Dynamic Programming template:
State: DP[i] - number of ways to interpret a string at s[i]
Transition:
- DP[i] = DP[i-1] ----> s[i] can be decoded, but s[i-1]+s[i] cannot be decoded
- DP[i] = DP[i-2] ----> s[i] = 0, but s[i-1]+s[i] together can be decoded
- DP[i] = DP[i-1] + DP[i-2] ----> s[i] can be decoded, and s[i-1]+s[i] can also be decoded
- DP[i] = 0 ----> s[i] = 0, and s[i-1]+s[i] together cannot be decoded
Initial states: DP[0] = 1
Complexity:
Time: O(n)
Space: O(n) - space optimization available | 6259903dd99f1b3c44d068d6 |
class ConvCnstrMODMaskDcpl_IterSM(ConvCnstrMODMaskDcplBase): <NEW_LINE> <INDENT> class Options(ConvCnstrMODMaskDcplBase.Options): <NEW_LINE> <INDENT> defaults = copy.deepcopy(ConvCnstrMODMaskDcplBase.Options.defaults) <NEW_LINE> def __init__(self, opt=None): <NEW_LINE> <INDENT> if opt is None: <NEW_LINE> <INDENT> opt = {} <NEW_LINE> <DEDENT> ConvCnstrMODMaskDcplBase.Options.__init__(self, opt) <NEW_LINE> <DEDENT> <DEDENT> def __init__(self, Z, S, W, dsz, opt=None, dimK=1, dimN=2): <NEW_LINE> <INDENT> if opt is None: <NEW_LINE> <INDENT> opt = ConvCnstrMODMaskDcpl_IterSM.Options() <NEW_LINE> <DEDENT> super(ConvCnstrMODMaskDcpl_IterSM, self).__init__(Z, S, W, dsz, opt, dimK, dimN) <NEW_LINE> <DEDENT> def xstep(self): <NEW_LINE> <INDENT> self.YU[:] = self.Y - self.U <NEW_LINE> self.block_sep0(self.YU)[:] += self.S <NEW_LINE> YUf = sl.rfftn(self.YU, None, self.cri.axisN) <NEW_LINE> b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf), axis=self.cri.axisK) + self.block_sep1(YUf) <NEW_LINE> self.Xf[:] = sl.solvemdbi_ism(self.Zf, 1.0, b, self.cri.axisM, self.cri.axisK) <NEW_LINE> self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) <NEW_LINE> self.xstep_check(b) | ADMM algorithm for Convolutional Constrained MOD with Mask Decoupling
:cite:`heide-2015-fast` with the :math:`\mathbf{x}` step solved via
iterated application of the Sherman-Morrison equation
:cite:`wohlberg-2016-efficient`.
|
.. inheritance-diagram:: ConvCnstrMODMaskDcpl_IterSM
:parts: 2
|
Multi-channel signals/images are supported
:cite:`wohlberg-2016-convolutional`. See
:class:`.ConvCnstrMODMaskDcplBase` for interface details. | 6259903db57a9660fecd2cac |
class AsyncBatchAnnotateImagesRequest(proto.Message): <NEW_LINE> <INDENT> requests = proto.RepeatedField( proto.MESSAGE, number=1, message="AnnotateImageRequest", ) <NEW_LINE> output_config = proto.Field(proto.MESSAGE, number=2, message="OutputConfig",) <NEW_LINE> parent = proto.Field(proto.STRING, number=4,) | Request for async image annotation for a list of images.
Attributes:
requests (Sequence[google.cloud.vision_v1.types.AnnotateImageRequest]):
Required. Individual image annotation
requests for this batch.
output_config (google.cloud.vision_v1.types.OutputConfig):
Required. The desired output location and
metadata (e.g. format).
parent (str):
Optional. Target project and location to make a call.
Format: ``projects/{project-id}/locations/{location-id}``.
If no parent is specified, a region will be chosen
automatically.
Supported location-ids: ``us``: USA country only, ``asia``:
East asia areas, like Japan, Taiwan, ``eu``: The European
Union.
Example: ``projects/project-A/locations/eu``. | 6259903db830903b9686ed92 |
class HashSchedulerTest(functional.FunctionalTest): <NEW_LINE> <INDENT> messages = 100 <NEW_LINE> def configure_tempesta(self): <NEW_LINE> <INDENT> functional.FunctionalTest.configure_tempesta(self) <NEW_LINE> for sg in self.tempesta.config.server_groups: <NEW_LINE> <INDENT> sg.sched = 'hash' <NEW_LINE> <DEDENT> <DEDENT> def create_tester(self): <NEW_LINE> <INDENT> self.tester = HashTester(self.client, self.servers) <NEW_LINE> <DEDENT> def chains(self): <NEW_LINE> <INDENT> chain = chains.base() <NEW_LINE> return [chain for _ in range(self.messages)] <NEW_LINE> <DEDENT> def test_hash_scheduler(self): <NEW_LINE> <INDENT> self.generic_test_routine('cache 0;\n', self.chains()) | Hash scheduler functional test, check that the same server connection
is used for the same resource. | 6259903d21a7993f00c6719f |
class Sum(Aggregation): <NEW_LINE> <INDENT> def __init__(self, column_name): <NEW_LINE> <INDENT> self._column_name = column_name <NEW_LINE> <DEDENT> def get_aggregate_data_type(self, table): <NEW_LINE> <INDENT> return Number() <NEW_LINE> <DEDENT> def validate(self, table): <NEW_LINE> <INDENT> column = table.columns[self._column_name] <NEW_LINE> if not isinstance(column.data_type, Number): <NEW_LINE> <INDENT> raise DataTypeError('Sum can only be applied to columns containing Number data.') <NEW_LINE> <DEDENT> <DEDENT> def run(self, table): <NEW_LINE> <INDENT> column = table.columns[self._column_name] <NEW_LINE> return sum(column.values_without_nulls()) | Calculate the sum of a column containing :class:`.Number` data. | 6259903da4f1c619b294f7a0 |
class RpMusicDefinitionsJson(_UniqueMcFileJsonMulti[ResourcePack]): <NEW_LINE> <INDENT> pack_path: ClassVar[str] = 'sounds/music_definitions.json' <NEW_LINE> def keys(self) -> Tuple[str, ...]: <NEW_LINE> <INDENT> result: List[str] = [] <NEW_LINE> if isinstance(self.json.data, dict): <NEW_LINE> <INDENT> for key in self.json.data.keys(): <NEW_LINE> <INDENT> if isinstance(key, str): <NEW_LINE> <INDENT> result.append(key) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return tuple(result) <NEW_LINE> <DEDENT> def __getitem__(self, key: str) -> JsonWalker: <NEW_LINE> <INDENT> result = self.json / key <NEW_LINE> if isinstance(result.data, Exception): <NEW_LINE> <INDENT> raise KeyError(key) <NEW_LINE> <DEDENT> return result | music_definitions.json file. | 6259903dd53ae8145f91968e |
class Solution: <NEW_LINE> <INDENT> def fizzBuzz(self, n): <NEW_LINE> <INDENT> ret = [] <NEW_LINE> for i in range(1, n+1): <NEW_LINE> <INDENT> if i % 15 == 0: <NEW_LINE> <INDENT> ret.append("fizz buzz") <NEW_LINE> <DEDENT> elif i % 5 == 0: <NEW_LINE> <INDENT> ret.append("buzz") <NEW_LINE> <DEDENT> elif i % 3 == 0: <NEW_LINE> <INDENT> ret.append("fizz") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ret.append(str(i)) <NEW_LINE> <DEDENT> <DEDENT> return ret | @param n: An integer as description
@return: A list of strings.
For example, if n = 7, your code should return
["1", "2", "fizz", "4", "buzz", "fizz", "7"] | 6259903d82261d6c527307dd |
class ProgressFrame(Frame): <NEW_LINE> <INDENT> def __init__(self, parent, labels, set_frame): <NEW_LINE> <INDENT> super().__init__(parent) <NEW_LINE> self.set_frame = set_frame <NEW_LINE> self.parent = parent <NEW_LINE> self.buttons = [ Button( self, text=label, width=15, bg=REG_COLOR, activebackground=REG_COLOR, command=f(self, i), ) for i, label in enumerate(labels) ] <NEW_LINE> self.curr = -1 <NEW_LINE> self.set(0) <NEW_LINE> self.build() <NEW_LINE> <DEDENT> def set(self, i, parent_update=True): <NEW_LINE> <INDENT> if self.curr == i: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> update = True <NEW_LINE> if parent_update: <NEW_LINE> <INDENT> update = self.set_frame(i) <NEW_LINE> <DEDENT> if update: <NEW_LINE> <INDENT> self.buttons[self.curr]["bg"] = REG_COLOR <NEW_LINE> self.buttons[i]["bg"] = CURR_COLOR <NEW_LINE> self.curr = i <NEW_LINE> <DEDENT> <DEDENT> def build(self): <NEW_LINE> <INDENT> for button in self.buttons: <NEW_LINE> <INDENT> button.pack(side=LEFT) | Mimic 'tabs' in a browser window
and control which edit window is showing | 6259903d16aa5153ce401720
class DateTimeArg(DateArg): <NEW_LINE> <INDENT> def externalize(self, value: Optional[int]) -> str: <NEW_LINE> <INDENT> return formatTime(value) | Argument whose value is a date and a time.
| 6259903d50485f2cf55dc1b6 |
class Pyjo_Content_Single(Pyjo.Content.object, Pyjo.String.Mixin.object): <NEW_LINE> <INDENT> def __init__(self, **kwargs): <NEW_LINE> <INDENT> super(Pyjo_Content_Single, self).__init__(**kwargs) <NEW_LINE> self.asset = notnone(kwargs.get('asset'), lambda: Pyjo.Asset.Memory.new(auto_upgrade=True)) <NEW_LINE> self.auto_upgrade = kwargs.get('auto_upgrade', True) <NEW_LINE> def read_cb(content, chunk): <NEW_LINE> <INDENT> content.set(asset=content.asset.add_chunk(chunk)) <NEW_LINE> <DEDENT> self._on_read = self.on(read_cb, 'read') <NEW_LINE> <DEDENT> def body_contains(self, chunk): <NEW_LINE> <INDENT> return self.asset.contains(chunk) >= 0 <NEW_LINE> <DEDENT> @property <NEW_LINE> def body_size(self): <NEW_LINE> <INDENT> if self._dynamic: <NEW_LINE> <INDENT> return convert(self.headers.content_length, int, 0) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self.asset.size <NEW_LINE> <DEDENT> <DEDENT> def clone(self): <NEW_LINE> <INDENT> clone = super(Pyjo_Content_Single, self).clone() <NEW_LINE> if clone is not None: <NEW_LINE> <INDENT> clone.asset = self.asset <NEW_LINE> return clone <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> <DEDENT> def get_body_chunk(self, offset): <NEW_LINE> <INDENT> if self._dynamic: <NEW_LINE> <INDENT> return self.generate_body_chunk(offset) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self.asset.get_chunk(offset) <NEW_LINE> <DEDENT> <DEDENT> def parse(self, chunk): <NEW_LINE> <INDENT> self._parse_until_body(chunk) <NEW_LINE> if not self.auto_upgrade or self.boundary is None: <NEW_LINE> <INDENT> return super(Pyjo_Content_Single, self).parse() <NEW_LINE> <DEDENT> self.unsubscribe('read', self._on_read) <NEW_LINE> multi = Pyjo.Content.MultiPart.new(**vars(self)) <NEW_LINE> self.emit('upgrade', multi) <NEW_LINE> return multi.parse() | :mod:`Pyjo.Content.Single` inherits all attributes and methods from
:mod:`Pyjo.Content` and implements the following new ones. | 6259903dd10714528d69efa5 |
class quasilinear_forward_euler(quasilinear_time_stepper): <NEW_LINE> <INDENT> def _step(self): <NEW_LINE> <INDENT> self.q = self.q + self.dt*self.A(self.t, self.q).dot(self.q) | The matrix A may be a function of time and state | 6259903db57a9660fecd2cae |
class Api: <NEW_LINE> <INDENT> def __init__(self, app=None, auth=None): <NEW_LINE> <INDENT> if app: <NEW_LINE> <INDENT> self.init_app(app, auth) <NEW_LINE> <DEDENT> <DEDENT> def init_app(self, app, auth=None): <NEW_LINE> <INDENT> self.app = app <NEW_LINE> self.auth = auth <NEW_LINE> <DEDENT> def public(self, view): <NEW_LINE> <INDENT> view.public = True <NEW_LINE> return view <NEW_LINE> <DEDENT> def grant(self, *roles): <NEW_LINE> <INDENT> def view(fn): <NEW_LINE> <INDENT> fn.roles = roles <NEW_LINE> return fn <NEW_LINE> <DEDENT> return view <NEW_LINE> <DEDENT> def resource(self, prefix): <NEW_LINE> <INDENT> def wrapper(cls): <NEW_LINE> <INDENT> clsinit = getattr(cls, '__init__', lambda self: None) <NEW_LINE> cls = type(cls.__name__, (Resource,), dict(cls.__dict__)) <NEW_LINE> aliases = getattr(cls, 'aliases', None) <NEW_LINE> if isinstance(aliases, dict) and len(aliases) > 0: <NEW_LINE> <INDENT> cls.preparer = FieldsPreparer(fields=aliases) <NEW_LINE> <DEDENT> api = self <NEW_LINE> def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> super(cls, self).__init__(api) <NEW_LINE> clsinit(self, *args, **kwargs) <NEW_LINE> <DEDENT> cls.__init__ = __init__ <NEW_LINE> cls.add_url_rules(self.app, prefix) <NEW_LINE> return cls <NEW_LINE> <DEDENT> return wrapper | Provides an abstraction from the rest API framework being used | 6259903d07f4c71912bb0665 |
class change_dual_unit(models.TransientModel): <NEW_LINE> <INDENT> _name = 'change.dual.unit' <NEW_LINE> _description = 'Modification of the product dual unit' <NEW_LINE> _rec_name = 'product_id' <NEW_LINE> @api.model <NEW_LINE> def _dual_type_get(self): <NEW_LINE> <INDENT> return [ ('fixed', _('Fixed')), ('variable', _('Variable')), ] <NEW_LINE> <DEDENT> dual_unit = fields.Boolean(string='Dual unit', default=False) <NEW_LINE> sec_uom_id = fields.Many2one('product.uom', string='Second unit of measure', required=False) <NEW_LINE> product_id = fields.Many2one('product.product', string='Product', required=False, ondelete='cascade') <NEW_LINE> dual_unit_type = fields.Selection('_dual_type_get', string='Dual unit type', default='fixed') <NEW_LINE> @api.model <NEW_LINE> def default_get(self, fields_list): <NEW_LINE> <INDENT> res = super(change_dual_unit, self).default_get(fields_list=fields_list) <NEW_LINE> context = self.env.context <NEW_LINE> if context.get('active_model') == 'product.product' and context.get('active_id'): <NEW_LINE> <INDENT> product = self.env['product.product'].browse(context['active_id']) <NEW_LINE> res['dual_unit'] = product.dual_unit <NEW_LINE> res['dual_unit_type'] = product.dual_unit_type <NEW_LINE> res['product_id'] = product.id <NEW_LINE> res['sec_uom_id'] = product.sec_uom_id.id <NEW_LINE> <DEDENT> return res <NEW_LINE> <DEDENT> @api.multi <NEW_LINE> def change_dual_unit(self): <NEW_LINE> <INDENT> return self.product_id.with_context(force_change_dual_unit=True).write({ 'dual_unit': self.dual_unit, 'dual_unit_type': self.dual_unit_type, 'sec_uom_id': self.sec_uom_id.id, }) | Wizard to change the product dual unit | 6259903dec188e330fdf9acd |
class Sys(object): <NEW_LINE> <INDENT> def Install(self): <NEW_LINE> <INDENT> print(' Running install.') <NEW_LINE> Utility.Utilities.MakeDir(HOME_BIN_DIR, 'pyhouse') <NEW_LINE> Utility.Utilities.MakeDir(CONFIG_DIR, 'pyhouse') <NEW_LINE> Utility.Utilities.MakeDir(LOG_DIR, 'pyhouse') <NEW_LINE> User._copy_bin_files() | This is a director that will run various installation sections.
| 6259903da79ad1619776b2b3 |
class TestGenderListResponse(unittest.TestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def tearDown(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def testGenderListResponse(self): <NEW_LINE> <INDENT> model = kinow_client.models.gender_list_response.GenderListResponse() | GenderListResponse unit test stubs | 6259903d379a373c97d9a25d |
class SymbolNotFound(Exception): <NEW_LINE> <INDENT> pass | Raised when symbol cannot be found in table | 6259903de76e3b2f99fd9c40 |
class Adaline(Perceptron): <NEW_LINE> <INDENT> def FitInternal(self, X, y): <NEW_LINE> <INDENT> nsamples = X.shape[0] <NEW_LINE> nfeatures = X.shape[1] <NEW_LINE> self.cost_ = [] <NEW_LINE> self.w_ = np.random.RandomState().normal(loc = 0.0, scale = 0.01, size = 1 + nfeatures) <NEW_LINE> self.PrintModel("initial random") <NEW_LINE> for i in range(self.n_iter_): <NEW_LINE> <INDENT> print("==============next iteration======================") <NEW_LINE> output = self.Activation(X) <NEW_LINE> print("output calculated - shape is " + str(np.shape(output))) <NEW_LINE> errors = y - output <NEW_LINE> print("errors calculated - shape is " + str(np.shape(errors))) <NEW_LINE> w_del = X.T.dot(errors) <NEW_LINE> print("weight delta calculated: " + str(w_del)) <NEW_LINE> self.w_[1:] += self.eta_ * w_del <NEW_LINE> self.w_[0] += self.eta_ * errors.sum() <NEW_LINE> cost = (errors**2).sum() / 2.0 <NEW_LINE> print("sum of errors squared = " + str(cost)) <NEW_LINE> self.cost_.append(cost) <NEW_LINE> self.PrintModel("iteration " + str(i)) <NEW_LINE> self.AccuracyInternal(X, y) | Adaline is a neural network which is like a perceptron with 2 differences both
- The fitting is done with respect to the output of the activation function (real values),
not the final (binary) classification values
- In Adaline, changes to the weights depend on all samples. That is
w --sample1, sample2, ..., sample n--> w' --sample1, sample2, ..., sample n---> w'' | 6259903d73bcbd0ca4bcb4be |
class Drop(Command): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> super().__init__('Drop') <NEW_LINE> self.requires(subsystems.elevator) <NEW_LINE> <DEDENT> def execute(self): <NEW_LINE> <INDENT> subsystems.elevator.down() <NEW_LINE> <DEDENT> def end(self): <NEW_LINE> <INDENT> subsystems.elevator.hold() | Drop command | 6259903d82261d6c527307de |
class Struct(common.SourceLocation): <NEW_LINE> <INDENT> def __init__(self, file_name, line, column): <NEW_LINE> <INDENT> self.name = None <NEW_LINE> self.description = None <NEW_LINE> self.strict = True <NEW_LINE> self.chained_types = [] <NEW_LINE> self.fields = [] <NEW_LINE> super(Struct, self).__init__(file_name, line, column) | IDL struct information.
All fields are either required or have a non-None default. | 6259903d004d5f362081f8fe |
class Remote: <NEW_LINE> <INDENT> user = None <NEW_LINE> host = None <NEW_LINE> port = None <NEW_LINE> @typed <NEW_LINE> def __init__(self, user: str, host: str, port: numbers.Integral=22): <NEW_LINE> <INDENT> self.user = user <NEW_LINE> self.host = host <NEW_LINE> self.port = port <NEW_LINE> <DEDENT> def __eq__(self, other): <NEW_LINE> <INDENT> return (isinstance(other, type(self)) and self.user == other.user and self.host == other.host and self.port == other.port) <NEW_LINE> <DEDENT> def __ne__(self, other): <NEW_LINE> <INDENT> return not (self == other) <NEW_LINE> <DEDENT> def __hash__(self): <NEW_LINE> <INDENT> return hash((self.user, self.host, self.port)) <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return '{}@{}:{}'.format(self.user, self.host, self.port) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return '{0.__module__}.{0.__qualname__}({1!r}, {2!r}, {3!r})'.format( type(self), self.user, self.host, self.port ) | Remote node to SSH.
:param user: the username to :program:`ssh`
:type user: :class:`str`
:param host: the host to access
:type host: :class:`str`
:param port: the port number to :program:`ssh`.
the default is 22 which is the default :program:`ssh` port
:type port: :class:`numbers.Integral` | 6259903d63b5f9789fe863a1 |
class ListVpnGatewaysResult(msrest.serialization.Model): <NEW_LINE> <INDENT> _attribute_map = { 'value': {'key': 'value', 'type': '[VpnGateway]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } <NEW_LINE> def __init__( self, **kwargs ): <NEW_LINE> <INDENT> super(ListVpnGatewaysResult, self).__init__(**kwargs) <NEW_LINE> self.value = kwargs.get('value', None) <NEW_LINE> self.next_link = kwargs.get('next_link', None) | Result of the request to list VpnGateways. It contains a list of VpnGateways and a URL nextLink to get the next set of results.
:param value: List of VpnGateways.
:type value: list[~azure.mgmt.network.v2020_11_01.models.VpnGateway]
:param next_link: URL to get the next set of operation list results if there are any.
:type next_link: str | 6259903dd164cc61758221ab |
class PolylineCommand(BaseCommand): <NEW_LINE> <INDENT> def __init__(self, document): <NEW_LINE> <INDENT> BaseCommand.__init__(self, document) <NEW_LINE> self.exception=[ExcPoint] <NEW_LINE> self.defaultValue=[None] <NEW_LINE> self.message=["Give Me A Point: "] <NEW_LINE> self.raiseStop=False <NEW_LINE> self.automaticApply=False <NEW_LINE> <DEDENT> def __setitem__(self, key, value): <NEW_LINE> <INDENT> value=self.translateCmdValue(value) <NEW_LINE> if isinstance(value, Point): <NEW_LINE> <INDENT> self.value.append(value) <NEW_LINE> self.exception.append(ExcPoint) <NEW_LINE> self.message.append("Give Me A Point") <NEW_LINE> self.defaultValue.append(None) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.raiseStop=True <NEW_LINE> <DEDENT> <DEDENT> def applyCommand(self): <NEW_LINE> <INDENT> i=0 <NEW_LINE> args={} <NEW_LINE> for k in self.value: <NEW_LINE> <INDENT> args["POLYLINE_%s"%str(i)]=k <NEW_LINE> i+=1 <NEW_LINE> <DEDENT> pline=Polyline(args) <NEW_LINE> self.document.saveEntity(pline) | this class represents the polyline command | 6259903d07f4c71912bb0667 |
class AverageNumberOfIndependentVoicesFeature(featuresModule.FeatureExtractor): <NEW_LINE> <INDENT> id = 'T2' <NEW_LINE> def __init__(self, dataOrStream=None, *arguments, **keywords): <NEW_LINE> <INDENT> super().__init__(dataOrStream=dataOrStream, *arguments, **keywords) <NEW_LINE> self.name = 'Average Number of Independent Voices' <NEW_LINE> self.description = ('Average number of different channels in which notes have ' 'sounded simultaneously. Rests are not included in this ' 'calculation. Here, Parts are treated as voices') <NEW_LINE> self.isSequential = True <NEW_LINE> self.dimensions = 1 <NEW_LINE> <DEDENT> def process(self): <NEW_LINE> <INDENT> found = [] <NEW_LINE> for c in self.data['chordify.flat.getElementsByClass(Chord)']: <NEW_LINE> <INDENT> g = base.Groups() <NEW_LINE> for p in c.pitches: <NEW_LINE> <INDENT> for gSub in p.groups: <NEW_LINE> <INDENT> g.append(gSub) <NEW_LINE> <DEDENT> <DEDENT> found.append(len(g)) <NEW_LINE> <DEDENT> if not found: <NEW_LINE> <INDENT> raise JSymbolicFeatureException('input lacks notes') <NEW_LINE> <DEDENT> self.feature.vector[0] = sum(found) / len(found) | Average number of different channels in which notes have sounded simultaneously.
Rests are not included in this calculation. Here, Parts are treated as voices
>>> s = corpus.parse('handel/rinaldo/lascia_chio_pianga')
>>> fe = features.jSymbolic.AverageNumberOfIndependentVoicesFeature(s)
>>> f = fe.extract()
>>> f.vector
[1.528...]
>>> s = corpus.parse('bwv66.6')
>>> fe = features.jSymbolic.AverageNumberOfIndependentVoicesFeature(s)
>>> f = fe.extract()
>>> f.vector
[3.90...] | 6259903d21a7993f00c671a3 |
class RawModel: <NEW_LINE> <INDENT> _haar_detector = None <NEW_LINE> _hog_detector = None <NEW_LINE> _cnn_detector = None <NEW_LINE> _shape_predictor = None <NEW_LINE> _shape_predictor_small = None <NEW_LINE> @classmethod <NEW_LINE> def haar_detector(cls) -> cv2.CascadeClassifier: <NEW_LINE> <INDENT> if cls._haar_detector is None: <NEW_LINE> <INDENT> cls._haar_detector = cv2.CascadeClassifier(Paths.HAAR_FACE_DETECTOR_MODEL) <NEW_LINE> <DEDENT> return cls._haar_detector <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def hog_detector(cls) -> dlib.fhog_object_detector: <NEW_LINE> <INDENT> if cls._hog_detector is None: <NEW_LINE> <INDENT> cls._hog_detector = dlib.get_frontal_face_detector() <NEW_LINE> <DEDENT> return cls._hog_detector <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def cnn_detector(cls) -> dlib.cnn_face_detection_model_v1: <NEW_LINE> <INDENT> if cls._cnn_detector is None: <NEW_LINE> <INDENT> cls._cnn_detector = dlib.cnn_face_detection_model_v1(Paths.CNN_FACE_DETECTOR_MODEL) <NEW_LINE> <DEDENT> return cls._cnn_detector <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def shape_predictor(cls) -> dlib.shape_predictor: <NEW_LINE> <INDENT> if cls._shape_predictor is None: <NEW_LINE> <INDENT> cls._shape_predictor = dlib.shape_predictor(Paths.FACE_LANDMARKS_MODEL) <NEW_LINE> <DEDENT> return cls._shape_predictor <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def shape_predictor_small(cls) -> dlib.shape_predictor: <NEW_LINE> <INDENT> if cls._shape_predictor_small is None: <NEW_LINE> <INDENT> cls._shape_predictor_small = dlib.shape_predictor(Paths.FACE_LANDMARKS_SMALL_MODEL) <NEW_LINE> <DEDENT> return cls._shape_predictor_small | Raw Dlib and OpenCV detection models. | 6259903d30c21e258be99a42 |
class DescribeL7HealthConfigResponse(AbstractModel): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.HealthConfig = None <NEW_LINE> self.RequestId = None <NEW_LINE> <DEDENT> def _deserialize(self, params): <NEW_LINE> <INDENT> if params.get("HealthConfig") is not None: <NEW_LINE> <INDENT> self.HealthConfig = [] <NEW_LINE> for item in params.get("HealthConfig"): <NEW_LINE> <INDENT> obj = L7HealthConfig() <NEW_LINE> obj._deserialize(item) <NEW_LINE> self.HealthConfig.append(obj) <NEW_LINE> <DEDENT> <DEDENT> self.RequestId = params.get("RequestId") | DescribeL7HealthConfig response structure
| 6259903d71ff763f4b5e89d2 |
class WidgetSettings(models.Model): <NEW_LINE> <INDENT> widgets_area = models.ForeignKey(WidgetsArea) <NEW_LINE> column = models.SmallIntegerField() <NEW_LINE> order = models.SmallIntegerField() <NEW_LINE> widget_class = models.CharField(max_length=30) <NEW_LINE> def __unicode__(self): <NEW_LINE> <INDENT> return "on widgets area '%s'" % self.widgets_area.name <NEW_LINE> <DEDENT> def save(self): <NEW_LINE> <INDENT> if not self.pk: <NEW_LINE> <INDENT> self.order = self.widgets_area.num_widgets(self.column) + 1 <NEW_LINE> <DEDENT> super(WidgetSettings, self).save() <NEW_LINE> <DEDENT> def get_widget(self): <NEW_LINE> <INDENT> from netadmin.plugins.core import load_plugins <NEW_LINE> plugins = load_plugins() <NEW_LINE> for plugin in plugins: <NEW_LINE> <INDENT> widgets = [widget() for widget in plugin.widgets()] <NEW_LINE> for widget in widgets: <NEW_LINE> <INDENT> if widget.__class__.__name__ == self.widget_class: <NEW_LINE> <INDENT> return widget <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return None <NEW_LINE> <DEDENT> def get_user(self): <NEW_LINE> <INDENT> return self.widgets_area.user <NEW_LINE> <DEDENT> def set_user(self, user): <NEW_LINE> <INDENT> area = self.widgets_area <NEW_LINE> area.user = user <NEW_LINE> area.save() <NEW_LINE> <DEDENT> user = property(get_user, set_user) <NEW_LINE> def widget_title(self): <NEW_LINE> <INDENT> return self.get_widget().get_title(self) <NEW_LINE> <DEDENT> def move(self, new_order, new_column=None): <NEW_LINE> <INDENT> changed = False <NEW_LINE> if new_order != self.order: <NEW_LINE> <INDENT> self.order = new_order <NEW_LINE> changed = True <NEW_LINE> <DEDENT> if new_column and new_column != self.column: <NEW_LINE> <INDENT> self.column = new_column <NEW_LINE> changed = True <NEW_LINE> <DEDENT> if changed: <NEW_LINE> <INDENT> col_widgets = WidgetSettings.objects.filter(column=self.column, widgets_area=self.widgets_area, order__gte=self.order) <NEW_LINE> for widget in col_widgets: <NEW_LINE> <INDENT> if widget != self: <NEW_LINE> <INDENT> widget.order += 1 <NEW_LINE> widget.save() <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return changed | Settings that specifies widget's position in selected area.
The field 'widget_class' indicates the Widget subclass defined in the plugin. | 6259903d8c3a8732951f778d
class Money: <NEW_LINE> <INDENT> def __init__(self, amount, currency): <NEW_LINE> <INDENT> self.amount = amount <NEW_LINE> self.currency = currency <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> if self.currency.symbol: <NEW_LINE> <INDENT> return f"{self.currency.symbol}{self.amount:.{self.currency.digits}f}" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return f"{self.currency.code} {self.amount:.{self.currency.digits}f}" <NEW_LINE> <DEDENT> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return f"<Money {str(self)}>" <NEW_LINE> <DEDENT> def __eq__(self, other): <NEW_LINE> <INDENT> return (type(self) == type(other) and self.amount == other.amount and self.currency == other.currency) <NEW_LINE> <DEDENT> def add(self, other): <NEW_LINE> <INDENT> if self.currency != other.currency: <NEW_LINE> <INDENT> raise DifferentCurrencyError() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return Money((self.amount + other.amount),(self.currency)) <NEW_LINE> <DEDENT> <DEDENT> def sub(self, other): <NEW_LINE> <INDENT> if self.currency != other.currency: <NEW_LINE> <INDENT> raise DifferentCurrencyError() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return Money((self.amount - other.amount), (self.currency)) <NEW_LINE> <DEDENT> <DEDENT> def mul(self, multiplier): <NEW_LINE> <INDENT> return Money((self.amount * multiplier), (self.currency)) <NEW_LINE> <DEDENT> def div(self, divisor): <NEW_LINE> <INDENT> return Money((self.amount / divisor), (self.currency)) | Represents an amount of money. Requires an amount and a currency. | 6259903d287bf620b6272e22 |
class DbLazy(DbProxy): <NEW_LINE> <INDENT> def __init__(self, config): <NEW_LINE> <INDENT> super(DbLazy, self).__init__(config) <NEW_LINE> self._backend = "lazydb" <NEW_LINE> self._db_name = self.config['storage_config'].get('cache_db_name') or 'cache_db_name' <NEW_LINE> self.db = Db(self._db_name) <NEW_LINE> self.set_value = self.db.put <NEW_LINE> self.has_value = self.db.has | Database proxy for LazyDB | 6259903d1f5feb6acb163e29 |
class TensorInfo( tfx_namedtuple.namedtuple('TensorInfo', ['dtype', 'shape', 'temporary_asset_info'])): <NEW_LINE> <INDENT> def __new__( cls: Type['TensorInfo'], dtype: tf.dtypes.DType, shape: Sequence[Optional[int]], temporary_asset_info: Optional[TemporaryAssetInfo]) -> 'TensorInfo': <NEW_LINE> <INDENT> if not isinstance(dtype, tf.DType): <NEW_LINE> <INDENT> raise TypeError('dtype must be a TensorFlow dtype, got {}'.format(dtype)) <NEW_LINE> <DEDENT> if temporary_asset_info is not None and not isinstance( temporary_asset_info, TemporaryAssetInfo): <NEW_LINE> <INDENT> raise TypeError( 'temporary_asset_info should be an instance of TemporaryAssetInfo or ' f'None, got {temporary_asset_info}') <NEW_LINE> <DEDENT> return super(TensorInfo, cls).__new__( cls, dtype=dtype, shape=shape, temporary_asset_info=temporary_asset_info) | A container for attributes of output tensors from analyzers.
Fields:
dtype: The TensorFlow dtype.
shape: The shape of the tensor.
temporary_asset_info: A named tuple containing information about the
temporary asset file to write out while tracing the TF graph. | 6259903d21bff66bcd723ea0 |
class Contracts(_ObjectWidgetBar): <NEW_LINE> <INDENT> pass | A model representing widget bar of the contract object | 6259903d26068e7796d4db7d |
class BreathDataGenerator(keras.utils.Sequence): <NEW_LINE> <INDENT> def __init__(self, directory, list_labels=['normal', 'deep', 'strong'], batch_size=32, dim=None, classes=None, shuffle=True): <NEW_LINE> <INDENT> self.directory = directory <NEW_LINE> self.list_labels = list_labels <NEW_LINE> self.dim = dim <NEW_LINE> self.__flow_from_directory(self.directory) <NEW_LINE> self.batch_size = batch_size <NEW_LINE> self.classes = len(self.list_labels) <NEW_LINE> self.shuffle = shuffle <NEW_LINE> self.on_epoch_end() <NEW_LINE> <DEDENT> def __len__(self): <NEW_LINE> <INDENT> return int(np.floor(len(self.wavs) / self.batch_size)) <NEW_LINE> <DEDENT> def __getitem__(self, index): <NEW_LINE> <INDENT> indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] <NEW_LINE> rawX = [self.wavs[k] for k in indexes] <NEW_LINE> rawY = [self.labels[k] for k in indexes] <NEW_LINE> X, Y = self.__feature_extraction(rawX, rawY) <NEW_LINE> return X, Y <NEW_LINE> <DEDENT> def __flow_from_directory(self, directory): <NEW_LINE> <INDENT> self.wavs = [] <NEW_LINE> self.labels = [] <NEW_LINE> for dir in os.listdir(directory): <NEW_LINE> <INDENT> sub_dir = os.path.join(directory, dir) <NEW_LINE> if os.path.isdir(sub_dir) and dir in self.list_labels: <NEW_LINE> <INDENT> label = self.list_labels.index(dir) <NEW_LINE> for file in os.listdir(sub_dir): <NEW_LINE> <INDENT> self.wavs.append(os.path.join(sub_dir, file)) <NEW_LINE> self.labels.append(label) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> def on_epoch_end(self): <NEW_LINE> <INDENT> self.indexes = np.arange(len(self.wavs)) <NEW_LINE> if self.shuffle == True: <NEW_LINE> <INDENT> np.random.shuffle(self.indexes) <NEW_LINE> <DEDENT> <DEDENT> def __feature_extraction(self, list_wav, list_label): <NEW_LINE> <INDENT> X = [] <NEW_LINE> Y = [] <NEW_LINE> for i in range(self.batch_size): <NEW_LINE> <INDENT> rate, data = wavfile.read(list_wav[i]) <NEW_LINE> data = np.array(data, dtype=np.float32) <NEW_LINE> data *= 1./32768 <NEW_LINE> feature = librosa.feature.mfcc(y=data, sr=rate, n_mfcc=40, fmin=0, fmax=8000, n_fft=int(16*64), hop_length=int(16*32), power=2.0) <NEW_LINE> feature = np.resize(feature, self.dim) <NEW_LINE> category_label = to_categorical(list_label[i], num_classes= len(self.list_labels) ) <NEW_LINE> X.append(feature) <NEW_LINE> Y.append(category_label) <NEW_LINE> <DEDENT> X = np.array(X, dtype=np.float32) <NEW_LINE> Y = np.array(Y, dtype=int) <NEW_LINE> return X, Y | Generates data for Keras | 6259903ddc8b845886d547ec |
class Map(collections.Mapping): <NEW_LINE> <INDENT> __slots__ = 'value', <NEW_LINE> def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> self.value = dict(*args, **kwargs) <NEW_LINE> <DEDENT> def __eq__(self, other): <NEW_LINE> <INDENT> if not (isinstance(other, collections.Mapping) and len(self.value) == len(other)): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> for k, v in self.items(): <NEW_LINE> <INDENT> if k in other and other[k] == v: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> return False <NEW_LINE> <DEDENT> return True <NEW_LINE> <DEDENT> def __ne__(self, other): <NEW_LINE> <INDENT> return not (self == other) <NEW_LINE> <DEDENT> def __iter__(self): <NEW_LINE> <INDENT> return iter(self.value) <NEW_LINE> <DEDENT> def __len__(self): <NEW_LINE> <INDENT> return len(self.value) <NEW_LINE> <DEDENT> def __getitem__(self, key): <NEW_LINE> <INDENT> return self.value[key] <NEW_LINE> <DEDENT> def __contains__(self, key): <NEW_LINE> <INDENT> return key in self.value <NEW_LINE> <DEDENT> def __reduce__(self): <NEW_LINE> <INDENT> return type(self), (self.value,) <NEW_LINE> <DEDENT> def __bool__(self): <NEW_LINE> <INDENT> return bool(self.value) <NEW_LINE> <DEDENT> __nonzero__ = __bool__ <NEW_LINE> def __hash__(self): <NEW_LINE> <INDENT> return hash(tuple(self.items())) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> if self: <NEW_LINE> <INDENT> items = sorted(self.value.items()) <NEW_LINE> format_item = '{0!r}: {1!r}'.format <NEW_LINE> args = '{' + ', '.join(format_item(*item) for item in items) + '}' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> args = '' <NEW_LINE> <DEDENT> return '{0.__module__}.{0.__name__}({1})'.format(type(self), args) | As Python standard library doesn't provide immutable :class:`dict`,
Nirum runtime itself needs to define one. | 6259903d15baa723494631c8
class SetLiveWatermarkStatusRequest(AbstractModel): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.WatermarkId = None <NEW_LINE> self.Status = None <NEW_LINE> <DEDENT> def _deserialize(self, params): <NEW_LINE> <INDENT> self.WatermarkId = params.get("WatermarkId") <NEW_LINE> self.Status = params.get("Status") | SetLiveWatermarkStatus request structure
| 6259903d23e79379d538d736 |
class writeDouble_args(object): <NEW_LINE> <INDENT> def __init__(self, _v=None,): <NEW_LINE> <INDENT> self._v = _v <NEW_LINE> <DEDENT> def read(self, iprot): <NEW_LINE> <INDENT> if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: <NEW_LINE> <INDENT> iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) <NEW_LINE> return <NEW_LINE> <DEDENT> iprot.readStructBegin() <NEW_LINE> while True: <NEW_LINE> <INDENT> (fname, ftype, fid) = iprot.readFieldBegin() <NEW_LINE> if ftype == TType.STOP: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if fid == 1: <NEW_LINE> <INDENT> if ftype == TType.DOUBLE: <NEW_LINE> <INDENT> self._v = iprot.readDouble() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> iprot.skip(ftype) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> iprot.skip(ftype) <NEW_LINE> <DEDENT> iprot.readFieldEnd() <NEW_LINE> <DEDENT> iprot.readStructEnd() <NEW_LINE> <DEDENT> def write(self, oprot): <NEW_LINE> <INDENT> if oprot._fast_encode is not None and self.thrift_spec is not None: <NEW_LINE> <INDENT> oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) <NEW_LINE> return <NEW_LINE> <DEDENT> oprot.writeStructBegin('writeDouble_args') <NEW_LINE> if self._v is not None: <NEW_LINE> <INDENT> oprot.writeFieldBegin('_v', TType.DOUBLE, 1) <NEW_LINE> oprot.writeDouble(self._v) <NEW_LINE> oprot.writeFieldEnd() <NEW_LINE> <DEDENT> oprot.writeFieldStop() <NEW_LINE> oprot.writeStructEnd() <NEW_LINE> <DEDENT> def validate(self): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] <NEW_LINE> return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) <NEW_LINE> <DEDENT> def __eq__(self, other): <NEW_LINE> <INDENT> return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ <NEW_LINE> <DEDENT> def __ne__(self, other): <NEW_LINE> <INDENT> return not (self == other) | Attributes:
- _v | 6259903d10dbd63aa1c71e0e |
class ArticleModelMethodTests(TestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> column = Column.objects.create(name='myColumn2') <NEW_LINE> tag = Tag.objects.create(name="myTag2") <NEW_LINE> self.article = Article.objects.create( title='my article', slug='my-article', column=column, tag=tag, summary='my article summary', content='my article content' ) <NEW_LINE> <DEDENT> def test_article_get_absolute_url(self): <NEW_LINE> <INDENT> self.assertEqual(self.article.get_absolute_url(), '/blog/article/' + str(self.article.slug)) | Test the methods of the Article model | 6259903d6e29344779b0188a
class VisualSemanticEmbedding(nn.Module): <NEW_LINE> <INDENT> def __init__(self, i_dim, t_dim, c_dim, margin=0.2, bow=False): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self.i_dim = i_dim <NEW_LINE> self.t_dim = t_dim <NEW_LINE> self.c_dim = c_dim <NEW_LINE> self.bow = bow <NEW_LINE> self.margin = margin <NEW_LINE> self.Wi = nn.Linear(i_dim, c_dim, bias=False) <NEW_LINE> self.Wt = nn.Linear(t_dim, c_dim, bias=False) <NEW_LINE> <DEDENT> def forward(self, i_data: torch.Tensor, t_data: torch.Tensor, mask: torch.Tensor = None) -> torch.Tensor: <NEW_LINE> <INDENT> assert len(i_data) == len(t_data) == len(mask) <NEW_LINE> *shape, _ = i_data.shape <NEW_LINE> if self.bow: <NEW_LINE> <INDENT> t_norm = t_data.sum(dim=-1, keepdim=True) <NEW_LINE> t_data = t_data / t_norm.clamp_min(1e-10) <NEW_LINE> t_mask = t_norm > 0.0 <NEW_LINE> if mask is None: <NEW_LINE> <INDENT> mask = t_mask <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> mask = mask * t_mask <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> mask = None if mask is None else mask.view(-1, 1) <NEW_LINE> <DEDENT> i_feat = self.Wi(i_data).view(-1, self.c_dim) <NEW_LINE> t_feat = self.Wt(t_data).view(-1, self.c_dim) <NEW_LINE> loss = contrastive_loss(i_feat, t_feat, self.margin, mask=mask, reduction="none") <NEW_LINE> return loss.view(*shape) | The Visual Semantic embedding layer with ranking loss :meth:`torchutils.loss.contrastive_loss`
Args:
i_dim (int): dimension for image data
t_dim (int): dimension for text data
c_dim (int): dimension for embedding space
margin (float, optional): margin for loss. Defaults to 0.2.
bow (bool, optional): whether the input is bag of words. Defaults to False. | 6259903d1f5feb6acb163e2b |