code | docstring | _id |
---|---|---|
class SimpleFragmenter(object): <NEW_LINE> <INDENT> def __init__(self, size=70): <NEW_LINE> <INDENT> self.size = size <NEW_LINE> <DEDENT> def __call__(self, text, tokens): <NEW_LINE> <INDENT> size = self.size <NEW_LINE> first = None <NEW_LINE> frag = [] <NEW_LINE> for t in tokens: <NEW_LINE> <INDENT> if first is None: <NEW_LINE> <INDENT> first = t.startchar <NEW_LINE> <DEDENT> if t.endchar - first > size: <NEW_LINE> <INDENT> first = None <NEW_LINE> if frag: <NEW_LINE> <INDENT> yield Fragment(frag) <NEW_LINE> <DEDENT> frag = [] <NEW_LINE> <DEDENT> frag.append(t) <NEW_LINE> <DEDENT> if frag: <NEW_LINE> <INDENT> yield Fragment(frag) | Simply splits the text into roughly equal sized chunks.
| 62599043004d5f362081f963 |
class AutoAuthPage(PageObject): <NEW_LINE> <INDENT> def __init__(self, browser, username=None, email=None, password=None, staff=None, course_id=None, roles=None): <NEW_LINE> <INDENT> super().__init__(browser) <NEW_LINE> self._params = {} <NEW_LINE> if username is not None: <NEW_LINE> <INDENT> self._params['username'] = username <NEW_LINE> <DEDENT> if email is not None: <NEW_LINE> <INDENT> self._params['email'] = email <NEW_LINE> <DEDENT> if password is not None: <NEW_LINE> <INDENT> self._params['password'] = password <NEW_LINE> <DEDENT> if staff is not None: <NEW_LINE> <INDENT> self._params['staff'] = "true" if staff else "false" <NEW_LINE> <DEDENT> if course_id is not None: <NEW_LINE> <INDENT> self._params['course_id'] = course_id <NEW_LINE> <DEDENT> if roles is not None: <NEW_LINE> <INDENT> self._params['roles'] = roles <NEW_LINE> <DEDENT> self.data = {} <NEW_LINE> <DEDENT> @property <NEW_LINE> def url(self): <NEW_LINE> <INDENT> url = ORA_SANDBOX_URL + "/auto_auth" <NEW_LINE> query_str = urllib.parse.urlencode(self._params) <NEW_LINE> if query_str: <NEW_LINE> <INDENT> url += "?" + query_str <NEW_LINE> <DEDENT> return url <NEW_LINE> <DEDENT> def is_browser_on_page(self): <NEW_LINE> <INDENT> self.data = json.loads(self.q(css='BODY').text[0]) <NEW_LINE> return self.data['created_status'] == "Logged in" <NEW_LINE> <DEDENT> def get_user_id(self): <NEW_LINE> <INDENT> return self.data['user_id'] <NEW_LINE> <DEDENT> def get_username_and_email(self): <NEW_LINE> <INDENT> return self.data['username'], self.data['email'] | The automatic authorization page.
When allowed via the django settings file, visiting
this url will create a user and log them in. | 6259904366673b3332c316f5 |
class SourceError(errors.PartsError): <NEW_LINE> <INDENT> pass | Base class for source handler errors. | 62599043711fe17d825e161b |
class Pagination(object): <NEW_LINE> <INDENT> def __init__(self, builder, entries, page, per_page, url_key): <NEW_LINE> <INDENT> self.builder = builder <NEW_LINE> self.entries = entries <NEW_LINE> self.page = page <NEW_LINE> self.per_page = per_page <NEW_LINE> self.url_key = url_key <NEW_LINE> <DEDENT> @property <NEW_LINE> def total(self): <NEW_LINE> <INDENT> return len(self.entries) <NEW_LINE> <DEDENT> @property <NEW_LINE> def pages(self): <NEW_LINE> <INDENT> return int(ceil(self.total / float(self.per_page))) <NEW_LINE> <DEDENT> def get_prev(self): <NEW_LINE> <INDENT> return Pagination(self.builder, self.entries, self.page - 1, self.per_page, self.url_key) <NEW_LINE> <DEDENT> @property <NEW_LINE> def prev_num(self): <NEW_LINE> <INDENT> return self.page - 1 <NEW_LINE> <DEDENT> @property <NEW_LINE> def has_prev(self): <NEW_LINE> <INDENT> return self.page > 1 <NEW_LINE> <DEDENT> def get_next(self): <NEW_LINE> <INDENT> return Pagination(self.builder, self.entries, self.page + 1, self.per_page, self.url_key) <NEW_LINE> <DEDENT> @property <NEW_LINE> def has_next(self): <NEW_LINE> <INDENT> return self.page < self.pages <NEW_LINE> <DEDENT> @property <NEW_LINE> def next_num(self): <NEW_LINE> <INDENT> return self.page + 1 <NEW_LINE> <DEDENT> def get_slice(self): <NEW_LINE> <INDENT> return self.entries[(self.page - 1) * self.per_page: self.page * self.per_page] <NEW_LINE> <DEDENT> def iter_pages(self, left_edge=2, left_current=2, right_current=5, right_edge=2): <NEW_LINE> <INDENT> last = 0 <NEW_LINE> for num in range(1, self.pages + 1): <NEW_LINE> <INDENT> if num <= left_edge or (num > self.page - left_current - 1 and num < self.page + right_current) or num > self.pages - right_edge: <NEW_LINE> <INDENT> if last + 1 != num: <NEW_LINE> <INDENT> yield None <NEW_LINE> <DEDENT> yield num <NEW_LINE> last = num <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return self.builder.render_template('_pagination.html', { 'pagination': self }) <NEW_LINE> <DEDENT> def __html__(self): <NEW_LINE> <INDENT> return Markup(six.text_type(self)) | Internal helper class for paginations | 62599043d99f1b3c44d0699b |
class Redo(Trace): <NEW_LINE> <INDENT> def __init__(self, name, *args, **kwargs): <NEW_LINE> <INDENT> self._start_at = kwargs.pop('start_at', None) <NEW_LINE> self._end_at = kwargs.pop('end_at', None) <NEW_LINE> super(Redo, self).__init__(name, *args, **kwargs) <NEW_LINE> <DEDENT> def before_exec_msg(self): <NEW_LINE> <INDENT> self._start_at = datetime.now() <NEW_LINE> return self._start_at.isoformat() + ' | ' + super(Redo, self).__str__() <NEW_LINE> <DEDENT> def after_exec_msg(self): <NEW_LINE> <INDENT> self._end_at = datetime.now() <NEW_LINE> return ' | ' + self._end_at.isoformat() | Redo log object | 6259904350485f2cf55dc282 |
class QueueFull(Exception): <NEW_LINE> <INDENT> pass | Raised when trying to enqueue an item into a full queue. | 625990438c3a8732951f7857 |
class VideoListYoutube(VideoList): <NEW_LINE> <INDENT> def __init__(self, keyword=None, username=None, playlist=None, page=1): <NEW_LINE> <INDENT> super(VideoListYoutube, self).__init__() <NEW_LINE> self.get_style_context().add_class('video_list_youtube') <NEW_LINE> start_index = page_to_index(page) <NEW_LINE> entries = None <NEW_LINE> self._parsed_entries = None <NEW_LINE> if keyword: <NEW_LINE> <INDENT> entries = search_youtube_by_keyword( keyword, start_index=start_index, parent_control=self.ParentalControl) <NEW_LINE> logger.info('searching by keyword: ' + keyword) <NEW_LINE> <DEDENT> elif username: <NEW_LINE> <INDENT> entries = search_youtube_by_user( username, parent_control=self.ParentalControl) <NEW_LINE> logger.info('listing by username: ' + username) <NEW_LINE> <DEDENT> elif playlist: <NEW_LINE> <INDENT> entries = playlist <NEW_LINE> logger.info('listing playlist: ' + playlist) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> entries = search_youtube_by_user( 'KanoComputing', parent_control=self.ParentalControl) <NEW_LINE> logger.info('listing default videos by KanoComputing') <NEW_LINE> <DEDENT> if entries: <NEW_LINE> <INDENT> self._parsed_entries = parse_youtube_entries(entries) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._grid.attach(self._no_results, 0, 0, 1, 1) <NEW_LINE> <DEDENT> self.refresh() <NEW_LINE> <DEDENT> def refresh(self): <NEW_LINE> <INDENT> if self._parsed_entries: <NEW_LINE> <INDENT> for i, e in enumerate(self._parsed_entries): <NEW_LINE> <INDENT> e['local_path'] = None <NEW_LINE> entry = VideoEntry(e) <NEW_LINE> self._grid.attach(entry, 0, i, 1, 1) | A video collection list used for videos on YouTube | 6259904323849d37ff8523b7 |
class Property(models.Model): <NEW_LINE> <INDENT> owner = models.ForeignKey(settings.AUTH_USER_MODEL) <NEW_LINE> help_text = _("Utilize este espaço para descrever seu anúncio.") <NEW_LINE> cep = models.CharField(max_length=8, default='') <NEW_LINE> address = models.CharField(_('Endereço'), max_length=140) <NEW_LINE> state = models.CharField(_('Estado'), max_length=2) <NEW_LINE> city = models.CharField(_('Cidade'), max_length=140) <NEW_LINE> district = models.CharField(_('Bairro'), max_length=140) <NEW_LINE> property_type = models.ForeignKey(PropertyType) <NEW_LINE> category = ChainedForeignKey( Category, chained_field='property_type', chained_model_field='property_type' ) <NEW_LINE> rooms = models.PositiveSmallIntegerField(_('Quartos')) <NEW_LINE> util_area = models.PositiveSmallIntegerField( _('Área útil'), blank=True, null=True ) <NEW_LINE> total_area = models.PositiveSmallIntegerField(_('Área total')) <NEW_LINE> title = models.CharField(_('Titulo'), max_length=140) <NEW_LINE> image = models.ImageField(_('Imagem'), upload_to='img/') <NEW_LINE> description = models.TextField(_('Descrição')) <NEW_LINE> rent_price = models.DecimalField( _('Valor do aluguel'), max_digits=19, decimal_places=10 ) <NEW_LINE> timestamp = models.DateTimeField(auto_now=True) <NEW_LINE> class Meta: <NEW_LINE> <INDENT> verbose_name = _("Imóvel") <NEW_LINE> verbose_name_plural = _('Imóveis') <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return self.title <NEW_LINE> <DEDENT> def get_absolute_url(self): <NEW_LINE> <INDENT> return reverse('properties:show_property', args=[self.id]) | Model for all properties. | 6259904376d4e153a661dbf3 |
class Theme(models.Model): <NEW_LINE> <INDENT> site = models.OneToOneField(Site, related_name='theme', on_delete=models.CASCADE) <NEW_LINE> name = models.CharField(max_length=255) <NEW_LINE> def __eq__(self, other): <NEW_LINE> <INDENT> return (self.name, self.path) == (other.name, other.path) <NEW_LINE> <DEDENT> def __hash__(self): <NEW_LINE> <INDENT> return hash((self.name, self.path)) <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return f"<Theme: {self.name} at '{self.path}'>" <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> return self.__str__() <NEW_LINE> <DEDENT> @property <NEW_LINE> def base_dir(self): <NEW_LINE> <INDENT> return Path(theming.get_base_dir(str(self.name))) <NEW_LINE> <DEDENT> @property <NEW_LINE> def path(self): <NEW_LINE> <INDENT> return self.base_dir / self.name <NEW_LINE> <DEDENT> @property <NEW_LINE> def template_dirs(self): <NEW_LINE> <INDENT> return [ self.path / 'templates', ] <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def get_theme(site): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return site.theme <NEW_LINE> <DEDENT> except ObjectDoesNotExist: <NEW_LINE> <INDENT> if settings.THEMING.get('DEFAULT', None): <NEW_LINE> <INDENT> return Theme(site=site, name=settings.THEMING['DEFAULT']) <NEW_LINE> <DEDENT> return None | Django ORM model for Theme db table.
Fields:
site (OneToOneField): one-to-one field pointing to the Django Site model
name (CharField): Contains the directory name for the site's theme (e.g. 'red-theme') | 62599043287bf620b6272ee4 |
class Float(Numeric): <NEW_LINE> <INDENT> @property <NEW_LINE> def value(self): <NEW_LINE> <INDENT> return float(self.client.get(self.prefixer(self.key)) or 0) <NEW_LINE> <DEDENT> @value.setter <NEW_LINE> def value(self, value): <NEW_LINE> <INDENT> if value is not None: <NEW_LINE> <INDENT> self.client.set(self.prefixer(self.key), value) <NEW_LINE> <DEDENT> <DEDENT> def __isub__(self, f): <NEW_LINE> <INDENT> self.client.incrbyfloat(self.prefixer(self.key), f * -1) <NEW_LINE> return self <NEW_LINE> <DEDENT> def __iadd__(self, f): <NEW_LINE> <INDENT> self.client.incrbyfloat(self.prefixer(self.key), f * 1) <NEW_LINE> return self | Redis float <-> Python float. | 62599043507cdc57c63a6099 |
class Memory(): <NEW_LINE> <INDENT> def __init__(self, memeLabels): <NEW_LINE> <INDENT> self.freeRows = list() <NEW_LINE> self.columns = dict() <NEW_LINE> for i, label in enumerate(memeLabels): <NEW_LINE> <INDENT> self.columns[label] = i <NEW_LINE> <DEDENT> self.ID2Row = dict() <NEW_LINE> self.memory = np.zeros([0, len(memeLabels)]) <NEW_LINE> self.getID = itertools.count().__next__ <NEW_LINE> <DEDENT> def addMeme(self, meme): <NEW_LINE> <INDENT> memeID = self.getID() <NEW_LINE> if len(self.freeRows) > 0: <NEW_LINE> <INDENT> row = self.freeRows.pop() <NEW_LINE> self.memory[row] = meme <NEW_LINE> self.ID2Row[memeID] = row <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> row = self.memory.shape[0] <NEW_LINE> self.memory = np.vstack(( self.memory, meme)) <NEW_LINE> self.ID2Row[memeID] = row <NEW_LINE> <DEDENT> return memeID <NEW_LINE> <DEDENT> def remMeme(self,memeID): <NEW_LINE> <INDENT> row = self.ID2Row[memeID] <NEW_LINE> self.memory[row] = np.nan <NEW_LINE> self.freeRows.append(row) <NEW_LINE> del self.ID2Row[memeID] <NEW_LINE> <DEDENT> def getMeme(self,memeID,columns): <NEW_LINE> <INDENT> cols = [self.columns[x] for x in columns] <NEW_LINE> rows = [self.ID2Row[x] for x in memeID] <NEW_LINE> return self.memory[np.ix_(rows,cols)] | deprecated | 625990433eb6a72ae038b95e |
class AdminReport(FlaskForm): <NEW_LINE> <INDENT> period = SelectField('统计周期', validators=[DataRequired()], choices=[('D', '今天'), ('W', '本周'), ('M', '本月')]) <NEW_LINE> unit = SelectField('统计单位', validators=[DataRequired()], choices=[('department', '部门'), ('individual', '个人')]) <NEW_LINE> submit = SubmitField('生成报表') | Administrator report form | 6259904373bcbd0ca4bcb588 |
class TVMContext(ctypes.Structure): <NEW_LINE> <INDENT> _fields_ = [("device_type", ctypes.c_int), ("device_id", ctypes.c_int)] <NEW_LINE> MASK2STR = { 1 : 'cpu', 2 : 'gpu', 4 : 'opencl', 7 : 'vulkan', 8 : 'metal', 9 : 'vpi', 10: 'rocm', 11: 'opengl', 12: 'ext_dev', } <NEW_LINE> STR2MASK = { 'llvm': 1, 'stackvm': 1, 'cpu': 1, 'gpu': 2, 'cuda': 2, 'nvptx': 2, 'cl': 4, 'opencl': 4, 'vulkan': 7, 'metal': 8, 'vpi': 9, 'rocm': 10, 'opengl': 11, 'ext_dev': 12, } <NEW_LINE> def __init__(self, device_type, device_id): <NEW_LINE> <INDENT> super(TVMContext, self).__init__() <NEW_LINE> self.device_type = device_type <NEW_LINE> self.device_id = device_id <NEW_LINE> <DEDENT> @property <NEW_LINE> def exist(self): <NEW_LINE> <INDENT> return _api_internal._GetDeviceAttr( self.device_type, self.device_id, 0) != 0 <NEW_LINE> <DEDENT> @property <NEW_LINE> def max_threads_per_block(self): <NEW_LINE> <INDENT> return _api_internal._GetDeviceAttr( self.device_type, self.device_id, 1) <NEW_LINE> <DEDENT> @property <NEW_LINE> def warp_size(self): <NEW_LINE> <INDENT> return _api_internal._GetDeviceAttr( self.device_type, self.device_id, 2) <NEW_LINE> <DEDENT> @property <NEW_LINE> def max_shared_memory_per_block(self): <NEW_LINE> <INDENT> return _api_internal._GetDeviceAttr( self.device_type, self.device_id, 3) <NEW_LINE> <DEDENT> @property <NEW_LINE> def compute_version(self): <NEW_LINE> <INDENT> return _api_internal._GetDeviceAttr( self.device_type, self.device_id, 4) <NEW_LINE> <DEDENT> @property <NEW_LINE> def device_name(self): <NEW_LINE> <INDENT> return _api_internal._GetDeviceAttr( self.device_type, self.device_id, 5) <NEW_LINE> <DEDENT> @property <NEW_LINE> def max_clock_rate(self): <NEW_LINE> <INDENT> return _api_internal._GetDeviceAttr( self.device_type, self.device_id, 6) <NEW_LINE> <DEDENT> @property <NEW_LINE> def multi_processor_count(self): <NEW_LINE> <INDENT> return _api_internal._GetDeviceAttr( self.device_type, self.device_id, 7) <NEW_LINE> <DEDENT> def sync(self): <NEW_LINE> <INDENT> check_call(_LIB.TVMSynchronize(self.device_type, self.device_id, None)) <NEW_LINE> <DEDENT> def __eq__(self, other): <NEW_LINE> <INDENT> return (isinstance(other, TVMContext) and self.device_id == other.device_id and self.device_type == other.device_type) <NEW_LINE> <DEDENT> def __ne__(self, other): <NEW_LINE> <INDENT> return not self.__eq__(other) <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> if self.device_type >= RPC_SESS_MASK: <NEW_LINE> <INDENT> tbl_id = self.device_type / RPC_SESS_MASK - 1 <NEW_LINE> dev_type = self.device_type % RPC_SESS_MASK <NEW_LINE> return "remote[%d]:%s(%d)" % ( tbl_id, TVMContext.MASK2STR[dev_type], self.device_id) <NEW_LINE> <DEDENT> return "%s(%d)" % ( TVMContext.MASK2STR[self.device_type], self.device_id) | TVM context structure. | 62599043004d5f362081f964 |
class EddiBoost: <NEW_LINE> <INDENT> log = logging.getLogger(__name__) <NEW_LINE> def __init__(self, server_conn): <NEW_LINE> <INDENT> self._sc = server_conn <NEW_LINE> self.desired_temp = 35 <NEW_LINE> self._in_time_window = False <NEW_LINE> self._heater = 1 <NEW_LINE> <DEDENT> def _stop_boost(self, eddi): <NEW_LINE> <INDENT> self.log.info('Stopping boost') <NEW_LINE> self._sc.stop_eddi_boost(eddi.sno, self._heater) <NEW_LINE> <DEDENT> def _check_for_boost_start(self, eddi): <NEW_LINE> <INDENT> if eddi.temp_1 < self.desired_temp: <NEW_LINE> <INDENT> self.log.info('Starting boost') <NEW_LINE> self._sc.start_boost(eddi.sno, self._heater, 60) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.log.info('Temp reached') <NEW_LINE> <DEDENT> <DEDENT> def run(self, eddi, in_time_window): <NEW_LINE> <INDENT> self.log.info('Updating: In time window: {}'.format(in_time_window)) <NEW_LINE> if in_time_window and not self._in_time_window: <NEW_LINE> <INDENT> self._in_time_window = True <NEW_LINE> self._check_for_boost_start(eddi) <NEW_LINE> return <NEW_LINE> <DEDENT> if not in_time_window: <NEW_LINE> <INDENT> if self._in_time_window: <NEW_LINE> <INDENT> self._stop_boost(eddi) <NEW_LINE> self._in_time_window = False <NEW_LINE> <DEDENT> return <NEW_LINE> <DEDENT> if eddi.charge_rate == 0: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if eddi.status != 'Boost': <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self.desired_temp < eddi.temp_1: <NEW_LINE> <INDENT> self.log.info('Desired temp reached') <NEW_LINE> self._stop_boost(eddi) | Class for setting the Eddi boost | 6259904324f1403a9268624b |
class ReduceMana(ManaConsequence): <NEW_LINE> <INDENT> def resolve(self, game_state): <NEW_LINE> <INDENT> game_state.reduce_mana(self.c_dict) <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> return 'Reduce mana with: ' + str(self.c_dict) | Pay Mana | 62599043b57a9660fecd2d7a |
class CloudLoggingHandler(logging.StreamHandler): <NEW_LINE> <INDENT> def __init__(self, client, name=DEFAULT_LOGGER_NAME, transport=BackgroundThreadTransport): <NEW_LINE> <INDENT> super(CloudLoggingHandler, self).__init__() <NEW_LINE> self.name = name <NEW_LINE> self.client = client <NEW_LINE> self.transport = transport(client, name) <NEW_LINE> <DEDENT> def emit(self, record): <NEW_LINE> <INDENT> message = super(CloudLoggingHandler, self).format(record) <NEW_LINE> self.transport.send(record, message) | Handler that directly makes Stackdriver logging API calls.
This is a Python standard ``logging`` handler that can be used to
route Python standard logging messages directly to the Stackdriver
Logging API.
This handler supports both an asynchronous and synchronous transport.
:type client: :class:`google.cloud.logging.client`
:param client: the authenticated Google Cloud Logging client for this
handler to use
:type name: str
:param name: the name of the custom log in Stackdriver Logging. Defaults
to 'python'. The name of the Python logger will be represented
in the ``python_logger`` field.
:type transport: type
:param transport: Class for creating new transport objects. It should
extend from the base :class:`.Transport` type and
implement :meth:`.Transport.send`. Defaults to
:class:`.BackgroundThreadTransport`. The other
option is :class:`.SyncTransport`.
Example:
.. code-block:: python
import logging
import google.cloud.logging
from google.cloud.logging.handlers import CloudLoggingHandler
client = google.cloud.logging.Client()
handler = CloudLoggingHandler(client)
cloud_logger = logging.getLogger('cloudLogger')
cloud_logger.setLevel(logging.INFO)
cloud_logger.addHandler(handler)
cloud_logger.error('bad news') # API call | 625990433617ad0b5ee07437 |
class TopEventsView(APIView): <NEW_LINE> <INDENT> permission_classes = (IsAuthenticated,) <NEW_LINE> def get(self, request, *args, **kwargs): <NEW_LINE> <INDENT> if 'me' in request.query_params: <NEW_LINE> <INDENT> qs = Event.objects.filter(user=request.user) <NEW_LINE> <DEDENT> elif request.query_params.get('user'): <NEW_LINE> <INDENT> qs = Event.objects.filter(user__username=request.query_params.get('user')) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> qs = Event.objects.all() <NEW_LINE> <DEDENT> events = qs.annotate(event_type=RawSQL("((event->>%s)::text)", ('type',)) ).values('event_type').annotate(Count("event_type")).order_by('-event_type__count') <NEW_LINE> data = { 'count': len(events), 'results': [{'event': e['event_type'], 'count': e['event_type__count']} for e in events] } <NEW_LINE> return Response(data) | Get the types of events and number of events for each type. | 625990438a43f66fc4bf3490 |
class NotMatched(Exception): <NEW_LINE> <INDENT> pass | This is raised when the path provided cannot be parsed with the current
rule. | 6259904326238365f5fade58 |
class Scipy2Corpus: <NEW_LINE> <INDENT> def __init__(self, vecs): <NEW_LINE> <INDENT> self.vecs = vecs <NEW_LINE> <DEDENT> def __iter__(self): <NEW_LINE> <INDENT> for vec in self.vecs: <NEW_LINE> <INDENT> if isinstance(vec, np.ndarray): <NEW_LINE> <INDENT> yield full2sparse(vec) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> yield scipy2sparse(vec) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def __len__(self): <NEW_LINE> <INDENT> return len(self.vecs) | Convert a sequence of dense/sparse vectors into a streamed Gensim corpus object.
See Also
--------
:func:`~gensim.matutils.corpus2csc`
Convert corpus in Gensim format to `scipy.sparse.csc` matrix. | 62599043d53ae8145f91975c |
class DropoutLayer(Layer): <NEW_LINE> <INDENT> def __init__(self, incoming, p=0.5, rescale=True, **kwargs): <NEW_LINE> <INDENT> super(DropoutLayer, self).__init__(incoming, **kwargs) <NEW_LINE> self._srng = RandomStreams(get_rng().randint(1, 2147462579)) <NEW_LINE> self.p = p <NEW_LINE> self.rescale = rescale <NEW_LINE> <DEDENT> def get_output_for(self, input, deterministic=False, **kwargs): <NEW_LINE> <INDENT> if deterministic or self.p == 0: <NEW_LINE> <INDENT> return input <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> retain_prob = 1 - self.p <NEW_LINE> if self.rescale: <NEW_LINE> <INDENT> input /= retain_prob <NEW_LINE> <DEDENT> input_shape = self.input_shape <NEW_LINE> if any(s is None for s in input_shape): <NEW_LINE> <INDENT> input_shape = input.shape <NEW_LINE> <DEDENT> return input * self._srng.binomial(input_shape, p=retain_prob, dtype=input.dtype) | Dropout layer
Sets values to zero with probability p. See notes for disabling dropout
during testing.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
the layer feeding into this layer, or the expected input shape
p : float or scalar tensor
The probability of setting a value to zero
rescale : bool
If true the input is rescaled with input / (1-p) when deterministic
is False.
Notes
-----
The dropout layer is a regularizer that randomly sets input values to
zero; see [1]_, [2]_ for why this might improve generalization.
During training you should set deterministic to false and during
testing you should set deterministic to true.
If rescale is true the input is scaled with input / (1-p) when
deterministic is false, see references for further discussion. Note that
this implementation scales the input at training time.
References
----------
.. [1] Hinton, G., Srivastava, N., Krizhevsky, A., Sutskever, I.,
Salakhutdinov, R. R. (2012):
Improving neural networks by preventing co-adaptation of feature
detectors. arXiv preprint arXiv:1207.0580.
.. [2] Srivastava Nitish, Hinton, G., Krizhevsky, A., Sutskever,
I., & Salakhutdinov, R. R. (2014):
Dropout: A Simple Way to Prevent Neural Networks from Overfitting.
Journal of Machine Learning Research, 5(Jun)(2), 1929-1958. | 6259904323849d37ff8523b9 |
class Chibi_object( metaclass=Chibi_object_meta ): <NEW_LINE> <INDENT> class Meta( Chibi_object_meta_base ): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def __init__( self, *args, **kargs ): <NEW_LINE> <INDENT> for name, field in self._meta._fields.items(): <NEW_LINE> <INDENT> setattr( self, name, kargs.get( name, field.default ) ) | Notes
-----
the Meta class is stored on the instance as an inner class named _meta
attributes of the _meta class
-----------------------------
_fields: this dictionary stores the name and the instance of each field | 6259904323e79379d538d7fd |
class DictWithGames(dict): <NEW_LINE> <INDENT> SORTING_KEY_ATTRIBUTE = 'sorting_key' <NEW_LINE> def at_least(self, n): <NEW_LINE> <INDENT> return DictWithGames((key, games) for (key, games) in self.items() if len(games) >= n) <NEW_LINE> <DEDENT> def get_sorting_key(self): <NEW_LINE> <INDENT> default_key = lambda t: (-t[1].percentage, t[0]) <NEW_LINE> return getattr(self, self.SORTING_KEY_ATTRIBUTE, default_key) <NEW_LINE> <DEDENT> def sorted_by(self, key): <NEW_LINE> <INDENT> setattr(self, self.SORTING_KEY_ATTRIBUTE, key) <NEW_LINE> return self <NEW_LINE> <DEDENT> def sorted_by_keys(self, keys): <NEW_LINE> <INDENT> return self.sorted_by(key=lambda t: keys.index(t[0])) <NEW_LINE> <DEDENT> def __nonzero__(self): <NEW_LINE> <INDENT> keys = self.keys() <NEW_LINE> if keys == [] or keys == [AVERAGE_KEY]: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> if not self: <NEW_LINE> <INDENT> return '' <NEW_LINE> <DEDENT> rows = [] <NEW_LINE> k_wr = [(key, games.winrate) for (key, games) in self.items()] <NEW_LINE> display_key_for_average = '-' * (1 + max(len(k) for (k, wr) in k_wr if k != AVERAGE_KEY)) <NEW_LINE> for (key, winrate) in sorted(k_wr, key=self.get_sorting_key()): <NEW_LINE> <INDENT> rows.append([display_key_for_average if key == AVERAGE_KEY else key + ": "] + winrate.text_row) <NEW_LINE> <DEDENT> return text.align_rows(rows) | Type: Dict[str, GameList] | 6259904391af0d3eaad3b123 |
class SelectedLattice(MessageData): <NEW_LINE> <INDENT> INTENT = "DOCUMENT" <NEW_LINE> def __init__(self, data_model, lattice_format, solution): <NEW_LINE> <INDENT> if lattice_format in INDEXING_FORMATS: <NEW_LINE> <INDENT> self._lattice_format = lattice_format <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError( "Indexing format %s not in supported formats: %s" % (lattice_format, INDEXING_FORMATS) ) <NEW_LINE> <DEDENT> self._solution = tuple(solution) <NEW_LINE> self._strategyDetectorSetting = data_model.detector_setting <NEW_LINE> self._strategyWavelength = data_model.wavelengths[0] <NEW_LINE> self._strategyControl = json.dumps( data_model.strategy_options, sort_keys=True ) <NEW_LINE> <DEDENT> @property <NEW_LINE> def lattice_format(self): <NEW_LINE> <INDENT> return self._lattice_format <NEW_LINE> <DEDENT> @property <NEW_LINE> def solution(self): <NEW_LINE> <INDENT> return self._solution <NEW_LINE> <DEDENT> @property <NEW_LINE> def strategyDetectorSetting(self): <NEW_LINE> <INDENT> return self._strategyDetectorSetting <NEW_LINE> <DEDENT> @property <NEW_LINE> def strategyWavelength(self): <NEW_LINE> <INDENT> return self._strategyWavelength <NEW_LINE> <DEDENT> @property <NEW_LINE> def strategyControl(self): <NEW_LINE> <INDENT> return self._strategyControl | Lattice selected message | 625990430fa83653e46f61db |
class TestInlineResponse20115(unittest.TestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def tearDown(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def testInlineResponse20115(self): <NEW_LINE> <INDENT> pass | InlineResponse20115 unit test stubs | 6259904330dc7b76659a0b30 |
class PairwiseHingeLoss(object): <NEW_LINE> <INDENT> def __init__(self, config): <NEW_LINE> <INDENT> self.margin = float(config["margin"]) <NEW_LINE> <DEDENT> def ops(self, score_pos, score_neg): <NEW_LINE> <INDENT> return tf.reduce_mean(tf.maximum(0., score_neg + self.margin - score_pos)) | a layer class: pairwise hinge loss | 6259904363b5f9789fe8646c |
class String(Concatenable, TypeEngine): <NEW_LINE> <INDENT> __visit_name__ = 'string' <NEW_LINE> def __init__(self, length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False ): <NEW_LINE> <INDENT> if unicode_error is not None and convert_unicode != 'force': <NEW_LINE> <INDENT> raise exc.ArgumentError("convert_unicode must be 'force' " "when unicode_error is set.") <NEW_LINE> <DEDENT> self.length = length <NEW_LINE> self.collation = collation <NEW_LINE> self.convert_unicode = convert_unicode <NEW_LINE> self.unicode_error = unicode_error <NEW_LINE> self._warn_on_bytestring = _warn_on_bytestring <NEW_LINE> <DEDENT> def bind_processor(self, dialect): <NEW_LINE> <INDENT> if self.convert_unicode or dialect.convert_unicode: <NEW_LINE> <INDENT> if dialect.supports_unicode_binds and self.convert_unicode != 'force': <NEW_LINE> <INDENT> if self._warn_on_bytestring: <NEW_LINE> <INDENT> def process(value): <NEW_LINE> <INDENT> if isinstance(value, str): <NEW_LINE> <INDENT> util.warn("Unicode type received non-unicode bind " "param value.") <NEW_LINE> <DEDENT> return value <NEW_LINE> <DEDENT> return process <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> encoder = codecs.getencoder(dialect.encoding) <NEW_LINE> warn_on_bytestring = self._warn_on_bytestring <NEW_LINE> def process(value): <NEW_LINE> <INDENT> if isinstance(value, unicode): <NEW_LINE> <INDENT> return encoder(value, self.unicode_error)[0] <NEW_LINE> <DEDENT> elif warn_on_bytestring and value is not None: <NEW_LINE> <INDENT> util.warn("Unicode type received non-unicode bind " "param value") <NEW_LINE> <DEDENT> return value <NEW_LINE> <DEDENT> <DEDENT> return process <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> <DEDENT> def result_processor(self, dialect, coltype): <NEW_LINE> <INDENT> wants_unicode = self.convert_unicode or dialect.convert_unicode <NEW_LINE> needs_convert = wants_unicode and (dialect.returns_unicode_strings is not True or self.convert_unicode == 'force') <NEW_LINE> if needs_convert: <NEW_LINE> <INDENT> to_unicode = processors.to_unicode_processor_factory( dialect.encoding, self.unicode_error) <NEW_LINE> if dialect.returns_unicode_strings: <NEW_LINE> <INDENT> def process(value): <NEW_LINE> <INDENT> if isinstance(value, unicode): <NEW_LINE> <INDENT> return value <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return to_unicode(value) <NEW_LINE> <DEDENT> <DEDENT> return process <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return to_unicode <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> <DEDENT> @property <NEW_LINE> def python_type(self): <NEW_LINE> <INDENT> if self.convert_unicode: <NEW_LINE> <INDENT> return unicode <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return str <NEW_LINE> <DEDENT> <DEDENT> def get_dbapi_type(self, dbapi): <NEW_LINE> <INDENT> return dbapi.STRING | The base for all string and character types.
In SQL, corresponds to VARCHAR. Can also take Python unicode objects
and encode to the database's encoding in bind params (and the reverse for
result sets.)
The `length` field is usually required when the `String` type is
used within a CREATE TABLE statement, as VARCHAR requires a length
on most databases. | 62599043711fe17d825e161d |
class AliasedLoader(object): <NEW_LINE> <INDENT> def __init__(self, wrapped): <NEW_LINE> <INDENT> self.wrapped = wrapped <NEW_LINE> <DEDENT> def __getitem__(self, name): <NEW_LINE> <INDENT> return self.wrapped[name] <NEW_LINE> <DEDENT> def __getattr__(self, name): <NEW_LINE> <INDENT> return getattr(self.wrapped, name) | Light wrapper around the LazyLoader to redirect 'cmd.run' calls to
'cmd.shell', for easy use of shellisms during templating calls
Dotted aliases ('cmd.run') must resolve to another dotted alias
(e.g. 'cmd.shell')
Non-dotted aliases ('cmd') must resolve to a dictionary of function
aliases for that module (e.g. {'run': 'shell'}) | 62599043d99f1b3c44d0699e |
class FuncClass(object): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.theta = None <NEW_LINE> <DEDENT> def set_x(self, x): <NEW_LINE> <INDENT> self.theta = x <NEW_LINE> <DEDENT> def func(self): <NEW_LINE> <INDENT> x = self.theta[0] <NEW_LINE> y = self.theta[1] <NEW_LINE> return (2 * x ** 2 - 4 * x * y + y ** 4 + 2, np.array([4 * x - 4 * y, - 4 * x + 4 * y ** 3])) | Function has local minima: 1 at (-1, -1) and 1 at (1, 1), saddle point at (0, 0) | 62599043d99f1b3c44d0699f |
class InstanceMultiParam(AbstractModel): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.CurrentValue = None <NEW_LINE> self.DefaultValue = None <NEW_LINE> self.EnumValue = None <NEW_LINE> self.NeedRestart = None <NEW_LINE> self.ParamName = None <NEW_LINE> self.Status = None <NEW_LINE> self.Tips = None <NEW_LINE> self.ValueType = None <NEW_LINE> <DEDENT> def _deserialize(self, params): <NEW_LINE> <INDENT> self.CurrentValue = params.get("CurrentValue") <NEW_LINE> self.DefaultValue = params.get("DefaultValue") <NEW_LINE> self.EnumValue = params.get("EnumValue") <NEW_LINE> self.NeedRestart = params.get("NeedRestart") <NEW_LINE> self.ParamName = params.get("ParamName") <NEW_LINE> self.Status = params.get("Status") <NEW_LINE> self.Tips = params.get("Tips") <NEW_LINE> self.ValueType = params.get("ValueType") <NEW_LINE> memeber_set = set(params.keys()) <NEW_LINE> for name, value in vars(self).items(): <NEW_LINE> <INDENT> if name in memeber_set: <NEW_LINE> <INDENT> memeber_set.remove(name) <NEW_LINE> <DEDENT> <DEDENT> if len(memeber_set) > 0: <NEW_LINE> <INDENT> warnings.warn("%s fileds are useless." % ",".join(memeber_set)) | Collection of modifiable instance parameters of Multi type.
| 625990433eb6a72ae038b961 |
class RemoveTabAction(TextStoreAction): <NEW_LINE> <INDENT> def isDoable(self): <NEW_LINE> <INDENT> return not (self.column == 0) <NEW_LINE> <DEDENT> def performDoOperation(self): <NEW_LINE> <INDENT> operation = RemoveTabOperation(self.cursor, self.textStore, self.settings) <NEW_LINE> operation.perform() <NEW_LINE> <DEDENT> def performUndoOperation(self): <NEW_LINE> <INDENT> operation = InsertTabOperation(self.cursor, self.textStore, self.settings) <NEW_LINE> operation.perform() | Action to remove a tab | 62599043a79ad1619776b380 |
class AllocateParams(NamedTuple): <NEW_LINE> <INDENT> account_pubkey: PublicKey <NEW_LINE> space: int | Allocate account with seed system transaction params. | 6259904373bcbd0ca4bcb58d |
class LazyContext(object): <NEW_LINE> <INDENT> instance = None <NEW_LINE> def __init__(self, *args, **params): <NEW_LINE> <INDENT> kwargs = params.copy() <NEW_LINE> for norm in args: <NEW_LINE> <INDENT> kwargs = norm(kwargs) <NEW_LINE> <DEDENT> self.kwargs = kwargs <NEW_LINE> self.__prev_instance = None <NEW_LINE> self.values_allowed = False <NEW_LINE> <DEDENT> def __enter__(self): <NEW_LINE> <INDENT> self.__prev_instance = self.instance <NEW_LINE> self.__class__.instance = self <NEW_LINE> return self.kwargs <NEW_LINE> <DEDENT> def __exit__(self, exc_type, exc_val, exc_tb): <NEW_LINE> <INDENT> self.__class__.instance = self.__prev_instance <NEW_LINE> <DEDENT> def get(self, key, safe=False): <NEW_LINE> <INDENT> if not (self.values_allowed or safe): <NEW_LINE> <INDENT> raise RuntimeError("Get real value is unsafe now") <NEW_LINE> <DEDENT> return self.kwargs[key] <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def allow_values(cls): <NEW_LINE> <INDENT> if cls.instance: <NEW_LINE> <INDENT> cls.instance.values_allowed = True | Context manager for actual parameters values lookup. | 62599043b57a9660fecd2d7d |
class MobileNetV3(nn.Layer): <NEW_LINE> <INDENT> def __init__(self, config, last_channel, scale=1.0, num_classes=1000, with_pool=True): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self.config = config <NEW_LINE> self.scale = scale <NEW_LINE> self.last_channel = last_channel <NEW_LINE> self.num_classes = num_classes <NEW_LINE> self.with_pool = with_pool <NEW_LINE> self.firstconv_in_channels = config[0].in_channels <NEW_LINE> self.lastconv_in_channels = config[-1].in_channels <NEW_LINE> self.lastconv_out_channels = self.lastconv_in_channels * 6 <NEW_LINE> norm_layer = partial(nn.BatchNorm2D, epsilon=0.001, momentum=0.99) <NEW_LINE> self.conv = ConvNormActivation( in_channels=3, out_channels=self.firstconv_in_channels, kernel_size=3, stride=2, padding=1, groups=1, activation_layer=nn.Hardswish, norm_layer=norm_layer) <NEW_LINE> self.blocks = nn.Sequential(*[ InvertedResidual( in_channels=cfg.in_channels, expanded_channels=cfg.expanded_channels, out_channels=cfg.out_channels, filter_size=cfg.kernel, stride=cfg.stride, use_se=cfg.use_se, activation_layer=cfg.activation_layer, norm_layer=norm_layer) for cfg in self.config ]) <NEW_LINE> self.lastconv = ConvNormActivation( in_channels=self.lastconv_in_channels, out_channels=self.lastconv_out_channels, kernel_size=1, stride=1, padding=0, groups=1, norm_layer=norm_layer, activation_layer=nn.Hardswish) <NEW_LINE> if with_pool: <NEW_LINE> <INDENT> self.avgpool = nn.AdaptiveAvgPool2D(1) <NEW_LINE> <DEDENT> if num_classes > 0: <NEW_LINE> <INDENT> self.classifier = nn.Sequential( nn.Linear(self.lastconv_out_channels, self.last_channel), nn.Hardswish(), nn.Dropout(p=0.2), nn.Linear(self.last_channel, num_classes)) <NEW_LINE> <DEDENT> <DEDENT> def forward(self, x): <NEW_LINE> <INDENT> x = self.conv(x) <NEW_LINE> x = self.blocks(x) <NEW_LINE> x = self.lastconv(x) <NEW_LINE> if self.with_pool: <NEW_LINE> <INDENT> x = self.avgpool(x) <NEW_LINE> <DEDENT> if self.num_classes > 0: <NEW_LINE> <INDENT> x = paddle.flatten(x, 1) <NEW_LINE> x = self.classifier(x) <NEW_LINE> <DEDENT> return x | MobileNetV3 model from
`"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_.
Args:
config (list[InvertedResidualConfig]): MobileNetV3 depthwise blocks config.
last_channel (int): The number of channels on the penultimate layer.
scale (float, optional): Scale of channels in each layer. Default: 1.0.
num_classes (int, optional): Output dim of last fc layer. If num_classes <=0, last fc layer
will not be defined. Default: 1000.
with_pool (bool, optional): Use pool before the last fc layer or not. Default: True. | 6259904382261d6c52730845 |
class CloudFilesUSStorageDriver(CloudFilesStorageDriver): <NEW_LINE> <INDENT> type = Provider.CLOUDFILES_US <NEW_LINE> name = 'CloudFiles (US)' <NEW_LINE> def __init__(self, *args, **kwargs): <NEW_LINE> <INDENT> kwargs['region'] = 'ord' <NEW_LINE> super(CloudFilesUSStorageDriver, self).__init__(*args, **kwargs) | Cloudfiles storage driver for the US endpoint. | 625990434e696a045264e7a1 |
class VaultDict(Vault): <NEW_LINE> <INDENT> def __init__(self, d=None): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self._dict = { } if d is None else d <NEW_LINE> <DEDENT> @contextlib.contextmanager <NEW_LINE> def _write_context(self, i): <NEW_LINE> <INDENT> f = io.BytesIO() <NEW_LINE> yield f <NEW_LINE> f.seek(0) <NEW_LINE> self._dict[i] = f.read() <NEW_LINE> <DEDENT> @contextlib.contextmanager <NEW_LINE> def _read_context(self, i): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> f = io.BytesIO(self._dict[i]) <NEW_LINE> yield f <NEW_LINE> <DEDENT> except KeyError as e: <NEW_LINE> <INDENT> raise AngrVaultError from e <NEW_LINE> <DEDENT> <DEDENT> def is_stored(self, i): <NEW_LINE> <INDENT> return i in self._dict <NEW_LINE> <DEDENT> def keys(self): <NEW_LINE> <INDENT> return self._dict.keys() | A Vault that uses a dictionary for storage. | 625990431f5feb6acb163ef6 |
class OutOfRangeError(AlluxioError): <NEW_LINE> <INDENT> def __init__(self, message): <NEW_LINE> <INDENT> super(OutOfRangeError, self).__init__(Status.OUT_OF_RANGE, message) | Exception indicating that an operation was attempted past the valid range. E.g., seeking or
reading past end of file.
Unlike :class:`InvalidArgumentException`, this error indicates a problem that may be fixed if
the system state changes. For example, a 32-bit file system will generate
:class:`InvalidArgumentException` if asked to read at an offset that is not in the range
[0,2^32-1], but it will generate :class:`OutOfRangeException` if asked to read from an offset
past the current file size.
There is a fair bit of overlap between :class:`FailedPreconditionException` and
:class:`OutOfRangeException`.
We recommend using :class:`OutOfRangeException` (the more specific error) when it applies so
that callers who are iterating through a space can easily look for an
:class:`OutOfRangeException` to detect when they are done.
Args:
message (str): The error message. | 6259904324f1403a9268624d |
class TextClassifier(Pretrained, ABC): <NEW_LINE> <INDENT> def __init__(self, num_classes=None, **kwargs): <NEW_LINE> <INDENT> super().__init__(**kwargs) <NEW_LINE> self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) <NEW_LINE> model_state_dict = torch.load(self.local_paths[0], map_location=lambda storage, loc: storage) <NEW_LINE> self.model = BertForSequenceClassification.from_pretrained('bert-base-uncased', state_dict=model_state_dict, num_labels=num_classes) <NEW_LINE> self.model.to(self.device) <NEW_LINE> <DEDENT> def y_predict(self, x_predict): <NEW_LINE> <INDENT> all_input_ids, all_input_mask, all_segment_ids = convert_examples_to_features([x_predict], self.tokenizer, max_seq_length=128) <NEW_LINE> eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids) <NEW_LINE> eval_sampler = SequentialSampler(eval_data) <NEW_LINE> eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=1) <NEW_LINE> self.model.eval() <NEW_LINE> for input_ids, input_mask, segment_ids in eval_dataloader: <NEW_LINE> <INDENT> input_ids = input_ids.to(self.device) <NEW_LINE> input_mask = input_mask.to(self.device) <NEW_LINE> segment_ids = segment_ids.to(self.device) <NEW_LINE> with torch.no_grad(): <NEW_LINE> <INDENT> logits = self.model(input_ids, segment_ids, input_mask) <NEW_LINE> <DEDENT> logits = logits.detach().cpu().numpy() <NEW_LINE> for logit in logits: <NEW_LINE> <INDENT> exp = np.exp(logit) <NEW_LINE> exp = exp / np.sum(exp) <NEW_LINE> y_pred = exp <NEW_LINE> <DEDENT> <DEDENT> return y_pred | A pre-trained TextClassifier class based on Google AI's BERT model.
Attributes:
model: Type of BERT model to be used for the classification task. E.g:- Uncased, Cased, etc.
The current pre-trained models are using 'bert-base-uncased'.
tokenizer: Tokenizer used with BERT model. | 625990438e05c05ec3f6f7db |
class JettyServer(Server): <NEW_LINE> <INDENT> async def check_server(self): <NEW_LINE> <INDENT> status = Config.UP <NEW_LINE> if await self._jetty_is_down(): <NEW_LINE> <INDENT> status = status | Config.assertions['JETTY_DOWN'] <NEW_LINE> if await self._server_is_down(): <NEW_LINE> <INDENT> status = status | Config.assertions['SERVER_DOWN'] <NEW_LINE> <DEDENT> <DEDENT> return status <NEW_LINE> <DEDENT> async def _jetty_is_down(self): <NEW_LINE> <INDENT> return await jetty.is_down(host=self._host) | The JettyServer uniquely identifies services provided by the PASTA
Gatekeeper service as identified by the host name "pasta". | 6259904307f4c71912bb0733 |
class UOPFTestCase(unittest.TestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> case = self.case = Case.load(DATA_FILE) <NEW_LINE> self.solver = UDOPF(case, dc=True) <NEW_LINE> <DEDENT> def test_dc(self): <NEW_LINE> <INDENT> solution = self.solver.solve() <NEW_LINE> generators = self.case.generators <NEW_LINE> self.assertTrue(solution["converged"] == True) <NEW_LINE> self.assertFalse(generators[0].online) <NEW_LINE> self.assertAlmostEqual(generators[1].p, 110.80, places=2) <NEW_LINE> self.assertAlmostEqual(generators[2].p, 99.20, places=2) <NEW_LINE> self.assertAlmostEqual(solution["f"], 2841.59, places=2) <NEW_LINE> <DEDENT> def test_pwl(self): <NEW_LINE> <INDENT> case = Case.load(PWL_FILE) <NEW_LINE> solver = UDOPF(case, dc=True) <NEW_LINE> solution = solver.solve() <NEW_LINE> generators = self.case.generators <NEW_LINE> self.assertTrue(solution["converged"] == True) <NEW_LINE> self.assertTrue(False not in [g.online for g in generators]) | Defines a test case for the UOPF routine.
| 625990438a43f66fc4bf3494 |
class x_charmm_mdin_method(MCategory): <NEW_LINE> <INDENT> m_def = Category( a_legacy=LegacyDefinition(name='x_charmm_mdin_method')) | Parameters of mdin belonging to section method. | 62599043379a373c97d9a32c |
class CheckingAccount(BankAccount): <NEW_LINE> <INDENT> WITHDRAW_FEE = 1 <NEW_LINE> def __init__(self, cust_email, account_type, initial_balance = BankAccount.MIN_BALANCE): <NEW_LINE> <INDENT> BankAccount.__init__(self, cust_email ,account_type, initial_balance) <NEW_LINE> <DEDENT> def withdraw(self, amount): <NEW_LINE> <INDENT> return BankAccount.withdraw(self, amount + CheckingAccount.WITHDRAW_FEE) | CheckingAccount inherits from the BankAccount class. The main additions here will be a withdrawal fee | 6259904396565a6dacd2d90b |
class ImageWidget(QWidget): <NEW_LINE> <INDENT> def __init__(self, video_service, CameraID, parent=None): <NEW_LINE> <INDENT> QWidget.__init__(self, parent) <NEW_LINE> self.video_service = video_service <NEW_LINE> self._image = QImage() <NEW_LINE> self.setWindowTitle('Robot') <NEW_LINE> self._imgWidth = 320 <NEW_LINE> self._imgHeight = 240 <NEW_LINE> self._cameraID = CameraID <NEW_LINE> self.resize(self._imgWidth, self._imgHeight) <NEW_LINE> self._imgClient = "" <NEW_LINE> self._alImage = None <NEW_LINE> self._registerImageClient() <NEW_LINE> self.startTimer(100) <NEW_LINE> <DEDENT> def _registerImageClient(self): <NEW_LINE> <INDENT> resolution = vision_definitions.kQVGA <NEW_LINE> colorSpace = vision_definitions.kRGBColorSpace <NEW_LINE> self._imgClient = self.video_service.subscribe("_client", resolution, colorSpace, 5) <NEW_LINE> self.video_service.setParam(vision_definitions.kCameraSelectID, self._cameraID) <NEW_LINE> <DEDENT> def _unregisterImageClient(self): <NEW_LINE> <INDENT> if self._imgClient != "": <NEW_LINE> <INDENT> self.video_service.unsubscribe(self._imgClient) <NEW_LINE> <DEDENT> <DEDENT> def paintEvent(self, event): <NEW_LINE> <INDENT> painter = QPainter(self) <NEW_LINE> painter.drawImage(painter.viewport(), self._image) <NEW_LINE> <DEDENT> def _updateImage(self): <NEW_LINE> <INDENT> self._alImage = self.video_service.getImageRemote(self._imgClient) <NEW_LINE> self._image = QImage(self._alImage[6], self._alImage[0], self._alImage[1], QImage.Format_RGB888) <NEW_LINE> <DEDENT> def timerEvent(self, event): <NEW_LINE> <INDENT> self._updateImage() <NEW_LINE> self.update() <NEW_LINE> <DEDENT> def __del__(self): <NEW_LINE> <INDENT> self._unregisterImageClient() | Tiny widget to display camera images from Naoqi. | 62599043d53ae8145f919760 |
class CryptoURL(object): <NEW_LINE> <INDENT> def __init__(self, key): <NEW_LINE> <INDENT> if not PYCRYPTOFOUND: <NEW_LINE> <INDENT> raise RuntimeError('pyCrypto could not be found,' + ' please install it before using libthumbor') <NEW_LINE> <DEDENT> if isinstance(key, str): <NEW_LINE> <INDENT> key = bytes(key, encoding='ascii') <NEW_LINE> <DEDENT> self.key = key <NEW_LINE> self.computed_key = (key * 16)[:16] <NEW_LINE> <DEDENT> def generate_old(self, options): <NEW_LINE> <INDENT> url = url_for(**options) <NEW_LINE> url = bytes(url, encoding='ascii') <NEW_LINE> pad = lambda s: s + (16 - len(s) % 16) * "{" <NEW_LINE> cypher = AES.new(self.computed_key) <NEW_LINE> encrypted = base64.urlsafe_b64encode(cypher.encrypt(pad(url))) <NEW_LINE> return "/%s/%s" % (str(encrypted, encoding='utf-8'), options['image_url']) <NEW_LINE> <DEDENT> def generate_new(self, options): <NEW_LINE> <INDENT> url = plain_image_url(**options) <NEW_LINE> signature = base64.urlsafe_b64encode(hmac.new(self.key, bytes(url, encoding='ascii'), hashlib.sha1).digest()) <NEW_LINE> return '/%s/%s' % (str(signature, encoding='utf-8'), url) <NEW_LINE> <DEDENT> def generate(self, **options): <NEW_LINE> <INDENT> if options.get('unsafe', False): <NEW_LINE> <INDENT> return unsafe_url(**options) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> is_old = options.get('old', False) <NEW_LINE> if is_old: <NEW_LINE> <INDENT> return self.generate_old(options) <NEW_LINE> <DEDENT> return self.generate_new(options) | Class responsible for generating encrypted URLs for thumbor. | 6259904307d97122c4217fa2 |
class DoPredictions(luigi.WrapperTask): <NEW_LINE> <INDENT> def requires(self): <NEW_LINE> <INDENT> yield predictions.TrainAndPredict() | Dummy task to trigger final predictions | 62599043baa26c4b54d505ab |
class IProfilePortlet(IPortletDataProvider): <NEW_LINE> <INDENT> pass | A portlet which can render the logged-in user's profile information.
| 6259904323e79379d538d800 |
class InvalidPersonName(Exception): <NEW_LINE> <INDENT> pass | An exception raised when an attempt is made to create a person with an invalid name. | 62599043c432627299fa4283 |
class UniqueCharactersTest(unittest.TestCase): <NEW_LINE> <INDENT> def test_edge(self): <NEW_LINE> <INDENT> self.assertFalse(first_non_repeat_of("")) <NEW_LINE> <DEDENT> def test_space(self): <NEW_LINE> <INDENT> self.assertEqual(" ", first_non_repeat_of(" hello")) <NEW_LINE> self.assertFalse(first_non_repeat_of(" hello ")) <NEW_LINE> <DEDENT> def test_no_nonreapeats(self): <NEW_LINE> <INDENT> self.assertFalse(first_non_repeat_of("hello")) <NEW_LINE> self.assertFalse(first_non_repeat_of("aabbccdd")) <NEW_LINE> self.assertFalse(first_non_repeat_of("abcdefGa")) <NEW_LINE> <DEDENT> def test_repeats(self): <NEW_LINE> <INDENT> self.assertEqual("a", first_non_repeat_of("abbccddeeff")) <NEW_LINE> self.assertEqual("a", first_non_repeat_of("abcdefg")) <NEW_LINE> self.assertEqual("f", first_non_repeat_of("aabbccddeeFFFFFFFFf")) | Unit test for first_non_repeat_of | 62599043a79ad1619776b382 |
class OptionConfig(BareConfig): <NEW_LINE> <INDENT> def __init__(self, options=None, defaults=None, root=None): <NEW_LINE> <INDENT> BareConfig.__init__(self, root=root) <NEW_LINE> if options and 'output' in options: <NEW_LINE> <INDENT> self.output = options['output'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.output = Message() <NEW_LINE> <DEDENT> self.update_defaults(defaults) <NEW_LINE> self.update(options) <NEW_LINE> return <NEW_LINE> <DEDENT> def update(self, options): <NEW_LINE> <INDENT> if options is not None: <NEW_LINE> <INDENT> keys = sorted(options) <NEW_LINE> if 'quiet' in keys: <NEW_LINE> <INDENT> self.set_option('quiet', options['quiet']) <NEW_LINE> options.pop('quiet') <NEW_LINE> <DEDENT> elif 'quietness' in keys: <NEW_LINE> <INDENT> self._set_quietness(options['quietness']) <NEW_LINE> options.pop('quietness') <NEW_LINE> <DEDENT> self._options.update(options) <NEW_LINE> <DEDENT> return <NEW_LINE> <DEDENT> def update_defaults(self, new_defaults): <NEW_LINE> <INDENT> if new_defaults is not None: <NEW_LINE> <INDENT> self._defaults.update(new_defaults) <NEW_LINE> <DEDENT> return | This subclasses BareConfig adding functions to make overriding
or resetting defaults and/or setting options much easier
by using dictionaries. | 6259904382261d6c52730846 |
class RemoteGitRepo(RemoteRepo): <NEW_LINE> <INDENT> def __init__(self, url): <NEW_LINE> <INDENT> self.uri = url <NEW_LINE> self.dir = mkdtemp() <NEW_LINE> if call(['git', 'clone', '--depth=1', '--bare', url, self.dir]) > 0: <NEW_LINE> <INDENT> raise RuntimeError() <NEW_LINE> <DEDENT> self.repo = Repo(self.dir) <NEW_LINE> <DEDENT> def __enter__(self): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> def __exit__(self, *_): <NEW_LINE> <INDENT> self.cleanup() <NEW_LINE> <DEDENT> def get_object(self, oid): <NEW_LINE> <INDENT> head, tail = oid[0:2], oid[2:] <NEW_LINE> opath = path.join(self.dir, 'objects', head, tail) <NEW_LINE> return open(opath, 'rb').read() <NEW_LINE> <DEDENT> def _fetch_tree(self, obj, tpath): <NEW_LINE> <INDENT> res = [] <NEW_LINE> ents = parse_object(self.get_object(obj), tpath).unwrap() <NEW_LINE> for is_dir, sha, opath in ents: <NEW_LINE> <INDENT> res.append((opath.decode('utf-8') + ('/' if is_dir else ''), sha)) <NEW_LINE> if is_dir: <NEW_LINE> <INDENT> res.extend(self._fetch_tree(sha, opath)) <NEW_LINE> <DEDENT> <DEDENT> return res <NEW_LINE> <DEDENT> def fetch_tree(self): <NEW_LINE> <INDENT> if hasattr(self, '_tree'): <NEW_LINE> <INDENT> return [x for x, y in self._tree] <NEW_LINE> <DEDENT> tid = self.repo.tree().hexsha <NEW_LINE> res = self._fetch_tree(tid, b'') <NEW_LINE> self._tree = res <NEW_LINE> return [x for x, y in res] <NEW_LINE> <DEDENT> def fetch_subtree(self, key): <NEW_LINE> <INDENT> k = normalize_key(key, True) <NEW_LINE> self.fetch_tree() <NEW_LINE> dic = dict(self._tree) <NEW_LINE> if k not in dic: <NEW_LINE> <INDENT> return Result.Err() <NEW_LINE> <DEDENT> l = len(key) <NEW_LINE> return Result.Ok( [tpath[l:] for tpath in self.fetch_tree() if tpath.startswith(k)]) <NEW_LINE> <DEDENT> def fetch_file(self, key): <NEW_LINE> <INDENT> k = normalize_key(key) <NEW_LINE> self.fetch_tree() <NEW_LINE> dic = dict(self._tree) <NEW_LINE> if k not in dic: <NEW_LINE> <INDENT> return Result.Err() <NEW_LINE> <DEDENT> return parse_object(self.get_object(dic[k])) <NEW_LINE> <DEDENT> def cleanup(self): <NEW_LINE> <INDENT> rmtree(self.dir) | A class responsible for git remotes | 625990433c8af77a43b688be |
class ValidationWarning: <NEW_LINE> <INDENT> def __init__(self, message: str, value: str = None, row: int = -1, column: str = None): <NEW_LINE> <INDENT> self.message = message <NEW_LINE> self.value = value <NEW_LINE> self.row = row <NEW_LINE> self.column = column <NEW_LINE> <DEDENT> def __str__(self) -> str: <NEW_LINE> <INDENT> if self.row is not None and self.column is not None and self.value is not None: <NEW_LINE> <INDENT> return '{{row: {}, column: "{}"}}: "{}" {}'.format(self.row, self.column, self.value, self.message) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self.message | Represents a difference between the schema and data frame, found during the validation of the data frame | 62599043004d5f362081f967 |
class VisualSoftDotAttention(nn.Module): <NEW_LINE> <INDENT> def __init__(self, h_dim, v_dim, dot_dim=256): <NEW_LINE> <INDENT> super(VisualSoftDotAttention, self).__init__() <NEW_LINE> self.linear_in_h = nn.Linear(h_dim, dot_dim, bias=True) <NEW_LINE> self.linear_in_v = nn.Linear(v_dim, dot_dim, bias=True) <NEW_LINE> self.sm = nn.Softmax(dim=1) <NEW_LINE> <DEDENT> def forward(self, h, visual_context, mask=None): <NEW_LINE> <INDENT> target = self.linear_in_h(h).unsqueeze(2) <NEW_LINE> context = self.linear_in_v(visual_context) <NEW_LINE> attn = torch.bmm(context, target).squeeze(2) <NEW_LINE> attn = self.sm(attn) <NEW_LINE> attn3 = attn.view(attn.size(0), 1, attn.size(1)) <NEW_LINE> weighted_context = torch.bmm( attn3, visual_context).squeeze(1) <NEW_LINE> return weighted_context, attn | Visual Dot Attention Layer. | 62599043a4f1c619b294f809 |
class AILocalData(AITask): <NEW_LINE> <INDENT> date = ClosestDateParameter(default=datetime.date.today()) <NEW_LINE> batchsize = luigi.IntParameter(default=25000, significant=False) <NEW_LINE> def requires(self): <NEW_LINE> <INDENT> return AILicensing(date=self.date, drop=True) <NEW_LINE> <DEDENT> def run(self): <NEW_LINE> <INDENT> output = shellout("""unpigz -c {input} | span-local-data -b {size} | LC_ALL=C sort --ignore-case -S20% -t, -k3 > {output} """, size=self.batchsize, input=self.input().path) <NEW_LINE> luigi.LocalTarget(output).move(self.output().path) <NEW_LINE> <DEDENT> def output(self): <NEW_LINE> <INDENT> return luigi.LocalTarget(path=self.path(ext='csv')) | Extract a CSV about source, id, doi and institutions for deduplication. | 625990431f5feb6acb163ef8 |
class MattijnError(Exception): <NEW_LINE> <INDENT> pass | 42000
42001
42002
42003
42004
42005 | 62599043498bea3a75a58e21 |
class Billboard(_CZMLBaseObject): <NEW_LINE> <INDENT> image = None <NEW_LINE> show = None <NEW_LINE> _color = None <NEW_LINE> scale = None <NEW_LINE> def __init__(self, color=None, image=None, scale=None): <NEW_LINE> <INDENT> self.image = image <NEW_LINE> self.color = color <NEW_LINE> self.scale = scale <NEW_LINE> <DEDENT> @property <NEW_LINE> def color(self): <NEW_LINE> <INDENT> if self._color is not None: <NEW_LINE> <INDENT> return self._color.data() <NEW_LINE> <DEDENT> <DEDENT> @color.setter <NEW_LINE> def color(self, color): <NEW_LINE> <INDENT> if isinstance(color, Color): <NEW_LINE> <INDENT> self._color = color <NEW_LINE> <DEDENT> elif isinstance(color, dict): <NEW_LINE> <INDENT> col = Color() <NEW_LINE> col.load(color) <NEW_LINE> self._color = col <NEW_LINE> <DEDENT> elif color is None: <NEW_LINE> <INDENT> self._color = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError <NEW_LINE> <DEDENT> <DEDENT> def data(self): <NEW_LINE> <INDENT> d = {} <NEW_LINE> if self.show: <NEW_LINE> <INDENT> d['show'] = True <NEW_LINE> <DEDENT> if self.show == False: <NEW_LINE> <INDENT> d['show'] = False <NEW_LINE> <DEDENT> if self.image: <NEW_LINE> <INDENT> d['image'] = self.image <NEW_LINE> <DEDENT> if self.scale: <NEW_LINE> <INDENT> d['scale'] = self.scale <NEW_LINE> <DEDENT> if self.color is not None: <NEW_LINE> <INDENT> d['color'] = self.color <NEW_LINE> <DEDENT> return d <NEW_LINE> <DEDENT> def load(self, data): <NEW_LINE> <INDENT> self.show = data.get('show', None) <NEW_LINE> self.image = data.get('image', None) <NEW_LINE> self.scale = data.get('scale', None) <NEW_LINE> self.color = data.get('color', None) | A billboard, or viewport-aligned image. The billboard is positioned
in the scene by the position property.
A billboard is sometimes called a marker. | 62599043b57a9660fecd2d80 |
class InlineQueryResultCachedAudio(InlineQueryResult): <NEW_LINE> <INDENT> def __init__(self, id, audio_file_id, caption=None, reply_markup=None, input_message_content=None, **kwargs): <NEW_LINE> <INDENT> super(InlineQueryResultCachedAudio, self).__init__('audio', id) <NEW_LINE> self.audio_file_id = audio_file_id <NEW_LINE> if caption: <NEW_LINE> <INDENT> self.caption = caption <NEW_LINE> <DEDENT> if reply_markup: <NEW_LINE> <INDENT> self.reply_markup = reply_markup <NEW_LINE> <DEDENT> if input_message_content: <NEW_LINE> <INDENT> self.input_message_content = input_message_content <NEW_LINE> <DEDENT> <DEDENT> @classmethod <NEW_LINE> def de_json(cls, data, bot): <NEW_LINE> <INDENT> data = super(InlineQueryResultCachedAudio, cls).de_json(data, bot) <NEW_LINE> if not data: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> data['reply_markup'] = InlineKeyboardMarkup.de_json(data.get('reply_markup'), bot) <NEW_LINE> data['input_message_content'] = InputMessageContent.de_json( data.get('input_message_content'), bot) <NEW_LINE> return cls(**data) | Represents a link to an mp3 audio file stored on the Telegram servers. By default, this audio
file will be sent by the user. Alternatively, you can use :attr:`input_message_content` to
send a message with the specified content instead of the audio.
Attributes:
type (:obj:`str`): 'audio'.
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
audio_file_id (:obj:`str`): A valid file identifier for the audio file.
caption (:obj:`str`): Optional. Caption, 0-200 characters
reply_markup (:class:`telegram.InlineKeyboardMarkup`): Optional. Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`): Optional. Content of the
message to be sent instead of the audio.
Args:
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
audio_file_id (:obj:`str`): A valid file identifier for the audio file.
caption (:obj:`str`, optional): Caption, 0-200 characters
reply_markup (:class:`telegram.InlineKeyboardMarkup`, optional): Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`, optional): Content of the
message to be sent instead of the audio.
**kwargs (:obj:`dict`): Arbitrary keyword arguments. | 62599043d164cc617582227b |
class InvalidClientError(OAuthError): <NEW_LINE> <INDENT> error = 'invalid_client' <NEW_LINE> description = __doc__ | Either your client_id or client_secret is invalid. | 625990438a43f66fc4bf3496 |
class PlayerCmdSet(CmdSet): <NEW_LINE> <INDENT> key = "DefaultPlayer" <NEW_LINE> priority = -10 <NEW_LINE> def at_cmdset_creation(self): <NEW_LINE> <INDENT> self.add(player.CmdOOCLook()) <NEW_LINE> self.add(player.CmdIC()) <NEW_LINE> self.add(player.CmdOOC()) <NEW_LINE> self.add(player.CmdCharCreate()) <NEW_LINE> self.add(player.CmdWho()) <NEW_LINE> self.add(player.CmdOption()) <NEW_LINE> self.add(player.CmdQuit()) <NEW_LINE> self.add(player.CmdPassword()) <NEW_LINE> self.add(player.CmdColorTest()) <NEW_LINE> self.add(player.CmdQuell()) <NEW_LINE> self.add(building.CmdExamine()) <NEW_LINE> self.add(help.CmdHelp()) <NEW_LINE> self.add(system.CmdReload()) <NEW_LINE> self.add(system.CmdReset()) <NEW_LINE> self.add(system.CmdShutdown()) <NEW_LINE> self.add(system.CmdPy()) <NEW_LINE> self.add(admin.CmdDelPlayer()) <NEW_LINE> self.add(admin.CmdNewPassword()) <NEW_LINE> self.add(comms.CmdAddCom()) <NEW_LINE> self.add(comms.CmdDelCom()) <NEW_LINE> self.add(comms.CmdAllCom()) <NEW_LINE> self.add(comms.CmdChannels()) <NEW_LINE> self.add(comms.CmdCdestroy()) <NEW_LINE> self.add(comms.CmdChannelCreate()) <NEW_LINE> self.add(comms.CmdClock()) <NEW_LINE> self.add(comms.CmdCBoot()) <NEW_LINE> self.add(comms.CmdCemit()) <NEW_LINE> self.add(comms.CmdCWho()) <NEW_LINE> self.add(comms.CmdCdesc()) <NEW_LINE> self.add(comms.CmdPage()) <NEW_LINE> self.add(comms.CmdIRC2Chan()) <NEW_LINE> self.add(comms.CmdRSS2Chan()) | Implements the player command set. | 62599043d7e4931a7ef3d37a |
class WENSS_Survey(HEASARC_Survey, SkyView_Survey): <NEW_LINE> <INDENT> def __init__(self, coord, radius, **kwargs): <NEW_LINE> <INDENT> HEASARC_Survey.__init__(self, coord, radius, 'wenss', **kwargs) <NEW_LINE> SkyView_Survey.__init__(self, coord, radius, 'wenss', **kwargs) <NEW_LINE> self.survey = 'WENSS' | Uses SkyView and HEASARC to get both images and catalogs for the WSRT northern sky survey at 325 MHz.
| 6259904363b5f9789fe86470 |
class Divzero(NumError): <NEW_LINE> <INDENT> pass | Error raised when dividing by zero. | 62599043462c4b4f79dbcd01
class JSLintTest(gocept.jslint.TestCase): <NEW_LINE> <INDENT> jshint_command = os.environ.get('JSHINT_COMMAND', '/bin/false') <NEW_LINE> options = (gocept.jslint.TestCase.options + ( 'evil', 'eqnull', 'multistr', 'sub', 'undef', 'browser', 'jquery', 'devel' )) | Base test class for JS lint tests. | 6259904326238365f5fade5e |
class EditableFile(object): <NEW_LINE> <INDENT> platform_default_editors: Mapping[str, str] = collections.defaultdict( lambda: 'edit', win32='notepad', linux2='vi', ) <NEW_LINE> encoding = 'utf-8' <NEW_LINE> def __init__(self, data='', content_type='text/plain'): <NEW_LINE> <INDENT> self.data = str(data) <NEW_LINE> self.content_type = content_type <NEW_LINE> <DEDENT> def __enter__(self): <NEW_LINE> <INDENT> extension = mimetypes.guess_extension(self.content_type) or '' <NEW_LINE> fobj, self.name = tempfile.mkstemp(extension) <NEW_LINE> os.write(fobj, self.data.encode(self.encoding)) <NEW_LINE> os.close(fobj) <NEW_LINE> return self <NEW_LINE> <DEDENT> def read(self): <NEW_LINE> <INDENT> with open(self.name, 'rb') as f: <NEW_LINE> <INDENT> return f.read().decode(self.encoding) <NEW_LINE> <DEDENT> <DEDENT> def __exit__(self, *tb_info): <NEW_LINE> <INDENT> os.remove(self.name) <NEW_LINE> <DEDENT> def edit(self): <NEW_LINE> <INDENT> self.changed = False <NEW_LINE> with self: <NEW_LINE> <INDENT> editor = self.get_editor() <NEW_LINE> cmd = [editor, self.name] <NEW_LINE> try: <NEW_LINE> <INDENT> res = subprocess.call(cmd) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> print("Error launching editor %(editor)s" % locals()) <NEW_LINE> print(e) <NEW_LINE> return <NEW_LINE> <DEDENT> if res != 0: <NEW_LINE> <INDENT> msg = '%(editor)s returned error status %(res)d' % locals() <NEW_LINE> raise EditProcessException(msg) <NEW_LINE> <DEDENT> new_data = self.read() <NEW_LINE> if new_data != self.data: <NEW_LINE> <INDENT> self.changed = self._save_diff(self.data, new_data) <NEW_LINE> self.data = new_data <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> @staticmethod <NEW_LINE> def _search_env(keys): <NEW_LINE> <INDENT> matches = (os.environ[key] for key in keys if key in os.environ) <NEW_LINE> return next(matches, None) <NEW_LINE> <DEDENT> def get_editor(self): <NEW_LINE> <INDENT> env_search = ['EDITOR'] <NEW_LINE> if 'xml' in self.content_type: <NEW_LINE> <INDENT> env_search.insert(0, 'XML_EDITOR') <NEW_LINE> <DEDENT> default_editor = self.platform_default_editors[sys.platform] <NEW_LINE> return self._search_env(env_search) or default_editor <NEW_LINE> <DEDENT> @staticmethod <NEW_LINE> def _save_diff(*versions): <NEW_LINE> <INDENT> def get_lines(content): <NEW_LINE> <INDENT> return list(io.StringIO(content)) <NEW_LINE> <DEDENT> lines = map(get_lines, versions) <NEW_LINE> diff = difflib.context_diff(*lines) <NEW_LINE> return tuple(diff) | EditableFile saves some data to a temporary file, launches a
platform editor for interactive editing, and then reloads the data,
setting .changed to True if the data was edited.
e.g.::
x = EditableFile('foo')
x.edit()
if x.changed:
print(x.data)
The EDITOR environment variable can define which executable to use
(also XML_EDITOR if the content-type to edit includes 'xml'). If no
EDITOR is defined, defaults to 'notepad' on Windows and 'edit' on
other platforms. | 62599043d53ae8145f919762 |
class MatchExpression(Element): <NEW_LINE> <INDENT> typeof = 'match_expression' <NEW_LINE> @classmethod <NEW_LINE> def create(cls, name, user=None, network_element=None, domain_name=None, zone=None, executable=None): <NEW_LINE> <INDENT> ref_list = [] <NEW_LINE> if user: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> if network_element: <NEW_LINE> <INDENT> ref_list.append(network_element.href) <NEW_LINE> <DEDENT> if domain_name: <NEW_LINE> <INDENT> ref_list.append(domain_name.href) <NEW_LINE> <DEDENT> if zone: <NEW_LINE> <INDENT> ref_list.append(zone.href) <NEW_LINE> <DEDENT> if executable: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> json = {'name': name, 'ref': ref_list} <NEW_LINE> return ElementCreator(cls, json) <NEW_LINE> <DEDENT> def values(self): <NEW_LINE> <INDENT> return [Element.from_href(ref) for ref in self.data.get('ref')] | A match expression is used in the source / destination / service fields to group
together elements into an 'AND'ed configuration. For example, a normal rule might
have a source field that could include network=172.18.1.0/24 and zone=Internal
objects. A match expression enables you to AND these elements together to enforce
the match requires both. Logically it would be represented as
(network 172.18.1.0/24 AND zone Internal).
>>> from smc.elements.network import Host, Zone
>>> from smc.policy.rule_elements import MatchExpression
>>> from smc.policy.layer3 import FirewallPolicy
>>> match = MatchExpression.create(name='mymatch', network_element=Host('kali'), zone=Zone('Mail'))
>>> policy = FirewallPolicy('smcpython')
>>> policy.fw_ipv4_access_rules.create(name='myrule', sources=[match], destinations='any', services='any')
'http://172.18.1.150:8082/6.2/elements/fw_policy/261/fw_ipv4_access_rule/2099740'
>>> rule = policy.search_rule('myrule')
...
>>> for source in rule[0].sources.all():
... print(source, source.values())
...
MatchExpression(name=MatchExpression _1491760686976_2) [Zone(name=Mail), Host(name=kali)]
.. note::
MatchExpression is currently only supported on source and destination fields. | 625990436fece00bbaccccb7 |
class GameStats(): <NEW_LINE> <INDENT> def __init__(self, ai_settings): <NEW_LINE> <INDENT> self.ai_settings = ai_settings <NEW_LINE> self.reset_stats() <NEW_LINE> self.game_active = False <NEW_LINE> self.high_score = 0 <NEW_LINE> <DEDENT> def reset_stats(self): <NEW_LINE> <INDENT> self.ship_left = self.ai_settings.ship_limit <NEW_LINE> self.score = 0 <NEW_LINE> self.level = 1 | Track statistics for alien invasion | 6259904345492302aabfd7df |
class About(wx.Dialog): <NEW_LINE> <INDENT> def __init__(self, parent): <NEW_LINE> <INDENT> wx.Dialog.__init__(self, parent, wx.ID_ANY, 'About pyBrew', style=wx.DEFAULT_DIALOG_STYLE|wx.CLOSE_BOX) <NEW_LINE> lines = [] <NEW_LINE> lines.append(wx.StaticText(self,-1,'This is pyBrew Version 1.1')) <NEW_LINE> lines.append(wx.StaticText(self,-1,'Created by Tyler Voskuilen')) <NEW_LINE> lines.append(wx.StaticText(self,-1,'Copyright 2011')) <NEW_LINE> vBox = wx.BoxSizer(wx.VERTICAL) <NEW_LINE> vBox.AddSpacer((-1,50)) <NEW_LINE> for line in lines: <NEW_LINE> <INDENT> vBox.Add(line, 0, wx.LEFT|wx.RIGHT, 50) <NEW_LINE> <DEDENT> vBox.AddSpacer((-1,50)) <NEW_LINE> self.SetSizer(vBox) <NEW_LINE> vBox.Fit(self) | Show info about the program | 6259904326068e7796d4dc4a |
class TroubleshootingDetails(msrest.serialization.Model): <NEW_LINE> <INDENT> _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'reason_type': {'key': 'reasonType', 'type': 'str'}, 'summary': {'key': 'summary', 'type': 'str'}, 'detail': {'key': 'detail', 'type': 'str'}, 'recommended_actions': {'key': 'recommendedActions', 'type': '[TroubleshootingRecommendedActions]'}, } <NEW_LINE> def __init__( self, **kwargs ): <NEW_LINE> <INDENT> super(TroubleshootingDetails, self).__init__(**kwargs) <NEW_LINE> self.id = kwargs.get('id', None) <NEW_LINE> self.reason_type = kwargs.get('reason_type', None) <NEW_LINE> self.summary = kwargs.get('summary', None) <NEW_LINE> self.detail = kwargs.get('detail', None) <NEW_LINE> self.recommended_actions = kwargs.get('recommended_actions', None) | Information gained from troubleshooting of specified resource.
:param id: The id of the get troubleshoot operation.
:type id: str
:param reason_type: Reason type of failure.
:type reason_type: str
:param summary: A summary of troubleshooting.
:type summary: str
:param detail: Details on troubleshooting results.
:type detail: str
:param recommended_actions: List of recommended actions.
:type recommended_actions:
list[~azure.mgmt.network.v2020_06_01.models.TroubleshootingRecommendedActions] | 625990431d351010ab8f4e23 |
class Channel: <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.userdict = IRCDict() <NEW_LINE> self.operdict = IRCDict() <NEW_LINE> self.voiceddict = IRCDict() <NEW_LINE> self.modes = {} <NEW_LINE> <DEDENT> def users(self): <NEW_LINE> <INDENT> return self.userdict.keys() <NEW_LINE> <DEDENT> def opers(self): <NEW_LINE> <INDENT> return self.operdict.keys() <NEW_LINE> <DEDENT> def voiced(self): <NEW_LINE> <INDENT> return self.voiceddict.keys() <NEW_LINE> <DEDENT> def has_user(self, nick): <NEW_LINE> <INDENT> return nick in self.userdict <NEW_LINE> <DEDENT> def is_oper(self, nick): <NEW_LINE> <INDENT> return nick in self.operdict <NEW_LINE> <DEDENT> def is_voiced(self, nick): <NEW_LINE> <INDENT> return nick in self.voiceddict <NEW_LINE> <DEDENT> def add_user(self, nick): <NEW_LINE> <INDENT> self.userdict[nick] = 1 <NEW_LINE> <DEDENT> def remove_user(self, nick): <NEW_LINE> <INDENT> for d in self.userdict, self.operdict, self.voiceddict: <NEW_LINE> <INDENT> if nick in d: <NEW_LINE> <INDENT> del d[nick] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def change_nick(self, before, after): <NEW_LINE> <INDENT> self.userdict[after] = 1 <NEW_LINE> del self.userdict[before] <NEW_LINE> if before in self.operdict: <NEW_LINE> <INDENT> self.operdict[after] = 1 <NEW_LINE> del self.operdict[before] <NEW_LINE> <DEDENT> if before in self.voiceddict: <NEW_LINE> <INDENT> self.voiceddict[after] = 1 <NEW_LINE> del self.voiceddict[before] <NEW_LINE> <DEDENT> <DEDENT> def set_mode(self, mode, value=None): <NEW_LINE> <INDENT> if mode == b"o": <NEW_LINE> <INDENT> self.operdict[value] = 1 <NEW_LINE> <DEDENT> elif mode == b"v": <NEW_LINE> <INDENT> self.voiceddict[value] = 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.modes[mode] = value <NEW_LINE> <DEDENT> <DEDENT> def clear_mode(self, mode, value=None): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if mode == b"o": <NEW_LINE> <INDENT> del self.operdict[value] <NEW_LINE> <DEDENT> elif mode == b"v": <NEW_LINE> <INDENT> del self.voiceddict[value] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> del self.modes[mode] <NEW_LINE> <DEDENT> <DEDENT> except KeyError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> def has_mode(self, mode): <NEW_LINE> <INDENT> return mode in self.modes <NEW_LINE> <DEDENT> def is_moderated(self): <NEW_LINE> <INDENT> return self.has_mode(b"m") <NEW_LINE> <DEDENT> def is_secret(self): <NEW_LINE> <INDENT> return self.has_mode(b"s") <NEW_LINE> <DEDENT> def is_protected(self): <NEW_LINE> <INDENT> return self.has_mode(b"p") <NEW_LINE> <DEDENT> def has_topic_lock(self): <NEW_LINE> <INDENT> return self.has_mode(b"t") <NEW_LINE> <DEDENT> def is_invite_only(self): <NEW_LINE> <INDENT> return self.has_mode(b"i") <NEW_LINE> <DEDENT> def has_allow_external_messages(self): <NEW_LINE> <INDENT> return self.has_mode(b"n") <NEW_LINE> <DEDENT> def has_limit(self): <NEW_LINE> <INDENT> return self.has_mode(b"l") <NEW_LINE> <DEDENT> def limit(self): <NEW_LINE> <INDENT> if self.has_limit(): <NEW_LINE> <INDENT> return self.modes[b"l"] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> <DEDENT> def has_key(self): <NEW_LINE> <INDENT> return self.has_mode(b"k") <NEW_LINE> <DEDENT> def key(self): <NEW_LINE> <INDENT> if self.has_key(): <NEW_LINE> <INDENT> return self.modes[b"k"] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None | A class for keeping information about an IRC channel.
This class can be improved a lot. | 625990446e29344779b01957 |
class Metaio(Package): <NEW_LINE> <INDENT> homepage = "https://www.lsc-group.phys.uwm.edu/daswg/projects/metaio.html" <NEW_LINE> url = "http://software.ligo.org/lscsoft/source/metaio-8.4.0.tar.gz" <NEW_LINE> version('8.4.0', '65661cfb47643623bc8cbe97ddbe7b91') <NEW_LINE> version('8.3.1', '2a68dc6aed8da8582cee66d4b37b50da') <NEW_LINE> version('8.3.0', '4d244197051fc1c1a9c2c5f82e14dc4c') <NEW_LINE> variant('matlab', False, 'Enable Matlab support') <NEW_LINE> depends_on("zlib") <NEW_LINE> depends_on("matlab", when="+matlab") <NEW_LINE> def install(self, spec, prefix): <NEW_LINE> <INDENT> config_args = ['--prefix=%s' % prefix] <NEW_LINE> config_args.append('--with-matlab' if '+matlab' in spec else '--without-matlab') <NEW_LINE> configure(*config_args) <NEW_LINE> make() <NEW_LINE> make("install") | LIGO Light-Weight XML Library.
This code implements a simple recursive-descent parsing scheme
for LIGO_LW files, based on the example in Chapter 2 of
"Compilers: Principles, Techniques and Tools" by Aho, Sethi and
Ullman. | 62599044596a897236128f31 |
class Meta(SelfClosingTag): <NEW_LINE> <INDENT> tag = "meta charset" <NEW_LINE> def render(self, file_out, cur_ind=""): <NEW_LINE> <INDENT> file_out.write(cur_ind + "<{}=\"UTF-8\" /> \n".format(self.tag)) | Meta subclass of SelfClosingTag | 625990443c8af77a43b688bf |
class Activity(object): <NEW_LINE> <INDENT> def __init__(self, date, hour, name, description): <NEW_LINE> <INDENT> self.date = date <NEW_LINE> self.hour = hour <NEW_LINE> self.name = name <NEW_LINE> self.description = description <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> from pprint import pformat <NEW_LINE> return pformat(vars(self), indent=4, width=1) | Represent an activity definition. | 625990441f5feb6acb163efa |
class C2H5OH2(Chemical): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> super(C2H5OH2, self).__init__() <NEW_LINE> pos = self.get_positions() <NEW_LINE> adj = self.get_edges() <NEW_LINE> sym = [strToSym('H'), strToSym('H'), strToSym('H'), strToSym('C'), strToSym('C'), strToSym('H'), strToSym('H'), strToSym('O'), strToSym('H'), strToSym('H')] <NEW_LINE> self.setData(pos=pos, adj=adj, pxMode=False, symbol=sym, antialias=True) <NEW_LINE> <DEDENT> def get_positions(self): <NEW_LINE> <INDENT> return np.array([ [-1, 5], [-2, 6], [-1, 7], [-1, 6], [0, 6], [0, 5], [0, 7], [1, 6], [2, 6], [1, 7], ], dtype=float) <NEW_LINE> <DEDENT> def get_edges(self): <NEW_LINE> <INDENT> return np.array([ [0, 3], [1, 3], [2, 3], [3, 4], [5, 4], [6, 4], [4, 7], [7, 8], [7, 9], ]) | Class for C2H5OH2.
Needs plus sign | 62599044ec188e330fdf9ba0 |
class PrivateClassroomApiTests(TestCase): <NEW_LINE> <INDENT> def setUp(self): <NEW_LINE> <INDENT> self.client = APIClient() <NEW_LINE> self.client.force_authenticate(sample_user()) <NEW_LINE> <DEDENT> def test_retrive_classroom_list(self): <NEW_LINE> <INDENT> sample_classroom(name='CR 1', identifier='BC01') <NEW_LINE> sample_classroom(name='CR 2', identifier='AB01', grade=sample_grade()) <NEW_LINE> classrooms = Classroom.objects.all().order_by('identifier') <NEW_LINE> serializer = ClassroomListSerializer(classrooms, many=True) <NEW_LINE> res = self.client.get(CLASSROOM_LIST_URL) <NEW_LINE> self.assertEqual(res.status_code, status.HTTP_200_OK) <NEW_LINE> self.assertEqual(res.data, serializer.data) <NEW_LINE> <DEDENT> def test_retrieve_classroom_detail(self): <NEW_LINE> <INDENT> classroom = sample_classroom(name='Test', grade=sample_grade()) <NEW_LINE> serializer = ClassroomDetailSerializer(classroom) <NEW_LINE> res = self.client.get(classroom_detail_url(classroom.id)) <NEW_LINE> self.assertEqual(res.status_code, status.HTTP_200_OK) <NEW_LINE> self.assertEqual(res.data, serializer.data) <NEW_LINE> <DEDENT> def test_create_classroom_successful(self): <NEW_LINE> <INDENT> payload = sample_classroom_payload( name='Testing', grade=sample_grade().id, days_of_week='2,4' ) <NEW_LINE> res = self.client.post(CLASSROOM_LIST_URL, payload) <NEW_LINE> self.assertEqual(res.status_code, status.HTTP_201_CREATED) <NEW_LINE> self.assertTrue(Classroom.objects.filter(**payload).exists()) <NEW_LINE> <DEDENT> def test_create_classroom_unsuccessful(self): <NEW_LINE> <INDENT> payloads = [ sample_classroom_payload( name='Test Class', days_of_week='1,2,9' ), sample_classroom_payload( name='Test Class', days_of_week='24' ), sample_classroom_payload( name='Test Class', days_of_week='2.4' ), ] <NEW_LINE> for payload in payloads: <NEW_LINE> <INDENT> res = self.client.post(CLASSROOM_LIST_URL, payload) <NEW_LINE> self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) <NEW_LINE> self.assertFalse(Classroom.objects.filter(**payload).exists()) <NEW_LINE> <DEDENT> <DEDENT> def test_full_update(self): <NEW_LINE> <INDENT> classroom = sample_classroom(name="Test CR", room='A45') <NEW_LINE> payload = sample_classroom_payload( name="Other name", days_of_week='1', grade=sample_grade().id ) <NEW_LINE> res = self.client.put(classroom_detail_url(classroom.id), payload) <NEW_LINE> classroom.refresh_from_db() <NEW_LINE> self.assertEqual(res.status_code, status.HTTP_200_OK) <NEW_LINE> self.assertEqual(Classroom.objects.get(**payload), classroom) <NEW_LINE> <DEDENT> def test_partial_update(self): <NEW_LINE> <INDENT> classroom = sample_classroom(name="Test CR", room='A45') <NEW_LINE> payload = {'name': 'Other name', 'room': 'Other room'} <NEW_LINE> res = self.client.patch(classroom_detail_url(classroom.id), payload) <NEW_LINE> classroom.refresh_from_db() <NEW_LINE> self.assertEqual(res.status_code, status.HTTP_200_OK) <NEW_LINE> self.assertEqual(classroom.name, payload['name']) <NEW_LINE> self.assertEqual(classroom.room, payload['room']) | Test the classroom api with authenticated requests | 62599044be383301e0254b1c |
class MinimaxAgent(MultiAgentSearchAgent): <NEW_LINE> <INDENT> def getAction(self, gameState): <NEW_LINE> <INDENT> def max_score(gameState,depth,ghosts): <NEW_LINE> <INDENT> if gameState.isWin() or gameState.isLose() or (depth==0): <NEW_LINE> <INDENT> return self.evaluationFunction(gameState) <NEW_LINE> <DEDENT> totalLegalActions = gameState.getLegalActions(0) <NEW_LINE> v = -(float("inf")) <NEW_LINE> for action in totalLegalActions: <NEW_LINE> <INDENT> v = max(v,min_score(gameState.generateSuccessor(0,action),depth, 1, ghosts)) <NEW_LINE> <DEDENT> return v <NEW_LINE> <DEDENT> def min_score(gameState, depth, agentNumber, ghosts): <NEW_LINE> <INDENT> if gameState.isWin() or gameState.isLose() or (depth==0): <NEW_LINE> <INDENT> return self.evaluationFunction(gameState) <NEW_LINE> <DEDENT> v = (float("inf")) <NEW_LINE> totalLegalActions = gameState.getLegalActions(agentNumber) <NEW_LINE> if(agentNumber == ghosts): <NEW_LINE> <INDENT> for action in totalLegalActions: <NEW_LINE> <INDENT> v = min(v, max_score(gameState.generateSuccessor(agentNumber, action), depth-1, ghosts)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for action in totalLegalActions: <NEW_LINE> <INDENT> v = min(v, min_score(gameState.generateSuccessor(agentNumber, action), depth, agentNumber+1, ghosts)) <NEW_LINE> <DEDENT> <DEDENT> return v <NEW_LINE> <DEDENT> score = -(float("inf")) <NEW_LINE> ghosts = gameState.getNumAgents() -1 <NEW_LINE> totalLegalActions = gameState.getLegalActions(0) <NEW_LINE> priorityQueue = util.PriorityQueue() <NEW_LINE> bestAction = Directions.STOP <NEW_LINE> for action in totalLegalActions: <NEW_LINE> <INDENT> successor = gameState.generateSuccessor(0,action) <NEW_LINE> score = min_score(successor, self.depth, 1, ghosts) <NEW_LINE> priorityQueue.push(action,score) <NEW_LINE> <DEDENT> while not priorityQueue.isEmpty(): <NEW_LINE> <INDENT> bestAction = priorityQueue.pop() <NEW_LINE> <DEDENT> return bestAction <NEW_LINE> util.raiseNotDefined() | Your minimax agent (question 2) | 62599044d164cc617582227d |
class MutatorFlag(object): <NEW_LINE> <INDENT> NO_LOG_SYNC = 1 <NEW_LINE> IGNORE_UNKNOWN_CFS = 2 <NEW_LINE> NO_LOG = 4 <NEW_LINE> _VALUES_TO_NAMES = { 1: "NO_LOG_SYNC", 2: "IGNORE_UNKNOWN_CFS", 4: "NO_LOG", } <NEW_LINE> _NAMES_TO_VALUES = { "NO_LOG_SYNC": 1, "IGNORE_UNKNOWN_CFS": 2, "NO_LOG": 4, } | Mutator creation flags
NO_LOG_SYNC: Do not sync the commit log
IGNORE_UNKNOWN_CFS: Don't throw exception if mutator writes to unknown column family
NO_LOG: Don't write to the commit log | 62599044d7e4931a7ef3d37c |
class DuelingDQN(DQN): <NEW_LINE> <INDENT> def _build_net(self): <NEW_LINE> <INDENT> self.eval_net = PADuelingDQNNet(self.n_states, self.n_actions) <NEW_LINE> self.target_net = PADuelingDQNNet(self.n_states, self.n_actions) | DuelingDQN and DQN differ only in the Net, so it can inherit directly from DQN | 62599044d99f1b3c44d069a4
class _KeyBuffer(object): <NEW_LINE> <INDENT> def __init__(self, bufferSize, kb_id): <NEW_LINE> <INDENT> self.bufferSize = bufferSize <NEW_LINE> self._evts = deque() <NEW_LINE> allInds, names, keyboards = hid.get_keyboard_indices() <NEW_LINE> self._keys = deque() <NEW_LINE> self._keysStillDown = deque() <NEW_LINE> if kb_id == -1: <NEW_LINE> <INDENT> self.dev = hid.Keyboard() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.dev = hid.Keyboard(kb_id) <NEW_LINE> <DEDENT> self.dev._create_queue(bufferSize, win_handle=None) <NEW_LINE> <DEDENT> def flush(self): <NEW_LINE> <INDENT> self._processEvts() <NEW_LINE> <DEDENT> def _flushEvts(self): <NEW_LINE> <INDENT> ptb.WaitSecs('YieldSecs', 0.00001) <NEW_LINE> while self.dev.flush(): <NEW_LINE> <INDENT> evt, remaining = self.dev.queue_get_event() <NEW_LINE> key = {} <NEW_LINE> key['keycode'] = int(evt['Keycode']) <NEW_LINE> key['down'] = bool(evt['Pressed']) <NEW_LINE> key['time'] = evt['Time'] <NEW_LINE> self._evts.append(key) <NEW_LINE> <DEDENT> <DEDENT> def getKeys(self, keyList=[], waitRelease=True, clear=True): <NEW_LINE> <INDENT> self._processEvts() <NEW_LINE> if not keyList and not waitRelease: <NEW_LINE> <INDENT> keyPresses = list(self._keysStillDown) <NEW_LINE> for k in list(self._keys): <NEW_LINE> <INDENT> if k not in keyPresses: <NEW_LINE> <INDENT> keyPresses.append(k) <NEW_LINE> <DEDENT> <DEDENT> if clear: <NEW_LINE> <INDENT> self._keys = deque() <NEW_LINE> self._keysStillDown = deque() <NEW_LINE> <DEDENT> return keyPresses <NEW_LINE> <DEDENT> keyPresses = deque() <NEW_LINE> for keyPress in self._keys: <NEW_LINE> <INDENT> if waitRelease and not keyPress.duration: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if keyList and keyPress.name not in keyList: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> keyPresses.append(keyPress) <NEW_LINE> <DEDENT> if clear: <NEW_LINE> <INDENT> for key in keyPresses: <NEW_LINE> <INDENT> self._keys.remove(key) <NEW_LINE> <DEDENT> <DEDENT> return keyPresses <NEW_LINE> <DEDENT> def _clearEvents(self): <NEW_LINE> <INDENT> self._evts.clear() <NEW_LINE> <DEDENT> def start(self): <NEW_LINE> <INDENT> self.dev.queue_start() <NEW_LINE> <DEDENT> def stop(self): <NEW_LINE> <INDENT> self.dev.queue_stop() <NEW_LINE> <DEDENT> def _processEvts(self): <NEW_LINE> <INDENT> self._flushEvts() <NEW_LINE> evts = deque(self._evts) <NEW_LINE> self._clearEvents() <NEW_LINE> for evt in evts: <NEW_LINE> <INDENT> if evt['down']: <NEW_LINE> <INDENT> newKey = KeyPress(code=evt['keycode'], tDown=evt['time']) <NEW_LINE> self._keys.append(newKey) <NEW_LINE> self._keysStillDown.append(newKey) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> for key in self._keysStillDown: <NEW_LINE> <INDENT> if key.code == evt['keycode']: <NEW_LINE> <INDENT> key.duration = evt['time'] - key.tDown <NEW_LINE> self._keysStillDown.remove(key) <NEW_LINE> break <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass | This is our own local buffer of events with more control over clearing.
The user shouldn't use this directly. It is fetched from the _keybuffers
It stores events from a single physical device
It's built on a collections.deque which is like a more efficient list
that also supports a max length | 6259904450485f2cf55dc28c |
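A minimal standalone sketch of the deque behaviour the last line refers to (plain collections.deque with illustrative values, unrelated to the keyboard device above):
from collections import deque

buf = deque(maxlen=3)          # a deque with a maximum length
for evt in range(5):
    buf.append(evt)            # oldest items are discarded once the limit is reached
# buf is now deque([2, 3, 4], maxlen=3): only the 3 most recent events remain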
class MockInstancesApi(MockApiBase): <NEW_LINE> <INDENT> def get(self, project='wrong_project', instance='wrong_instance', **kwargs): <NEW_LINE> <INDENT> return self.RegisterRequest({'project': project, 'instance': instance}, **kwargs) <NEW_LINE> <DEDENT> def insert(self, project='wrong_project', body='wrong_instance_resource', **kwargs): <NEW_LINE> <INDENT> return self.RegisterRequest({'project': project, 'body': body}, **kwargs) <NEW_LINE> <DEDENT> def delete(self, project='wrong_project', instance='wrong_instance', **kwargs): <NEW_LINE> <INDENT> return self.RegisterRequest({'project': project, 'instance': instance}, **kwargs) <NEW_LINE> <DEDENT> def list(self, project='wrong_project', zone='wrong_zone', **unused_kwargs): <NEW_LINE> <INDENT> return self.RegisterRequest({'project': project, 'zone': zone}) <NEW_LINE> <DEDENT> def addAccessConfig(self, project='wrong_project', instance='wrong_instance', network_interface='wrong_network_interface', body='wrong_instance_resource', **kwargs): <NEW_LINE> <INDENT> return self.RegisterRequest({'project': project, 'instance': instance, 'network_interface': network_interface, 'body': body}, **kwargs) <NEW_LINE> <DEDENT> def deleteAccessConfig(self, project='wrong_project', instance='wrong_instance', networkInterface='wrong_network_interface', accessConfig='wrong_access_config', **kwargs): <NEW_LINE> <INDENT> return self.RegisterRequest({'project': project, 'instance': instance, 'networkInterface': networkInterface, 'accessConfig': accessConfig}, **kwargs) <NEW_LINE> <DEDENT> def reset(self, project='wrong_project', instance='wrong_instance', zone='wrong_zone'): <NEW_LINE> <INDENT> return self.RegisterRequest({'project': project, 'instance': instance, 'zone': zone}) <NEW_LINE> <DEDENT> def setMetadata(self, project='wrong_project', instance='wrong_instance', body='wrong_metadata', zone='wrong_zone'): <NEW_LINE> <INDENT> return self.RegisterRequest({'project': project, 'instance': instance, 'body': body, 'zone': zone}) <NEW_LINE> <DEDENT> def setTags(self, project='wrong_project', instance='wrong_instance', body='wrong_tags', zone='wrong_zone'): <NEW_LINE> <INDENT> return self.RegisterRequest({'project': project, 'instance': instance, 'body': body, 'zone': zone}) <NEW_LINE> <DEDENT> def attachDisk(self, project='wrong_project', instance='wrong_instance', body='wrong_disk_body', zone='wrong_zone'): <NEW_LINE> <INDENT> return self.RegisterRequest({'project': project, 'instance': instance, 'body': body, 'zone': zone}) <NEW_LINE> <DEDENT> def detachDisk(self, project='wrong_project', instance='wrong_instance', deviceName='wrong_disk_name', zone='wrong_zone'): <NEW_LINE> <INDENT> return self.RegisterRequest({'project': project, 'instance': instance, 'deviceName': deviceName, 'zone': zone}) | Mock return result of the MockApi.instances() method. | 62599044d99f1b3c44d069a5 |
class GrpcContext: <NEW_LINE> <INDENT> def __init__(self, request_stream, response_stream): <NEW_LINE> <INDENT> self.request_stream = request_stream <NEW_LINE> self.response_stream = response_stream <NEW_LINE> <DEDENT> def set_code(self, code): <NEW_LINE> <INDENT> self.response_stream.trailers.set(("grpc-status", str(code))) <NEW_LINE> <DEDENT> def set_details(self, details): <NEW_LINE> <INDENT> self.response_stream.trailers.set(("grpc-message", details)) <NEW_LINE> <DEDENT> def invocation_metadata(self): <NEW_LINE> <INDENT> return self.request_stream.headers.for_application <NEW_LINE> <DEDENT> def send_initial_metadata(self, metadata): <NEW_LINE> <INDENT> self.response_stream.headers.set(*metadata) <NEW_LINE> <DEDENT> def set_trailing_metadata(self, metadata): <NEW_LINE> <INDENT> self.response_stream.trailers.set(*metadata) | Context object passed to GRPC methods.
Gives access to request metadata and allows response metadata to be set. | 6259904476d4e153a661dbf8 |
class LocationsResponse(msrest.serialization.Model): <NEW_LINE> <INDENT> _attribute_map = { 'value': {'key': 'value', 'type': '[Location]'}, } <NEW_LINE> def __init__( self, **kwargs ): <NEW_LINE> <INDENT> super(LocationsResponse, self).__init__(**kwargs) <NEW_LINE> self.value = kwargs.get('value', None) | Locations response.
:param value: locations.
:type value: list[~storage_import_export.models.Location] | 6259904491af0d3eaad3b12b |
class PiMotionArray(PiArrayOutput): <NEW_LINE> <INDENT> def flush(self): <NEW_LINE> <INDENT> super(PiMotionArray, self).flush() <NEW_LINE> width, height = self.size or self.camera.resolution <NEW_LINE> cols = ((width + 15) // 16) + 1 <NEW_LINE> rows = (height + 15) // 16 <NEW_LINE> b = self.getvalue() <NEW_LINE> frames = len(b) // (cols * rows * motion_dtype.itemsize) <NEW_LINE> self.array = np.frombuffer(b, dtype=motion_dtype).reshape((frames, rows, cols)) | Produces a 3-dimensional array of motion vectors from the H.264 encoder.
This custom output class is intended to be used with the *motion_output*
parameter of the :meth:`~picamera.PiCamera.start_recording` method. Once
recording has finished, the class generates a 3-dimensional numpy array
organized as (frames, rows, columns) where ``rows`` and ``columns`` are the
number of rows and columns of `macro-blocks`_ (16x16 pixel blocks) in the
original frames. There is always one extra column of macro-blocks present
in motion vector data.
The data-type of the :attr:`~PiArrayOutput.array` is an (x, y, sad)
structure where ``x`` and ``y`` are signed 1-byte values, and ``sad`` is an
unsigned 2-byte value representing the `sum of absolute differences`_ of
the block. For example::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
with picamera.array.PiMotionArray(camera) as output:
camera.resolution = (640, 480)
camera.start_recording(
'/dev/null', format='h264', motion_output=output)
camera.wait_recording(30)
camera.stop_recording()
print('Captured %d frames' % output.array.shape[0])
print('Frames are %dx%d blocks big' % (
output.array.shape[2], output.array.shape[1]))
If you are using the GPU resizer with your recording, use the optional
*size* parameter to specify the resizer's output resolution when
constructing the array::
import picamera
import picamera.array
with picamera.PiCamera() as camera:
camera.resolution = (640, 480)
with picamera.array.PiMotionArray(camera, size=(320, 240)) as output:
camera.start_recording(
'/dev/null', format='h264', motion_output=output,
resize=(320, 240))
camera.wait_recording(30)
camera.stop_recording()
print('Captured %d frames' % output.array.shape[0])
print('Frames are %dx%d blocks big' % (
output.array.shape[2], output.array.shape[1]))
.. note::
This class is not suitable for real-time analysis of motion vector
data. See the :class:`PiMotionAnalysis` class instead.
.. _macro-blocks: http://en.wikipedia.org/wiki/Macroblock
.. _sum of absolute differences: http://en.wikipedia.org/wiki/Sum_of_absolute_differences | 625990446e29344779b01959 |
class HiddenProducts(grok.GlobalUtility): <NEW_LINE> <INDENT> implements(INonInstallable) <NEW_LINE> grok.name('iloactemp.backgroundnote.upgrades') <NEW_LINE> def getNonInstallableProducts(self): <NEW_LINE> <INDENT> return [ 'iloactemp.backgroundnote.upgrades', ] | This hides the upgrade profiles from the quick installer tool. | 6259904415baa72349463299 |
class TotalEnrollmentDAO(EnrollmentsDAO): <NEW_LINE> <INDENT> DTO = TotalEnrollmentDTO <NEW_LINE> ENTITY = TotalEnrollmentEntity <NEW_LINE> @classmethod <NEW_LINE> def get(cls, namespace_name): <NEW_LINE> <INDENT> return cls.load_or_default(namespace_name).get() <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def set(cls, namespace_name, count): <NEW_LINE> <INDENT> dto = cls.new_dto(namespace_name, the_dict={}) <NEW_LINE> dto.set(count) <NEW_LINE> cls._save(dto) <NEW_LINE> return dto <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> @db.transactional(xg=True) <NEW_LINE> def inc(cls, namespace_name, offset=1): <NEW_LINE> <INDENT> dto = cls.load_or_default(namespace_name) <NEW_LINE> dto.inc(offset=offset) <NEW_LINE> cls._save(dto) <NEW_LINE> return dto | A single total enrollment counter for each course. | 62599044b57a9660fecd2d83 |
class ReferencePointType(Serializable): <NEW_LINE> <INDENT> _fields = ('ECF', 'Line', 'Sample', 'name') <NEW_LINE> _required = ('ECF', 'Line', 'Sample') <NEW_LINE> _set_as_attribute = ('name', ) <NEW_LINE> _numeric_format = {'Line': FLOAT_FORMAT, 'Sample': FLOAT_FORMAT} <NEW_LINE> ECF = SerializableDescriptor( 'ECF', XYZType, _required, strict=DEFAULT_STRICT, docstring='The geographical coordinates for the reference point.') <NEW_LINE> Line = FloatDescriptor( 'Line', _required, strict=DEFAULT_STRICT, docstring='The reference point line index.') <NEW_LINE> Sample = FloatDescriptor( 'Sample', _required, strict=DEFAULT_STRICT, docstring='The reference point sample index.') <NEW_LINE> name = StringDescriptor( 'name', _required, strict=DEFAULT_STRICT, docstring='The reference point name.') <NEW_LINE> def __init__(self, ECF=None, Line=None, Sample=None, name=None, **kwargs): <NEW_LINE> <INDENT> if '_xml_ns' in kwargs: <NEW_LINE> <INDENT> self._xml_ns = kwargs['_xml_ns'] <NEW_LINE> <DEDENT> if '_xml_ns_key' in kwargs: <NEW_LINE> <INDENT> self._xml_ns_key = kwargs['_xml_ns_key'] <NEW_LINE> <DEDENT> self.ECF = ECF <NEW_LINE> self.Line = Line <NEW_LINE> self.Sample = Sample <NEW_LINE> self.name = name <NEW_LINE> super(ReferencePointType, self).__init__(**kwargs) | The reference point definition | 625990443eb6a72ae038b968 |
class FilterVsIn(object): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.random = random.Random() <NEW_LINE> self.random.seed(42) <NEW_LINE> <DEDENT> def __call__(self, name, _): <NEW_LINE> <INDENT> if 'vs_in' in grouptools.split(name): <NEW_LINE> <INDENT> return self.random.random() <= .2 <NEW_LINE> <DEDENT> return True <NEW_LINE> <DEDENT> def reset(self): <NEW_LINE> <INDENT> self.random.seed(42) | Filter out 80% of the Vertex Attrib 64 vs_in tests. | 62599044d53ae8145f919765 |
class GroupScale(object): <NEW_LINE> <INDENT> def __init__(self, size, interpolation=Image.BILINEAR): <NEW_LINE> <INDENT> self.worker = torchvision.transforms.Resize(size, interpolation) <NEW_LINE> <DEDENT> def __call__(self, img_group): <NEW_LINE> <INDENT> return [self.worker(img) for img in img_group] | Rescales the input PIL.Image to the given 'size'.
'size' will be the size of the smaller edge.
For example, if height > width, then image will be
rescaled to (size * height / width, size)
size: size of the smaller edge
interpolation: Default: PIL.Image.BILINEAR | 6259904407d97122c4217fa7 |
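A short usage sketch of the smaller-edge resize described above (assuming torchvision and Pillow are installed; the 640x480 frames and target size 224 are illustrative):
from PIL import Image
import torchvision

scale = torchvision.transforms.Resize(224)                 # per-frame worker, as in GroupScale(224)
frames = [Image.new('RGB', (640, 480)) for _ in range(3)]  # a dummy 3-frame group
resized = [scale(img) for img in frames]
# the height (smaller edge) becomes 224; the width is scaled to preserve the aspect ratio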
class getCat_args: <NEW_LINE> <INDENT> thrift_spec = ( None, (1, TType.STRING, 'name', None, None, ), ) <NEW_LINE> def __init__(self, name=None,): <NEW_LINE> <INDENT> self.name = name <NEW_LINE> <DEDENT> def read(self, iprot): <NEW_LINE> <INDENT> if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: <NEW_LINE> <INDENT> fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) <NEW_LINE> return <NEW_LINE> <DEDENT> iprot.readStructBegin() <NEW_LINE> while True: <NEW_LINE> <INDENT> (fname, ftype, fid) = iprot.readFieldBegin() <NEW_LINE> if ftype == TType.STOP: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if fid == 1: <NEW_LINE> <INDENT> if ftype == TType.STRING: <NEW_LINE> <INDENT> self.name = iprot.readString() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> iprot.skip(ftype) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> iprot.skip(ftype) <NEW_LINE> <DEDENT> iprot.readFieldEnd() <NEW_LINE> <DEDENT> iprot.readStructEnd() <NEW_LINE> <DEDENT> def write(self, oprot): <NEW_LINE> <INDENT> if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: <NEW_LINE> <INDENT> oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) <NEW_LINE> return <NEW_LINE> <DEDENT> oprot.writeStructBegin('getCat_args') <NEW_LINE> if self.name is not None: <NEW_LINE> <INDENT> oprot.writeFieldBegin('name', TType.STRING, 1) <NEW_LINE> oprot.writeString(self.name) <NEW_LINE> oprot.writeFieldEnd() <NEW_LINE> <DEDENT> oprot.writeFieldStop() <NEW_LINE> oprot.writeStructEnd() <NEW_LINE> <DEDENT> def validate(self): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> def __hash__(self): <NEW_LINE> <INDENT> value = 17 <NEW_LINE> value = (value * 31) ^ hash(self.name) <NEW_LINE> return value <NEW_LINE> <DEDENT> def __repr__(self): <NEW_LINE> <INDENT> L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] <NEW_LINE> return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) <NEW_LINE> <DEDENT> def __eq__(self, other): <NEW_LINE> <INDENT> return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ <NEW_LINE> <DEDENT> def __ne__(self, other): <NEW_LINE> <INDENT> return not (self == other) | Attributes:
- name | 62599044a4f1c619b294f80b |
@zm.pipeline.Ingress.register("entity.delete") <NEW_LINE> class EntityDeleteMessage(formencode.Schema): <NEW_LINE> <INDENT> id = formencode.validators.Int() | Message received upon deletion of an entity. | 625990443617ad0b5ee07441 |
class RBFLayer(nn.Module): <NEW_LINE> <INDENT> def __init__(self, low=0, high=30, gap=0.1, dim=1): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self._low = low <NEW_LINE> self._high = high <NEW_LINE> self._gap = gap <NEW_LINE> self._dim = dim <NEW_LINE> self._n_centers = int(np.ceil((high - low) / gap)) <NEW_LINE> centers = np.linspace(low, high, self._n_centers) <NEW_LINE> self.centers = th.tensor(centers, dtype=th.float, requires_grad=False) <NEW_LINE> self.centers = nn.Parameter(self.centers, requires_grad=False) <NEW_LINE> self._fan_out = self._dim * self._n_centers <NEW_LINE> self._gap = centers[1] - centers[0] <NEW_LINE> <DEDENT> def dis2rbf(self, edges): <NEW_LINE> <INDENT> dist = edges.data["distance"] <NEW_LINE> radial = dist - self.centers <NEW_LINE> coef = -1 / self._gap <NEW_LINE> rbf = th.exp(coef * (radial**2)) <NEW_LINE> return {"rbf": rbf} <NEW_LINE> <DEDENT> def forward(self, g): <NEW_LINE> <INDENT> g.apply_edges(self.dis2rbf) <NEW_LINE> return g.edata["rbf"] | Radial basis functions Layer.
e(d) = exp(- gamma * ||d - mu_k||^2)
default settings:
gamma = 10
0 <= mu_k <= 30 for k=1~300 | 625990448a43f66fc4bf349a |
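A standalone NumPy re-expression of the radial-basis expansion in the docstring (mirrors the default low/high/gap values but uses an illustrative distance and no dgl or torch):
import numpy as np

low, high, gap = 0.0, 30.0, 0.1
n_centers = int(np.ceil((high - low) / gap))       # 300 centers
centers = np.linspace(low, high, n_centers)
gap = centers[1] - centers[0]                      # actual spacing after linspace
d = 2.5                                            # an example edge distance
rbf = np.exp(-(d - centers) ** 2 / gap)            # e_k(d) = exp(-(d - mu_k)^2 / gap)
print(rbf.shape)                                   # (300,)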
class DaySchedule(Schedule): <NEW_LINE> <INDENT> type = 'Day' <NEW_LINE> def __init__(self, start, builder, range=None): <NEW_LINE> <INDENT> super(DaySchedule, self).__init__( start=start, range=range if range else datetime.timedelta(days=1), builder=builder ) <NEW_LINE> <DEDENT> def up(self): <NEW_LINE> <INDENT> return WeekSchedule(start=self.start, builder=self.builder) <NEW_LINE> <DEDENT> def __unicode__(self): <NEW_LINE> <INDENT> return u'{:%A, %d %b %Y}'.format(self.start) <NEW_LINE> <DEDENT> @d_models.permalink <NEW_LINE> def get_absolute_url(self): <NEW_LINE> <INDENT> return ( 'schedule.views.schedule_day', (), dict(zip(['year', 'week', 'weekday'], self.start.isocalendar())) ) | A schedule type that specifically works for day schedule ranges. | 62599044d7e4931a7ef3d37e |
@component.adapter( interfaces.IQuerySchemaSearch, IPluggableAuthentication) <NEW_LINE> @zope.interface.implementer( ILocation, IQueriableAuthenticator, interfaces.IQuerySchemaSearch) <NEW_LINE> class QuerySchemaSearchAdapter(object): <NEW_LINE> <INDENT> def __init__(self, authplugin, pau): <NEW_LINE> <INDENT> if (ILocation.providedBy(authplugin) and authplugin.__parent__ is not None): <NEW_LINE> <INDENT> self.__parent__ = authplugin.__parent__ <NEW_LINE> self.__name__ = authplugin.__name__ <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.__parent__ = pau <NEW_LINE> self.__name__ = "" <NEW_LINE> <DEDENT> self.authplugin = authplugin <NEW_LINE> self.pau = pau <NEW_LINE> self.schema = authplugin.schema <NEW_LINE> <DEDENT> def search(self, query, start=None, batch_size=None): <NEW_LINE> <INDENT> for id in self.authplugin.search(query, start, batch_size): <NEW_LINE> <INDENT> yield self.pau.prefix + id | Performs schema-based principal searches on behalf of a PAU.
Delegates the search to the adapted authenticator (which also provides
IQuerySchemaSearch) and prepends the PAU prefix to the resulting principal
IDs. | 6259904463b5f9789fe86474 |
class MathsOutput(TraitedSpec): <NEW_LINE> <INDENT> out_file = File(desc='image written after calculations') | Output Spec for seg_maths interfaces. | 6259904494891a1f408ba07a |
@method_decorator(csrf_exempt, name='dispatch') <NEW_LINE> @method_decorator(login_required, name="dispatch") <NEW_LINE> class PreviewMarkdownAjaxView(View): <NEW_LINE> <INDENT> def post(self, request, *args, **kwargs): <NEW_LINE> <INDENT> return HttpResponse(markdown_filter(request.POST['text'])) | Transform Markdown text into HTML. | 6259904445492302aabfd7e3 |
class Question(models.Model): <NEW_LINE> <INDENT> question_text = models.CharField(max_length=200) <NEW_LINE> pub_date = models.DateTimeField('date published') <NEW_LINE> end_date = models.DateTimeField('end date') <NEW_LINE> def __str__(self): <NEW_LINE> <INDENT> return self.question_text <NEW_LINE> <DEDENT> def is_published(self): <NEW_LINE> <INDENT> return timezone.now() >= self.pub_date <NEW_LINE> <DEDENT> def can_vote(self): <NEW_LINE> <INDENT> return self.pub_date <= timezone.now() <= self.end_date <NEW_LINE> <DEDENT> def was_published_recently(self): <NEW_LINE> <INDENT> now = timezone.now() <NEW_LINE> return now - datetime.timedelta(days=1) <= self.pub_date <= now <NEW_LINE> <DEDENT> was_published_recently.admin_order_field = 'pub_date' <NEW_LINE> was_published_recently.boolean = True <NEW_LINE> was_published_recently.short_description = 'Published recently?' | Class that contains the question options. | 62599044379a373c97d9a332
class MCQToRating(Change): <NEW_LINE> <INDENT> @staticmethod <NEW_LINE> def applies_to(node): <NEW_LINE> <INDENT> return node.tag in ("pb-mcq", "pb-mrq") and "type" in node.attrib <NEW_LINE> <DEDENT> def apply(self): <NEW_LINE> <INDENT> if self.node.tag == "pb-mcq" and self.node.get("type") == "rating": <NEW_LINE> <INDENT> self.node.tag = "pb-rating" <NEW_LINE> <DEDENT> self.node.attrib.pop("type") | <mcq type="rating"> is now just <rating>, and we never use type="choices" on MCQ/MRQ | 625990441d351010ab8f4e27 |
class TestRandomWalk(unittest.TestCase): <NEW_LINE> <INDENT> alpha = 0.95 <NEW_LINE> err_tol = 1e-3 <NEW_LINE> max_iter = 100 <NEW_LINE> def test_random_walk(self): <NEW_LINE> <INDENT> data = [0, 1, 1, 0] <NEW_LINE> matrix = construct_2x2_csr_matrix(data) <NEW_LINE> pi = random_walk(matrix, alpha=self.alpha, err_tol=self.err_tol, max_iter=self.max_iter) <NEW_LINE> self.assertEqual(len(pi), 2) <NEW_LINE> self.assertAlmostEqual(pi[0], 0.5, 3) <NEW_LINE> self.assertAlmostEqual(pi[1], 0.5, 3) <NEW_LINE> data = [1, 0, 1, 0] <NEW_LINE> matrix = construct_2x2_csr_matrix(data) <NEW_LINE> pi = random_walk(matrix, alpha=1, err_tol=self.err_tol, max_iter=self.max_iter) <NEW_LINE> self.assertEqual(len(pi), 2) <NEW_LINE> self.assertAlmostEqual(pi[0], 1, 3) <NEW_LINE> self.assertAlmostEqual(pi[1], 0, 3) | Unit tests for random_walk | 625990443eb6a72ae038b969 |
class Tsallis_pdf(PDF) : <NEW_LINE> <INDENT> def __init__ ( self , pt , mass = 0 , n = None , T = None , name = '' ) : <NEW_LINE> <INDENT> PDF.__init__ ( self , name ) <NEW_LINE> if not isinstance ( pt , ROOT.RooAbsReal ) : <NEW_LINE> <INDENT> raise AttributeError( "Tsallis(%s): invalid 'pt'-parameter %s" % ( name , pt ) ) <NEW_LINE> <DEDENT> self.pt = pt <NEW_LINE> self.m = self.pt <NEW_LINE> self.mass = self.pt <NEW_LINE> self.m0 = makeVar ( mass , 'm0_%s' % name , 'm0(%s)' % name , mass , 0 , 1e+6 ) <NEW_LINE> self.n = makeVar ( n , 'n_%s' % name , 'n(%s) ' % name , n , 0.01 , 1000 ) <NEW_LINE> self.T = makeVar ( T , 'T_%s' % name , 'T(%s) ' % name , T , 1.e-3 , 1e+6 ) <NEW_LINE> self.pdf = Ostap.Models.Tsallis ( 'tsallis_' + name , 'Tsallis(%s)' % name , self.pt , self.n , self.T , self.m0 ) | Useful function to describe pT-spectra of particles
- C. Tsallis,
Possible generalization of Boltzmann-Gibbs statistics,
J. Statist. Phys. 52 (1988) 479.
- C. Tsallis,
Nonextensive statistics: theoretical, experimental and computational
evidences and connections, Braz. J. Phys. 29 (1999) 1.
\f[ \frac{d\sigma}{dp_T} \propto
p_T\times \left( 1 + \frac{E_{kin}}{Tn}\right)^{-n}\f],
where \f$E_{kin} = \sqrt{p_T^2+M^2}-M\f$
is transverse kinetic energy | 62599044a79ad1619776b388 |
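A numeric sketch of the spectrum shape in the docstring, using plain NumPy and illustrative parameter values (the mass, T and n below are placeholders, not fitted numbers):
import numpy as np

M, T, n = 0.494, 0.10, 7.0                    # illustrative mass (GeV), temperature, power
pT = np.linspace(0.0, 5.0, 101)
E_kin = np.sqrt(pT ** 2 + M ** 2) - M         # transverse kinetic energy
dsigma_dpT = pT * (1.0 + E_kin / (T * n)) ** (-n)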
@registry.register("classification_report") <NEW_LINE> class ClassficationReport(UnionReport): <NEW_LINE> <INDENT> def __init__(self, datasets): <NEW_LINE> <INDENT> self._datasets = datasets <NEW_LINE> super().__init__() <NEW_LINE> <DEDENT> @property <NEW_LINE> def required_measurements(self): <NEW_LINE> <INDENT> metrics = ["accuracy", "ece", "nll", "brier"] <NEW_LINE> for dataset in self._datasets: <NEW_LINE> <INDENT> for metric in metrics: <NEW_LINE> <INDENT> yield MeasurementSpec(dataset, metric) | Computes commonly used classification metrics.
The model should compute the class probabilities (not logits). This report
will compute accuracy, expected calibration error, negative log-likelihood,
and Brier scores of the predictions. | 62599044d10714528d69f011 |
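A small standalone computation of two of the listed metrics (NLL and Brier score) from class probabilities, with made-up predictions and labels:
import numpy as np

probs = np.array([[0.7, 0.2, 0.1],            # predicted class probabilities per example
                  [0.1, 0.8, 0.1]])
labels = np.array([0, 2])
onehot = np.eye(probs.shape[1])[labels]
nll = -np.mean(np.log(probs[np.arange(len(labels)), labels]))
brier = np.mean(np.sum((probs - onehot) ** 2, axis=1))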
class UserDeleteResult(bb.Union): <NEW_LINE> <INDENT> _catch_all = 'other' <NEW_LINE> other = None <NEW_LINE> @classmethod <NEW_LINE> def success(cls, val): <NEW_LINE> <INDENT> return cls('success', val) <NEW_LINE> <DEDENT> @classmethod <NEW_LINE> def invalid_user(cls, val): <NEW_LINE> <INDENT> return cls('invalid_user', val) <NEW_LINE> <DEDENT> def is_success(self): <NEW_LINE> <INDENT> return self._tag == 'success' <NEW_LINE> <DEDENT> def is_invalid_user(self): <NEW_LINE> <INDENT> return self._tag == 'invalid_user' <NEW_LINE> <DEDENT> def is_other(self): <NEW_LINE> <INDENT> return self._tag == 'other' <NEW_LINE> <DEDENT> def get_success(self): <NEW_LINE> <INDENT> if not self.is_success(): <NEW_LINE> <INDENT> raise AttributeError("tag 'success' not set") <NEW_LINE> <DEDENT> return self._value <NEW_LINE> <DEDENT> def get_invalid_user(self): <NEW_LINE> <INDENT> if not self.is_invalid_user(): <NEW_LINE> <INDENT> raise AttributeError("tag 'invalid_user' not set") <NEW_LINE> <DEDENT> return self._value <NEW_LINE> <DEDENT> def _process_custom_annotations(self, annotation_type, field_path, processor): <NEW_LINE> <INDENT> super(UserDeleteResult, self)._process_custom_annotations(annotation_type, field_path, processor) | Result of trying to delete a user's secondary emails. 'success' is the only
value indicating that a user was successfully retrieved for deleting
secondary emails. The other values explain the type of error that occurred,
and include the user for which the error occurred.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar UserDeleteEmailsResult UserDeleteResult.success: Describes a user and
the results for each attempt to delete a secondary email.
:ivar UserSelectorArg UserDeleteResult.invalid_user: Specified user is not a
valid target for deleting secondary emails. | 625990446e29344779b0195b |
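A hedged usage sketch of the tagged-union pattern the docstring describes (the object() below is a stand-in for a real UserDeleteEmailsResult instance):
some_result = object()                            # stand-in for a UserDeleteEmailsResult
result = UserDeleteResult.success(some_result)    # tag the union with the 'success' variant
if result.is_success():
    emails_result = result.get_success()          # only valid once is_success() is True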
class Course_Object(): <NEW_LINE> <INDENT> def __init__(self, filename): <NEW_LINE> <INDENT> self.data = pd.read_csv(filename).drop_duplicates(subset='module_id', keep='first') <NEW_LINE> self.module_ids = [] <NEW_LINE> self.course_info = {} <NEW_LINE> self.module_info = {} <NEW_LINE> self.course_info_count = {} <NEW_LINE> for _, row in self.data.iterrows(): <NEW_LINE> <INDENT> self.module_ids.append(row['module_id']) <NEW_LINE> self.module_info[row['module_id']] = {'course_id': row['course_id'], 'category': row['category'], 'children': row['children'], 'start': row['start']} <NEW_LINE> if row['course_id'] not in self.course_info: <NEW_LINE> <INDENT> self.course_info[row['course_id']] = {row['module_id']: {'category': row['category'], 'children': row['children'], 'start': row['start']}} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.course_info[row['course_id']][row['module_id']] = {'category': row['category'], 'children': row['children'], 'start': row['start']} <NEW_LINE> <DEDENT> <DEDENT> for key in self.course_info: <NEW_LINE> <INDENT> self.course_info_count[key] = {'module_number': len(self.course_info[key])} <NEW_LINE> <DEDENT> print('%s loaded! Number of modules: %i' % (filename, len(self.module_info))) <NEW_LINE> <DEDENT> def get_data(self): <NEW_LINE> <INDENT> return self.data <NEW_LINE> <DEDENT> def get_module_ids(self): <NEW_LINE> <INDENT> return self.module_ids <NEW_LINE> <DEDENT> def get_module_info(self): <NEW_LINE> <INDENT> return self.module_info <NEW_LINE> <DEDENT> def get_course_info(self): <NEW_LINE> <INDENT> return self.course_info <NEW_LINE> <DEDENT> def get_course_info_count(self): <NEW_LINE> <INDENT> return self.course_info_count | Course_Object class to load the data from object.csv
Args:
filename (str): path of the file
Attributes:
get_data (df): get the pandas dataframe
get_module_ids (list): get the list of modules ids
get_module_info (dict): get the dictionary of module information
get_course_info (dict): get the dictionary of the courses information
get_course_info_count (dict): get the dictionary of courses information with number of modules each course contains | 6259904407d97122c4217fa9 |
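A hypothetical usage sketch following the docstring above (the 'object.csv' path and the returned dictionary shapes are taken from that description, not verified against real data; pandas must be installed):
course = Course_Object('object.csv')              # loads and de-duplicates rows by module_id
module_ids = course.get_module_ids()
first = course.get_module_info()[module_ids[0]]   # {'course_id': ..., 'category': ..., 'children': ..., 'start': ...}
counts = course.get_course_info_count()           # {course_id: {'module_number': N}, ...}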
class UniversalRecipe(object, metaclass=MetaUniversalRecipe): <NEW_LINE> <INDENT> def __init__(self, config): <NEW_LINE> <INDENT> self._config = config <NEW_LINE> self._recipes = {} <NEW_LINE> self._proxy_recipe = None <NEW_LINE> <DEDENT> def __str__(self): <NEW_LINE> <INDENT> if list(self._recipes.values()): <NEW_LINE> <INDENT> return str(list(self._recipes.values())[0]) <NEW_LINE> <DEDENT> return super(UniversalRecipe, self).__str__() <NEW_LINE> <DEDENT> def add_recipe(self, recipe): <NEW_LINE> <INDENT> if self._proxy_recipe is None: <NEW_LINE> <INDENT> self._proxy_recipe = recipe <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> for attr in ('name', 'deps', 'platform_deps'): <NEW_LINE> <INDENT> if getattr(recipe, attr) != getattr(self._proxy_recipe, attr): <NEW_LINE> <INDENT> raise FatalError(_("Recipes must have the same " + attr)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> self._recipes[recipe.config.target_arch] = recipe <NEW_LINE> <DEDENT> def is_empty(self): <NEW_LINE> <INDENT> return len(self._recipes) == 0 <NEW_LINE> <DEDENT> @property <NEW_LINE> def steps(self): <NEW_LINE> <INDENT> if self.is_empty(): <NEW_LINE> <INDENT> return [] <NEW_LINE> <DEDENT> return self._proxy_recipe.steps[:] <NEW_LINE> <DEDENT> def __getattr__(self, name): <NEW_LINE> <INDENT> if not self._proxy_recipe: <NEW_LINE> <INDENT> raise AttributeError(_("Attribute %s was not found in the " "Universal recipe, which is empty. You might need to add a " "recipe first.")) <NEW_LINE> <DEDENT> return getattr(self._proxy_recipe, name) <NEW_LINE> <DEDENT> def __setattr__(self, name, value): <NEW_LINE> <INDENT> object.__setattr__(self, name, value) <NEW_LINE> if name not in ['_config', '_recipes', '_proxy_recipe']: <NEW_LINE> <INDENT> for o in self._recipes.values(): <NEW_LINE> <INDENT> setattr(o, name, value) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> def get_for_arch (self, arch, name): <NEW_LINE> <INDENT> if arch: <NEW_LINE> <INDENT> return getattr (self._recipes[arch], name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return getattr (self, name) <NEW_LINE> <DEDENT> <DEDENT> def _do_step(self, step): <NEW_LINE> <INDENT> if step in BuildSteps.FETCH: <NEW_LINE> <INDENT> arch, recipe = list(self._recipes.items())[0] <NEW_LINE> stepfunc = getattr(recipe, step) <NEW_LINE> try: <NEW_LINE> <INDENT> stepfunc() <NEW_LINE> <DEDENT> except FatalError as e: <NEW_LINE> <INDENT> e.arch = arch <NEW_LINE> raise e <NEW_LINE> <DEDENT> return <NEW_LINE> <DEDENT> for arch, recipe in self._recipes.items(): <NEW_LINE> <INDENT> config = self._config.arch_config[arch] <NEW_LINE> config.do_setup_env() <NEW_LINE> stepfunc = getattr(recipe, step) <NEW_LINE> try: <NEW_LINE> <INDENT> stepfunc() <NEW_LINE> <DEDENT> except FatalError as e: <NEW_LINE> <INDENT> e.arch = arch <NEW_LINE> raise e | Stores similar recipe objects that are going to be built together
Useful for the universal architecture, where the same recipe needs
to be built for different architectures before being merged. For the
other targets, it will likely be a unitary group | 6259904430dc7b76659a0b3a |
class Square: <NEW_LINE> <INDENT> def __init__(self, size=0): <NEW_LINE> <INDENT> self.__size = size <NEW_LINE> <DEDENT> @property <NEW_LINE> def size(self): <NEW_LINE> <INDENT> return self.__size <NEW_LINE> <DEDENT> @size.setter <NEW_LINE> def size(self, size): <NEW_LINE> <INDENT> if isinstance(size, int) is False: <NEW_LINE> <INDENT> raise TypeError("size must be an integer") <NEW_LINE> <DEDENT> if size < 0: <NEW_LINE> <INDENT> raise ValueError("size must be >= 0") <NEW_LINE> <DEDENT> self.__size = size <NEW_LINE> <DEDENT> def area(self): <NEW_LINE> <INDENT> return self.__size ** 2 | class square | 625990448da39b475be044f9 |