Dataset schema:

| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64, nullable (⌀) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable (⌀) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable (⌀) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
Row 1:

| field | value |
|---|---|
| blob_id | 09e6ba57f63d8ca9d88d39ff34881ab5d048bc96 |
| directory_id | e823bc36af457f229f6879d6e6a3ef6247c129aa |
| path | /virtualenv/Lib/site-packages/pyasn1_modules/rfc2560.py |
| content_id | e41994abf64813e9eb5f70b1dd1b03519a320ed1 |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | William-An/DFB_Final |
| snapshot_id | e772fa979c41f2f83a4bf657cde499456215fb3b |
| revision_id | 49a9244c98116574676992ebecd1d9435e1d5b1e |
| branch_name | refs/heads/master |
| visit_date | 2022-11-07T15:47:36.189057 |
| revision_date | 2017-07-22T01:01:37 |
| committer_date | 2017-07-22T01:01:43 |
| github_id | 97,426,562 |
| star_events_count | 1 |
| fork_events_count | 1 |
| gha_license_id | MIT |
| gha_event_created_at | 2022-10-15T02:45:57 |
| gha_created_at | 2017-07-17T02:21:42 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 8,251 |
| extension | py |

content:
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pyasn1.sf.net/license.html
#
# OCSP request/response syntax
#
# Derived from a minimal OCSP library (RFC2560) code written by
# Bud P. Bruegger <[email protected]>
# Copyright: Ancitel, S.p.a, Rome, Italy
# License: BSD
#
#
# current limitations:
# * request and response work only for a single certificate
# * only some values are parsed out of the response
# * the request doesn't set a nonce or a signature
# * there is no signature validation of the response
# * dates are left as strings in GeneralizedTime format -- datetime.datetime
# would be nicer
#
from pyasn1.type import tag, namedtype, namedval, univ, useful
from pyasn1_modules import rfc2459
# Start of OCSP module definitions
# This should be in directory Authentication Framework (X.509) module
class CRLReason(univ.Enumerated):
namedValues = namedval.NamedValues(
('unspecified', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('removeFromCRL', 8),
('privilegeWithdrawn', 9),
('aACompromise', 10)
)
# end of directory Authentication Framework (X.509) module
# This should be in PKIX Certificate Extensions module
class GeneralName(univ.OctetString):
pass
# end of PKIX Certificate Extensions module
id_kp_OCSPSigning = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 3, 9))
id_pkix_ocsp = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1))
id_pkix_ocsp_basic = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 1))
id_pkix_ocsp_nonce = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 2))
id_pkix_ocsp_crl = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 3))
id_pkix_ocsp_response = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 4))
id_pkix_ocsp_nocheck = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 5))
id_pkix_ocsp_archive_cutoff = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 6))
id_pkix_ocsp_service_locator = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 7))
class AcceptableResponses(univ.SequenceOf):
componentType = univ.ObjectIdentifier()
class ArchiveCutoff(useful.GeneralizedTime):
pass
class UnknownInfo(univ.Null):
pass
class RevokedInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('revocationTime', useful.GeneralizedTime()),
namedtype.OptionalNamedType('revocationReason', CRLReason().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class CertID(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('hashAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('issuerNameHash', univ.OctetString()),
namedtype.NamedType('issuerKeyHash', univ.OctetString()),
namedtype.NamedType('serialNumber', rfc2459.CertificateSerialNumber())
)
class CertStatus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('good',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('revoked',
RevokedInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('unknown',
UnknownInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class SingleResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certID', CertID()),
namedtype.NamedType('certStatus', CertStatus()),
namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('singleExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class KeyHash(univ.OctetString):
pass
class ResponderID(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('byName',
rfc2459.Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('byKey',
KeyHash().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class Version(univ.Integer):
namedValues = namedval.NamedValues(('v1', 0))
class ResponseData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('responderID', ResponderID()),
namedtype.NamedType('producedAt', useful.GeneralizedTime()),
namedtype.NamedType('responses', univ.SequenceOf(SingleResponse())),
namedtype.OptionalNamedType('responseExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class BasicOCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsResponseData', ResponseData()),
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(rfc2459.Certificate()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class ResponseBytes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseType', univ.ObjectIdentifier()),
namedtype.NamedType('response', univ.OctetString())
)
class OCSPResponseStatus(univ.Enumerated):
namedValues = namedval.NamedValues(
('successful', 0),
('malformedRequest', 1),
('internalError', 2),
('tryLater', 3),
('undefinedStatus', 4), # should never occur
('sigRequired', 5),
('unauthorized', 6)
)
class OCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseStatus', OCSPResponseStatus()),
namedtype.OptionalNamedType('responseBytes', ResponseBytes().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Request(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('reqCert', CertID()),
namedtype.OptionalNamedType('singleRequestExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Signature(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(rfc2459.Certificate()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class TBSRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('requestorName', GeneralName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('requestList', univ.SequenceOf(Request())),
namedtype.OptionalNamedType('requestExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class OCSPRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsRequest', TBSRequest()),
namedtype.OptionalNamedType('optionalSignature', Signature().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
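# Editor's sketch (not part of the original module): a minimal example of
# decoding a DER-encoded OCSP response with the types defined above. The
# input file path is hypothetical; pyasn1's DER decoder and prettyPrint()
# are standard pyasn1 APIs.
if __name__ == '__main__':
    import sys
    from pyasn1.codec.der import decoder
    # Read a DER-encoded OCSPResponse from the file named on the command line.
    substrate = open(sys.argv[1], 'rb').read()
    response, _ = decoder.decode(substrate, asn1Spec=OCSPResponse())
    print(response.prettyPrint())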
authors: ["[email protected]"]
author_id: (not set)
Row 2:

| field | value |
|---|---|
| blob_id | ca99b5cf3ba81bd26679882f2f553c50dc8dabe1 |
| directory_id | eb40dce4039d528b9cd06dbeda75da09d09d7fc5 |
| path | /need_install/Django-1.8.17/django/db/models/fields/__init__.py |
| content_id | 8bba4cb3216e4eef1a0fcb5822bd70cfc3c685c8 |
| detected_licenses | ["Apache-2.0", "BSD-3-Clause"] |
| license_type | permissive |
| repo_name | MulticsYin/MulticsSH |
| snapshot_id | 39b62189446787c7f0f037b1640c9c780bd1dddd |
| revision_id | 5837a0bff0e7da0e8535e4e0b31ef6baf24274b4 |
| branch_name | refs/heads/master |
| visit_date | 2021-08-28T07:53:51.759679 |
| revision_date | 2017-12-11T15:31:03 |
| committer_date | 2017-12-11T15:31:03 |
| github_id | 82,428,902 |
| star_events_count | 4 |
| fork_events_count | 2 |
| gha_license_id | null |
| gha_event_created_at | null |
| gha_created_at | null |
| gha_language | null |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 89,179 |
| extension | py |

content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import copy
import datetime
import decimal
import math
import uuid
import warnings
from base64 import b64decode, b64encode
from itertools import tee
from django.apps import apps
from django.db import connection
from django.db.models.lookups import default_lookups, RegisterLookupMixin
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators, checks
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time, parse_duration
from django.utils.duration import duration_string
from django.utils.functional import cached_property, curry, total_ordering, Promise
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import (smart_text, force_text, force_bytes,
python_2_unicode_compatible)
from django.utils.ipv6 import clean_ipv6_address
from django.utils import six
from django.utils.itercompat import is_iterable
# When the _meta object was formalized, this exception was moved to
# django.core.exceptions. It is retained here for backwards compatibility
# purposes.
from django.core.exceptions import FieldDoesNotExist # NOQA
# Avoid "TypeError: Item in ``from list'' not a string" -- unicode_literals
# makes these strings unicode
__all__ = [str(x) for x in (
'AutoField', 'BLANK_CHOICE_DASH', 'BigIntegerField', 'BinaryField',
'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField',
'DateTimeField', 'DecimalField', 'DurationField', 'EmailField', 'Empty',
'Field', 'FieldDoesNotExist', 'FilePathField', 'FloatField',
'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED',
'NullBooleanField', 'PositiveIntegerField', 'PositiveSmallIntegerField',
'SlugField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField',
'UUIDField',
)]
class Empty(object):
pass
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
def _load_field(app_label, model_name, field_name):
return apps.get_model(app_label, model_name)._meta.get_field(field_name)
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
def _empty(of_cls):
new = Empty()
new.__class__ = of_cls
return new
@total_ordering
@python_2_unicode_compatible
class Field(RegisterLookupMixin):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
empty_values = list(validators.EMPTY_VALUES)
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
# Translators: The 'lookup_type' is one of 'date', 'year' or 'month'.
# Eg: "Title must be unique for pub_date year"
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
class_lookups = default_lookups.copy()
system_check_deprecated_details = None
system_check_removed_details = None
# Field flags
hidden = False
many_to_many = None
many_to_one = None
one_to_many = None
one_to_one = None
related_model = None
# Generic field type description, usually overridden by subclasses
def _description(self):
return _('Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name # May be set by set_attributes_from_name
self._verbose_name = verbose_name # Store original for deconstruction
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
self.rel = rel
self.is_relation = self.rel is not None
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date = unique_for_date
self.unique_for_month = unique_for_month
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self._validators = validators # Store for deconstruction later
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self._error_messages = error_messages # Store for deconstruction later
self.error_messages = messages
def __str__(self):
""" Return "app_label.model_label.field_name". """
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_choices())
errors.extend(self._check_db_index())
errors.extend(self._check_null_allowed_for_primary_keys())
errors.extend(self._check_backend_specific_checks(**kwargs))
errors.extend(self._check_deprecation_details())
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
if self.name.endswith('_'):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
elif '__' in self.name:
return [
checks.Error(
'Field names must not contain "__".',
hint=None,
obj=self,
id='fields.E002',
)
]
elif self.name == 'pk':
return [
checks.Error(
"'pk' is a reserved word that cannot be used as a field name.",
hint=None,
obj=self,
id='fields.E003',
)
]
else:
return []
def _check_choices(self):
if self.choices:
if (isinstance(self.choices, six.string_types) or
not is_iterable(self.choices)):
return [
checks.Error(
"'choices' must be an iterable (e.g., a list or tuple).",
hint=None,
obj=self,
id='fields.E004',
)
]
elif any(isinstance(choice, six.string_types) or
not is_iterable(choice) or len(choice) != 2
for choice in self.choices):
return [
checks.Error(
("'choices' must be an iterable containing "
"(actual value, human readable name) tuples."),
hint=None,
obj=self,
id='fields.E005',
)
]
else:
return []
else:
return []
def _check_db_index(self):
if self.db_index not in (None, True, False):
return [
checks.Error(
"'db_index' must be None, True or False.",
hint=None,
obj=self,
id='fields.E006',
)
]
else:
return []
def _check_null_allowed_for_primary_keys(self):
if (self.primary_key and self.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
return [
checks.Error(
'Primary keys must not have null=True.',
hint=('Set null=False on the field, or '
'remove primary_key=True argument.'),
obj=self,
id='fields.E007',
)
]
else:
return []
def _check_backend_specific_checks(self, **kwargs):
return connection.validation.check_field(self, **kwargs)
def _check_deprecation_details(self):
if self.system_check_removed_details is not None:
return [
checks.Error(
self.system_check_removed_details.get(
'msg',
'%s has been removed except for support in historical '
'migrations.' % self.__class__.__name__
),
hint=self.system_check_removed_details.get('hint'),
obj=self,
id=self.system_check_removed_details.get('id', 'fields.EXXX'),
)
]
elif self.system_check_deprecated_details is not None:
return [
checks.Warning(
self.system_check_deprecated_details.get(
'msg',
'%s has been deprecated.' % self.__class__.__name__
),
hint=self.system_check_deprecated_details.get('hint'),
obj=self,
id=self.system_check_deprecated_details.get('id', 'fields.WXXX'),
)
]
return []
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
from django.db.models.expressions import Col
return Col(alias, self, output_field)
else:
return self.cached_col
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self)
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be used
by Django.
"""
return sql, params
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
* None, bool, str, unicode, int, long, float, complex, set, frozenset, list, tuple, dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"choices": "_choices",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = {"choices", "validators", "db_tablespace"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
force_text(self.name, strings_only=True),
path,
[],
keywords,
)
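    # Editor's note (illustrative, not in the original file): for a field
    # declared as models.CharField(max_length=100, blank=True) named 'title',
    # deconstruct() returns roughly
    #     ('title', 'django.db.models.CharField', [], {'max_length': 100, 'blank': True})
    # -- the path is shortened from django.db.models.fields, and only
    # non-default keyword arguments are kept.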
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
if hasattr(self.rel, 'field') and self.rel.field is self:
obj.rel.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
if self.model._deferred:
# Deferred model will not be found from the app registry. This
# could be fixed by reconstructing the deferred model on unpickle.
raise RuntimeError("Fields of deferred models can't be reduced")
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
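    # Editor's note (illustrative, not in the original file): pickling a field
    # bound to a model, e.g. pickle.loads(pickle.dumps(Book._meta.get_field('title'))),
    # yields the instance already registered on Book._meta, because __reduce__
    # points back to _load_field(app_label, model_name, field_name).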
def get_pk_value_on_save(self, instance):
"""
Hook to generate new PK values on save. This method is called when
saving instances with no primary key value set. If this method returns
something other than None, the returned value is used when saving
the new instance.
"""
if self.default:
return self.get_default()
return None
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
# Some validators can't be created at field initialization time.
# This method provides a way to delay their creation until required.
return self.default_validators + self._validators
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific data_types dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.data_types[self.get_internal_type()] % data
except KeyError:
return None
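    # Editor's note (illustrative, not in the original file): a custom field
    # can bypass the data_types mapping entirely by overriding db_type(), e.g.
    #     class HandField(Field):
    #         def db_type(self, connection):
    #             return 'char(104)'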
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return
values (type, checks).
This will look at db_type(), allowing custom model fields to override it.
"""
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
type_string = self.db_type(connection)
try:
check_string = connection.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
check_string = None
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.data_types_suffix.get(self.get_internal_type())
def get_db_converters(self, connection):
if hasattr(self, 'from_db_value'):
return [self.from_db_value]
return []
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
self.concrete = self.column is not None
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name, virtual_only=False):
self.set_attributes_from_name(name)
self.model = cls
if virtual_only:
cls._meta.add_field(self, virtual=True)
else:
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in {
'iexact', 'contains', 'icontains',
'startswith', 'istartswith', 'endswith', 'iendswith',
'month', 'day', 'week_day', 'hour', 'minute', 'second',
'isnull', 'search', 'regex', 'iregex',
}:
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
return self.get_prep_value(value)
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
prepared = True
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabeled_clone method it means the
# value will be handled later on.
if hasattr(value, 'relabeled_clone'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute',
'second', 'search', 'regex', 'iregex', 'contains',
'icontains', 'iexact', 'startswith', 'endswith',
'istartswith', 'iendswith'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if isinstance(self, DateTimeField):
return connection.ops.year_lookup_bounds_for_datetime_field(value)
elif isinstance(self, DateField):
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return [value] # this isn't supposed to happen
else:
return [value]
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return self.default
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
blank_defined = False
choices = list(self.choices) if self.choices else []
named_groups = choices and isinstance(choices[0][1], (list, tuple))
if not named_groups:
for choice, __ in choices:
if choice in ('', None):
blank_defined = True
break
first_choice = (blank_choice if include_blank and
not blank_defined else [])
if self.choices:
return first_choice + choices
rel_model = self.rel.to
limit_choices_to = limit_choices_to or self.get_limit_choices_to()
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_text(x))
for x in rel_model._default_manager.complex_filter(
limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = blank_choice if include_blank else []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_text(self._get_val_from_obj(obj))
def _get_choices(self):
if isinstance(self._choices, collections.Iterator):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice, value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
if choices_form_class is not None:
form_class = choices_form_class
else:
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in list(kwargs):
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
if form_class is None:
form_class = forms.CharField
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(AutoField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(AutoField, self).check(**kwargs)
errors.extend(self._check_primary_key())
return errors
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
'AutoFields must set primary_key=True.',
hint=None,
obj=self,
id='fields.E100',
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(AutoField, self).deconstruct()
del kwargs['blank']
kwargs['primary_key'] = True
return name, path, args, kwargs
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
value = super(AutoField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name, **kwargs):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name, **kwargs)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
super(BooleanField, self).__init__(*args, **kwargs)
def check(self, **kwargs):
errors = super(BooleanField, self).check(**kwargs)
errors.extend(self._check_null(**kwargs))
return errors
def _check_null(self, **kwargs):
if getattr(self, 'null', False):
return [
checks.Error(
'BooleanFields do not accept null values.',
hint='Use a NullBooleanField instead.',
obj=self,
id='fields.E110',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(BooleanField, self).deconstruct()
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
            # if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
value = super(BooleanField, self).get_prep_value(value)
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
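# Editor's note (illustrative, not in the original file):
#     BooleanField().to_python('1')      -> True
#     BooleanField().to_python('False')  -> False
#     BooleanField().to_python('maybe')  -> raises ValidationError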
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def check(self, **kwargs):
errors = super(CharField, self).check(**kwargs)
errors.extend(self._check_max_length_attribute(**kwargs))
return errors
def _check_max_length_attribute(self, **kwargs):
try:
max_length = int(self.max_length)
if max_length <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"CharFields must define a 'max_length' attribute.",
hint=None,
obj=self,
id='fields.E120',
)
]
except ValueError:
return [
checks.Error(
"'max_length' must be a positive integer.",
hint=None,
obj=self,
id='fields.E121',
)
]
else:
return []
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def get_prep_value(self, value):
value = super(CharField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _('Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateTimeCheckMixin(object):
def check(self, **kwargs):
errors = super(DateTimeCheckMixin, self).check(**kwargs)
errors.extend(self._check_mutually_exclusive_options())
errors.extend(self._check_fix_default_value())
return errors
def _check_mutually_exclusive_options(self):
# auto_now, auto_now_add, and default are mutually exclusive
# options. The use of more than one of these options together
# will trigger an Error
mutually_exclusive_options = [self.auto_now_add, self.auto_now,
self.has_default()]
enabled_options = [option not in (None, False)
for option in mutually_exclusive_options].count(True)
if enabled_options > 1:
return [
checks.Error(
"The options auto_now, auto_now_add, and default "
"are mutually exclusive. Only one of these options "
"may be present.",
hint=None,
obj=self,
id='fields.E160',
)
]
else:
return []
def _check_fix_default_value(self):
return []
class DateField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid date format. It must be "
"in YYYY-MM-DD format."),
'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(DateField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
date or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
if not timezone.is_naive(value):
value = timezone.make_naive(value, timezone.utc)
value = value.date()
elif isinstance(value, datetime.date):
# Nothing to do, as dates don't have tz information
pass
else:
# No explicit date / datetime value -- no checks necessary
return []
offset = datetime.timedelta(days=1)
lower = (now - offset).date()
upper = (now + offset).date()
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(DateField, self).deconstruct()
if self.auto_now:
kwargs['auto_now'] = True
if self.auto_now_add:
kwargs['auto_now_add'] = True
if self.auto_now or self.auto_now_add:
del kwargs['editable']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
if settings.USE_TZ and timezone.is_aware(value):
# Convert aware datetimes to the default time zone
# before casting them to dates (#17742).
default_timezone = timezone.get_default_timezone()
value = timezone.make_naive(value, default_timezone)
return value.date()
if isinstance(value, datetime.date):
return value
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name, **kwargs):
super(DateField, self).contribute_to_class(cls, name, **kwargs)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For dates lookups, convert the value to an int
# so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
value = super(DateField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
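# Editor's note (illustrative, not in the original file):
#     DateField().to_python('2017-07-22')  -> datetime.date(2017, 7, 22)
#     DateField().to_python('2017-02-30')  -> ValidationError (code 'invalid_date')
#     DateField().to_python('not a date')  -> ValidationError (code 'invalid')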
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _("'%(value)s' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
date or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.date):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
lower = datetime.datetime(lower.year, lower.month, lower.day)
upper = now + second_offset
upper = datetime.datetime(upper.year, upper.month, upper.day)
value = datetime.datetime(value.year, value.month, value.day)
else:
# No explicit date / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn("DateTimeField %s.%s received a naive datetime "
"(%s) while time zone support is active." %
(self.model.__name__, self.name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_datetime'],
code='invalid_datetime',
params={'value': value},
)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_date'],
code='invalid_date',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = super(DateTimeField, self).get_prep_value(value)
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
try:
name = '%s.%s' % (self.model.__name__, self.name)
except AttributeError:
name = '(unbound)'
warnings.warn("DateTimeField %s received a naive datetime (%s)"
" while time zone support is active." %
(name, value),
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
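# Editor's note (illustrative, not in the original file): with USE_TZ = True,
#     DateTimeField().get_prep_value(datetime.datetime(2017, 7, 22))
# emits a RuntimeWarning about the naive datetime and returns it made aware
# in the default time zone via timezone.make_aware().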
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(DecimalField, self).check(**kwargs)
digits_errors = self._check_decimal_places()
digits_errors.extend(self._check_max_digits())
if not digits_errors:
errors.extend(self._check_decimal_places_and_max_digits(**kwargs))
else:
errors.extend(digits_errors)
return errors
def _check_decimal_places(self):
try:
decimal_places = int(self.decimal_places)
if decimal_places < 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'decimal_places' attribute.",
hint=None,
obj=self,
id='fields.E130',
)
]
except ValueError:
return [
checks.Error(
"'decimal_places' must be a non-negative integer.",
hint=None,
obj=self,
id='fields.E131',
)
]
else:
return []
def _check_max_digits(self):
try:
max_digits = int(self.max_digits)
if max_digits <= 0:
raise ValueError()
except TypeError:
return [
checks.Error(
"DecimalFields must define a 'max_digits' attribute.",
hint=None,
obj=self,
id='fields.E132',
)
]
except ValueError:
return [
checks.Error(
"'max_digits' must be a positive integer.",
hint=None,
obj=self,
id='fields.E133',
)
]
else:
return []
def _check_decimal_places_and_max_digits(self, **kwargs):
if int(self.decimal_places) > int(self.max_digits):
return [
checks.Error(
"'max_digits' must be greater or equal to 'decimal_places'.",
hint=None,
obj=self,
id='fields.E134',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(DecimalField, self).deconstruct()
if self.max_digits is not None:
kwargs['max_digits'] = self.max_digits
if self.decimal_places is not None:
kwargs['decimal_places'] = self.decimal_places
return name, path, args, kwargs
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def _format(self, value):
if isinstance(value, six.string_types):
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.utils.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import utils
return utils.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
value = super(DecimalField, self).get_prep_value(value)
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
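# Editor's note (illustrative, not in the original file):
#     DecimalField(max_digits=5, decimal_places=2).to_python('12.5')
#         -> Decimal('12.5')
#     DecimalField(max_digits=5, decimal_places=2).to_python('abc')
#         -> raises ValidationError (decimal.InvalidOperation internally)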
class DurationField(Field):
"""Stores timedelta objects.
    Uses interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and a bigint
microseconds on other databases.
"""
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"[DD] [HH:[MM:]]ss[.uuuuuu] format.")
}
description = _("Duration")
def get_internal_type(self):
return "DurationField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.timedelta):
return value
try:
parsed = parse_duration(value)
except ValueError:
pass
else:
if parsed is not None:
return parsed
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_db_prep_value(self, value, connection, prepared=False):
if connection.features.has_native_duration_field:
return value
if value is None:
return None
# Discard any fractional microseconds due to floating point arithmetic.
return int(round(value.total_seconds() * 1000000))
def get_db_converters(self, connection):
converters = []
if not connection.features.has_native_duration_field:
converters.append(connection.ops.convert_durationfield_value)
return converters + super(DurationField, self).get_db_converters(connection)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else duration_string(val)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.DurationField,
}
defaults.update(kwargs)
return super(DurationField, self).formfield(**defaults)
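# Editor's sketch (illustrative, not part of Django): DurationField parses
# "[DD] [HH:[MM:]]ss[.uuuuuu]" strings and, on backends without a native
# duration type, stores whole microseconds.
def _duration_field_sketch():
    f = DurationField()
    td = f.to_python('1 02:03:04')
    assert td == datetime.timedelta(days=1, hours=2, minutes=3, seconds=4)
    # 1 day, 2h 3m 4s expressed in microseconds, as get_db_prep_value does:
    assert int(round(td.total_seconds() * 1000000)) == 93784000000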
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("Email address")
def __init__(self, *args, **kwargs):
# max_length=254 to be compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 254)
super(EmailField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(EmailField, self).deconstruct()
# We do not exclude max_length if it matches default as we want to change
# the default in future.
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FilePathField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FilePathField, self).check(**kwargs)
errors.extend(self._check_allowing_files_or_folders(**kwargs))
return errors
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
hint=None,
obj=self,
id='fields.E140',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(FilePathField, self).deconstruct()
if self.path != '':
kwargs['path'] = self.path
if self.match is not None:
kwargs['match'] = self.match
if self.recursive is not False:
kwargs['recursive'] = self.recursive
if self.allow_files is not True:
kwargs['allow_files'] = self.allow_files
if self.allow_folders is not False:
kwargs['allow_folders'] = self.allow_folders
if kwargs.get("max_length", None) == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
value = super(FloatField, self).get_prep_value(value)
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be an integer."),
}
description = _("Integer")
def check(self, **kwargs):
errors = super(IntegerField, self).check(**kwargs)
errors.extend(self._check_max_length_warning())
return errors
def _check_max_length_warning(self):
if self.max_length is not None:
return [
checks.Warning(
"'max_length' is ignored when used with IntegerField",
hint="Remove 'max_length' from field",
obj=self,
id='fields.W122',
)
]
return []
@cached_property
def validators(self):
# These validators can't be added at field initialization time since
# they're based on values retrieved from `connection`.
range_validators = []
internal_type = self.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is not None:
range_validators.append(validators.MinValueValidator(min_value))
if max_value is not None:
range_validators.append(validators.MaxValueValidator(max_value))
return super(IntegerField, self).validators + range_validators
def get_prep_value(self, value):
value = super(IntegerField, self).get_prep_value(value)
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
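# Editor's sketch (illustrative, not part of Django): IntegerField's range
# validators are backend-dependent, so the bounds come from the connection.
def _integer_field_sketch():
    f = IntegerField()
    assert f.to_python('42') == 42
    lo, hi = connection.ops.integer_field_range(f.get_internal_type())
    # lo/hi may be None on backends that do not constrain the range.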
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
system_check_deprecated_details = {
'msg': (
'IPAddressField has been deprecated. Support for it (except in '
'historical migrations) will be removed in Django 1.9.'
),
'hint': 'Use GenericIPAddressField instead.',
'id': 'fields.W900',
}
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
super(IPAddressField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(IPAddressField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return six.text_type(value)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.protocol = protocol
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
super(GenericIPAddressField, self).__init__(verbose_name, name, *args,
**kwargs)
def check(self, **kwargs):
errors = super(GenericIPAddressField, self).check(**kwargs)
errors.extend(self._check_blank_and_null_values(**kwargs))
return errors
def _check_blank_and_null_values(self, **kwargs):
if not getattr(self, 'null', False) and getattr(self, 'blank', False):
return [
checks.Error(
('GenericIPAddressFields cannot have blank=True if null=False, '
'as blank values are stored as nulls.'),
hint=None,
obj=self,
id='fields.E150',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(GenericIPAddressField, self).deconstruct()
if self.unpack_ipv4 is not False:
kwargs['unpack_ipv4'] = self.unpack_ipv4
if self.protocol != "both":
kwargs['protocol'] = self.protocol
if kwargs.get("max_length", None) == 39:
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_ipaddress(value)
def get_prep_value(self, value):
value = super(GenericIPAddressField, self).get_prep_value(value)
if value is None:
return None
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return six.text_type(value)
def formfield(self, **kwargs):
defaults = {
'protocol': self.protocol,
'form_class': forms.GenericIPAddressField,
}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
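# Editor's sketch (illustrative, not part of Django): with unpack_ipv4=True,
# an IPv4-mapped IPv6 address is normalized back to dotted-quad form.
def _generic_ip_field_sketch():
    f = GenericIPAddressField(protocol='both', unpack_ipv4=True)
    assert f.to_python('::ffff:192.0.2.1') == '192.0.2.1'
    assert f.to_python('192.0.2.1') == '192.0.2.1'  # no ':' -> returned as-is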
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
super(NullBooleanField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(NullBooleanField, self).deconstruct()
del kwargs['null']
del kwargs['blank']
return name, path, args, kwargs
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
value = super(NullBooleanField, self).get_prep_value(value)
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
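# Editor's sketch (illustrative, not part of Django): the string forms
# accepted by NullBooleanField.to_python().
def _null_boolean_field_sketch():
    f = NullBooleanField()
    assert f.to_python('t') is True and f.to_python('1') is True
    assert f.to_python('f') is False and f.to_python('0') is False
    assert f.to_python('None') is None and f.to_python(None) is None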
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
default_validators = [validators.validate_slug]
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(SlugField, self).deconstruct()
if kwargs.get("max_length", None) == 50:
del kwargs['max_length']
if self.db_index is False:
kwargs['db_index'] = False
else:
del kwargs['db_index']
return name, path, args, kwargs
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
value = super(TextField, self).get_prep_value(value)
if isinstance(value, six.string_types) or value is None:
return value
return smart_text(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length, 'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(DateTimeCheckMixin, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%(value)s' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _("'%(value)s' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
super(TimeField, self).__init__(verbose_name, name, **kwargs)
def _check_fix_default_value(self):
"""
        Adds a warning to the checks framework stating that using an actual
        time or datetime value as a default is probably wrong, since it is
        only evaluated once, at server start-up.
For details see ticket #21905
"""
if not self.has_default():
return []
now = timezone.now()
if not timezone.is_naive(now):
now = timezone.make_naive(now, timezone.utc)
value = self.default
if isinstance(value, datetime.datetime):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc)
elif isinstance(value, datetime.time):
second_offset = datetime.timedelta(seconds=10)
lower = now - second_offset
upper = now + second_offset
value = datetime.datetime.combine(now.date(), value)
if timezone.is_aware(value):
value = timezone.make_naive(value, timezone.utc).time()
else:
# No explicit time / datetime value -- no checks necessary
return []
if lower <= value <= upper:
return [
checks.Warning(
'Fixed default value provided.',
hint='It seems you set a fixed date / time / datetime '
'value as default for this field. This may not be '
'what you want. If you want to have the current date '
'as default, use `django.utils.timezone.now`',
obj=self,
id='fields.W161',
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super(TimeField, self).deconstruct()
if self.auto_now is not False:
kwargs["auto_now"] = self.auto_now
if self.auto_now_add is not False:
kwargs["auto_now_add"] = self.auto_now_add
if self.auto_now or self.auto_now_add:
del kwargs['blank']
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid_time'],
code='invalid_time',
params={'value': value},
)
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
value = super(TimeField, self).get_prep_value(value)
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
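# Editor's sketch (illustrative, not part of Django): TimeField.to_python
# accepts time objects, datetimes (the time part is kept), and strings.
def _time_field_sketch():
    f = TimeField()
    assert f.to_python(datetime.time(13, 30)) == datetime.time(13, 30)
    assert f.to_python(datetime.datetime(2000, 1, 1, 13, 30)) == datetime.time(13, 30)
    assert f.to_python('13:30:15') == datetime.time(13, 30, 15)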
class URLField(CharField):
default_validators = [validators.URLValidator()]
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
super(URLField, self).__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(URLField, self).deconstruct()
if kwargs.get("max_length", None) == 200:
del kwargs['max_length']
return name, path, args, kwargs
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
class BinaryField(Field):
description = _("Raw binary data")
empty_values = [None, b'']
def __init__(self, *args, **kwargs):
kwargs['editable'] = False
super(BinaryField, self).__init__(*args, **kwargs)
if self.max_length is not None:
self.validators.append(validators.MaxLengthValidator(self.max_length))
def deconstruct(self):
name, path, args, kwargs = super(BinaryField, self).deconstruct()
del kwargs['editable']
return name, path, args, kwargs
def get_internal_type(self):
return "BinaryField"
def get_default(self):
if self.has_default() and not callable(self.default):
return self.default
default = super(BinaryField, self).get_default()
if default == '':
return b''
return default
def get_db_prep_value(self, value, connection, prepared=False):
value = super(BinaryField, self).get_db_prep_value(value, connection, prepared)
if value is not None:
return connection.Database.Binary(value)
return value
def value_to_string(self, obj):
"""Binary data is serialized as base64"""
return b64encode(force_bytes(self._get_val_from_obj(obj))).decode('ascii')
def to_python(self, value):
# If it's a string, it should be base64-encoded data
if isinstance(value, six.text_type):
return six.memoryview(b64decode(force_bytes(value)))
return value
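# Editor's sketch (illustrative, not part of Django): binary payloads are
# serialized as base64 text and decoded back to a memoryview by to_python().
def _binary_field_sketch():
    f = BinaryField()
    raw = b'\x00\x01binary'
    encoded = b64encode(raw).decode('ascii')   # what value_to_string() emits
    assert bytes(f.to_python(encoded)) == raw  # memoryview -> bytes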
class UUIDField(Field):
default_error_messages = {
'invalid': _("'%(value)s' is not a valid UUID."),
}
description = 'Universally unique identifier'
empty_strings_allowed = False
def __init__(self, verbose_name=None, **kwargs):
kwargs['max_length'] = 32
super(UUIDField, self).__init__(verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(UUIDField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
def get_internal_type(self):
return "UUIDField"
def get_db_prep_value(self, value, connection, prepared=False):
if isinstance(value, six.string_types):
value = uuid.UUID(value.replace('-', ''))
if isinstance(value, uuid.UUID):
if connection.features.has_native_uuid_field:
return value
return value.hex
return value
def to_python(self, value):
if value and not isinstance(value, uuid.UUID):
try:
return uuid.UUID(value)
except ValueError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
return value
def formfield(self, **kwargs):
defaults = {
'form_class': forms.UUIDField,
}
defaults.update(kwargs)
return super(UUIDField, self).formfield(**defaults)
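# Editor's sketch (illustrative, not part of Django): UUIDField accepts
# hyphenated or bare hex strings; backends without a native uuid type store
# the 32-character hex form.
def _uuid_field_sketch():
    f = UUIDField()
    u = f.to_python('12345678-1234-5678-1234-567812345678')
    assert isinstance(u, uuid.UUID)
    assert u.hex == '12345678123456781234567812345678'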
# --- file: /HRC_archive/LETG/extract_pha_file.py (repo: chandra-mta/HRC, Python, no license) ---
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#########################################################################################
# #
# extract_pha_file.py: create a pha2 file and a tg directory for LETG observation #
# #
# author: t. isobe ([email protected]) #
# #
# last update: Apr 16, 2021 #
# #
#########################################################################################
import sys
import os
import string
import re
import math
import time
import astropy.io.fits as pyfits
#
#--- from ska
#
from Ska.Shell import getenv, bash
#
#--- set ciao environment
#
ciaoenv = getenv('source /soft/ciao/bin/ciao.csh -o', shell='tcsh')
#
#--- reading directory list
#
path = '/data/aschrc6/wilton/isobe/Project9/Scripts/LETG/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append paths to private folders
#
sys.path.append(mta_dir)
sys.path.append(bin_dir)
import mta_common_functions as mcf
#
#--- temp writing file name
#
rtail = int(time.time())
zspace = '/tmp/zspace' + str(rtail)
#
#--- a couple of other settings
#
acmd = '/usr/bin/env PERL5LIB="" ;'
ilist = ['i', 's']
#------------------------------------------------------------------------------------
#-- extract_pha_file: create a pha2 file and a tg directory for LETG observation --
#------------------------------------------------------------------------------------
def extract_pha_file():
"""
create a pha2 file and a tg directory for LETG observation
input: none, but read from /data/hrc/<inst>
output: /data/hrc/<inst>/<obsid>/repro/*pha2.fits
/data/hrc/<inst>/<obsid>/repro/tg/*
"""
#
#--- get obsid list; d_list = [hrci_list, hrcs_list]
#
d_list = make_process_list()
for k in range(0, 2):
dlen = len(d_list[k])
for j in range(0, dlen):
obsid = d_list[k][j]
if j > 0:
cmd = 'rm -rf ./' + d_list[k][j-1]
os.system(cmd)
print("OBSID: " + str(obsid))
#
#--- original data directory name
#
d_dir = '/data/hrc/' + ilist[k] + '/' + str(obsid)
#
#--- copy the data to the local directory
#
cmd = 'cp -r ' + d_dir + ' ./' + str(obsid)
os.system(cmd)
#
#--- remove the files with "new" in the file names
#
cmd = 'rm -f ' + str(obsid) + '/secondary/*new*'
os.system(cmd)
#
#--- run chandra_repro on the copied data
#
try:
pcmd = 'chandra_repro indir=' + str(obsid) + ' outdir='
pcmd = pcmd + str(obsid) + '/new cleanup=no'
cmd = acmd + pcmd
bash(cmd, env=ciaoenv)
except:
#
#--- if failed, keep the obsid in the record
#
ofile = house_keeping + 'no_repro_list'
with open(ofile, 'a') as fo:
eline = mcf.add_leading_zero(obsid, 5) + '\n'
fo.write(eline)
continue
#
#--- move the pha2 file and tg directory to the repro directory
#
outdir = d_dir + '/repro/'
            cmd = 'mkdir -p ' + outdir      #--- just in case the analysis dir does not exist
os.system(cmd)
cmd = 'mv -f ' + str(obsid) + '/new/*_pha2.fits* ' + outdir + '/.'
os.system(cmd)
cmd = 'mv -f ' + str(obsid) + '/new/tg ' + outdir + '/.'
os.system(cmd)
#
#--- change permission etc
#
cmd = 'chmod -R 775 ' + outdir
os.system(cmd)
cmd = 'chgrp -R hat ' + outdir
os.system(cmd)
#
#--- remove the copied data
#
cmd = 'rm -rf ./' + str(obsid)
os.system(cmd)
#
#--- fix naming to 5 digit obsid
#
correct_naming(obsid, ilist[k])
#
#--- send email
#
# line = 'HRC pha process finished\n'
# with open(zspace, 'w') as fo:
# fo.write(line)
#
# cmd = 'cat ' + zspace + '|mailx -s "Subject: HRC PHA finished" [email protected]'
# os.system(cmd)
# cmd = 'rm -rf ' + zspace
# os.system(cmd)
#------------------------------------------------------------------------------------
#-- make_process_list: create a list of unprocessed obsid lists --
#------------------------------------------------------------------------------------
def make_process_list():
"""
create a list of unprocessed obsid lists
input: none
output: a list of lists of [<hrc_i obsids>, <hrc_s obsids>]
"""
#
#--- create obsid <---> inst and obsid <---> grating dictionaries
#
[obs_list, dict_inst, dict_grat] = make_inst_dict()
#
#--- read failed repro obsid list
#
ifile = house_keeping + 'no_repro_list'
out = mcf.read_data_file(ifile)
rfailed = []
for ent in out:
rfailed.append(ent)
save = []
for inst in ['i', 's']:
hdir = '/data/hrc/' + inst + '/'
olist = []
#
#--- choose data with evt1 exists in the directory
#
cmd = 'ls -d ' + hdir + '*/secondary/*evt1.fits* > ' + zspace + ' 2>/dev/null'
os.system(cmd)
out = mcf.read_data_file(zspace, remove=1)
for ent in out:
atemp = re.split('\/', ent)
obsid = atemp[-3]
#
#--- check whether this obsid was previously checked, but failed to get the data
#
test = mcf.add_leading_zero(obsid, 5)
if test in rfailed:
continue
#
#--- check whether the pha2 file already exists
#
cmd = 'ls ' + hdir + obsid + '/repro/* > ' + zspace + ' 2>/dev/null'
os.system(cmd)
with open(zspace, 'r') as f:
ochk = f.read()
cmd = 'rm -rf ' + zspace
os.system(cmd)
mc = re.search('pha2', ochk)
#
#--- check whether it is a grating observation
#
if mc is None:
try:
iobsid = str(int(float(obsid)))
grat = dict_grat[iobsid]
except:
grat = 'NONE'
#
#--- special treatment for 6**** level calib observations
#
if obsid[0] == '6':
try:
grat = check_grating_from_header(inst, obsid)
except:
grat = 'NONE'
if grat == 'LETG':
olist.append(obsid)
save.append(olist)
return save
#------------------------------------------------------------------------------------
#-- make_inst_dict: create obsid <---> inst, obsid <---> grating dictionaries -
#------------------------------------------------------------------------------------
def make_inst_dict():
"""
create obsid <---> inst, obsid <---> grating dictionaries
input: none, but read from /data/mta4/obs_ss/sot_ocat.out
output: a list of <obsid list>, <dict of instruments>, <dict of grating>
note: only letg is taken as grating. hetg is ignored
"""
ifile = '/data/mta4/obs_ss/sot_ocat.out'
data = mcf.read_data_file(ifile)
obs_list = []
dict_inst = {}
dict_grat = {}
for ent in data:
mc1 = re.search('HRC', ent)
if mc1 is None:
continue
mc2 = re.search('archived', ent)
mc3 = re.search('observed', ent)
if (mc2 is None) and (mc3 is None):
continue
mc4 = re.search('LETG', ent)
if mc4 is not None:
grat = 'LETG'
else:
grat = 'NONE'
atemp = re.split('\^', ent)
obsid = atemp[1].strip()
obsid = str(int(float(obsid)))
inst = atemp[12].strip()
if inst in ['HRC-I', 'HRC-S']:
obs_list.append(obsid)
dict_inst[obsid] = inst
dict_grat[obsid] = grat
return [obs_list, dict_inst, dict_grat]
#------------------------------------------------------------------------------------
#-- check_grating_from_header: checking grating from a header of the evt1 file of obsid
#------------------------------------------------------------------------------------
def check_grating_from_header(hrc, obsid):
"""
checking grating from a header of the evt1 file of obsid
input: hrc --- either i or s
obsid --- obsid
    output: grat --- grating, such as LETG, HETG, or NONE
"""
cmd = ' ls /data/hrc/' + hrc + '/' + obsid + '/secondary/*evt1.fits* > ' + zspace + ' 2>/dev/null'
os.system(cmd)
data = mcf.read_data_file(zspace, remove=1)
try:
fits = data[0].strip()
except:
return 'NONE'
flist = pyfits.open(fits)
try:
grat = flist[1].header['GRATING']
except:
grat = 'NONE'
flist.close()
return grat
#------------------------------------------------------------------------------------------------
#-- correct_naming: check repro directory and correct wrongly named fits and par file
#------------------------------------------------------------------------------------------------
def correct_naming(obsid, inst):
"""
check repro directory and correct wrongly named fits and par file
input: obsid --- obsid
inst --- instrument. either "i" or "s"
"""
cobsid = str(int(float(obsid)))
if len(cobsid) == 5:
return
lobsid = mcf.add_leading_zero(obsid, 5)
cmd = 'ls /data/hrc/' + inst + '/' + lobsid + '/repro/hrcf* >' + zspace
os.system(cmd)
data = mcf.read_data_file(zspace, remove=1)
for ent in data:
atemp = re.split('\/', ent)
fname = atemp[-1]
mc = re.search(lobsid, fname)
if mc is not None:
continue
else:
atemp = re.split('hrcf', fname)
btemp = re.split('_', atemp[1])
sobs = btemp[0]
new = fname.replace(sobs, lobsid)
            full = '/data/hrc/' + inst + '/' + lobsid + '/repro/' + new
cmd = 'mv ' + ent + ' ' + full
os.system(cmd)
#
#--- compress fits files
#
cmd = 'gzip /data/hrc/' + inst + '/' + lobsid + '/repro/*fits'
os.system(cmd)
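#
#--- editor's note (illustrative example only): the rename loop above turns a
#--- short obsid embedded in a file name into its zero-padded form, e.g.
#--- 'hrcf815_repro_evt2.fits' --> 'hrcf00815_repro_evt2.fits'
#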
#------------------------------------------------------------------------------------
if __name__ == "__main__":
extract_pha_file()
# --- file: /graph/blini1.py (repo: KseniaMIPT/Adamasta, Python, no license) ---
def check():
for i in range(5):
yield i+1
print(check())
for i in check():
print(i)
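# Note (editor's comment): check() is a generator function, so print(check())
# prints the generator object itself (e.g. <generator object check at 0x...>),
# while the for loop consumes it and prints 1 through 5.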
# --- file: /bmga/utils/formatting.py (repo: vituocgia/boxme-api, Python, BSD-3-Clause) ---
from __future__ import unicode_literals
from django.utils import dateformat
from bmga.utils.timezone import make_naive, aware_datetime
from dateutil.parser import parse as mk_datetime # flake8: noqa
def format_datetime(dt):
"""
RFC 2822 datetime formatter
"""
return dateformat.format(make_naive(dt), 'r')
def format_date(d):
"""
RFC 2822 date formatter
"""
# workaround because Django's dateformat utility requires a datetime
# object (not just date)
dt = aware_datetime(d.year, d.month, d.day, 0, 0, 0)
return dateformat.format(dt, 'j M Y')
def format_time(t):
"""
RFC 2822 time formatter
"""
# again, workaround dateformat input requirement
dt = aware_datetime(2000, 1, 1, t.hour, t.minute, t.second)
return dateformat.format(dt, 'H:i:s O')
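# Editor's sketch (illustrative, not part of the module; the exact offsets
# depend on the configured timezone, UTC assumed here):
#
#   format_date(datetime.date(2018, 4, 6))  -> '6 Apr 2018'
#   format_time(datetime.time(9, 11, 30))   -> '09:11:30 +0000'
#   format_datetime(aware_datetime(2018, 4, 6, 9, 11, 30))
#                                            -> 'Fri, 6 Apr 2018 09:11:30 +0000'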
# --- file: /Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/iptvrange_b15e81483d5be2cf30e042843e79b969.py (repo: pdobrinskiy/devcore, Python, no license) ---
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class IptvRange(Base):
"""
The IptvRange class encapsulates a list of iptvRange resources that are managed by the user.
A list of resources can be retrieved from the server using the IptvRange.find() method.
The list can be managed by using the IptvRange.add() and IptvRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'iptvRange'
_SDM_ATT_MAP = {
'Enabled': 'enabled',
'GeneralQueryResponseMode': 'generalQueryResponseMode',
'ImmediateResponse': 'immediateResponse',
'InterStbStartDelay': 'interStbStartDelay',
'JoinLatencyThreshold': 'joinLatencyThreshold',
'JoinLeaveMultiplier': 'joinLeaveMultiplier',
'LeaveLatencyThreshold': 'leaveLatencyThreshold',
'LogFailureTimestamps': 'logFailureTimestamps',
'Name': 'name',
'ObjectId': 'objectId',
'ReportFrequency': 'reportFrequency',
'RouterAlert': 'routerAlert',
'SpecificQueryResponseMode': 'specificQueryResponseMode',
'StbLeaveJoinDelay': 'stbLeaveJoinDelay',
'UnsolicitedResponseMode': 'unsolicitedResponseMode',
'Version': 'version',
'ViewingProfile': 'viewingProfile',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(IptvRange, self).__init__(parent, list_op)
@property
def IptvChannels(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.iptvchannels_7305b62e9ac2aa9f13637cc3a90a716f.IptvChannels): An instance of the IptvChannels class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.iptvchannels_7305b62e9ac2aa9f13637cc3a90a716f import IptvChannels
if self._properties.get('IptvChannels', None) is not None:
return self._properties.get('IptvChannels')
else:
return IptvChannels(self)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def GeneralQueryResponseMode(self):
# type: () -> bool
"""DEPRECATED
Returns
-------
- bool: If selected, responds to General Query messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['GeneralQueryResponseMode'])
@GeneralQueryResponseMode.setter
def GeneralQueryResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['GeneralQueryResponseMode'], value)
@property
def ImmediateResponse(self):
# type: () -> bool
"""DEPRECATED
Returns
-------
- bool: If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
"""
return self._get_attribute(self._SDM_ATT_MAP['ImmediateResponse'])
@ImmediateResponse.setter
def ImmediateResponse(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['ImmediateResponse'], value)
@property
def InterStbStartDelay(self):
# type: () -> int
"""
Returns
-------
- number: Time in milliseconds between Join messages from clients within the same range.
"""
return self._get_attribute(self._SDM_ATT_MAP['InterStbStartDelay'])
@InterStbStartDelay.setter
def InterStbStartDelay(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['InterStbStartDelay'], value)
@property
def JoinLatencyThreshold(self):
# type: () -> int
"""
Returns
-------
- number: The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
"""
return self._get_attribute(self._SDM_ATT_MAP['JoinLatencyThreshold'])
@JoinLatencyThreshold.setter
def JoinLatencyThreshold(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['JoinLatencyThreshold'], value)
@property
def JoinLeaveMultiplier(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: The number of times a host sends every Join or Leave message.
"""
return self._get_attribute(self._SDM_ATT_MAP['JoinLeaveMultiplier'])
@JoinLeaveMultiplier.setter
def JoinLeaveMultiplier(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['JoinLeaveMultiplier'], value)
@property
def LeaveLatencyThreshold(self):
# type: () -> int
"""
Returns
-------
- number: The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
"""
return self._get_attribute(self._SDM_ATT_MAP['LeaveLatencyThreshold'])
@LeaveLatencyThreshold.setter
def LeaveLatencyThreshold(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LeaveLatencyThreshold'], value)
@property
def LogFailureTimestamps(self):
# type: () -> bool
"""
Returns
-------
- bool: If enabled, the timestamps for Join and Leave failures are saved to a log file.
"""
return self._get_attribute(self._SDM_ATT_MAP['LogFailureTimestamps'])
@LogFailureTimestamps.setter
def LogFailureTimestamps(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['LogFailureTimestamps'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of range
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def ReportFrequency(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
"""
return self._get_attribute(self._SDM_ATT_MAP['ReportFrequency'])
@ReportFrequency.setter
def ReportFrequency(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['ReportFrequency'], value)
@property
def RouterAlert(self):
# type: () -> bool
"""DEPRECATED
Returns
-------
- bool: If selected, sets the Send Router Alert bit in the IP header.
"""
return self._get_attribute(self._SDM_ATT_MAP['RouterAlert'])
@RouterAlert.setter
def RouterAlert(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['RouterAlert'], value)
@property
def SpecificQueryResponseMode(self):
# type: () -> bool
"""DEPRECATED
Returns
-------
- bool: If selected, responds to Group-Specific Query messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['SpecificQueryResponseMode'])
@SpecificQueryResponseMode.setter
def SpecificQueryResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['SpecificQueryResponseMode'], value)
@property
def StbLeaveJoinDelay(self):
# type: () -> int
"""
Returns
-------
- number: Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
"""
return self._get_attribute(self._SDM_ATT_MAP['StbLeaveJoinDelay'])
@StbLeaveJoinDelay.setter
def StbLeaveJoinDelay(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['StbLeaveJoinDelay'], value)
@property
def UnsolicitedResponseMode(self):
# type: () -> bool
"""DEPRECATED
Returns
-------
- bool: If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
"""
return self._get_attribute(self._SDM_ATT_MAP['UnsolicitedResponseMode'])
@UnsolicitedResponseMode.setter
def UnsolicitedResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['UnsolicitedResponseMode'], value)
@property
def Version(self):
# type: () -> str
"""DEPRECATED
Returns
-------
- str: IGMP/MLD protocol version.
"""
return self._get_attribute(self._SDM_ATT_MAP['Version'])
@Version.setter
def Version(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Version'], value)
@property
def ViewingProfile(self):
# type: () -> str
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile): Template describing the behavior of how clients view the lists of channels.
"""
return self._get_attribute(self._SDM_ATT_MAP['ViewingProfile'])
@ViewingProfile.setter
def ViewingProfile(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['ViewingProfile'], value)
def update(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, InterStbStartDelay=None, JoinLatencyThreshold=None, JoinLeaveMultiplier=None, LeaveLatencyThreshold=None, LogFailureTimestamps=None, Name=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, StbLeaveJoinDelay=None, UnsolicitedResponseMode=None, Version=None, ViewingProfile=None):
# type: (bool, bool, bool, int, int, int, int, bool, str, int, bool, bool, int, bool, str, str) -> IptvRange
"""Updates iptvRange resource on the server.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- InterStbStartDelay (number): Time in milliseconds between Join messages from clients within the same range.
- JoinLatencyThreshold (number): The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- LeaveLatencyThreshold (number): The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
- LogFailureTimestamps (bool): If enabled, the timestamps for Join and Leave failures are saved to a log file.
- Name (str): Name of range
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- StbLeaveJoinDelay (number): Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
- ViewingProfile (str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile)): Template describing the behavior of how clients view the lists of channels.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, InterStbStartDelay=None, JoinLatencyThreshold=None, JoinLeaveMultiplier=None, LeaveLatencyThreshold=None, LogFailureTimestamps=None, Name=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, StbLeaveJoinDelay=None, UnsolicitedResponseMode=None, Version=None, ViewingProfile=None):
# type: (bool, bool, bool, int, int, int, int, bool, str, int, bool, bool, int, bool, str, str) -> IptvRange
"""Adds a new iptvRange resource on the server and adds it to the container.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- InterStbStartDelay (number): Time in milliseconds between Join messages from clients within the same range.
- JoinLatencyThreshold (number): The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- LeaveLatencyThreshold (number): The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
- LogFailureTimestamps (bool): If enabled, the timestamps for Join and Leave failures are saved to a log file.
- Name (str): Name of range
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- StbLeaveJoinDelay (number): Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
- ViewingProfile (str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile)): Template describing the behavior of how clients view the lists of channels.
Returns
-------
- self: This instance with all currently retrieved iptvRange resources using find and the newly added iptvRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained iptvRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, InterStbStartDelay=None, JoinLatencyThreshold=None, JoinLeaveMultiplier=None, LeaveLatencyThreshold=None, LogFailureTimestamps=None, Name=None, ObjectId=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, StbLeaveJoinDelay=None, UnsolicitedResponseMode=None, Version=None, ViewingProfile=None):
# type: (bool, bool, bool, int, int, int, int, bool, str, str, int, bool, bool, int, bool, str, str) -> IptvRange
"""Finds and retrieves iptvRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve iptvRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all iptvRange resources from the server.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- InterStbStartDelay (number): Time in milliseconds between Join messages from clients within the same range.
- JoinLatencyThreshold (number): The maximum time that is allowed for a multicast stream to arrive for channel for which a Join has been sent.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- LeaveLatencyThreshold (number): The maximum time allowed for a multicast stream to stop for a channel for which a Leave has been sent.
- LogFailureTimestamps (bool): If enabled, the timestamps for Join and Leave failures are saved to a log file.
- Name (str): Name of range
- ObjectId (str): Unique identifier for this object
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- StbLeaveJoinDelay (number): Time in milliseconds between sending a Leave for the current channel and Join for the next channel.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
- ViewingProfile (str(None | /api/v1/sessions/1/ixnetwork/globals/.../iptvProfile)): Template describing the behavior of how clients view the lists of channels.
Returns
-------
- self: This instance with matching iptvRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
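    # Editor's sketch (illustrative; the traversal to reach this container is
    # hypothetical and depends on the configured protocol stack):
    #
    #   ranges = iptv.IptvRange.find(Enabled=True)   # regex-matched retrieval
    #   ranges.add(Name='stb-range-1', Version='IGMPv3')
    #   ranges.update(StbLeaveJoinDelay=100)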
def read(self, href):
"""Retrieves a single instance of iptvRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the iptvRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def CustomProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
---------------------------------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string, async_operation=bool)string
-------------------------------------------------------------
- Arg2 (str): Protocol class name to disable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string, async_operation=bool)string
------------------------------------------------------------
- Arg2 (str): Protocol class name to enable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
def IptvStart(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the iptvStart operation on the server.
Start IPTV on selected plugins and ranges
        The IxNetwork model allows for multiple method signatures with the same name while Python does not.
iptvStart(async_operation=bool)
-------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
iptvStart(Arg2=enum, async_operation=bool)
------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/iptv,/vport/protocolStack/atm/dhcpEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/iptv,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/iptv,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/iptv,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/iptv,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/atm/ipEndpoint/iptv,/vport/protocolStack/atm/ipEndpoint/range/iptvRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/iptvRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/iptvRange,/vport/protocolStack/atm/pppox/iptv,/vport/protocolStack/atm/pppoxEndpoint/iptv,/vport/protocolStack/atm/pppoxEndpoint/range/iptvRange,/vport/protocolStack/ethernet/dhcpEndpoint/iptv,/vport/protocolStack/ethernet/dhcpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/iptv,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/iptv,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/iptv,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpo
int/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/iptv,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ipEndpoint/iptv,/vport/protocolStack/ethernet/ipEndpoint/range/iptvRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/iptvRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/iptvRange,/vport/protocolStack/ethernet/pppox/iptv,/vport/protocolStack/ethernet/pppoxEndpoint/iptv,/vport/protocolStack/ethernet/pppoxEndpoint/range/iptvRange]
        - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent REST API calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('iptvStart', payload=payload, response_object=None)
def IptvStop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the iptvStop operation on the server.
Stop IPTV on selected plugins and ranges
        The IxNetwork model allows for multiple method signatures with the same name, while Python does not.
iptvStop(async_operation=bool)
------------------------------
        - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent REST API calls made through the Connection class will block until the operation is complete.
iptvStop(Arg2=enum, async_operation=bool)
-----------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/iptv,/vport/protocolStack/atm/dhcpEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/iptv,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/iptv,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/iptv,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/iptv,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/atm/ipEndpoint/iptv,/vport/protocolStack/atm/ipEndpoint/range/iptvRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/iptvRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/iptvRange,/vport/protocolStack/atm/pppox/iptv,/vport/protocolStack/atm/pppoxEndpoint/iptv,/vport/protocolStack/atm/pppoxEndpoint/range/iptvRange,/vport/protocolStack/ethernet/dhcpEndpoint/iptv,/vport/protocolStack/ethernet/dhcpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/iptv,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/iptv,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/iptv,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpo
int/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/iptv,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/iptvRange,/vport/protocolStack/ethernet/ipEndpoint/iptv,/vport/protocolStack/ethernet/ipEndpoint/range/iptvRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/iptvRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/iptvRange,/vport/protocolStack/ethernet/pppox/iptv,/vport/protocolStack/ethernet/pppoxEndpoint/iptv,/vport/protocolStack/ethernet/pppoxEndpoint/range/iptvRange]
        - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent REST API calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('iptvStop', payload=payload, response_object=None)
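# Usage sketch (assumption, not from the source): 'iptv' stands for any object
# exposing these execute methods; session setup is omitted and all argument
# values below are illustrative only.
# iptv.IptvStart()                              # start IPTV on selected ranges
# iptv.IptvStart(async_operation=True)          # queue the operation, return early
# iptv.IptvStop('sync', async_operation=False)  # Arg2 is one of 'async' | 'sync'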
|
[
"[email protected]"
] | |
7728d77ed32221009bb1df33c077dfc8ef3d3b5f
|
8dcd3ee098b4f5b80879c37a62292f42f6b2ae17
|
/venv/Lib/site-packages/pandas/tests/frame/test_api.py
|
1cfbd5961177ae6b74b0110ac71414299c5047fe
|
[] |
no_license
|
GregVargas1999/InfinityAreaInfo
|
53fdfefc11c4af8f5d2b8f511f7461d11a3f7533
|
2e4a7c6a2424514ca0ec58c9153eb08dc8e09a4a
|
refs/heads/master
| 2022-12-01T20:26:05.388878 | 2020-08-11T18:37:05 | 2020-08-11T18:37:05 | 286,821,452 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 18,965 |
py
|
import datetime
import pydoc
from copy import deepcopy
import numpy as np
import pandas as pd
import pandas._testing as tm
import pytest
from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range
from pandas.compat import PY37
from pandas.util._test_decorators import async_mark
class TestDataFrameMisc:
def test_copy_index_name_checking(self, float_frame):
# don't want to be able to modify the index stored elsewhere after
# making a copy
for attr in ("index", "columns"):
ind = getattr(float_frame, attr)
ind.name = None
cp = float_frame.copy()
getattr(cp, attr).name = "foo"
assert getattr(float_frame, attr).name is None
def test_getitem_pop_assign_name(self, float_frame):
s = float_frame["A"]
assert s.name == "A"
s = float_frame.pop("A")
assert s.name == "A"
s = float_frame.loc[:, "B"]
assert s.name == "B"
s2 = s.loc[:]
assert s2.name == "B"
def test_get_value(self, float_frame):
for idx in float_frame.index:
for col in float_frame.columns:
result = float_frame._get_value(idx, col)
expected = float_frame[col][idx]
tm.assert_almost_equal(result, expected)
def test_add_prefix_suffix(self, float_frame):
with_prefix = float_frame.add_prefix("foo#")
expected = pd.Index(["foo#{c}".format(c=c) for c in float_frame.columns])
tm.assert_index_equal(with_prefix.columns, expected)
with_suffix = float_frame.add_suffix("#foo")
expected = pd.Index(["{c}#foo".format(c=c) for c in float_frame.columns])
tm.assert_index_equal(with_suffix.columns, expected)
with_pct_prefix = float_frame.add_prefix("%")
expected = pd.Index(["%{c}".format(c=c) for c in float_frame.columns])
tm.assert_index_equal(with_pct_prefix.columns, expected)
with_pct_suffix = float_frame.add_suffix("%")
expected = pd.Index(["{c}%".format(c=c) for c in float_frame.columns])
tm.assert_index_equal(with_pct_suffix.columns, expected)
def test_get_axis(self, float_frame):
f = float_frame
assert f._get_axis_number(0) == 0
assert f._get_axis_number(1) == 1
assert f._get_axis_number("index") == 0
assert f._get_axis_number("rows") == 0
assert f._get_axis_number("columns") == 1
assert f._get_axis_name(0) == "index"
assert f._get_axis_name(1) == "columns"
assert f._get_axis_name("index") == "index"
assert f._get_axis_name("rows") == "index"
assert f._get_axis_name("columns") == "columns"
assert f._get_axis(0) is f.index
assert f._get_axis(1) is f.columns
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(2)
with pytest.raises(ValueError, match="No axis.*foo"):
f._get_axis_name("foo")
with pytest.raises(ValueError, match="No axis.*None"):
f._get_axis_name(None)
with pytest.raises(ValueError, match="No axis named"):
f._get_axis_number(None)
def test_keys(self, float_frame):
getkeys = float_frame.keys
assert getkeys() is float_frame.columns
def test_column_contains_raises(self, float_frame):
with pytest.raises(TypeError, match="unhashable type: 'Index'"):
float_frame.columns in float_frame
def test_tab_completion(self):
# DataFrame whose columns are identifiers shall have them in __dir__.
df = pd.DataFrame([list("abcd"), list("efgh")], columns=list("ABCD"))
for key in list("ABCD"):
assert key in dir(df)
assert isinstance(df.__getitem__("A"), pd.Series)
# DataFrame whose first-level columns are identifiers shall have
# them in __dir__.
df = pd.DataFrame(
[list("abcd"), list("efgh")],
columns=pd.MultiIndex.from_tuples(list(zip("ABCD", "EFGH"))),
)
for key in list("ABCD"):
assert key in dir(df)
for key in list("EFGH"):
assert key not in dir(df)
assert isinstance(df.__getitem__("A"), pd.DataFrame)
def test_not_hashable(self):
empty_frame = DataFrame()
df = DataFrame([1])
msg = "'DataFrame' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(df)
with pytest.raises(TypeError, match=msg):
hash(empty_frame)
def test_new_empty_index(self):
df1 = DataFrame(np.random.randn(0, 3))
df2 = DataFrame(np.random.randn(0, 3))
df1.index.name = "foo"
assert df2.index.name is None
def test_array_interface(self, float_frame):
with np.errstate(all="ignore"):
result = np.sqrt(float_frame)
assert isinstance(result, type(float_frame))
assert result.index is float_frame.index
assert result.columns is float_frame.columns
tm.assert_frame_equal(result, float_frame.apply(np.sqrt))
def test_get_agg_axis(self, float_frame):
cols = float_frame._get_agg_axis(0)
assert cols is float_frame.columns
idx = float_frame._get_agg_axis(1)
assert idx is float_frame.index
msg = r"Axis must be 0 or 1 \(got 2\)"
with pytest.raises(ValueError, match=msg):
float_frame._get_agg_axis(2)
def test_nonzero(self, float_frame, float_string_frame):
empty_frame = DataFrame()
assert empty_frame.empty
assert not float_frame.empty
assert not float_string_frame.empty
# corner case
df = DataFrame({"A": [1.0, 2.0, 3.0], "B": ["a", "b", "c"]}, index=np.arange(3))
del df["A"]
assert not df.empty
def test_iteritems(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
for k, v in df.items():
assert isinstance(v, DataFrame._constructor_sliced)
def test_items(self):
# GH 17213, GH 13918
cols = ["a", "b", "c"]
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
for c, (k, v) in zip(cols, df.items()):
assert c == k
assert isinstance(v, Series)
assert (df[k] == v).all()
def test_iter(self, float_frame):
assert tm.equalContents(list(float_frame), float_frame.columns)
def test_iterrows(self, float_frame, float_string_frame):
for k, v in float_frame.iterrows():
exp = float_frame.loc[k]
tm.assert_series_equal(v, exp)
for k, v in float_string_frame.iterrows():
exp = float_string_frame.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_iso8601(self):
# GH 19671
s = DataFrame(
{
"non_iso8601": ["M1701", "M1802", "M1903", "M2004"],
"iso8601": date_range("2000-01-01", periods=4, freq="M"),
}
)
for k, v in s.iterrows():
exp = s.loc[k]
tm.assert_series_equal(v, exp)
def test_iterrows_corner(self):
# gh-12222
df = DataFrame(
{
"a": [datetime.datetime(2015, 1, 1)],
"b": [None],
"c": [None],
"d": [""],
"e": [[]],
"f": [set()],
"g": [{}],
}
)
expected = Series(
[datetime.datetime(2015, 1, 1), None, None, "", [], set(), {}],
index=list("abcdefg"),
name=0,
dtype="object",
)
_, result = next(df.iterrows())
tm.assert_series_equal(result, expected)
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
s = DataFrame._constructor_sliced(tup[1:])
s.name = tup[0]
expected = float_frame.iloc[i, :].reset_index(drop=True)
tm.assert_series_equal(s, expected)
df = DataFrame(
{"floats": np.random.randn(5), "ints": range(5)}, columns=["floats", "ints"]
)
for tup in df.itertuples(index=False):
assert isinstance(tup[1], int)
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
dfaa = df[["a", "a"]]
assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)]
# repr with int on 32-bit/windows
if not (compat.is_platform_windows() or compat.is_platform_32bit()):
assert (
repr(list(df.itertuples(name=None)))
== "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]"
)
tup = next(df.itertuples(name="TestName"))
assert tup._fields == ("Index", "a", "b")
assert (tup.Index, tup.a, tup.b) == tup
assert type(tup).__name__ == "TestName"
df.columns = ["def", "return"]
tup2 = next(df.itertuples(name="TestName"))
assert tup2 == (0, 1, 4)
assert tup2._fields == ("Index", "_1", "_2")
df3 = DataFrame({"f" + str(i): [i] for i in range(1024)})
# will raise SyntaxError if trying to create namedtuple
tup3 = next(df3.itertuples())
assert isinstance(tup3, tuple)
if PY37:
assert hasattr(tup3, "_fields")
else:
assert not hasattr(tup3, "_fields")
# GH 28282
df_254_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(254)}])
result_254_columns = next(df_254_columns.itertuples(index=False))
assert isinstance(result_254_columns, tuple)
assert hasattr(result_254_columns, "_fields")
df_255_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(255)}])
result_255_columns = next(df_255_columns.itertuples(index=False))
assert isinstance(result_255_columns, tuple)
        # DataFrames with >=255 columns will fall back to regular tuples on Python < 3.7
if PY37:
assert hasattr(result_255_columns, "_fields")
else:
assert not hasattr(result_255_columns, "_fields")
def test_sequence_like_with_categorical(self):
# GH 7839
# make sure can iterate
df = DataFrame(
{"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
)
df["grade"] = Categorical(df["raw_grade"])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.items():
            str(col)
def test_len(self, float_frame):
assert len(float_frame) == len(float_frame.index)
def test_values_mixed_dtypes(self, float_frame, float_string_frame):
frame = float_frame
arr = frame.values
frame_cols = frame.columns
for i, row in enumerate(arr):
for j, value in enumerate(row):
col = frame_cols[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
arr = float_string_frame[["foo", "A"]].values
assert arr[0, 0] == "bar"
df = DataFrame({"complex": [1j, 2j, 3j], "real": [1, 2, 3]})
arr = df.values
assert arr[0, 0] == 1j
# single block corner case
arr = float_frame[["A", "B"]].values
expected = float_frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(arr, expected)
def test_to_numpy(self):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
expected = np.array([[1, 3], [2, 4.5]])
result = df.to_numpy()
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_dtype(self):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4.5]})
expected = np.array([[1, 3], [2, 4]], dtype="int64")
result = df.to_numpy(dtype="int64")
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_copy(self):
arr = np.random.randn(4, 3)
df = pd.DataFrame(arr)
assert df.values.base is arr
assert df.to_numpy(copy=False).base is arr
assert df.to_numpy(copy=True).base is None
def test_transpose(self, float_frame):
frame = float_frame
dft = frame.T
for idx, series in dft.items():
for col, value in series.items():
if np.isnan(value):
assert np.isnan(frame[col][idx])
else:
assert value == frame[col][idx]
# mixed type
index, data = tm.getMixedTypeDict()
mixed = DataFrame(data, index=index)
mixed_T = mixed.T
for col, s in mixed_T.items():
assert s.dtype == np.object_
def test_swapaxes(self):
df = DataFrame(np.random.randn(10, 5))
tm.assert_frame_equal(df.T, df.swapaxes(0, 1))
tm.assert_frame_equal(df.T, df.swapaxes(1, 0))
tm.assert_frame_equal(df, df.swapaxes(0, 0))
msg = (
"No axis named 2 for object type"
r" <class 'pandas.core(.sparse)?.frame.(Sparse)?DataFrame'>"
)
with pytest.raises(ValueError, match=msg):
df.swapaxes(2, 5)
def test_axis_aliases(self, float_frame):
f = float_frame
# reg name
expected = f.sum(axis=0)
result = f.sum(axis="index")
tm.assert_series_equal(result, expected)
expected = f.sum(axis=1)
result = f.sum(axis="columns")
tm.assert_series_equal(result, expected)
def test_class_axis(self):
# GH 18147
# no exception and no empty docstring
assert pydoc.getdoc(DataFrame.index)
assert pydoc.getdoc(DataFrame.columns)
def test_more_values(self, float_string_frame):
values = float_string_frame.values
assert values.shape[1] == len(float_string_frame.columns)
def test_repr_with_mi_nat(self, float_string_frame):
df = DataFrame(
{"X": [1, 2]}, index=[[pd.NaT, pd.Timestamp("20130101")], ["a", "b"]]
)
result = repr(df)
expected = " X\nNaT a 1\n2013-01-01 b 2"
assert result == expected
def test_items_names(self, float_string_frame):
for k, v in float_string_frame.items():
assert v.name == k
def test_series_put_names(self, float_string_frame):
series = float_string_frame._series
for k, v in series.items():
assert v.name == k
def test_empty_nonzero(self):
df = DataFrame([1, 2, 3])
assert not df.empty
df = DataFrame(index=[1], columns=[1])
assert not df.empty
df = DataFrame(index=["a", "b"], columns=["c", "d"]).dropna()
assert df.empty
assert df.T.empty
empty_frames = [
DataFrame(),
DataFrame(index=[1]),
DataFrame(columns=[1]),
DataFrame({1: []}),
]
for df in empty_frames:
assert df.empty
assert df.T.empty
def test_with_datetimelikes(self):
df = DataFrame(
{
"A": date_range("20130101", periods=10),
"B": timedelta_range("1 day", periods=10),
}
)
t = df.T
result = t.dtypes.value_counts()
expected = Series({np.dtype("object"): 10})
tm.assert_series_equal(result, expected)
def test_values(self, float_frame):
float_frame.values[:, 0] = 5.0
assert (float_frame.values[:, 0] == 5).all()
def test_deepcopy(self, float_frame):
cp = deepcopy(float_frame)
series = cp["A"]
series[:] = 10
for idx, value in series.items():
assert float_frame["A"][idx] != value
def test_transpose_get_view(self, float_frame):
dft = float_frame.T
dft.values[:, 5:10] = 5
assert (float_frame.values[5:10] == 5).all()
def test_inplace_return_self(self):
# GH 1893
data = DataFrame(
{"a": ["foo", "bar", "baz", "qux"], "b": [0, 0, 1, 1], "c": [1, 2, 3, 4]}
)
def _check_f(base, f):
result = f(base)
assert result is None
# -----DataFrame-----
# set_index
f = lambda x: x.set_index("a", inplace=True)
_check_f(data.copy(), f)
# reset_index
f = lambda x: x.reset_index(inplace=True)
_check_f(data.set_index("a"), f)
# drop_duplicates
f = lambda x: x.drop_duplicates(inplace=True)
_check_f(data.copy(), f)
# sort
f = lambda x: x.sort_values("b", inplace=True)
_check_f(data.copy(), f)
# sort_index
f = lambda x: x.sort_index(inplace=True)
_check_f(data.copy(), f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(data.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(data.copy(), f)
# rename
f = lambda x: x.rename({1: "foo"}, inplace=True)
_check_f(data.copy(), f)
# -----Series-----
d = data.copy()["c"]
# reset_index
f = lambda x: x.reset_index(inplace=True, drop=True)
_check_f(data.set_index("a")["c"], f)
# fillna
f = lambda x: x.fillna(0, inplace=True)
_check_f(d.copy(), f)
# replace
f = lambda x: x.replace(1, 0, inplace=True)
_check_f(d.copy(), f)
# rename
f = lambda x: x.rename({1: "foo"}, inplace=True)
_check_f(d.copy(), f)
@async_mark()
async def test_tab_complete_warning(self, ip):
# GH 16409
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; df = pd.DataFrame()"
await ip.run_code(code)
        # TODO: remove this when IPython updates
        # GH 33567, the jedi version raises a DeprecationWarning in IPython
import jedi
if jedi.__version__ < "0.17.0":
warning = tm.assert_produces_warning(None)
else:
warning = tm.assert_produces_warning(
DeprecationWarning, check_stacklevel=False
)
with warning:
with provisionalcompleter("ignore"):
list(ip.Completer.completions("df.", 1))
def test_attrs(self):
df = pd.DataFrame({"A": [2, 3]})
assert df.attrs == {}
df.attrs["version"] = 1
result = df.rename(columns=str)
assert result.attrs == {"version": 1}
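# Quick illustration (sketch, not part of the test suite) of the itertuples
# renaming behavior asserted above: column names that are not valid Python
# identifiers are replaced with positional field names.
# >>> next(DataFrame({"def": [1], "return": [2]}).itertuples())._fields
# ('Index', '_1', '_2')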
|
[
"[email protected]"
] | |
d62cb9ed15fdf25fbcf76191f5229784b9ee13e5
|
cf3ef8f3eca858bd3c64ba6159a2ba7cdb1722ad
|
/studygroups/views/organizer.py
|
06e748fc35dd2c27b9b26843e6b0676e4ae6d0d6
|
[] |
no_license
|
alvarmaciel/learning-circles
|
2ff956dcbe0b5a42f64036c33613644115063a8d
|
3ac444fd6f5a81f655face733e7d41786e085cd4
|
refs/heads/master
| 2021-01-11T00:45:04.513019 | 2016-10-05T14:13:16 | 2016-10-05T14:13:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,094 |
py
|
import datetime
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.core.mail import EmailMultiAlternatives, send_mail
from django.contrib import messages
from django.conf import settings
from django import http
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.views.generic.base import View
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic import ListView
from studygroups.models import Course
from studygroups.models import StudyGroup
from studygroups.models import TeamMembership
from studygroups.models import Facilitator
from studygroups.models import StudyGroupMeeting
from studygroups.models import report_data
from studygroups.models import generate_all_meetings
from studygroups.models import get_team_users
from studygroups.models import get_user_team
from studygroups.forms import StudyGroupForm
from studygroups.forms import FacilitatorForm
from studygroups.decorators import user_is_organizer
@user_is_organizer
def organize(request):
today = datetime.datetime.now().date()
two_weeks_ago = today - datetime.timedelta(weeks=2, days=today.weekday())
two_weeks = today - datetime.timedelta(days=today.weekday()) + datetime.timedelta(weeks=3)
study_groups = StudyGroup.objects.active()
facilitators = Facilitator.objects.all()
courses = []# TODO Remove courses until we implement course selection for teams
team = None
if not request.user.is_staff:
team = get_user_team(request.user)
team_users = get_team_users(request.user)
study_groups = study_groups.filter(facilitator__in=team_users)
facilitators = facilitators.filter(user__in=team_users)
active_study_groups = study_groups.filter(
id__in=StudyGroupMeeting.objects.active().filter(meeting_date__gte=two_weeks_ago).values('study_group')
)
meetings = StudyGroupMeeting.objects.active()\
.filter(study_group__in=study_groups, meeting_date__gte=two_weeks_ago)\
.exclude(meeting_date__gte=two_weeks)
context = {
'team': team,
'courses': courses,
'meetings': meetings,
'study_groups': study_groups,
'active_study_groups': active_study_groups,
'facilitators': facilitators,
'today': timezone.now(),
}
return render_to_response('studygroups/organize.html', context, context_instance=RequestContext(request))
class StudyGroupList(ListView):
model = StudyGroup
def get_queryset(self):
study_groups = StudyGroup.objects.active()
if not self.request.user.is_staff:
team_users = get_team_users(self.request.user)
study_groups = study_groups.filter(facilitator__in=team_users)
return study_groups
class StudyGroupMeetingList(ListView):
model = StudyGroupMeeting
def get_queryset(self):
study_groups = StudyGroup.objects.active()
if not self.request.user.is_staff:
team_users = get_team_users(self.request.user)
study_groups = study_groups.filter(facilitator__in=team_users)
meetings = StudyGroupMeeting.objects.active().filter(study_group__in=study_groups)
return meetings
class TeamMembershipDelete(DeleteView):
model = TeamMembership
success_url = reverse_lazy('studygroups_organize')
template_name = 'studygroups/confirm_delete_membership.html'
def get_object(self, queryset=None):
if queryset == None:
queryset = TeamMembership.objects
return queryset.get(user_id=self.kwargs.get('user_id'), team_id=self.kwargs.get('team_id'))
class CourseUpdate(UpdateView):
model = Course
fields = [
'title',
'provider',
'link',
'start_date',
'duration',
'prerequisite',
'time_required',
'caption',
]
success_url = reverse_lazy('studygroups_organize')
class CourseDelete(DeleteView):
model = Course
success_url = reverse_lazy('studygroups_organize')
template_name = 'studygroups/confirm_delete.html'
class StudyGroupCreate(CreateView):
model = StudyGroup
form_class = StudyGroupForm
success_url = reverse_lazy('studygroups_organize')
def form_valid(self, form):
self.object = form.save()
generate_all_meetings(self.object)
return http.HttpResponseRedirect(self.get_success_url())
@user_is_organizer
def report(request):
# TODO - remove this view
study_groups = StudyGroup.objects.active()
for study_group in study_groups:
study_group.laptop_stats = {}
context = {
'study_groups': study_groups,
}
return render_to_response('studygroups/report.html', context, context_instance=RequestContext(request))
@user_is_organizer
def weekly_report(request, year=None, month=None, day=None):
today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
if month and day and year:
today = today.replace(year=int(year), month=int(month), day=int(day))
start_time = today - datetime.timedelta(days=today.weekday())
end_time = start_time + datetime.timedelta(days=7)
context = {
'start_time': start_time,
'end_time': end_time,
}
# get team for current user
team = None
membership = TeamMembership.objects.filter(user=request.user, role=TeamMembership.ORGANIZER).first()
if membership:
team = membership.team
context.update(report_data(start_time, end_time, team))
return render_to_response('studygroups/weekly-update.html', context, context_instance=RequestContext(request))
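# Worked example (sketch) for the week window computed above: if today is
# Wednesday 2016-10-05, today.weekday() == 2, so start_time is Monday
# 2016-10-03 00:00 and end_time is Monday 2016-10-10 00:00.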
|
[
"[email protected]"
] | |
03fbeb1450ccc44bd26fc126ce64cfd378980fa0
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_117/1246.py
|
70e284873ba26f9f9d0eb663271fb9c4b2097cdc
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,615 |
py
|
__author__ = 'joranvar'
__problem__ = 'B'
class Field(object):
def __init__(self, data, height, width):
self.data = data
self.height = height
self.width = width
def is_cuttable(self):
max_x = [max([self.data[y][x] for x in range(self.width)]) for y in range(self.height)]
max_y = [max([self.data[y][x] for y in range(self.height)]) for x in range(self.width)]
for x in range(self.width):
for y in range(self.height):
if self.data[y][x] < min(max_x[y], max_y[x]):
return False
return True
def read_field(f_in, width, height):
field_data = [[int(square) for square in f_in.readline().split()] for line in range(height)]
field = Field(field_data, height, width)
return field
def solve(case, f_in):
N, M = list(map(int, f_in.readline().split()))
field = read_field(f_in, M, N)
if field.is_cuttable(): return ['Case #{}: YES\n'.format(case + 1)]
return ['Case #{}: NO\n'.format(case + 1)]
def open_last_file():
for problem_type in ['-large', '-small-attempt1', '-sample']:
try:
return problem_type, open(__problem__ + problem_type + '.in', 'r')
except FileNotFoundError:
pass
raise FileNotFoundError("No input file found!")
if __name__ == '__main__':
problem_type, f_in = open_last_file()
    print(problem_type)
f_out = open(__problem__ + problem_type + '.out', 'w')
T = int(f_in.readline())
for case in range(T):
f_out.writelines(solve(case, f_in))
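# Worked example (sketch): the field [[2, 1], [1, 2]] is not cuttable because
# data[0][1] == 1 is below min(row max 2, column max 2), so is_cuttable()
# returns False; a constant field such as [[1, 1], [1, 1]] returns True.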
|
[
"[email protected]"
] | |
7d0942492c486ab43f4c39a5adee4453c034f50e
|
c1fe97208afe479b7ae1ee67d69866a6911564ca
|
/AdvCBV/basicapp/admin.py
|
046528c9de692ae1d3f199606430ad8437e9c4a1
|
[] |
no_license
|
jaindhairyahere/Python_Django
|
a0a46c57b6ca60d0942ae181fe28ea56bb1ee948
|
f170a2e38b78df698a02821a454a3baea0c358a6
|
refs/heads/master
| 2020-06-18T09:17:56.364928 | 2019-11-02T18:34:12 | 2019-11-02T18:34:12 | 196,249,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 164 |
py
|
from django.contrib import admin
from basicapp.models import School, Student
# Register your models here.
admin.site.register(Student)
admin.site.register(School)
|
[
"[email protected]"
] | |
151fc23e1533e76eb12ce1b8bb1392755667dbab
|
7f54637e347e5773dfbfded7b46b58b50544cfe5
|
/7-3/chainxy/spiders/tradesecretscanada.py
|
dc8b30b2b1997267ec5b41a42628814c788f3cc0
|
[] |
no_license
|
simba999/all-scrapy
|
5cc26fd92b1d03366b74d4fff58c4a0641c85609
|
d48aeb3c00fa2474153fbc8d131cf58402976e1d
|
refs/heads/master
| 2021-01-25T14:24:04.715550 | 2018-03-03T13:43:13 | 2018-03-03T13:43:13 | 123,695,640 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,369 |
py
|
import scrapy
import json
import os
from scrapy.spiders import Spider
from scrapy.http import FormRequest
from scrapy.http import Request
from chainxy.items import ChainItem
from lxml import etree
from selenium import webdriver
from lxml import html
import usaddress
import pdb
class tradesecretscanada(scrapy.Spider):
name = 'tradesecretscanada'
domain = ''
history = []
def start_requests(self):
init_url = 'http://talk.tradesecrets.ca/locations-reviews/'
yield scrapy.Request(url=init_url, callback=self.body)
def body(self, response):
print("========= Checking.......")
store_list = response.xpath('//a[@rel="noopener noreferrer"]/@href').extract()
for store in store_list:
yield scrapy.Request(url=store, callback=self.parse_page)
def parse_page(self, response):
try:
item = ChainItem()
detail = self.eliminate_space(response.xpath('//div[contains(@class, "fusion-one-half fusion-layout-column fusion-spacing-no")]//h4//text()').extract())
h_temp = ''
for de in detail:
if '(' in de and '-' in de:
try:
item['phone_number'] = self.validate('(' + de.split('(')[1])
except:
item['phone_number'] = self.validate(de)
if ':' in de:
h_temp += de + ', '
if '(' in detail[0]:
detail[0] = self.validate(detail[0].split('(')[0]).replace('|','')
addr = detail[0].replace('|','').split(',')
if len(addr) == 4:
item['address'] = self.validate(addr[1])
item['city'] = self.validate(addr[2])
item['state'] = self.validate(addr[3].strip())[:2].strip()
item['zip_code'] = self.validate(addr[3])[2:].strip()
elif len(addr) == 3:
item['address'] = self.validate(addr[0])
item['city'] = self.validate(addr[1])
item['state'] = self.validate(addr[2].strip())[:2].strip()
item['zip_code'] = self.validate(addr[2])[2:].strip()
else:
pdb.set_trace()
item['country'] = 'Canada'
item['store_hours'] = h_temp[:-2]
yield item
except:
pass
def validate(self, item):
try:
return item.encode('raw-unicode-escape').replace('\u2013', '').replace('\xa0', '').replace('|','').strip()
except:
return ''
def eliminate_space(self, items):
tmp = []
for item in items:
if self.validate(item) != '' and 'try' not in self.validate(item).lower() and 'http' not in self.validate(item).lower():
tmp.append(self.validate(item))
return tmp
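# Run sketch (assumption): from the Scrapy project root, e.g.
#   scrapy crawl tradesecretscanada -o stores.json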
|
[
"[email protected]"
] | |
d4a5a2155aa71f6f81e1301fb6dea5d302b0742f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_greens.py
|
ce3b23baa678edd94ee5bf830fa189133e5ffadb
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
from xai.brain.wordbase.nouns._green import _GREEN
#calss header
class _GREENS(_GREEN, ):
def __init__(self,):
_GREEN.__init__(self)
self.name = "GREENS"
self.specie = 'nouns'
self.basic = "green"
self.jsondata = {}
|
[
"[email protected]"
] | |
62f1f7e28e890ada8f842b1295e4c295bd93ce6b
|
3d14e2430b696a21661ee1b5fc000aa031a81a67
|
/locators/books_page_locators.py
|
403e152b6985de415825fb662971e8422cf916ea
|
[] |
no_license
|
a-soliman/books-toscrap
|
53defb67df7167917a53fa1193fff5f3bccd998c
|
958c347eeb881178346f228326b4ccfe47b7acd2
|
refs/heads/master
| 2020-04-25T17:25:36.492827 | 2019-02-28T16:22:59 | 2019-02-28T16:22:59 | 172,947,801 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 114 |
py
|
"""
extracts books from a page
"""
class BooksPageLocators:
BOOK = "li.col-xs-6.col-sm-4.col-md-3.col-lg-3"
|
[
"[email protected]"
] | |
2370e7452bcc9e77a37e5853184a510e1184341d
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingMedian_NoCycle_LSTM.py
|
52f0b5d941ab48694348dbd8ae8a86fd89845917
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 |
BSD-3-Clause
| 2018-12-17T22:08:12 | 2018-06-12T17:15:43 |
Python
|
UTF-8
|
Python
| false | false | 155 |
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingMedian'] , ['NoCycle'] , ['LSTM'] );
|
[
"[email protected]"
] | |
a33ea344425501fccf20a8502fc44380fce73c76
|
ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31
|
/dino-master/dino/rest/resources/send.py
|
a63978198fcff73e1c60cefb3ad6386d3ea9a807
|
[
"Apache-2.0"
] |
permissive
|
babiato/flaskapp1
|
84de2d0b26a54f5820d3bbe97926782ad41e005c
|
530beb9e3b8516e0e93960b99521c23a523ef546
|
refs/heads/master
| 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,723 |
py
|
import logging
import traceback
import eventlet
import sys
from dino import environ
from dino import utils
from dino.utils.decorators import timeit
from dino.db.manager import UserManager
from dino.rest.resources.base import BaseResource
from flask import request
logger = logging.getLogger(__name__)
def fail(error_message):
return {
'status': 'FAIL',
'message': error_message
}
class SendResource(BaseResource):
def __init__(self):
super(SendResource, self).__init__()
self.user_manager = UserManager(environ.env)
self.request = request
def async_post(self, json):
logger.debug('POST request: %s' % str(json))
if 'content' not in json:
raise RuntimeError('no key [content] in json message')
msg_content = json.get('content')
if msg_content is None or len(msg_content.strip()) == 0:
raise RuntimeError('content may not be blank')
if not utils.is_base64(msg_content):
raise RuntimeError('content in json message must be base64')
user_id = str(json.get('user_id', 0))
user_name = utils.b64d(json.get('user_name', utils.b64e('admin')))
object_type = json.get('object_type')
target_id = str(json.get('target_id'))
namespace = json.get('namespace', '/ws')
target_name = json.get('target_name')
data = utils.activity_for_message(user_id, user_name)
data['target'] = {
'objectType': object_type,
'id': target_id,
'displayName': target_name,
'url': namespace
}
data['object'] = {
'content': msg_content
}
if not environ.env.cache.user_is_in_multicast(target_id):
logger.info('user {} is offline, dropping message: {}'.format(target_id, str(json)))
return
try:
environ.env.out_of_scope_emit('message', data, room=target_id, json=True, namespace='/ws', broadcast=True)
except Exception as e:
logger.error('could not /send message to target {}: {}'.format(target_id, str(e)))
logger.exception(traceback.format_exc())
environ.env.capture_exception(sys.exc_info())
@timeit(logger, 'on_rest_send')
def do_post(self):
is_valid, msg, json = self.validate_json(self.request, silent=False)
if not is_valid:
logger.error('invalid json: %s' % msg)
raise RuntimeError('invalid json')
if json is None:
raise RuntimeError('no json in request')
if not isinstance(json, dict):
raise RuntimeError('need a dict')
eventlet.spawn_n(self.async_post, dict(json))
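# Example request body (sketch; field values are illustrative only):
# {
#     "content": "aGVsbG8=",      # base64-encoded message body (required)
#     "user_id": "1234",
#     "user_name": "YWRtaW4=",    # base64-encoded sender name
#     "object_type": "private",
#     "target_id": "5678",
#     "target_name": "bob",
#     "namespace": "/ws"
# }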
|
[
"[email protected]"
] | |
c797e1ec5b3e5955a867418fed9a26431bd4212c
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/cloud/redis/v1/redis-v1-py/google/cloud/redis_v1/services/cloud_redis/pagers.py
|
ea1c2287e22e2c73eb752e030a4919c860621449
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,709 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.redis_v1.types import cloud_redis
class ListInstancesPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.redis_v1.types.ListInstancesResponse` object, and
provides an ``__iter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.redis_v1.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., cloud_redis.ListInstancesResponse],
request: cloud_redis.ListInstancesRequest,
response: cloud_redis.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.redis_v1.types.ListInstancesRequest):
The initial request object.
response (google.cloud.redis_v1.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = cloud_redis.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[cloud_redis.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[cloud_redis.Instance]:
for page in self.pages:
yield from page.instances
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListInstancesAsyncPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.redis_v1.types.ListInstancesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.redis_v1.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[cloud_redis.ListInstancesResponse]],
request: cloud_redis.ListInstancesRequest,
response: cloud_redis.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.redis_v1.types.ListInstancesRequest):
The initial request object.
response (google.cloud.redis_v1.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = cloud_redis.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[cloud_redis.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[cloud_redis.Instance]:
async def async_generator():
async for page in self.pages:
for response in page.instances:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
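# Usage sketch (assumption): 'async_client' is a hypothetical
# CloudRedisAsyncClient; the async pager is consumed with `async for`.
# async def show_instances():
#     pager = await async_client.list_instances(parent="projects/my-proj/locations/-")
#     async for instance in pager:
#         print(instance.name)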
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
4a8ba21250def0e771eb0d8cfee9b9e5f35ef4b8
|
e87415a8507341d66991411c91e77ad38cda3df9
|
/templated_email/block_render.py
|
9ddf822ec66358a729ae9b8e2ad0a806ddf76d91
|
[
"MIT"
] |
permissive
|
somair/django-templated-email
|
6185abf24031a9813fc8b9d53faa8433f7bda0a6
|
b217a3e38d7af8b514d8f83568c1fd55efd1ac11
|
refs/heads/master
| 2021-01-19T14:13:10.500289 | 2017-01-13T13:06:06 | 2017-01-13T13:06:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,748 |
py
|
from django.template.loader_tags import BlockNode, ExtendsNode
from django.template import loader, Context, RequestContext
from django.http import HttpResponse
def _get_template(template):
if isinstance(template, (tuple, list)):
return loader.select_template(template)
return loader.get_template(template)
class BlockNotFound(Exception):
"""The requested block did not exist."""
pass
def render_template_block(template, block, context):
"""
Renders a single block from a template.
This template should have previously been rendered.
"""
template._render(context)
return _render_template_block_nodelist(template.nodelist, block, context)
def _render_template_block_nodelist(nodelist, block, context):
for node in nodelist:
if isinstance(node, BlockNode) and node.name == block:
return node.render(context)
for key in ('nodelist', 'nodelist_true', 'nodelist_false'):
if hasattr(node, key):
try:
rendered = _render_template_block_nodelist(
getattr(node, key), block, context)
except:
pass
else:
return rendered
for node in nodelist:
if isinstance(node, ExtendsNode):
try:
rendered = render_template_block(
node.get_parent(context), block, context)
except BlockNotFound:
pass
else:
return rendered
raise BlockNotFound
def render_block_to_string(template_name, block, dictionary=None,
context_instance=None):
"""Return a string
Loads the given template_name and renders the given block with the
given dictionary as context.
"""
dictionary = dictionary or {}
t = _get_template(template_name)
if context_instance:
context_instance.update(dictionary)
else:
context_instance = Context(dictionary)
return render_template_block(t, block, context_instance)
def direct_block_to_template(request, template, block, extra_context=None,
mimetype=None, **kwargs):
"""
Render a given block in a given template with any extra URL
parameters in the context as ``{{ params }}``.
"""
if extra_context is None:
extra_context = {}
dictionary = {'params': kwargs}
for key, value in extra_context.items():
if callable(value):
dictionary[key] = value()
else:
dictionary[key] = value
c = RequestContext(request, dictionary)
t = _get_template(template)
t.render(c)
return HttpResponse(render_template_block(t, block, c), mimetype=mimetype)
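# Usage sketch (assumption): template name, block name and context key are
# hypothetical; render_block_to_string renders only the named block.
# subject = render_block_to_string('emails/welcome.html', 'subject',
#                                  {'user': some_user})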
|
[
"[email protected]"
] | |
d4ef7df593f1fbf7027fa866174ceb80592f6f0c
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/battle_control/controllers/quest_progress/__init__.py
|
f9b0128616646671d06aafd2df3f29f0785e39a0
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 |
Python
|
UTF-8
|
Python
| false | false | 151 |
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/battle_control/controllers/quest_progress/__init__.py
pass
|
[
"[email protected]"
] | |
aa6a81ca2a68d3bbe0fcd037c5db7068f2adb766
|
dd44e145ac547209f5f209bc9b1f09189bb8b5c7
|
/Python-Advanced-2021/03.Multidimensional-lists-L/02.Sum-of-matrix-columns.py
|
719862b6c9646cc99f509dcebd28edadbfe2e5d6
|
[] |
no_license
|
todorovventsi/Software-Engineering
|
e3c1be8f0f72c85619518bb914d2a4dbaac270f8
|
64ffa6c80b190e7c6f340aaf219986f769f175ab
|
refs/heads/master
| 2023-07-09T05:35:14.522958 | 2021-08-15T14:35:55 | 2021-08-15T14:35:55 | 336,056,643 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 254 |
py
|
rows, columns = map(int, input().split(", "))
matrix = [[int(i) for i in input().split()] for _ in range(rows)]
for column in range(columns):
column_sum = 0
for row in range(rows):
column_sum += matrix[row][column]
print(column_sum)
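# Equivalent approach (sketch): zip(*matrix) transposes the rows, so each
# tuple it yields is one column of the matrix.
# for column_values in zip(*matrix):
#     print(sum(column_values))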
|
[
"[email protected]"
] | |
1cf47e979c62abe7878aec58e70e8bf82cace12f
|
3cfc6d23f37e45b8fd8b3810aa56eee21a493a01
|
/custom/plugins/RBKeyshot/KeyShot_RenderScript.py
|
1b2b4b7b7cbcf42f8fc4921ae87894b943238807
|
[] |
no_license
|
joinmm/Deadline_Development
|
eb72f13e1beffac2dd55b3d0eb69d56b98110a86
|
90b1031ffa27177c2b7b93ac4fa59fca0f79e227
|
refs/heads/master
| 2023-03-17T22:56:53.716116 | 2019-08-30T03:18:33 | 2019-08-30T03:18:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,952 |
py
|
import os
import time
import shutil
HOME_PATH = os.path.join(os.environ["HOMEPATH"], "Desktop", "Temp")
SCENE_FILE_PATH = "A:/RenderShot_Dir/Files/ctCmh6931TKgvV2/made_to_travel_black_rev4_92630339406526/made_to_travel_black_rev4.bip"
NEW_SCENE_FILE_NAME = os.path.basename(SCENE_FILE_PATH)
NEW_TEMP_SCENE_FILE_NAME = ""
def valid_temp_folder():
if os.path.exists(HOME_PATH):
print("Temp folder has already been created.")
return True
else:
try:
os.makedirs(HOME_PATH)
print("Temp folder created successfully.")
return True
except:
print("Temp folder could not be created.")
return False
def dir_update_check(NETWORK_FILE_DIR, DESTINATION_PATH):
NETWORK_FILE_DIR_LIST = os.listdir(NETWORK_FILE_DIR)
DESTINATION_PATH_LIST = os.listdir(DESTINATION_PATH)
    if len(NETWORK_FILE_DIR_LIST) == len(DESTINATION_PATH_LIST) or len(NETWORK_FILE_DIR_LIST) < len(DESTINATION_PATH_LIST):
print("No directory update required.")
return True
else:
print("Directory update required.")
return False
def file_transfer(SCENE_FILE_PATH):
NETWORK_FILE_DIR = os.path.dirname(SCENE_FILE_PATH)
NETWORK_DIR_NAME = os.path.basename(NETWORK_FILE_DIR)
DESTINATION_PATH = os.path.join(os.environ["HOMEPATH"], "Desktop", "Temp", NETWORK_DIR_NAME)
NEW_SCENE_PATH = os.path.join(DESTINATION_PATH, os.path.basename(SCENE_FILE_PATH))
    if os.path.exists(DESTINATION_PATH) and dir_update_check(NETWORK_FILE_DIR, DESTINATION_PATH):
        print("Render folder has already been transferred, returning immediately.")
return NEW_SCENE_PATH
elif os.path.exists(DESTINATION_PATH) and not dir_update_check(NETWORK_FILE_DIR, DESTINATION_PATH):
shutil.rmtree(DESTINATION_PATH)
print("Render folder has been removed.")
    if valid_temp_folder():
try:
shutil.copytree(NETWORK_FILE_DIR, DESTINATION_PATH)
print("Render folder transferred successfully.")
except:
print("Render folder could not be transferred.")
else:
print("File transfer failed")
return NEW_SCENE_PATH
def main(scene_file_path):
lux.openFile(scene_file_path)
lux.setCamera("Camera 2")
lux.setAnimationFrame( 0 )
    lux.pause()
lux.setAnimationFrame( 0 )
lux.unpause
lux.setAnimationFrame( 0 )
lux.saveFile( "A:/RenderShot_Dir/Files/ctCmh6931TKgvV2/made_to_travel_black_rev4_92630339406526/made_to_travel_black_rev4_1561004076_Camera 2_0_.bip")
lux.openFile( "A:/RenderShot_Dir/Files/ctCmh6931TKgvV2/made_to_travel_black_rev4_92630339406526/made_to_travel_black_rev4_1561004076_Camera 2_0_.bip")
path = "A:/Test_Output/made_to_travel_black_rev4_1560962403_%d.tif"
width = 1920
height = 1080
opts = lux.getRenderOptions()
opts.setAddToQueue(False)
opts.setOutputRenderLayers(False)
opts.setOutputAlphaChannel(False)
try:
opts.setOutputDiffusePass(False)
except AttributeError:
print( "Failed to set render pass: output_diffuse_pass" )
try:
opts.setOutputReflectionPass(False)
except AttributeError:
print( "Failed to set render pass: output_reflection_pass" )
try:
opts.setOutputClownPass(False)
except AttributeError:
print( "Failed to set render pass: output_clown_pass" )
try:
opts.setOutputDirectLightingPass(False)
except AttributeError:
print( "Failed to set render pass: output_direct_lighting_pass" )
try:
opts.setOutputRefractionPass(False)
except AttributeError:
print( "Failed to set render pass: output_refraction_pass" )
try:
opts.setOutputDepthPass(False)
except AttributeError:
print( "Failed to set render pass: output_depth_pass" )
try:
opts.setOutputIndirectLightingPass(False)
except AttributeError:
print( "Failed to set render pass: output_indirect_lighting_pass" )
try:
opts.setOutputNormalsPass(False)
except AttributeError:
print( "Failed to set render pass: output_normals_pass" )
try:
opts.setOutputCausticsPass(False)
except AttributeError:
print( "Failed to set render pass: output_caustics_pass" )
try:
opts.setOutputShadowPass(False)
except AttributeError:
print( "Failed to set render pass: output_shadow_pass" )
try:
opts.setOutputAmbientOcclusionPass(False)
except AttributeError:
print( "Failed to set render pass: output_ambient_occlusion_pass" )
try:
opts.setAdvancedRendering( 38 )
except AttributeError:
print( "Failed to set render option: advanced_samples" )
try:
opts.setGlobalIllumination( 1.0 )
except AttributeError:
print( "Failed to set render option: engine_global_illumination" )
try:
opts.setRayBounces( 14 )
except AttributeError:
print( "Failed to set render option: engine_ray_bounces" )
try:
opts.setPixelBlur( 1.5 )
except AttributeError:
print( "Failed to set render option: engine_pixel_blur" )
try:
opts.setAntiAliasing( 3 )
except AttributeError:
print( "Failed to set render option: engine_anti_aliasing" )
try:
opts.setDofQuality( 3 )
except AttributeError:
print( "Failed to set render option: engine_dof_quality" )
try:
opts.setShadowQuality( 4.47200012207 )
except AttributeError:
print( "Failed to set render option: engine_shadow_quality" )
try:
opts.setCausticsQuality( 0.0 )
except AttributeError:
print( "Failed to set render option: engine_caustics_quality" )
try:
opts.setSharpShadows( True )
except AttributeError:
print( "Failed to set render option: engine_sharp_shadows" )
try:
opts.setSharperTextureFiltering( True )
except AttributeError:
print( "Failed to set render option: engine_sharper_texture_filtering" )
try:
opts.setGlobalIlluminationCache( True )
except AttributeError:
print( "Failed to set render option: engine_global_illumination_cache" )
for frame in range( 0, 1 ):
renderPath = path
renderPath = renderPath.replace( "%d", str(frame) )
lux.setAnimationFrame( frame )
lux.renderImage(path = renderPath, width = width, height = height, opts = opts)
print("Rendered Image: "+renderPath)
os.remove( "A:/RenderShot_Dir/Files/ctCmh6931TKgvV2/made_to_travel_black_rev4_92630339406526/made_to_travel_black_rev4_1561004076_Camera 2_0_.bip")
print ('Job Completed')
exit()
GET_NEW_FILE_PATH = file_transfer(SCENE_FILE_PATH)
if GET_NEW_FILE_PATH:
main(GET_NEW_FILE_PATH)
else:
main(SCENE_FILE_PATH)
|
[
"[email protected]"
] | |
1bbcc01ac088646277008e1eb2cd085872555dbc
|
8da91c26d423bacbeee1163ac7e969904c7e4338
|
/pyvisdk/do/map.py
|
6cd8f87633a30e6210e2784a05d6e7d2c56ec9bd
|
[] |
no_license
|
pexip/os-python-infi-pyvisdk
|
5d8f3a3858cdd61fb76485574e74ae525cdc7e25
|
1aadea0afbc306d09f6ecb9af0e683dbbf961d20
|
refs/heads/master
| 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,019 |
py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def Map(vim, *args, **kwargs):
'''Topological representation of entity relationships as a set of nodes and edges.'''
obj = vim.client.factory.create('{urn:sms}Map')
# do some validation checking...
if (len(args) + len(kwargs)) < 0:
raise IndexError('Expected at least 1 arguments got: %d' % len(args))
required = [ ]
optional = [ 'edge', 'lastUpdateTime', 'node', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
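# Usage sketch (assumption): 'vim' is an established pyvisdk connection and the
# keyword names follow the optional attribute list above.
# topology = Map(vim, edge=edges, lastUpdateTime=timestamp, node=nodes)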
|
[
"[email protected]"
] | |
b703d23d4eb23bc86961a3a4aeb666dabf0dda73
|
6f594cc963795c69d8da3c30ca580c0405ef2d6e
|
/bitwise/476NumberComplement/0.py
|
33f4c15e585b8d532a3126140c9cbb3e777b3817
|
[] |
no_license
|
lo-tp/leetcode
|
25933c5b25f64f881d43748d8b2763f69614a97f
|
4cc4d76c64e9d9aa3f53c5e9574e488c93e10a50
|
refs/heads/master
| 2022-09-07T20:32:58.487759 | 2022-09-05T03:39:50 | 2022-09-07T13:39:50 | 116,555,892 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 819 |
py
|
def helper(k):
    if k == 0:
return 1
else:
return 0
class Solution(object):
def findComplement(self, num):
"""
:type num: int
:rtype: int
"""
binaryForm = []
tem = num
while tem:
binaryForm.append(tem%2)
tem = tem >> 1
binaryForm.reverse()
complement=map(helper, binaryForm)
try:
index=complement.index(1)
complement=complement[index:]
complement.reverse()
ratio=1
sum=0
for i in complement:
sum+=i*ratio
ratio*=2
return sum
except ValueError:
return 0
solution = Solution()
print solution.findComplement(5)
print solution.findComplement(1)
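# For reference (a sketch, not part of the original solution): the same
# complement can be computed with a single XOR against an all-ones mask.
#   print 5 ^ ((1 << (5).bit_length()) - 1) # 2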
|
[
"[email protected]"
] | |
41bc879377fb025f109b4ead056627f4d30424db
|
799d8f9024926bb69a0226110740a56bf30929e3
|
/SoftuniAdvanced/ADVANCED/stacks_and_queues/crossroads.py
|
bacd369a82728fa8c60e20e0b88a0d8917517af0
|
[] |
no_license
|
velinovasen/python-adv-oop
|
a849cdff92793b45c6cca3279f1db853125b6ec8
|
1e3d7c194c2e8e24e4d7b07969db86e9973890cb
|
refs/heads/main
| 2023-01-01T11:16:55.572778 | 2020-10-25T18:06:34 | 2020-10-25T18:06:34 | 307,159,270 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 423 |
py
|
from collections import deque
green_light_time = int(input())
free_window = int(input())
total_time = green_light_time + free_window
crossroad = deque([])
car_inside = deque([])
cars_passed = 0
while True:
command = input()
if command == 'END':
break
    elif command == 'green':
        # let waiting cars into the crossroad for as long as the light is green
        remaining = green_light_time
        while remaining > 0 and crossroad:
            car_inside.append(crossroad.popleft())
            cars_passed += 1
            remaining -= 1
    else:
        crossroad.append(command)
|
[
"[email protected]"
] | |
1f01924e59a9a35f46bb3ddaa5e7f3a0b028cb8f
|
9d67cd5f8d3e0ffdd4334a6b9b67c93f8deca100
|
/dqn_new/configs/target7.py
|
70d57a14af0c64a3a6b36deb10a442f6035c220c
|
[] |
no_license
|
SiyuanLee/caps
|
0c300a8e5a9a661eca4b2f59cd38125ddc35b6d3
|
476802e18ca1c7c88f1e29ed66a90c350aa50c1f
|
refs/heads/master
| 2021-06-20T22:48:16.230354 | 2021-02-22T13:21:57 | 2021-02-22T13:21:57 | 188,695,489 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,819 |
py
|
"""
This is the example config file
"""
import numpy as np
# More one-char representation will be added in order to support
# other objects.
# The following a=10 is an example although it does not work now
# as I have not included a '10' object yet.
a = 10
# This is the map array that represents the map
# You have to fill the array in as an (m x n) matrix with all elements
# non-None. An irregularly shaped array may cause malfunctions.
# The currently available object indices are listed below; an index may
# fill more than one element in the array.
# 0: nothing
# 1: wall
# 2: ladder
# 3: coin
# 4: spike
# 5: triangle -------source
# 6: square ------ source
# 7: coin -------- target
# 8: princess -------source
# 9: player # elements(possibly more than 1) filled will be selected randomly to place the player
# unsupported indices will work as 0: nothing
map_array = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 5, 0, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 9, 9, 9, 9, 1, 9, 9, 9, 8, 1],
[1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1],
[1, 0, 0, 2, 0, 0, 0, 2, 0, 7, 1],
[1, 9, 9, 2, 9, 9, 9, 2, 9, 9, 1],
[1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1],
[1, 0, 2, 0, 1, 0, 2, 0, 0, 0, 1],
[1, 0, 2, 0, 1, 0, 2, 0, 6, 0, 1],
[1, 9, 9, 9, 1, 9, 9, 9, 9, 9, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
# set to true -> win when touching the object
# 0, 1, 2, 3, 4, 9 are not possible
end_game = {
7: True,
}
rewards = {
"positive": 0, # when collecting a coin
"win": 1, # endgame (win)
"negative": -25, # endgame (die)
"tick": 0 # living
}
######### dqn only ##########
# ensure correct import
import os
import sys
__file_path = os.path.abspath(__file__)
__dqn_dir = '/'.join(str.split(__file_path, '/')[:-2]) + '/'
sys.path.append(__dqn_dir)
__cur_dir = '/'.join(str.split(__file_path, '/')[:-1]) + '/'
from dqn_utils import PiecewiseSchedule
# load the random sampled obs
# import pickle
# pkl_file = __cur_dir + 'eval_obs_array_random.pkl'
# with open(pkl_file, 'rb') as f:
# eval_obs_array = pickle.loads(f.read())
def seed_func():
return np.random.randint(0, 1000)
num_timesteps = 2.5e7
learning_freq = 4
# training iterations to go
num_iter = num_timesteps / learning_freq
# piecewise learning rate
lr_multiplier = 1.0
learning_rate = PiecewiseSchedule([
(0, 2e-4 * lr_multiplier),
(num_iter / 2, 1e-4 * lr_multiplier),
(num_iter * 3 / 4, 5e-5 * lr_multiplier),
], outside_value=5e-5 * lr_multiplier)
# piecewise exploration schedule
exploration = PiecewiseSchedule([
(0, 1.0),
(num_iter / 2, 0.7),
(num_iter * 3 / 4, 0.1),
(num_iter * 7 / 8, 0.05),
], outside_value=0.05)
dqn_config = {
'seed': seed_func, # will override game settings
'num_timesteps': num_timesteps,
'replay_buffer_size': 1000000,
'batch_size': 32,
'gamma': 0.99,
'learning_starts': 8e5,
'learning_freq': learning_freq,
'frame_history_len': 4,
'target_update_freq': 10000,
'grad_norm_clipping': 10,
'learning_rate': learning_rate,
'exploration': exploration,
# 'eval_obs_array': eval_obs_array,
'room_q_interval': 1e4, # q_vals will be evaluated every room_q_interval steps
    'epoch_size': 5e4,  # choose freely
'config_name': str.split(__file_path, '/')[-1].replace('.py', '') # the config file name
}
map_config = {
'map_array': map_array,
'rewards': rewards,
'end_game': end_game,
'init_score': 0,
'init_lives': 1, # please don't change, not going to work
# configs for dqn
'dqn_config': dqn_config,
# work automatically only for aigym wrapped version
'fps': 1000,
'frame_skip': 1,
'force_fps': True, # set to true to make the game run as fast as possible
'display_screen': False,
'episode_length': 1200,
'episode_end_sleep': 0., # sec
}
|
[
"[email protected]"
] | |
88e75c46abb9494b3a6c173c9d4edbb771ad30b3
|
83951f7fd0bbaba9675bdf9ba6980504213bc1c6
|
/skim/crab/skim_QCD_Pt-15to7000_Flat2017_cfg.py
|
f4567da99bb4f470b3019a97ec8411522789b737
|
[] |
no_license
|
DryRun/DijetSkimmer
|
6db71583b969ecc64841da26107f43c4c734ca43
|
ead65f8e2a5d11f99f3e1a60a1d2f9a163e68491
|
refs/heads/main
| 2021-07-22T19:41:09.096943 | 2021-07-14T13:01:00 | 2021-07-14T13:01:00 | 171,485,404 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,340 |
py
|
import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = Configuration()
job_name = "DijetSkim_QCD_Pt-15to7000_Flat2017_1_0_1"
config.section_("General")
config.General.requestName = job_name
config.General.transferLogs = False
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Setup the custom executable
config.JobType.psetName = os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/PSet.py') # CRAB modifies this file to contain the input files and lumis
config.JobType.scriptExe = os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/crab_shell.sh') # CRAB then calls scriptExe jobId <scriptArgs>
config.JobType.scriptArgs = ["--source=mc", "--year=2017"]
config.JobType.inputFiles = [
os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/crab_meat.py'),
os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/NanoAODTools/scripts/haddnano.py'), #hadd nano will not be needed once nano tools are in cmssw
os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches_data.txt'),
os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches_mc.txt'),
os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/skim_branches.txt'),
#os.path.expandvars('$CMSSW_BASE/src/PhysicsTools/DijetSkimmer/skim/FrameworkJobReport.xml'),
]
config.JobType.outputFiles = ["nanoskim.root", "hists.root"]
config.JobType.sendPythonFolder = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/JetHT/Run2018C-Nano14Dec2018-v1/NANOAOD'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 4
#config.Data.totalUnits = 10
config.Data.outLFNDirBase = '/store/user/{}/{}'.format(getUsernameFromSiteDB(), job_name)
config.Data.publication = False
config.Data.outputDatasetTag = job_name
#config.Data.ignoreLocality = True
config.section_("Site")
config.Site.storageSite = "T3_US_Brown"
config.Data.inputDataset = '/QCD_Pt-15to7000_TuneCP5_Flat2017_13TeV_pythia8/RunIIFall17NanoAODv4-PU2017_12Apr2018_Nano14Dec2018_102X_mc2017_realistic_v6-v1/NANOAODSIM'
|
[
"[email protected]"
] | |
3f008a682cd719d81b222f36983c87310b67f103
|
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
|
/402.py
|
631b928370b0e9eabec5dcf010eca20cf6babf83
|
[] |
no_license
|
saleed/LeetCode
|
655f82fdfcc3000400f49388e97fc0560f356af0
|
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
|
refs/heads/master
| 2022-06-15T21:54:56.223204 | 2022-05-09T14:05:50 | 2022-05-09T14:05:50 | 209,430,056 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 751 |
py
|
class Solution(object):
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
dp=["" for _ in range(k+1) ]
for i in range(len(num)):
dp[i][0]=num[:i+1]
for j in range(1,k+1):
dp[0][j]=""
for i in range(1,len(num)):
for j in range(1,k+1)[::-1]:
dp[i][j]=min(dp[i-1][j-1],dp[i-1][j]+num[i])
# print(dp)
res=dp[len(num) - 1][k].lstrip('0')
if res=="":
return '0'
else:
return res
a=Solution()
num = "1432219"
k = 3
print(a.removeKdigits(num,k))
num = "10200"
k=1
print(a.removeKdigits(num,k))
test='00002000'
print(test.lstrip('0'))
|
[
"[email protected]"
] | |
958a75ab50cf92aa3f4243c6b47edba3f8c0b023
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4140/codes/1593_1802.py
|
997c8783e8cfb6c3d78bc17c96cca711247bd924
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 127 |
py
|
from math import *
balrog = int(input())
d1 = int(input())
d2 = int(input())
dano = int(sqrt(5 * d1) + pi ** (d2 / 3))
print(balrog - dano)
|
[
"[email protected]"
] | |
11a3b54a12af9a6d287edfead2ec004be81b18c7
|
5be992e6ac6bae2ebf938005d1cae93777825087
|
/space/research/genelab.py
|
34513f8b9468f68b837529823a4942d5eab865ce
|
[] |
no_license
|
a1aiintel/SpaceIsCool
|
0c88acaa966c85e31d73da8319966c218447158f
|
939641dbe626a2cbb9fcec845c18bfb3371118ad
|
refs/heads/master
| 2020-07-30T04:54:14.577501 | 2019-01-10T17:57:52 | 2019-01-10T17:57:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,083 |
py
|
import requests
from space import NASA_KEY
def search_genelab(query, type):
    """
    GeneLab provides a RESTful Application Programming Interface (API) to its full-text search
    capability, which provides the same functionality available through the GeneLab public data
    repository website. The API provides a choice of standardized web output formats, such as
    JavaScript Object Notation (JSON) or Hyper Text Markup Language (HTML), of the search
    results. The GeneLab Search API can also federate with other heterogeneous external
    bioinformatics databases, such as the National Institutes of Health (NIH) / National Center
    for Biotechnology Information's (NCBI) Gene Expression Omnibus (GEO); the European
    Bioinformatics Institute's (EBI) Proteomics Identification (PRIDE); the Argonne National
    Laboratory's (ANL) Metagenomics Rapid Annotations using Subsystems Technology (MG-RAST).
    :param query: full-text search term
    :param type: data source to search, e.g. "cgene"
    :return: the requests.Response for the search call
    """
    # The parameter names below are inferred from the sample query that was
    # hard-coded here originally (term=mouse%20liver&type=cgene).
    url = "https://genelab-data.ndc.nasa.gov/genelab/data/search"
    return requests.get(url, params={"term": query, "type": type})
|
[
"[email protected]"
] | |
25328fb0492fe750697b3767b53d440d4e3da0b8
|
e0df2bc703d0d02423ea68cf0b8c8f8d22d5c163
|
/ScientificComputing/ch14/filter_firdesign_sinc1.py
|
cfb39fc541dac9e8bb9246523bf73a615acecbeb
|
[] |
no_license
|
socrates77-sh/learn
|
a5d459cb9847ba3b1bc4f9284ce35d4207d8aa8b
|
ae50978023f6b098b168b8cca82fba263af444aa
|
refs/heads/master
| 2022-12-16T16:53:50.231577 | 2019-07-13T13:52:42 | 2019-07-13T13:52:42 | 168,442,963 | 0 | 0 | null | 2022-12-08T05:18:37 | 2019-01-31T01:30:06 |
HTML
|
UTF-8
|
Python
| false | false | 363 |
py
|
# -*- coding: utf-8 -*-
import scipy.signal as signal
import numpy as np
import pylab as pl
def h_ideal(n, fc):
return 2*fc*np.sinc(2*fc*np.arange(0, n, 1.0))
b = h_ideal(30, 0.25)
w, h = signal.freqz(b, 1)
pl.figure(figsize=(8, 4))
pl.plot(w/2/np.pi, 20*np.log10(np.abs(h)))
pl.xlabel(u"正规化频率 周期/取样")
pl.ylabel(u"幅值(dB)")
pl.show()
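# Illustrative follow-up (a sketch, not part of the original script): the
# designed taps `b` can be applied to a signal with scipy.signal.lfilter.
#   x = np.random.randn(1000)
#   y = signal.lfilter(b, 1, x)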
|
[
"[email protected]"
] | |
172d528877e46d3a15c44ea0bd68dd96091dec79
|
77676610410e479a3214669b082b5f410b499e24
|
/apps/main/migrations/0010_auto_20170424_0645.py
|
cfeb0350a6e5aedc05e7e5c8f745933e2474e75b
|
[
"Apache-2.0"
] |
permissive
|
StepicOrg/stepik-extensions
|
e76b2ee033275b33bf9d8c8deeac495d3a6bde46
|
5825bc9b2444ad4690681964d1bed172706f8796
|
refs/heads/develop
| 2023-04-05T12:43:28.114500 | 2021-04-19T12:57:30 | 2021-04-19T12:57:30 | 82,687,804 | 5 | 2 |
Apache-2.0
| 2021-04-19T12:58:47 | 2017-02-21T14:17:00 |
JavaScript
|
UTF-8
|
Python
| false | false | 653 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-24 06:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0009_auto_20170422_2002'),
]
operations = [
migrations.RemoveField(
model_name='extension',
name='categories',
),
migrations.RemoveField(
model_name='extension',
name='user_groups',
),
migrations.DeleteModel(
name='Category',
),
migrations.DeleteModel(
name='Extension',
),
]
|
[
"[email protected]"
] | |
b01cb42df40d9efc85d03a815e799ee14b6e8fd8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03339/s273941488.py
|
c82cd4ca992be5faaa424d10d255497c4a9fd014
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 222 |
py
|
n = int(input())
# 1 marks a person facing west ("W"); everyone else faces east
s = [(i == "W") * 1 for i in list(input())]
# c[i] = number of west-facing people among the first i
c = [0] * (n + 1)
for i in range(n):
    c[i + 1] = c[i] + s[i]
ans = float("inf")
for i in range(n):
    # pick person i as the leader: west-facing people before them plus
    # east-facing people after them all have to turn around
    t = c[i] + (n - i - 1 - c[-1] + c[i + 1])
    ans = min(ans, t)
print(ans)
|
[
"[email protected]"
] | |
297b49422f62295813f98787154517148273d665
|
a59deecc5d91214601c38bd170605d9d080e06d2
|
/14-dictionaries/08-copy()/app.py
|
2a626c1bb68207e6df9b951c1b8fd7d46c37c8b5
|
[] |
no_license
|
reyeskevin9767/modern-python-bootcamp-2018
|
a6a3abdb911716d19f6ab516835ed1a04919a13d
|
d0234f10c4b8aaa6a20555348aec7e3571e3d4e7
|
refs/heads/master
| 2022-12-03T18:48:50.035054 | 2020-08-09T03:00:55 | 2020-08-09T03:00:55 | 286,109,881 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 214 |
py
|
# * Copy Method
d = dict(a=1, b=2, c=3)
c = d.copy()
print(c) # {'a': 1, 'b': 2, 'c': 3}
print(c is d) # False
e = dict(a=6, b=7, c=8)
f = e.copy()
print(e) # {'a': 6, 'b': 7, 'c': 8}
print(e is f) # False
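# * Note (a sketch beyond the lesson above): copy() is shallow, so nested
# objects are shared between the copies.
g = dict(nums=[1, 2])
h = g.copy()
h['nums'].append(3)
print(g) # {'nums': [1, 2, 3]} -> the inner list is shared
print(g is h) # False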
|
[
"[email protected]"
] | |
0826bb49bda6584cc57d9ea1205a457341b5e9ac
|
4e3c976773526fd610d64ffb83589bccfaee5e68
|
/sponge-integration-tests/examples/core/filters_event_pattern.py
|
32eae8faeab3bf1d4d3fa3664b9a44fc5a0f1edc
|
[
"Apache-2.0"
] |
permissive
|
softelnet/sponge
|
2313d2328953fcff49a002e727bb803757870627
|
7190f23ae888bbef49d0fbb85157444d6ea48bcd
|
refs/heads/master
| 2022-10-28T16:19:55.619882 | 2021-09-16T19:50:08 | 2021-09-16T19:50:08 | 95,256,030 | 10 | 2 |
Apache-2.0
| 2022-10-04T23:55:09 | 2017-06-23T20:58:49 |
Java
|
UTF-8
|
Python
| false | false | 1,408 |
py
|
"""
Sponge Knowledge Base
Filters - Event pattern
"""
from java.util.concurrent.atomic import AtomicInteger
def onInit():
# Variables for assertions only
sponge.setVariable("nameCount", AtomicInteger(0))
sponge.setVariable("patternCount", AtomicInteger(0))
sponge.setVariable("acceptedCount", AtomicInteger(0))
sponge.setVariable("notAcceptedCount", AtomicInteger(0))
class NameFilter(Filter):
def onConfigure(self):
self.withEvent("a1")
def onAccept(self, event):
sponge.getVariable("nameCount").incrementAndGet()
return True
class PatternFilter(Filter):
def onConfigure(self):
self.withEvent("a.+")
def onAccept(self, event):
sponge.getVariable("patternCount").incrementAndGet()
return False
class AcceptedTrigger(Trigger):
def onConfigure(self):
self.withEvent(".+")
def onRun(self, event):
self.logger.info("accepted {}", event.name)
if event.name != EventName.STARTUP:
sponge.getVariable("acceptedCount").incrementAndGet()
class NotAcceptedTrigger(Trigger):
def onConfigure(self):
self.withEvent("a.+")
def onRun(self, event):
sponge.getVariable("notAcceptedCount").incrementAndGet()
def onStartup():
for name in ["a1", "b1", "a2", "b2", "a", "b", "a1", "b2"]:
sponge.event(name).send()
|
[
"[email protected]"
] | |
ce6bfe2a9145cfc6f226201d4923551145eb81a7
|
479559fc4d4724a7145cfb8ecdaa5cdc55e46761
|
/tensorflow/python/data/experimental/ops/interleave_ops.py
|
257639a2560aa5248ffb97bdeb46add625c96113
|
[
"Apache-2.0"
] |
permissive
|
mudassirej/tensorflow
|
434818cc68c754c40d2e3b014daf1e3974d26698
|
bd47c759176f0039026fd5cac8db247bf452de28
|
refs/heads/master
| 2020-06-14T10:55:42.751443 | 2019-07-03T04:07:46 | 2019-07-03T04:12:59 | 194,978,111 | 1 | 0 |
Apache-2.0
| 2019-07-03T04:13:09 | 2019-07-03T04:13:09 | null |
UTF-8
|
Python
| false | false | 11,807 |
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-deterministic dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import random_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@deprecation.deprecated(
None,
"Use `tf.data.Dataset.interleave(map_func, cycle_length, block_length, "
"num_parallel_calls=tf.data.experimental.AUTOTUNE)` instead. If sloppy "
"execution is desired, use `tf.data.Options.experimental_determinstic`.")
@tf_export("data.experimental.parallel_interleave")
def parallel_interleave(map_func,
cycle_length,
block_length=1,
sloppy=False,
buffer_output_elements=None,
prefetch_input_elements=None):
"""A parallel version of the `Dataset.interleave()` transformation.
`parallel_interleave()` maps `map_func` across its input to produce nested
datasets, and outputs their elements interleaved. Unlike
`tf.data.Dataset.interleave`, it gets elements from `cycle_length` nested
datasets in parallel, which increases the throughput, especially in the
presence of stragglers. Furthermore, the `sloppy` argument can be used to
improve performance, by relaxing the requirement that the outputs are produced
in a deterministic order, and allowing the implementation to skip over nested
datasets whose elements are not readily available when requested.
Example usage:
```python
# Preprocess 4 files concurrently.
filenames = tf.data.Dataset.list_files("/path/to/data/train*.tfrecords")
dataset = filenames.apply(
tf.data.experimental.parallel_interleave(
lambda filename: tf.data.TFRecordDataset(filename),
cycle_length=4))
```
WARNING: If `sloppy` is `True`, the order of produced elements is not
deterministic.
Args:
map_func: A function mapping a nested structure of tensors to a `Dataset`.
cycle_length: The number of input `Dataset`s to interleave from in parallel.
block_length: The number of consecutive elements to pull from an input
`Dataset` before advancing to the next input `Dataset`.
sloppy: If false, elements are produced in deterministic order. Otherwise,
the implementation is allowed, for the sake of expediency, to produce
elements in a non-deterministic order.
buffer_output_elements: The number of elements each iterator being
interleaved should buffer (similar to the `.prefetch()` transformation for
each interleaved iterator).
prefetch_input_elements: The number of input elements to transform to
iterators before they are needed for interleaving.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return readers.ParallelInterleaveDataset(
dataset, map_func, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements)
return _apply_fn
class _DirectedInterleaveDataset(dataset_ops.Dataset):
"""A substitute for `Dataset.interleave()` on a fixed list of datasets."""
def __init__(self, selector_input, data_inputs):
self._selector_input = selector_input
self._data_inputs = list(data_inputs)
first_output_types = dataset_ops.get_legacy_output_types(data_inputs[0])
first_output_classes = dataset_ops.get_legacy_output_classes(data_inputs[0])
for data_input in data_inputs[1:]:
if (dataset_ops.get_legacy_output_types(data_input) != first_output_types
or dataset_ops.get_legacy_output_classes(data_input)
!= first_output_classes):
raise TypeError("All datasets must have the same type and class.")
output_shapes = dataset_ops.get_legacy_output_shapes(self._data_inputs[0])
for data_input in self._data_inputs[1:]:
output_shapes = nest.pack_sequence_as(output_shapes, [
ts1.most_specific_compatible_shape(ts2) for (ts1, ts2) in zip(
nest.flatten(output_shapes),
nest.flatten(dataset_ops.get_legacy_output_shapes(data_input)))
])
self._element_spec = structure.convert_legacy_structure(
first_output_types, output_shapes, first_output_classes)
super(_DirectedInterleaveDataset, self).__init__()
def _as_variant_tensor(self):
# pylint: disable=protected-access
return (
gen_experimental_dataset_ops.experimental_directed_interleave_dataset(
self._selector_input._variant_tensor,
[data_input._variant_tensor for data_input in self._data_inputs],
**self._flat_structure))
# pylint: enable=protected-access
def _inputs(self):
return [self._selector_input] + self._data_inputs
@property
def element_spec(self):
return self._element_spec
@tf_export("data.experimental.sample_from_datasets", v1=[])
def sample_from_datasets_v2(datasets, weights=None, seed=None):
"""Samples elements at random from the datasets in `datasets`.
Args:
datasets: A list of `tf.data.Dataset` objects with compatible structure.
weights: (Optional.) A list of `len(datasets)` floating-point values where
`weights[i]` represents the probability with which an element should be
sampled from `datasets[i]`, or a `tf.data.Dataset` object where each
element is such a list. Defaults to a uniform distribution across
`datasets`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
A dataset that interleaves elements from `datasets` at random, according to
`weights` if provided, otherwise with uniform probability.
Raises:
TypeError: If the `datasets` or `weights` arguments have the wrong type.
ValueError: If the `weights` argument is specified and does not match the
length of the `datasets` element.
"""
num_datasets = len(datasets)
if not isinstance(weights, dataset_ops.DatasetV2):
if weights is None:
# Select inputs with uniform probability.
logits = [[1.0] * num_datasets]
else:
# Use the given `weights` as the probability of choosing the respective
# input.
weights = ops.convert_to_tensor(weights, name="weights")
if weights.dtype not in (dtypes.float32, dtypes.float64):
raise TypeError("`weights` must be convertible to a tensor of "
"`tf.float32` or `tf.float64` elements.")
if not weights.shape.is_compatible_with([num_datasets]):
raise ValueError(
"`weights` must be a vector of length `len(datasets)`.")
# The `stateless_multinomial()` op expects log-probabilities, as opposed
# to weights.
logits = array_ops.expand_dims(math_ops.log(weights, name="logits"), 0)
# NOTE(mrry): We only specialize when `weights` is not a `Dataset`. When it
# is a `Dataset`, it is possible that evaluating it has a side effect the
# user depends on.
if len(datasets) == 1:
return datasets[0]
def select_dataset_constant_logits(seed):
return array_ops.squeeze(
gen_stateless_random_ops.stateless_multinomial(logits, 1, seed=seed),
axis=[0, 1])
selector_input = dataset_ops.MapDataset(
random_ops.RandomDataset(seed).batch(2),
select_dataset_constant_logits,
use_inter_op_parallelism=False)
else:
# Use each element of the given `weights` dataset as the probability of
# choosing the respective input.
# The `stateless_multinomial()` op expects log-probabilities, as opposed to
# weights.
logits_ds = weights.map(lambda *p: math_ops.log(p, name="logits"))
def select_dataset_varying_logits(logits, seed):
return array_ops.squeeze(
gen_stateless_random_ops.stateless_multinomial(logits, 1, seed=seed),
axis=[0, 1])
logits_and_seeds = dataset_ops.Dataset.zip(
(logits_ds, random_ops.RandomDataset(seed).batch(2)))
selector_input = dataset_ops.MapDataset(
logits_and_seeds,
select_dataset_varying_logits,
use_inter_op_parallelism=False)
return _DirectedInterleaveDataset(selector_input, datasets)
@tf_export(v1=["data.experimental.sample_from_datasets"])
def sample_from_datasets_v1(datasets, weights=None, seed=None):
return dataset_ops.DatasetV1Adapter(
sample_from_datasets_v2(datasets, weights, seed))
sample_from_datasets_v1.__doc__ = sample_from_datasets_v2.__doc__
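# Illustrative usage sketch (`dataset_a` / `dataset_b` are hypothetical
# datasets of compatible structure): draw ~75% of elements from the first.
#   mixed = tf.data.experimental.sample_from_datasets(
#       [dataset_a, dataset_b], weights=[0.75, 0.25], seed=42)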
@tf_export("data.experimental.choose_from_datasets", v1=[])
def choose_from_datasets_v2(datasets, choice_dataset):
"""Creates a dataset that deterministically chooses elements from `datasets`.
For example, given the following datasets:
```python
datasets = [tf.data.Dataset.from_tensors("foo").repeat(),
tf.data.Dataset.from_tensors("bar").repeat(),
tf.data.Dataset.from_tensors("baz").repeat()]
# Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`.
choice_dataset = tf.data.Dataset.range(3).repeat(3)
result = tf.data.experimental.choose_from_datasets(datasets, choice_dataset)
```
The elements of `result` will be:
```
"foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"
```
Args:
datasets: A list of `tf.data.Dataset` objects with compatible structure.
choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between
`0` and `len(datasets) - 1`.
Returns:
A dataset that interleaves elements from `datasets` according to the values
of `choice_dataset`.
Raises:
TypeError: If the `datasets` or `choice_dataset` arguments have the wrong
type.
"""
if not structure.are_compatible(choice_dataset.element_spec,
structure.TensorStructure(dtypes.int64, [])):
raise TypeError("`choice_dataset` must be a dataset of scalar "
"`tf.int64` tensors.")
return _DirectedInterleaveDataset(choice_dataset, datasets)
@tf_export(v1=["data.experimental.choose_from_datasets"])
def choose_from_datasets_v1(datasets, choice_dataset):
return dataset_ops.DatasetV1Adapter(
choose_from_datasets_v2(datasets, choice_dataset))
choose_from_datasets_v1.__doc__ = choose_from_datasets_v2.__doc__
# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# these aliases in place.
choose_from_datasets = choose_from_datasets_v1
sample_from_datasets = sample_from_datasets_v1
|
[
"[email protected]"
] | |
bd7d1491e809be7611d09d0d0e8578f497fb3520
|
e811da3715d43e23a4548490aa27be40ac21d6e4
|
/handlers/base/__init__.py
|
8f1904288c671963f969ea59e55106edced6d3da
|
[] |
no_license
|
atiger808/tornado
|
2a2ff73957d6fb97cd91222038f499ee8ed325f5
|
77e981ee70a7c7b3903bec82d91109f163bb2a43
|
refs/heads/master
| 2020-04-04T09:22:07.007710 | 2018-11-02T05:04:00 | 2018-11-02T05:04:00 | 155,815,465 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 149 |
py
|
# _*_ coding: utf-8 _*_
# @Time : 2018/6/26 22:52
# @Author : Ole211
# @Site :
# @File : __init__.py.py
# @Software : PyCharm
|
[
"[email protected]"
] | |
a9b098aaf599f218d0e3b35cae1d246bcbeb2c50
|
a66b69c3f9da9779ae80f347b61f47e3bc5ba145
|
/day1002/A04_loop.py
|
311112630c8c83899668600713293b1a7f31e1f9
|
[] |
no_license
|
kyungtae92/python-basic
|
c841d9c9c6196b01da3de007c1298fe2c4b8f693
|
80a2051e37b6e87c9dbfd332c4b2946089ff0d5c
|
refs/heads/master
| 2020-11-25T08:01:22.156661 | 2019-12-17T08:25:38 | 2019-12-17T08:25:38 | 228,567,120 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 352 |
py
|
import os  # gives Python access to some operating-system features (commands)
while True:
dan = input('input gugudan >> ')
if dan.isalpha() == True or dan == '':
os.system('cls')
else:
break
dan = int(dan)
i = 0
for i in range(1, 10): # for i in range(1, 10, 1):
print("%d * %d = %2d" % (dan, i, dan * i))
|
[
"[email protected]"
] | |
d5d4dc11f80514143b96cfebbcab39e53506dd9b
|
7f9811857538858ea5c6baaefdccf424c2dea3c2
|
/INTRODUCTION_TO_DS/chapter5_search/linear_search.py
|
b3c44483d7fd39c6fc66b263858905c46d9c2969
|
[] |
no_license
|
owari-taro/python_algorithm
|
ec4d0c737eefdb4f5ddc140c4dfe81fcfb2ee5af
|
5af19f7dabe6224f0d06b7c89f38c528a08cf903
|
refs/heads/master
| 2021-11-23T07:23:08.958737 | 2021-08-31T00:56:07 | 2021-08-31T00:56:07 | 231,067,479 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 254 |
py
|
from typing import List
def binary_search(a: List, x, lo=0, hi=None):
    # completed along the lines of bisect.bisect_right: returns the
    # insertion point for x that keeps a sorted
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (hi + lo) // 2
        if x < a[mid]:
            hi = mid
        else:
            lo = mid + 1
    return lo
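# Quick check of the completed search; it returns the insertion point to the
# right of existing entries, like bisect.bisect_right.
if __name__ == "__main__":
    data = [1, 3, 5, 7]
    print(binary_search(data, 5)) # 3
    print(binary_search(data, 0)) # 0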
|
[
"[email protected]"
] | |
00e11d2488cdcb01be07386274adfad59acacc43
|
0cbf36f06f5316326ef635f14c887cd2849800db
|
/typings/celery/app/registry.pyi
|
33985b1be0f526d3403d3531c9b515b239c0b430
|
[
"Apache-2.0"
] |
permissive
|
espritgames/celery_types
|
b59545a7cd28f06e766a1a520590f3bbc155e82f
|
4d4064eb78d2a1a3e79a5fefe111f59ad4d3c9b9
|
refs/heads/main
| 2023-08-18T20:11:33.992509 | 2021-10-04T11:21:49 | 2021-10-04T11:21:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 70 |
pyi
|
from typing import Any, Dict
class TaskRegistry(Dict[str, Any]): ...
|
[
"[email protected]"
] | |
31f505bcd3e2862f943b2fb2fb39a976fcf80f18
|
7ba05e73515c14fb8d2f3d056b51102131171a11
|
/First_steps_March_Excercise/Akvarium.py
|
c65b850ffd42b8483b25d7fd5129ca00ac7b1aab
|
[] |
no_license
|
gyurel/SoftUni-Basics-and-Fundamentals
|
bd6d5fa8c9d0cc51f241393afd418633a66c65dc
|
184fc5dfab2fdd410aa8593f4c562fd56211c727
|
refs/heads/main
| 2023-07-05T11:16:58.966841 | 2021-08-31T19:25:40 | 2021-08-31T19:25:40 | 401,485,125 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 727 |
py
|
# Four lines are read from the console:
# 1. Length in cm - an integer
# 2. Width in cm - an integer
# 3. Height in cm - an integer
# 4. Percentage of occupied volume - a real number
length = int(input())
width = int(input())
height = int(input())
occupied_percentage = float(input()) / 100
volume_in_litres = length * width * height / 1000
# Write a program that calculates the litres of water needed to fill the aquarium.
needed_water = volume_in_litres - (volume_in_litres * occupied_percentage)
print(needed_water)
|
[
"[email protected]"
] | |
2f11b0f81351e4f628d1266ab215c514e432d2f2
|
7b0413547fb0e4766febcc6a7f0010fafe025fb6
|
/medium/course_schedule.py
|
52ca3f20847247a445eb480dcaa842522eed1cac
|
[] |
no_license
|
theeric80/LeetCode
|
b00d4bace7c48c409bc6b2f57321aea7b7106f35
|
e05321d8c2143d35279136d3999e1be1e7005690
|
refs/heads/master
| 2021-01-19T00:51:20.608326 | 2016-06-30T05:32:44 | 2016-06-30T05:32:44 | 42,165,229 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,037 |
py
|
class UndirectedGraphNode(object):
def __init__(self, x):
self.label = x
self.neighbors = []
class Solution(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
self.cycle = False
G = self.buildGraph(numCourses, prerequisites)
result, marked, on_stack = [], [False]*len(G), [False]*len(G)
for v in G:
if not marked[v.label]:
self.topological_sort(G, v, marked, on_stack, result)
result.reverse()
return not self.cycle
def buildGraph(self, numCourses, prerequisites):
G = [UndirectedGraphNode(i) for i in xrange(numCourses)]
for u, v in prerequisites:
G[u].neighbors.append(G[v])
return G
def topological_sort(self, G, v, marked, on_stack, result):
label = v.label
marked[label] = True
on_stack[label] = True
for w in v.neighbors:
if self.cycle:
return
if not marked[w.label]:
self.topological_sort(G, w, marked, on_stack, result)
elif on_stack[w.label]:
self.cycle = True
on_stack[label] = False
result.append(label)
def dfs(self, G, v):
result, marked = [], [False]*len(G)
s = [v]
while s:
node = s.pop()
label = node.label
if not marked[label]:
marked[label] = True
result.append(label)
for neighbor in node.neighbors:
s.append(neighbor)
print '->'.join(str(i) for i in result)
def main():
import sys
from os.path import join, abspath
sys.path.append(join('..', 'common'))
inputs = [(2, [[1,0]])]
for numCourses, prerequisites in inputs:
result = Solution().canFinish(numCourses, prerequisites)
print result
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
07ae3fd425deb6e5c593ee9d9ae487d5398b8f25
|
e3765def4a180f1d51eaef3884448b0bb9be2cd3
|
/example/12.3.1_create_pygame_window/alien_invasion.py
|
136e506214bafb12d29f556453abfc4bb31417aa
|
[] |
no_license
|
spearfish/python-crash-course
|
cbeb254efdf0c1ab37d8a7d2fa0409194f19fa2b
|
66bc42d41395cc365e066a597380a96d3282d30b
|
refs/heads/master
| 2023-07-14T11:04:49.276764 | 2021-08-20T10:02:27 | 2021-08-20T10:02:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 424 |
py
|
#!/usr/bin/env python3
# modules
import sys
import pygame
def run_game() :
pygame.init()
# pygame.display is a object that handles display.
screen = pygame.display.set_mode((1200,800))
pygame.display.set_caption('Alien Invasion')
while True :
for event in pygame.event.get() :
if event.type == pygame.QUIT :
sys.exit()
pygame.display.flip()
run_game()
|
[
"[email protected]"
] | |
27cc4cebf599c8d3b7a61be91fd2e525d3304487
|
6d967da5fd95aa5e66ddbb211da40041006ca5ec
|
/myvenv/Lib/site-packages/pip/_vendor/packaging/markers.py
|
8ef134ba7b10dc55e4de37dd77c217c87ff3f97e
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
gevorkyannaira/my-first-blog
|
96e4458045a1dd0aa9c1f3ec69f4c829428200e0
|
42ab12a8c2b0e402b5fa1b8e5a7cdd2629d06c16
|
refs/heads/master
| 2022-09-03T21:14:18.946448 | 2020-05-18T18:15:39 | 2020-05-18T18:15:39 | 264,909,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,735 |
py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from pip._vendor.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from pip._vendor.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pip._vendor.pyparsing import Literal as L # noqa
from ._compat import string_types
from ._typing import MYPY_CHECK_RUNNING
from .specifiers import Specifier, InvalidSpecifier
if MYPY_CHECK_RUNNING:  # pragma: no cover
    from typing import Any, Callable, Dict, List, Optional, Tuple, Union
    Operator = Callable[[str, str], bool]
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node(object):
def __init__(self, value):
        # type: (Any) -> None
        self.value = value
    def __str__(self):
        # type: () -> str
        return str(self.value)
    def __repr__(self):
        # type: () -> str
        return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
    def serialize(self):
        # type: () -> str
raise NotImplementedError
class Variable(Node):
def serialize(self):
        # type: () -> str
return str(self)
class Value(Node):
def serialize(self):
        # type: () -> str
return '"{0}"'.format(self)
class Op(Node):
def serialize(self):
        # type: () -> str
return str(self)
VARIABLE = (
L("implementation_version")
| L("platform_python_implementation")
| L("implementation_name")
| L("python_full_version")
| L("platform_release")
| L("platform_version")
| L("platform_machine")
| L("platform_system")
| L("python_version")
| L("sys_platform")
| L("os_name")
    | L("os.name")  # PEP-345
| L("sys.platform") # PEP-345
| L("platform.version") # PEP-345
| L("platform.machine") # PEP-345
| L("platform.python_implementation") # PEP-345
    | L("python_implementation")  # undocumented setuptools legacy
    | L("extra")  # PEP-508
)
ALIASES = {
"os.name": "os_name",
"sys.platform": "sys_platform",
"platform.version": "platform_version",
"platform.machine": "platform_machine",
"platform.python_implementation": "platform_python_implementation",
"python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
    # type: (Union[ParseResults, List[Any]]) -> List[Any]
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
    # type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str
assert isinstance(marker, (list, tuple, string_types))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself it's own list. In that case we want skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}  # type: Dict[str, Operator]
def _eval_op(lhs, op, rhs):
    # type: (str, Op, str) -> bool
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
    oper = _operators.get(op.serialize())  # type: Optional[Operator]
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
class Undefined(object):
    pass
_undefined = Undefined()
def _get_env(environment, name):
    # type: (Dict[str, str], str) -> str
    value = environment.get(name, _undefined)  # type: Union[str, Undefined]
    if isinstance(value, Undefined):
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
    # type: (List[Any], Dict[str, str]) -> bool
    groups = [[]]  # type: List[List[bool]]
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info):
    # type: (sys._version_info) -> str
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment():
    # type: () -> Dict[str, str]
    if hasattr(sys, "implementation"):
        # Ignoring the `sys.implementation` reference for type checking due to
        # mypy not liking that the attribute doesn't exist in Python 2.7 when
        # run with the `--py27` flag.
        iver = format_full_version(sys.implementation.version)  # type: ignore
        implementation_name = sys.implementation.name  # type: ignore
else:
iver = "0"
implementation_name = ""
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
        "python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
class Marker(object):
def __init__(self, marker):
        # type: (str) -> None
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc : e.loc + 8]
)
raise InvalidMarker(err_str)
def __str__(self):
        # type: () -> str
        return _format_marker(self._markers)
    def __repr__(self):
        # type: () -> str
        return "<Marker({0!r})>".format(str(self))
    def evaluate(self, environment=None):
        # type: (Optional[Dict[str, str]]) -> bool
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
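# Illustrative usage sketch of the Marker API defined above:
#   m = Marker('python_version >= "2.7" and os_name == "posix"')
#   m.evaluate() # True on a POSIX interpreter at 2.7 or newer
#   m.evaluate({"os_name": "nt"}) # override part of the environment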
|
[
"[email protected]"
] | |
b8b49ba5bc255e5615ec2889ec70661333b1a2c2
|
4252102a1946b2ba06d3fa914891ec7f73570287
|
/pylearn2/packaged_dependencies/theano_linear/unshared_conv/test_localdot.py
|
6b47b5b33566ea24783e9ae4019290a4fabb845d
|
[] |
no_license
|
lpigou/chalearn2014
|
21d487f314c4836dd1631943e20f7ab908226771
|
73b99cdbdb609fecff3cf85e500c1f1bfd589930
|
refs/heads/master
| 2020-05-17T00:08:11.764642 | 2014-09-24T14:42:00 | 2014-09-24T14:42:00 | 24,418,815 | 2 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,927 |
py
|
import nose
import unittest
import numpy as np
import theano
from localdot import LocalDot
from ..test_matrixmul import SymbolicSelfTestMixin
class TestLocalDot32x32(unittest.TestCase, SymbolicSelfTestMixin):
channels = 3
bsize = 10 # batch size
imshp = (32, 32)
ksize = 5
nkern_per_group = 16
subsample_stride = 1
ngroups = 1
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
def setUp(self):
np.random.seed(234)
assert self.imshp[0] == self.imshp[1]
fModulesR = (self.imshp[0] - self.ksize + 1) // self.subsample_stride
#fModulesR += 1 # XXX GpuImgActs crashes w/o this??
fModulesC = fModulesR
self.fshape = (fModulesR, fModulesC, self.channels // self.ngroups,
self.ksize, self.ksize, self.ngroups, self.nkern_per_group)
self.ishape = (self.ngroups, self.channels // self.ngroups,
self.imshp[0], self.imshp[1], self.bsize)
self.hshape = (self.ngroups, self.nkern_per_group, fModulesR, fModulesC,
self.bsize)
filters = theano.shared(self.rand(self.fshape))
self.A = LocalDot(filters, self.imshp[0], self.imshp[1],
subsample=(self.subsample_stride, self.subsample_stride))
self.xlval = self.rand((self.hshape[-1],) + self.hshape[:-1])
self.xrval = self.rand(self.ishape)
self.xl = theano.shared(self.xlval)
self.xr = theano.shared(self.xrval)
# N.B. the tests themselves come from SymbolicSelfTestMixin
class TestLocalDotLargeGray(TestLocalDot32x32):
channels = 1
bsize = 128
imshp = (256, 256)
ksize = 9
nkern_per_group = 16
subsample_stride = 2
ngroups = 1
n_patches = 3000
def rand(self, shp):
return np.random.rand(*shp).astype('float32')
# not really a test, but important code to support
# Currently exposes error, by e.g.:
# CUDA_LAUNCH_BLOCKING=1
# THEANO_FLAGS=device=gpu,mode=DEBUG_MODE
# nosetests -sd test_localdot.py:TestLocalDotLargeGray.run_autoencoder
def run_autoencoder(
self,
n_train_iter=10000, # -- make this small to be a good unit test
rf_shape=(9, 9),
n_filters=1024,
dtype='float32',
module_stride=2,
lr=0.01,
show_filters=True,
):
if show_filters:
# import here to fail right away
import matplotlib.pyplot as plt
try:
import skdata.vanhateren.dataset
except ImportError:
raise nose.SkipTest()
# 1. Get a set of image patches from the van Hateren data set
print 'Loading van Hateren images'
n_images = 50
vh = skdata.vanhateren.dataset.Calibrated(n_images)
patches = vh.raw_patches((self.n_patches,) + self.imshp,
items=vh.meta[:n_images],
rng=np.random.RandomState(123),
)
patches = patches.astype('float32')
patches /= patches.reshape(self.n_patches, self.imshp[0] * self.imshp[1])\
.max(axis=1)[:, None, None]
# TODO: better local contrast normalization
if 0 and show_filters:
plt.subplot(2, 2, 1); plt.imshow(patches[0], cmap='gray')
plt.subplot(2, 2, 2); plt.imshow(patches[1], cmap='gray')
plt.subplot(2, 2, 3); plt.imshow(patches[2], cmap='gray')
plt.subplot(2, 2, 4); plt.imshow(patches[3], cmap='gray')
plt.show()
# -- Convert patches to localdot format:
# groups x colors x rows x cols x images
patches5 = patches[:, :, :, None, None].transpose(3, 4, 1, 2, 0)
print 'Patches shape', patches.shape, self.n_patches, patches5.shape
# 2. Set up an autoencoder
print 'Setting up autoencoder'
hid = theano.tensor.tanh(self.A.rmul(self.xl))
out = self.A.rmul_T(hid)
cost = ((out - self.xl) ** 2).sum()
params = self.A.params()
gparams = theano.tensor.grad(cost, params)
train_updates = [(p, p - lr / self.bsize * gp)
for (p, gp) in zip(params, gparams)]
if 1:
train_fn = theano.function([], [cost], updates=train_updates)
else:
train_fn = theano.function([], [], updates=train_updates)
theano.printing.debugprint(train_fn)
# 3. Train it
params[0].set_value(0.001 * params[0].get_value())
for ii in xrange(0, self.n_patches, self.bsize):
self.xl.set_value(patches5[:, :, :, :, ii:ii + self.bsize], borrow=True)
cost_ii, = train_fn()
print 'Cost', ii, cost_ii
if 0 and show_filters:
self.A.imshow_gray()
plt.show()
assert cost_ii < 0 # TODO: determine a threshold for detecting regression bugs
|
[
"[email protected]"
] | |
d063d7cbffb4226f8efbf9db037d712b216b8bb7
|
a8547f73463eef517b98d1085430732f442c856e
|
/pysam-0.13-py3.6-macosx-10.13-x86_64.egg/pysam/libcbgzf.py
|
366d86d29872fb9a2271270af8be79da14542344
|
[] |
no_license
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
63aece1b692225ee2fbb865200279d7ef88a1eca
|
5668b5785296b314ea1321057420bcd077dba9ea
|
refs/heads/master
| 2021-01-23T19:13:04.707152 | 2017-12-25T17:41:30 | 2017-12-25T17:41:30 | 102,808,884 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 303 |
py
|
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, 'libcbgzf.cpython-36m-darwin.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
[
"[email protected]"
] | |
c839051c620fd066513fce874f55bfe78f1dc4e4
|
540b24e3ec47a2cb4baefb6fe19d6c97c05b41c6
|
/subversion/tools/hook-scripts/svn2feed.py
|
c3abe8c1eb2dc1858dc594f397eb2d74cd7b596e
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"X11",
"Apache-2.0",
"BSD-2-Clause",
"HPND-Markus-Kuhn",
"LicenseRef-scancode-unicode",
"MIT"
] |
permissive
|
Quantum-Platinum-Cloud/subversion
|
dedeff0955fc6d03df445d1cb1b9a6d058e47c72
|
494f46f077e41a3ef32cf315e903695ecf547f5c
|
refs/heads/main
| 2023-08-17T16:36:40.102795 | 2021-03-17T19:13:59 | 2021-10-06T05:38:16 | 589,011,516 | 1 | 0 | null | 2023-01-14T19:18:40 | 2023-01-14T19:18:39 | null |
UTF-8
|
Python
| false | false | 16,736 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
"""Usage: svn2feed.py [OPTION...] REPOS-PATH
Generate an RSS 2.0 or Atom 1.0 feed file containing commit
information for the Subversion repository located at REPOS-PATH. Once
the maximum number of items is reached, older elements are removed.
The item title is the revision number, and the item description
contains the author, date, log messages and changed paths.
Options:
-h, --help Show this help message.
-F, --format=FORMAT Required option. FORMAT must be one of:
'rss' (RSS 2.0)
'atom' (Atom 1.0)
to select the appropriate feed format.
-f, --feed-file=PATH Store the feed in the file located at PATH, which will
be created if it does not exist, or overwritten if it
does. If not provided, the script will store the feed
in the current working directory, in a file named
REPOS_NAME.rss or REPOS_NAME.atom (where REPOS_NAME is
the basename of the REPOS_PATH command-line argument,
and the file extension depends on the selected
format).
-r, --revision=X[:Y] Subversion revision (or revision range) to generate
info for. If not provided, info for the single
youngest revision in the repository will be generated.
-m, --max-items=N Keep only N items in the feed file. By default,
20 items are kept.
-u, --item-url=URL Use URL as the basis for generating feed item links.
This value is appended with '?rev=REV_NUMBER' to form
the actual item links.
-U, --feed-url=URL Use URL as the global link associated with the feed.
-P, --svn-path=DIR Look in DIR for the svnlook binary. If not provided,
svnlook must be on the PATH.
"""
# TODO:
# --item-url should support arbitrary formatting of the revision number,
# to be useful with web viewers other than ViewVC.
# Rather more than intended is being cached in the pickle file. Instead of
# only old items being drawn from the pickle, all the global feed metadata
# is actually set only on initial feed creation, and thereafter simply
# re-used from the pickle each time.
# $HeadURL: https://svn.apache.org/repos/asf/subversion/branches/1.10.x/tools/hook-scripts/svn2feed.py $
# $LastChangedDate: 2016-04-30 08:16:53 +0000 (Sat, 30 Apr 2016) $
# $LastChangedBy: stefan2 $
# $LastChangedRevision: 1741723 $
import sys
# Python 2.4 is required for subprocess
if sys.version_info < (2, 4):
sys.stderr.write("Error: Python 2.4 or higher required.\n")
sys.stderr.flush()
sys.exit(1)
import getopt
import os
import subprocess
try:
# Python <3.0
import cPickle as pickle
except ImportError:
# Python >=3.0
import pickle
import datetime
import time
def usage_and_exit(errmsg=None):
"""Print a usage message, plus an ERRMSG (if provided), then exit.
If ERRMSG is provided, the usage message is printed to stderr and
the script exits with a non-zero error code. Otherwise, the usage
message goes to stdout, and the script exits with a zero
errorcode."""
if errmsg is None:
stream = sys.stdout
else:
stream = sys.stderr
stream.write("%s\n" % __doc__)
stream.flush()
if errmsg:
stream.write("\nError: %s\n" % errmsg)
stream.flush()
sys.exit(2)
sys.exit(0)
def check_url(url, opt):
"""Verify that URL looks like a valid URL or option OPT."""
if not (url.startswith('https://') \
or url.startswith('http://') \
or url.startswith('file://')):
usage_and_exit("svn2feed.py: Invalid url '%s' is specified for " \
"'%s' option" % (url, opt))
class Svn2Feed:
def __init__(self, svn_path, repos_path, item_url, feed_file,
max_items, feed_url):
self.repos_path = repos_path
self.item_url = item_url
self.feed_file = feed_file
self.max_items = max_items
self.feed_url = feed_url
self.svnlook_cmd = 'svnlook'
if svn_path is not None:
self.svnlook_cmd = os.path.join(svn_path, 'svnlook')
self.feed_title = ("%s's Subversion Commits Feed"
% (os.path.basename(os.path.abspath(self.repos_path))))
self.feed_desc = "The latest Subversion commits"
def _get_item_dict(self, revision):
revision = str(revision)
cmd = [self.svnlook_cmd, 'info', '-r', revision, self.repos_path]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
proc.wait()
info_lines = proc.stdout.readlines()
cmd = [self.svnlook_cmd, 'changed', '-r', revision, self.repos_path]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
proc.wait()
changed_data = proc.stdout.readlines()
desc = ("\nRevision: %s\nLog: %sModified: \n%s"
% (revision, info_lines[3], changed_data))
item_dict = {
'author': info_lines[0].strip('\n'),
'title': "Revision %s" % revision,
'link': self.item_url and "%s?rev=%s" % (self.item_url, revision),
'date': self._format_updated_ts(info_lines[1]),
'description': "<pre>" + desc + "</pre>",
}
return item_dict
def _format_updated_ts(self, revision_ts):
# Get "2006-08-10 20:17:08" from
# "2006-07-28 20:17:18 +0530 (Fri, 28 Jul 2006)
date = revision_ts[0:19]
epoch = time.mktime(time.strptime(date, "%Y-%m-%d %H:%M:%S"))
return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(epoch))
class Svn2RSS(Svn2Feed):
def __init__(self, svn_path, repos_path, item_url, feed_file,
max_items, feed_url):
Svn2Feed.__init__(self, svn_path, repos_path, item_url, feed_file,
max_items, feed_url)
try:
import PyRSS2Gen
except ImportError:
sys.stderr.write("""
Error: Required PyRSS2Gen module not found. You can download the PyRSS2Gen
module from:
http://www.dalkescientific.com/Python/PyRSS2Gen.html
""")
sys.exit(1)
self.PyRSS2Gen = PyRSS2Gen
(file, ext) = os.path.splitext(self.feed_file)
self.pickle_file = file + ".pickle"
        if os.path.exists(self.pickle_file):
            # Pickle data must be read in binary mode (required on Python 3,
            # harmless on Python 2).
            self.rss = pickle.load(open(self.pickle_file, "rb"))
else:
self.rss = self.PyRSS2Gen.RSS2(
title = self.feed_title,
link = self.feed_url,
description = self.feed_desc,
lastBuildDate = datetime.datetime.now(),
items = [])
@staticmethod
def get_default_file_extension():
return ".rss"
def add_revision_item(self, revision):
rss_item = self._make_rss_item(revision)
self.rss.items.insert(0, rss_item)
if len(self.rss.items) > self.max_items:
del self.rss.items[self.max_items:]
    def write_output(self):
        # Write the pickle in binary mode; the feed XML itself is text.
        s = pickle.dumps(self.rss)
        f = open(self.pickle_file, "wb")
        f.write(s)
        f.close()
        f = open(self.feed_file, "w")
        self.rss.write_xml(f)
        f.close()
def _make_rss_item(self, revision):
info = self._get_item_dict(revision)
rss_item = self.PyRSS2Gen.RSSItem(
author = info['author'],
title = info['title'],
link = info['link'],
description = info['description'],
guid = self.PyRSS2Gen.Guid(info['link']),
pubDate = info['date'])
return rss_item
class Svn2Atom(Svn2Feed):
def __init__(self, svn_path, repos_path, item_url, feed_file,
max_items, feed_url):
Svn2Feed.__init__(self, svn_path, repos_path, item_url, feed_file,
max_items, feed_url)
from xml.dom import getDOMImplementation
self.dom_impl = getDOMImplementation()
self.pickle_file = self.feed_file + ".pickle"
        if os.path.exists(self.pickle_file):
            self.document = pickle.load(open(self.pickle_file, "rb"))
self.feed = self.document.getElementsByTagName('feed')[0]
else:
self._init_atom_document()
@staticmethod
def get_default_file_extension():
return ".atom"
def add_revision_item(self, revision):
item = self._make_atom_item(revision)
total = 0
for childNode in self.feed.childNodes:
if childNode.nodeName == 'entry':
if total == 0:
self.feed.insertBefore(item, childNode)
total += 1
total += 1
if total > self.max_items:
self.feed.removeChild(childNode)
if total == 0:
self.feed.appendChild(item)
    def write_output(self):
        s = pickle.dumps(self.document)
        f = open(self.pickle_file, "wb")   # pickle requires binary mode
        f.write(s)
        f.close()
        f = open(self.feed_file, "w")
        f.write(self.document.toxml())
        f.close()
def _make_atom_item(self, revision):
info = self._get_item_dict(revision)
doc = self.document
entry = doc.createElement("entry")
id = doc.createElement("id")
entry.appendChild(id)
id.appendChild(doc.createTextNode(info['link']))
title = doc.createElement("title")
entry.appendChild(title)
title.appendChild(doc.createTextNode(info['title']))
updated = doc.createElement("updated")
entry.appendChild(updated)
updated.appendChild(doc.createTextNode(info['date']))
link = doc.createElement("link")
entry.appendChild(link)
link.setAttribute("href", info['link'])
summary = doc.createElement("summary")
entry.appendChild(summary)
summary.appendChild(doc.createTextNode(info['description']))
author = doc.createElement("author")
entry.appendChild(author)
aname = doc.createElement("name")
author.appendChild(aname)
aname.appendChild(doc.createTextNode(info['author']))
return entry
def _init_atom_document(self):
doc = self.document = self.dom_impl.createDocument(None, None, None)
feed = self.feed = doc.createElement("feed")
doc.appendChild(feed)
feed.setAttribute("xmlns", "http://www.w3.org/2005/Atom")
title = doc.createElement("title")
feed.appendChild(title)
title.appendChild(doc.createTextNode(self.feed_title))
id = doc.createElement("id")
feed.appendChild(id)
id.appendChild(doc.createTextNode(self.feed_url))
updated = doc.createElement("updated")
feed.appendChild(updated)
now = datetime.datetime.now()
updated.appendChild(doc.createTextNode(self._format_date(now)))
link = doc.createElement("link")
feed.appendChild(link)
link.setAttribute("href", self.feed_url)
author = doc.createElement("author")
feed.appendChild(author)
aname = doc.createElement("name")
author.appendChild(aname)
aname.appendChild(doc.createTextNode("subversion"))
    def _format_date(self, dt):
        """ input date must be in GMT """
        # Microseconds must be zero-padded to six digits to form a valid
        # fractional second (e.g. 5000us is .005000, not .50).
        return ("%04d-%02d-%02dT%02d:%02d:%02d.%06dZ"
                % (dt.year, dt.month, dt.day, dt.hour, dt.minute,
                   dt.second, dt.microsecond))
def main():
# Parse the command-line options and arguments.
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "hP:r:u:f:m:U:F:",
["help",
"svn-path=",
"revision=",
"item-url=",
"feed-file=",
"max-items=",
"feed-url=",
"format=",
])
except getopt.GetoptError as msg:
usage_and_exit(msg)
# Make sure required arguments are present.
if len(args) != 1:
usage_and_exit("You must specify a repository path.")
repos_path = os.path.abspath(args[0])
# Now deal with the options.
max_items = 20
commit_rev = svn_path = None
item_url = feed_url = None
feed_file = None
feedcls = None
feed_classes = { 'rss': Svn2RSS, 'atom': Svn2Atom }
for opt, arg in opts:
if opt in ("-h", "--help"):
usage_and_exit()
elif opt in ("-P", "--svn-path"):
svn_path = arg
elif opt in ("-r", "--revision"):
commit_rev = arg
elif opt in ("-u", "--item-url"):
item_url = arg
check_url(item_url, opt)
elif opt in ("-f", "--feed-file"):
feed_file = arg
elif opt in ("-m", "--max-items"):
try:
max_items = int(arg)
except ValueError as msg:
usage_and_exit("Invalid value '%s' for --max-items." % (arg))
if max_items < 1:
usage_and_exit("Value for --max-items must be a positive "
"integer.")
elif opt in ("-U", "--feed-url"):
feed_url = arg
check_url(feed_url, opt)
elif opt in ("-F", "--format"):
try:
feedcls = feed_classes[arg]
except KeyError:
usage_and_exit("Invalid value '%s' for --format." % arg)
if feedcls is None:
usage_and_exit("Option -F [--format] is required.")
if item_url is None:
usage_and_exit("Option -u [--item-url] is required.")
if feed_url is None:
usage_and_exit("Option -U [--feed-url] is required.")
if commit_rev is None:
svnlook_cmd = 'svnlook'
if svn_path is not None:
svnlook_cmd = os.path.join(svn_path, 'svnlook')
cmd = [svnlook_cmd, 'youngest', repos_path]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
proc.wait()
cmd_out = proc.stdout.readlines()
try:
revisions = [int(cmd_out[0])]
except IndexError as msg:
usage_and_exit("svn2feed.py: Invalid value '%s' for " \
"REPOS-PATH" % (repos_path))
else:
try:
rev_range = commit_rev.split(':')
len_rev_range = len(rev_range)
if len_rev_range == 1:
revisions = [int(commit_rev)]
elif len_rev_range == 2:
start, end = rev_range
start = int(start)
end = int(end)
if (start > end):
tmp = start
start = end
end = tmp
revisions = list(range(start, end + 1)[-max_items:])
else:
raise ValueError()
except ValueError as msg:
usage_and_exit("svn2feed.py: Invalid value '%s' for --revision." \
% (commit_rev))
if feed_file is None:
feed_file = (os.path.basename(repos_path) +
feedcls.get_default_file_extension())
feed = feedcls(svn_path, repos_path, item_url, feed_file, max_items,
feed_url)
for revision in revisions:
feed.add_revision_item(revision)
feed.write_output()
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
6224998f24dbbf286ac343c71d3f2cf7401f4b20
|
abf9238ac124738796a61e4ae3e667cae950d55a
|
/Custom Troop Trees/Source Files/cstm_party_templates.py
|
e85eb75bb7d7beadb6787f95fd1ff63989067576
|
[] |
no_license
|
ChroniclesStudio/custom-troop-trees
|
d92d4c3723ca117fd087332451ea1a0414998162
|
d39333cf8c4ea9fddb3d58c49850a4dffedbb917
|
refs/heads/master
| 2023-02-18T07:27:56.439995 | 2021-01-19T14:46:50 | 2021-01-19T14:46:50 | 331,012,346 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,713 |
py
|
from header_common import *
from header_parties import *
from header_troops import *
from ID_troops import *
from ID_factions import *
from ID_map_icons import *
from module_constants import *
from module_troops import troops
import math
pmf_is_prisoner = 0x0001
####################################################################################################################
# Each party template record contains the following fields:
# 1) Party-template id: used for referencing party-templates in other files.
# The prefix pt_ is automatically added before each party-template id.
# 2) Party-template name.
# 3) Party flags. See header_parties.py for a list of available flags
# 4) Menu. ID of the menu to use when this party is met. The value 0 uses the default party encounter system.
# 5) Faction
# 6) Personality. See header_parties.py for an explanation of personality flags.
# 7) List of stacks. Each stack record is a tuple that contains the following fields:
# 7.1) Troop-id.
# 7.2) Minimum number of troops in the stack.
# 7.3) Maximum number of troops in the stack.
# 7.4) Member flags(optional). Use pmf_is_prisoner to note that this member is a prisoner.
# Note: There can be at most 6 stacks.
####################################################################################################################
party_templates = [
#("kingdom_1_reinforcements_a", "{!}kingdom_1_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_swadian_recruit,5,10),(trp_swadian_militia,2,4)]),
#("kingdom_1_reinforcements_b", "{!}kingdom_1_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_swadian_footman,3,6),(trp_swadian_skirmisher,2,4)]),
#("kingdom_1_reinforcements_c", "{!}kingdom_1_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_swadian_man_at_arms,2,4),(trp_swadian_crossbowman,1,2)]), #Swadians are a bit less-powered thats why they have a bit more troops in their modernised party template (3-6, others 3-5)
#("kingdom_2_reinforcements_a", "{!}kingdom_2_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_vaegir_recruit,5,10),(trp_vaegir_footman,2,4)]),
#("kingdom_2_reinforcements_b", "{!}kingdom_2_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_vaegir_veteran,2,4),(trp_vaegir_skirmisher,2,4),(trp_vaegir_footman,1,2)]),
#("kingdom_2_reinforcements_c", "{!}kingdom_2_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_vaegir_horseman,2,3),(trp_vaegir_infantry,1,2)]),
#("kingdom_3_reinforcements_a", "{!}kingdom_3_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_khergit_tribesman,3,5),(trp_khergit_skirmisher,4,9)]), #Khergits are a bit less-powered thats why they have a bit more 2nd upgraded(trp_khergit_skirmisher) than non-upgraded one(trp_khergit_tribesman).
#("kingdom_3_reinforcements_b", "{!}kingdom_3_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_khergit_horseman,2,4),(trp_khergit_horse_archer,2,4),(trp_khergit_skirmisher,1,2)]),
#("kingdom_3_reinforcements_c", "{!}kingdom_3_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_khergit_horseman,2,4),(trp_khergit_veteran_horse_archer,2,3)]), #Khergits are a bit less-powered thats why they have a bit more troops in their modernised party template (4-7, others 3-5)
#("kingdom_4_reinforcements_a", "{!}kingdom_4_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_nord_footman,5,10),(trp_nord_recruit,2,4)]),
#("kingdom_4_reinforcements_b", "{!}kingdom_4_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_nord_huntsman,2,5),(trp_nord_archer,2,3),(trp_nord_footman,1,2)]),
#("kingdom_4_reinforcements_c", "{!}kingdom_4_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_nord_warrior,3,5)]),
#("kingdom_5_reinforcements_a", "{!}kingdom_5_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_rhodok_tribesman,5,10),(trp_rhodok_spearman,2,4)]),
#("kingdom_5_reinforcements_b", "{!}kingdom_5_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_rhodok_crossbowman,3,6),(trp_rhodok_trained_crossbowman,2,4)]),
#("kingdom_5_reinforcements_c", "{!}kingdom_5_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_rhodok_veteran_spearman,2,3),(trp_rhodok_veteran_crossbowman,1,2)]),
#("kingdom_6_reinforcements_a", "{!}kingdom_6_reinforcements_a", 0, 0, fac_commoners, 0, [(trp_sarranid_recruit,5,10),(trp_sarranid_footman,2,4)]),
#("kingdom_6_reinforcements_b", "{!}kingdom_6_reinforcements_b", 0, 0, fac_commoners, 0, [(trp_sarranid_skirmisher,2,4),(trp_sarranid_veteran_footman,2,3),(trp_sarranid_footman,1,3)]),
#("kingdom_6_reinforcements_c", "{!}kingdom_6_reinforcements_c", 0, 0, fac_commoners, 0, [(trp_sarranid_horseman,3,5)]),
]
# NOTE: both helpers below read the module-level loop variable `tree`
# (bound in the loop further down) at call time.
def troop_indexes_of_tier(skin, tier):
    return [find_troop(troops, troop[0]) for troop in tree.get_custom_troops_of_tier(skin, tier)]
def tier_stacks(skin, tier, min, max):
    # Spread the requested min/max member counts evenly across all troops of
    # the tier, rounding up so the total never drops below the minimum.
    tier_troops = troop_indexes_of_tier(skin, tier)
    return [(troop, int(math.ceil(min * 1.0 / len(tier_troops))), int(math.ceil(max * 1.0 / len(tier_troops)))) for troop in tier_troops]
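# For example (hypothetical numbers): a tier containing 3 troops with a
# requested range of min=5, max=10 yields one stack per troop of
# (ceil(5/3), ceil(10/3)) = (2, 4) members, i.e. 6-12 in total, so the
# combined stacks never fall below the requested minimum.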
for tree in CUSTOM_TROOP_TREES:
for skin in CSTM_SKINS:
id = "cstm_kingdom_player_%s_%d_reinforcements" % (tree.id, skin.id)
party_templates.extend([
(id + "_a", "{!}" + id + "_a", 0, 0, fac_commoners, 0, tier_stacks(skin, tier = 1, min = 5, max = 10) + tier_stacks(skin, tier = 2, min = 2, max = 4)),
(id + "_b", "{!}" + id + "_b", 0, 0, fac_commoners, 0, tier_stacks(skin, tier = 3, min = 5, max = 10)),
(id + "_c", "{!}" + id + "_c", 0, 0, fac_commoners, 0, tier_stacks(skin, tier = 4, min = 3, max = 5)),
])
#for party_template in party_templates:
# print ", ".join([party_template[0], party_template[1], ", ".join(["%d-%d %s" % (stack[1], stack[2], troops[stack[0]][2]) for stack in party_template[6]])])
|
[
"[email protected]"
] | |
1e07bbeff0fb13fa145c80101d396935d33a0423
|
6b14d9a64a578239e5612e6098320b61b45c08d9
|
/AUG14/04.py
|
27bec86f2774038bbdffc335d52b45f500521bfc
|
[
"MIT"
] |
permissive
|
Razdeep/PythonSnippets
|
498c403140fec33ee2f0dd84801738f1256ee9dd
|
76f9313894f511c487a99bc38bdf0fe5e594caf5
|
refs/heads/master
| 2020-03-26T08:56:23.067022 | 2018-11-26T05:36:36 | 2018-11-26T05:36:36 | 144,726,845 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 67 |
py
|
# String slicing
test='Hello world'
print(test[1:5])
print(test[6])
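# Expected output of the slices above:
#   test[1:5] -> 'ello'  (characters at indexes 1 through 4)
#   test[6]   -> 'w'     (the single character at index 6)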
|
[
"[email protected]"
] | |
c1fda1a470ad681c3a1a16d4e839b87151b19b33
|
6f6d215a4f0a1c30eeb5a08c8a36016fc351998a
|
/zcls/model/recognizers/resnet/torchvision_resnet.py
|
040bc44da6892b30585f415d6130a4b2fe65cecc
|
[
"Apache-2.0"
] |
permissive
|
Quebradawill/ZCls
|
ef9db2b54fbee17802f3342752e3d4fe4ef9d2c5
|
ade3dc7fd23584b7ba597f24ec19c02ae847673e
|
refs/heads/master
| 2023-04-15T23:25:18.195089 | 2021-04-29T07:05:46 | 2021-04-29T07:05:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,125 |
py
|
# -*- coding: utf-8 -*-
"""
@date: 2021/2/20 上午10:28
@file: torchvision_resnet.py
@author: zj
@description:
"""
from abc import ABC
import torch.nn as nn
from torch.nn.modules.module import T
from torchvision.models.resnet import resnet18, resnet50, resnext50_32x4d
from zcls.config.key_word import KEY_OUTPUT
from zcls.model import registry
from zcls.model.norm_helper import freezing_bn
class TorchvisionResNet(nn.Module, ABC):
def __init__(self,
arch="resnet18",
num_classes=1000,
torchvision_pretrained=False,
pretrained_num_classes=1000,
fix_bn=False,
partial_bn=False,
zero_init_residual=False):
super(TorchvisionResNet, self).__init__()
self.num_classes = num_classes
self.fix_bn = fix_bn
self.partial_bn = partial_bn
if arch == 'resnet18':
self.model = resnet18(pretrained=torchvision_pretrained, num_classes=pretrained_num_classes,
zero_init_residual=zero_init_residual)
elif arch == 'resnet50':
self.model = resnet50(pretrained=torchvision_pretrained, num_classes=pretrained_num_classes,
zero_init_residual=zero_init_residual)
elif arch == 'resnext50_32x4d':
self.model = resnext50_32x4d(pretrained=torchvision_pretrained, num_classes=pretrained_num_classes,
zero_init_residual=zero_init_residual)
        else:
            raise ValueError("unknown arch '%s'" % arch)
self.init_weights(num_classes, pretrained_num_classes)
def init_weights(self, num_classes, pretrained_num_classes):
if num_classes != pretrained_num_classes:
fc = self.model.fc
fc_features = fc.in_features
self.model.fc = nn.Linear(fc_features, num_classes)
nn.init.normal_(self.model.fc.weight, 0, 0.01)
nn.init.zeros_(self.model.fc.bias)
def train(self, mode: bool = True) -> T:
super(TorchvisionResNet, self).train(mode=mode)
if mode and (self.partial_bn or self.fix_bn):
freezing_bn(self, partial_bn=self.partial_bn)
return self
def forward(self, x):
x = self.model(x)
return {KEY_OUTPUT: x}
@registry.RECOGNIZER.register('TorchvisionResNet')
def build_torchvision_resnet(cfg):
torchvision_pretrained = cfg.MODEL.RECOGNIZER.TORCHVISION_PRETRAINED
pretrained_num_classes = cfg.MODEL.RECOGNIZER.PRETRAINED_NUM_CLASSES
fix_bn = cfg.MODEL.NORM.FIX_BN
partial_bn = cfg.MODEL.NORM.PARTIAL_BN
# for backbone
arch = cfg.MODEL.BACKBONE.ARCH
zero_init_residual = cfg.MODEL.RECOGNIZER.ZERO_INIT_RESIDUAL
num_classes = cfg.MODEL.HEAD.NUM_CLASSES
return TorchvisionResNet(
arch=arch,
num_classes=num_classes,
torchvision_pretrained=torchvision_pretrained,
pretrained_num_classes=pretrained_num_classes,
fix_bn=fix_bn,
partial_bn=partial_bn,
zero_init_residual=zero_init_residual
)
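# A minimal sketch of direct construction, bypassing the cfg/registry path
# (all argument values here are illustrative assumptions):
#
#   model = TorchvisionResNet(arch='resnet50',
#                             num_classes=10,
#                             torchvision_pretrained=True,
#                             pretrained_num_classes=1000)
#   logits = model(images)[KEY_OUTPUT]   # forward() wraps the output in a dict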
|
[
"[email protected]"
] | |
8afe9cc9f4f53d06be5e718686be5cb4cf5c0cdb
|
c67268ac491ecfe606308a43185f1bf8073d56a1
|
/unittesting/test_employee2.py
|
84682a7e52ffd035b6a9a992a079c59112128dc6
|
[] |
no_license
|
jisshub/python-django-training
|
3c0fad4c80c78bcfb4b61b025da60d220b502e4b
|
d8c61f53e3bb500b1a58a706f20108babd6a1a54
|
refs/heads/master
| 2020-06-21T15:07:25.704209 | 2019-09-01T19:24:02 | 2019-09-01T19:24:02 | 197,487,745 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,929 |
py
|
import unittest
# here we import the Employee class from the employee module (employee.py)
from employee import Employee
class EmployeeTest(unittest.TestCase):
def setUp(self):
print('setup\n')
        # here we create two employee objects once instead of creating them
        # for each test, keeping the tests DRY
self.emp1 = Employee('jiss', 'jose', 3000)
self.emp2 = Employee('isco', 'alarcon', 5000)
def tearDown(self):
print('teardown\n')
def test_email(self):
print('test_email\n')
var1 = self.emp1.email
var2 = self.emp2.email
self.assertEqual(var1, '[email protected]')
self.assertEqual(var2, '[email protected]')
self.emp1.first = 'john'
self.emp2.last = 'james'
self.assertEqual(self.emp1.email, '[email protected]')
self.assertEqual(self.emp2.email, '[email protected]')
def test_fullname(self):
print('test_fullname\n')
self.assertEqual(self.emp1.full_name, 'jiss jose')
self.emp1.first = 'jom'
self.emp1.last = 'thomas'
self.assertEqual(self.emp1.full_name, 'jom thomas')
self.assertEqual(self.emp2.full_name, 'isco alarcon')
self.emp2.first = 'alvaro'
self.emp2.last = 'morata'
self.assertEqual(self.emp2.full_name, 'alvaro morata')
def test_pay(self):
print('test_pay\n')
self.assertEqual(self.emp1.apply_raise, 6000)
self.emp1.pay_raise = 1.5
self.assertEqual(self.emp1.apply_raise, 9000)
self.assertEqual(self.emp2.apply_raise, 10000)
self.emp2.pay_raise = .5
self.assertEqual(self.emp2.apply_raise, 5000)
if __name__ == '__main__':
unittest.main()
# here we test whether the computed values of apply_raise match what we expect.
# setUp runs before each test and tearDown runs after each test,
# so the order is:
# setUp
# test method
# tearDown
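# For reference, a minimal sketch of the Employee class these tests imply
# (the real employee.py may differ; this is inferred from the assertions):
#
#   class Employee:
#       def __init__(self, first, last, pay):
#           self.first, self.last, self.pay = first, last, pay
#           self.pay_raise = 1
#
#       @property
#       def email(self):
#           return '%s.%[email protected]' % (self.first, self.last)
#
#       @property
#       def full_name(self):
#           return '%s %s' % (self.first, self.last)
#
#       @property
#       def apply_raise(self):
#           return int(self.pay * 2 * self.pay_raise)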
|
[
"[email protected]"
] | |
ccf100ecb17578bc9791263e5270183990fed468
|
0b793bce2da8c3d09b7956c0672ddbffd46feaed
|
/atcoder/corp/keyence2020_c.py
|
9e943f94b0f860184c871b6de78e2af5092d409b
|
[
"MIT"
] |
permissive
|
knuu/competitive-programming
|
c6c4e08fb231937d988bdc5a60a8ad6b31b97616
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
refs/heads/master
| 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 201 |
py
|
N, K, S = map(int, input().split())
# Filler values must never create extra subarrays summing to S. Using S + 1
# is safe while it stays within the (assumed) 10**9 value cap; at
# S == 10**9 fall back to S - 1, whose multiples can never equal S.
# (Unconditional S - 1 breaks for S == 2: two adjacent 1s also sum to 2.)
if S == 10 ** 9:
    const = S - 1
else:
    const = S + 1
ans = []
for i in range(N):
if i < K:
ans.append(S)
else:
ans.append(const)
print(*ans)
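# Quick check (assuming the "exactly K subarrays summing to S" statement of
# KEYENCE 2020 C): N=4, K=2, S=3 prints "3 3 4 4"; only the two
# single-element subarrays [3] sum to 3, so exactly K subarrays match.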
|
[
"[email protected]"
] | |
03bc0d80849bc3264945b6fc903d9599b980d26a
|
a38725ed7fb93b503207502984ec197e921eb54b
|
/venv/lib/python3.6/site-packages/django_ajax/encoder.py
|
64ed9ca2af3a6a719fd651966cacb7ddaf862693
|
[] |
no_license
|
tanveerahmad1517/myblogproject
|
d00d550230e2df0843e67f793504f9c19d0b755c
|
2eaa051caa5b68a8fba260c7cd431f1e1719a171
|
refs/heads/master
| 2020-03-16T21:38:32.738671 | 2018-08-23T11:55:02 | 2018-08-23T11:55:02 | 133,008,051 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,859 |
py
|
"""
Utils
"""
from __future__ import unicode_literals
import json
from datetime import date
from django.http.response import HttpResponseRedirectBase, HttpResponse
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.db.models.base import ModelBase
from decimal import Decimal
class LazyJSONEncoderMixin(object):
"""
    A JSONEncoder subclass that handles querysets and model objects.
    Add handling for your own types here to control how they are dumped to JSON
"""
def default(self, obj):
# handles HttpResponse and exception content
if issubclass(type(obj), HttpResponseRedirectBase):
return obj['Location']
elif issubclass(type(obj), TemplateResponse):
return obj.rendered_content
elif issubclass(type(obj), HttpResponse):
return obj.content
elif issubclass(type(obj), Exception) or isinstance(obj, bytes):
return force_text(obj)
# this handles querysets and other iterable types
try:
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
        # this handles Models
if isinstance(obj.__class__, ModelBase):
return force_text(obj)
if isinstance(obj, Decimal):
return float(obj)
if isinstance(obj, date):
return obj.isoformat()
return super(LazyJSONEncoderMixin, self).default(obj)
class LazyJSONEncoder(LazyJSONEncoderMixin, json.JSONEncoder):
pass
def serialize_to_json(data, *args, **kwargs):
"""
    A wrapper for json.dumps with the default:
        cls=LazyJSONEncoder
    All arguments can be overridden via kwargs
"""
kwargs['cls'] = kwargs.get('cls', LazyJSONEncoder)
return json.dumps(data, *args, **kwargs)
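# A small usage sketch (the model/queryset objects are hypothetical; any of
# the types handled in default() above serialize the same way):
#
#   payload = serialize_to_json({
#       'when': date.today(),            # -> ISO 8601 string
#       'price': Decimal('9.99'),        # -> 9.99 (float)
#       'rows': SomeModel.objects.all(), # queryset -> list of str(obj)
#   })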
|
[
"[email protected]"
] | |
5c03758b507d6d0764e0ee096e04ba7048e30035
|
da9b9f75a693d17102be45b88efc212ca6da4085
|
/sdk/cosmos/azure-cosmos/azure/cosmos/container.py
|
73441d19f5abd428087ba295d4936b854400a8c0
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
elraikhm/azure-sdk-for-python
|
e1f57b2b4d8cc196fb04eb83d81022f50ff63db7
|
dcb6fdd18b0d8e0f1d7b34fdf82b27a90ee8eafc
|
refs/heads/master
| 2021-06-21T22:01:37.063647 | 2021-05-21T23:43:56 | 2021-05-21T23:43:56 | 216,855,069 | 0 | 0 |
MIT
| 2019-10-22T16:05:03 | 2019-10-22T16:05:02 | null |
UTF-8
|
Python
| false | false | 35,017 |
py
|
# The MIT License (MIT)
# Copyright (c) 2014 Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Create, read, update and delete items in the Azure Cosmos DB SQL API service.
"""
from typing import Any, Dict, List, Optional, Union, Iterable, cast # pylint: disable=unused-import
import six
from azure.core.tracing.decorator import distributed_trace # type: ignore
from ._cosmos_client_connection import CosmosClientConnection
from ._base import build_options
from .errors import CosmosResourceNotFoundError
from .http_constants import StatusCodes
from .offer import Offer
from .scripts import ScriptsProxy
from .partition_key import NonePartitionKeyValue
__all__ = ("ContainerProxy",)
# pylint: disable=protected-access
# pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
class ContainerProxy(object):
"""
An interface to interact with a specific DB Container.
This class should not be instantiated directly, use :func:`DatabaseProxy.get_container_client` method.
    A container in an Azure Cosmos DB SQL API database is a collection of documents,
    each of which is represented as an Item.
:ivar str id: ID (name) of the container
:ivar str session_token: The session token for the container.
.. note::
To create a new container in an existing database, use :func:`Database.create_container`.
"""
def __init__(self, client_connection, database_link, id, properties=None): # pylint: disable=redefined-builtin
# type: (CosmosClientConnection, str, str, Dict[str, Any]) -> None
self.client_connection = client_connection
self.id = id
self._properties = properties
self.container_link = u"{}/colls/{}".format(database_link, self.id)
self._is_system_key = None
self._scripts = None # type: Optional[ScriptsProxy]
def _get_properties(self):
# type: () -> Dict[str, Any]
if self._properties is None:
self._properties = self.read()
return self._properties
@property
def is_system_key(self):
# type: () -> bool
if self._is_system_key is None:
properties = self._get_properties()
self._is_system_key = (
properties["partitionKey"]["systemKey"] if "systemKey" in properties["partitionKey"] else False
)
return cast('bool', self._is_system_key)
@property
def scripts(self):
# type: () -> ScriptsProxy
if self._scripts is None:
self._scripts = ScriptsProxy(self.client_connection, self.container_link, self.is_system_key)
return cast('ScriptsProxy', self._scripts)
def _get_document_link(self, item_or_link):
# type: (Union[Dict[str, Any], str]) -> str
if isinstance(item_or_link, six.string_types):
return u"{}/docs/{}".format(self.container_link, item_or_link)
return item_or_link["_self"]
def _get_conflict_link(self, conflict_or_link):
# type: (Union[Dict[str, Any], str]) -> str
if isinstance(conflict_or_link, six.string_types):
return u"{}/conflicts/{}".format(self.container_link, conflict_or_link)
return conflict_or_link["_self"]
def _set_partition_key(self, partition_key):
if partition_key == NonePartitionKeyValue:
return CosmosClientConnection._return_undefined_or_empty_partition_key(self.is_system_key)
return partition_key
@distributed_trace
def read(
self,
populate_query_metrics=None, # type: Optional[bool]
populate_partition_key_range_statistics=None, # type: Optional[bool]
populate_quota_info=None, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> Dict[str, Any]
"""
Read the container properties
:param session_token: Token for use with Session consistency.
:param initial_headers: Initial headers to be sent as part of the request.
:param populate_query_metrics: Enable returning query metrics in response headers.
:param populate_partition_key_range_statistics: Enable returning partition key
range statistics in response headers.
:param populate_quota_info: Enable returning collection storage quota information in response headers.
:param request_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:raises ~azure.cosmos.errors.CosmosHttpResponseError: Raised if the container couldn't be retrieved.
This includes if the container does not exist.
:returns: Dict representing the retrieved container.
:rtype: dict[str, Any]
"""
request_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
if populate_query_metrics is not None:
request_options["populateQueryMetrics"] = populate_query_metrics
if populate_partition_key_range_statistics is not None:
request_options["populatePartitionKeyRangeStatistics"] = populate_partition_key_range_statistics
if populate_quota_info is not None:
request_options["populateQuotaInfo"] = populate_quota_info
collection_link = self.container_link
self._properties = self.client_connection.ReadContainer(
collection_link, options=request_options, **kwargs
)
if response_hook:
response_hook(self.client_connection.last_response_headers, self._properties)
return cast('Dict[str, Any]', self._properties)
@distributed_trace
def read_item(
self,
item, # type: Union[str, Dict[str, Any]]
partition_key, # type: Any
populate_query_metrics=None, # type: Optional[bool]
post_trigger_include=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Dict[str, str]
"""
Get the item identified by `item`.
:param item: The ID (name) or dict representing item to retrieve.
:param partition_key: Partition key for the item to retrieve.
:param session_token: Token for use with Session consistency.
:param initial_headers: Initial headers to be sent as part of the request.
:param populate_query_metrics: Enable returning query metrics in response headers.
:param post_trigger_include: trigger id to be used as post operation trigger.
:param request_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:returns: Dict representing the item to be retrieved.
:raises ~azure.cosmos.errors.CosmosHttpResponseError: The given item couldn't be retrieved.
:rtype: dict[str, Any]
.. admonition:: Example:
.. literalinclude:: ../samples/examples.py
:start-after: [START update_item]
:end-before: [END update_item]
:language: python
:dedent: 0
:caption: Get an item from the database and update one of its properties:
:name: update_item
"""
doc_link = self._get_document_link(item)
request_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
if partition_key:
request_options["partitionKey"] = self._set_partition_key(partition_key)
if populate_query_metrics is not None:
request_options["populateQueryMetrics"] = populate_query_metrics
if post_trigger_include:
request_options["postTriggerInclude"] = post_trigger_include
result = self.client_connection.ReadItem(document_link=doc_link, options=request_options, **kwargs)
if response_hook:
response_hook(self.client_connection.last_response_headers, result)
return result
@distributed_trace
def read_all_items(
self,
max_item_count=None, # type: Optional[int]
populate_query_metrics=None, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> Iterable[Dict[str, Any]]
"""
List all items in the container.
:param max_item_count: Max number of items to be returned in the enumeration operation.
:param session_token: Token for use with Session consistency.
:param initial_headers: Initial headers to be sent as part of the request.
:param populate_query_metrics: Enable returning query metrics in response headers.
:param feed_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:returns: An Iterable of items (dicts).
:rtype: Iterable[dict[str, Any]]
"""
feed_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
if max_item_count is not None:
feed_options["maxItemCount"] = max_item_count
if populate_query_metrics is not None:
feed_options["populateQueryMetrics"] = populate_query_metrics
if hasattr(response_hook, "clear"):
response_hook.clear()
items = self.client_connection.ReadItems(
collection_link=self.container_link, feed_options=feed_options, response_hook=response_hook, **kwargs
)
if response_hook:
response_hook(self.client_connection.last_response_headers, items)
return items
@distributed_trace
def query_items_change_feed(
self,
partition_key_range_id=None, # type: Optional[str]
is_start_from_beginning=False, # type: bool
continuation=None, # type: Optional[str]
max_item_count=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable[Dict[str, Any]]
"""
Get a sorted list of items that were changed, in the order in which they were modified.
:param partition_key_range_id: ChangeFeed requests can be executed against specific partition key ranges.
This is used to process the change feed in parallel across multiple consumers.
        :param is_start_from_beginning: Get whether change feed should start from
            beginning (true) or from current (false). By default it starts from current (false).
:param continuation: e_tag value to be used as continuation for reading change feed.
:param max_item_count: Max number of items to be returned in the enumeration operation.
:param feed_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:returns: An Iterable of items (dicts).
:rtype: Iterable[dict[str, Any]]
"""
feed_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
if partition_key_range_id is not None:
feed_options["partitionKeyRangeId"] = partition_key_range_id
if is_start_from_beginning is not None:
feed_options["isStartFromBeginning"] = is_start_from_beginning
if max_item_count is not None:
feed_options["maxItemCount"] = max_item_count
if continuation is not None:
feed_options["continuation"] = continuation
if hasattr(response_hook, "clear"):
response_hook.clear()
result = self.client_connection.QueryItemsChangeFeed(
self.container_link, options=feed_options, response_hook=response_hook, **kwargs
)
if response_hook:
response_hook(self.client_connection.last_response_headers, result)
return result
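    # A brief usage sketch (names are illustrative): iterate the feed lazily,
    # then pass the last e_tag back via `continuation` to resume later.
    #
    #   for item in container.query_items_change_feed(is_start_from_beginning=True):
    #       process(item)   # process() is a placeholder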
@distributed_trace
def query_items(
self,
query, # type: str
parameters=None, # type: Optional[List[str]]
partition_key=None, # type: Optional[Any]
enable_cross_partition_query=None, # type: Optional[bool]
max_item_count=None, # type: Optional[int]
enable_scan_in_query=None, # type: Optional[bool]
populate_query_metrics=None, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> Iterable[Dict[str, Any]]
"""
Return all results matching the given `query`.
You can use any value for the container name in the FROM clause, but typically the container name is used.
In the examples below, the container name is "products," and is aliased as "p" for easier referencing
in the WHERE clause.
:param query: The Azure Cosmos DB SQL query to execute.
:param parameters: Optional array of parameters to the query. Ignored if no query is provided.
:param partition_key: Specifies the partition key value for the item.
:param enable_cross_partition_query: Allows sending of more than one request to
execute the query in the Azure Cosmos DB service.
More than one request is necessary if the query is not scoped to single partition key value.
:param max_item_count: Max number of items to be returned in the enumeration operation.
:param session_token: Token for use with Session consistency.
:param initial_headers: Initial headers to be sent as part of the request.
:param enable_scan_in_query: Allow scan on the queries which couldn't be served as
indexing was opted out on the requested paths.
:param populate_query_metrics: Enable returning query metrics in response headers.
:param feed_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:returns: An Iterable of items (dicts).
:rtype: Iterable[dict[str, Any]]
.. admonition:: Example:
.. literalinclude:: ../samples/examples.py
:start-after: [START query_items]
:end-before: [END query_items]
:language: python
:dedent: 0
:caption: Get all products that have not been discontinued:
:name: query_items
.. literalinclude:: ../samples/examples.py
:start-after: [START query_items_param]
:end-before: [END query_items_param]
:language: python
:dedent: 0
:caption: Parameterized query to get all products that have been discontinued:
:name: query_items_param
"""
feed_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
if enable_cross_partition_query is not None:
feed_options["enableCrossPartitionQuery"] = enable_cross_partition_query
if max_item_count is not None:
feed_options["maxItemCount"] = max_item_count
if populate_query_metrics is not None:
feed_options["populateQueryMetrics"] = populate_query_metrics
if partition_key is not None:
feed_options["partitionKey"] = self._set_partition_key(partition_key)
if enable_scan_in_query is not None:
feed_options["enableScanInQuery"] = enable_scan_in_query
if hasattr(response_hook, "clear"):
response_hook.clear()
items = self.client_connection.QueryItems(
database_or_container_link=self.container_link,
query=query if parameters is None else dict(query=query, parameters=parameters),
options=feed_options,
partition_key=partition_key,
response_hook=response_hook,
**kwargs
)
if response_hook:
response_hook(self.client_connection.last_response_headers, items)
return items
@distributed_trace
def replace_item(
self,
item, # type: Union[str, Dict[str, Any]]
body, # type: Dict[str, Any]
populate_query_metrics=None, # type: Optional[bool]
pre_trigger_include=None, # type: Optional[str]
post_trigger_include=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Dict[str, str]
"""
Replaces the specified item if it exists in the container.
:param item: The ID (name) or dict representing item to be replaced.
:param body: A dict-like object representing the item to replace.
:param session_token: Token for use with Session consistency.
:param initial_headers: Initial headers to be sent as part of the request.
:param access_condition: Conditions Associated with the request.
:param populate_query_metrics: Enable returning query metrics in response headers.
:param pre_trigger_include: trigger id to be used as pre operation trigger.
:param post_trigger_include: trigger id to be used as post operation trigger.
:param request_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:returns: A dict representing the item after replace went through.
:raises ~azure.cosmos.errors.CosmosHttpResponseError: The replace failed or the item with
given id does not exist.
:rtype: dict[str, Any]
"""
item_link = self._get_document_link(item)
request_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
request_options["disableIdGeneration"] = True
if populate_query_metrics is not None:
request_options["populateQueryMetrics"] = populate_query_metrics
if pre_trigger_include:
request_options["preTriggerInclude"] = pre_trigger_include
if post_trigger_include:
request_options["postTriggerInclude"] = post_trigger_include
result = self.client_connection.ReplaceItem(
document_link=item_link, new_document=body, options=request_options, **kwargs
)
if response_hook:
response_hook(self.client_connection.last_response_headers, result)
return result
@distributed_trace
def upsert_item(
self,
body, # type: Dict[str, Any]
populate_query_metrics=None, # type: Optional[bool]
pre_trigger_include=None, # type: Optional[str]
post_trigger_include=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Dict[str, str]
"""
Insert or update the specified item.
If the item already exists in the container, it is replaced. If it does not, it is inserted.
:param body: A dict-like object representing the item to update or insert.
:param session_token: Token for use with Session consistency.
:param initial_headers: Initial headers to be sent as part of the request.
:param access_condition: Conditions Associated with the request.
:param populate_query_metrics: Enable returning query metrics in response headers.
:param pre_trigger_include: trigger id to be used as pre operation trigger.
:param post_trigger_include: trigger id to be used as post operation trigger.
:param request_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:returns: A dict representing the upserted item.
:raises ~azure.cosmos.errors.CosmosHttpResponseError: The given item could not be upserted.
:rtype: dict[str, Any]
"""
request_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
request_options["disableIdGeneration"] = True
if populate_query_metrics is not None:
request_options["populateQueryMetrics"] = populate_query_metrics
if pre_trigger_include:
request_options["preTriggerInclude"] = pre_trigger_include
if post_trigger_include:
request_options["postTriggerInclude"] = post_trigger_include
        result = self.client_connection.UpsertItem(
            database_or_container_link=self.container_link, document=body,
            options=request_options, **kwargs)
if response_hook:
response_hook(self.client_connection.last_response_headers, result)
return result
@distributed_trace
def create_item(
self,
body, # type: Dict[str, Any]
populate_query_metrics=None, # type: Optional[bool]
pre_trigger_include=None, # type: Optional[str]
post_trigger_include=None, # type: Optional[str]
indexing_directive=None, # type: Optional[Any]
**kwargs # type: Any
):
# type: (...) -> Dict[str, str]
"""
Create an item in the container.
To update or replace an existing item, use the :func:`ContainerProxy.upsert_item` method.
:param body: A dict-like object representing the item to create.
:param session_token: Token for use with Session consistency.
:param initial_headers: Initial headers to be sent as part of the request.
:param access_condition: Conditions Associated with the request.
:param populate_query_metrics: Enable returning query metrics in response headers.
:param pre_trigger_include: trigger id to be used as pre operation trigger.
:param post_trigger_include: trigger id to be used as post operation trigger.
:param indexing_directive: Indicate whether the document should be omitted from indexing.
:param request_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:returns: A dict representing the new item.
:raises ~azure.cosmos.errors.CosmosHttpResponseError: Item with the given ID already exists.
:rtype: dict[str, Any]
"""
request_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
request_options["disableAutomaticIdGeneration"] = True
if populate_query_metrics:
request_options["populateQueryMetrics"] = populate_query_metrics
if pre_trigger_include:
request_options["preTriggerInclude"] = pre_trigger_include
if post_trigger_include:
request_options["postTriggerInclude"] = post_trigger_include
if indexing_directive:
request_options["indexingDirective"] = indexing_directive
result = self.client_connection.CreateItem(
database_or_container_link=self.container_link, document=body, options=request_options, **kwargs
)
if response_hook:
response_hook(self.client_connection.last_response_headers, result)
return result
@distributed_trace
def delete_item(
self,
item, # type: Union[Dict[str, Any], str]
partition_key, # type: Any
populate_query_metrics=None, # type: Optional[bool]
pre_trigger_include=None, # type: Optional[str]
post_trigger_include=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""
Delete the specified item from the container.
:param item: The ID (name) or dict representing item to be deleted.
:param partition_key: Specifies the partition key value for the item.
:param session_token: Token for use with Session consistency.
:param initial_headers: Initial headers to be sent as part of the request.
:param access_condition: Conditions Associated with the request.
:param populate_query_metrics: Enable returning query metrics in response headers.
:param pre_trigger_include: trigger id to be used as pre operation trigger.
:param post_trigger_include: trigger id to be used as post operation trigger.
:param request_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:raises ~azure.cosmos.errors.CosmosHttpResponseError: The item wasn't deleted successfully.
:raises ~azure.cosmos.errors.CosmosResourceNotFoundError: The item does not exist in the container.
:rtype: None
"""
request_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
if partition_key:
request_options["partitionKey"] = self._set_partition_key(partition_key)
if populate_query_metrics is not None:
request_options["populateQueryMetrics"] = populate_query_metrics
if pre_trigger_include:
request_options["preTriggerInclude"] = pre_trigger_include
if post_trigger_include:
request_options["postTriggerInclude"] = post_trigger_include
document_link = self._get_document_link(item)
result = self.client_connection.DeleteItem(document_link=document_link, options=request_options, **kwargs)
if response_hook:
response_hook(self.client_connection.last_response_headers, result)
@distributed_trace
def read_offer(self, **kwargs):
# type: (Any) -> Offer
"""
Read the Offer object for this container.
:param response_hook: a callable invoked with the response metadata
:returns: Offer for the container.
:raises ~azure.cosmos.errors.CosmosHttpResponseError: No offer exists for the container or
the offer could not be retrieved.
:rtype: ~azure.cosmos.Offer
"""
response_hook = kwargs.pop('response_hook', None)
properties = self._get_properties()
link = properties["_self"]
query_spec = {
"query": "SELECT * FROM root r WHERE r.resource=@link",
"parameters": [{"name": "@link", "value": link}],
}
offers = list(self.client_connection.QueryOffers(query_spec, **kwargs))
if not offers:
raise CosmosResourceNotFoundError(
status_code=StatusCodes.NOT_FOUND,
message="Could not find Offer for container " + self.container_link)
if response_hook:
response_hook(self.client_connection.last_response_headers, offers)
return Offer(offer_throughput=offers[0]["content"]["offerThroughput"], properties=offers[0])
@distributed_trace
def replace_throughput(self, throughput, **kwargs):
# type: (int, Any) -> Offer
"""
Replace the container's throughput
:param throughput: The throughput to be set (an integer).
:param response_hook: a callable invoked with the response metadata
:returns: Offer for the container, updated with new throughput.
:raises ~azure.cosmos.errors.CosmosHttpResponseError: No offer exists for the container
or the offer could not be updated.
:rtype: ~azure.cosmos.Offer
"""
response_hook = kwargs.pop('response_hook', None)
properties = self._get_properties()
link = properties["_self"]
query_spec = {
"query": "SELECT * FROM root r WHERE r.resource=@link",
"parameters": [{"name": "@link", "value": link}],
}
offers = list(self.client_connection.QueryOffers(query_spec, **kwargs))
if not offers:
raise CosmosResourceNotFoundError(
status_code=StatusCodes.NOT_FOUND,
message="Could not find Offer for container " + self.container_link)
        new_offer = offers[0].copy()
        new_offer["content"]["offerThroughput"] = throughput
        data = self.client_connection.ReplaceOffer(offer_link=offers[0]["_self"], offer=new_offer, **kwargs)
if response_hook:
response_hook(self.client_connection.last_response_headers, data)
return Offer(offer_throughput=data["content"]["offerThroughput"], properties=data)
@distributed_trace
def list_conflicts(self, max_item_count=None, **kwargs):
# type: (Optional[int], Any) -> Iterable[Dict[str, Any]]
"""
List all conflicts in the container.
:param max_item_count: Max number of items to be returned in the enumeration operation.
:param feed_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:returns: An Iterable of conflicts (dicts).
:rtype: Iterable[dict[str, Any]]
"""
feed_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
if max_item_count is not None:
feed_options["maxItemCount"] = max_item_count
result = self.client_connection.ReadConflicts(
collection_link=self.container_link, feed_options=feed_options, **kwargs
)
if response_hook:
response_hook(self.client_connection.last_response_headers, result)
return result
@distributed_trace
def query_conflicts(
self,
query, # type: str
parameters=None, # type: Optional[List[str]]
enable_cross_partition_query=None, # type: Optional[bool]
partition_key=None, # type: Optional[Any]
max_item_count=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable[Dict[str, Any]]
"""
Return all conflicts matching the given `query`.
:param query: The Azure Cosmos DB SQL query to execute.
:param parameters: Optional array of parameters to the query. Ignored if no query is provided.
:param partition_key: Specifies the partition key value for the item.
:param enable_cross_partition_query: Allows sending of more than one request to execute
the query in the Azure Cosmos DB service.
More than one request is necessary if the query is not scoped to single partition key value.
:param max_item_count: Max number of items to be returned in the enumeration operation.
:param feed_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:returns: An Iterable of conflicts (dicts).
:rtype: Iterable[dict[str, Any]]
"""
feed_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
if max_item_count is not None:
feed_options["maxItemCount"] = max_item_count
if enable_cross_partition_query is not None:
feed_options["enableCrossPartitionQuery"] = enable_cross_partition_query
if partition_key is not None:
feed_options["partitionKey"] = self._set_partition_key(partition_key)
result = self.client_connection.QueryConflicts(
collection_link=self.container_link,
query=query if parameters is None else dict(query=query, parameters=parameters),
options=feed_options,
**kwargs
)
if response_hook:
response_hook(self.client_connection.last_response_headers, result)
return result
@distributed_trace
def get_conflict(self, conflict, partition_key, **kwargs):
# type: (Union[str, Dict[str, Any]], Any, Any) -> Dict[str, str]
"""
Get the conflict identified by `conflict`.
:param conflict: The ID (name) or dict representing the conflict to retrieve.
:param partition_key: Partition key for the conflict to retrieve.
:param request_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:returns: A dict representing the retrieved conflict.
:raises ~azure.cosmos.errors.CosmosHttpResponseError: The given conflict couldn't be retrieved.
:rtype: dict[str, Any]
"""
request_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
if partition_key:
request_options["partitionKey"] = self._set_partition_key(partition_key)
result = self.client_connection.ReadConflict(
conflict_link=self._get_conflict_link(conflict), options=request_options, **kwargs
)
if response_hook:
response_hook(self.client_connection.last_response_headers, result)
return result
@distributed_trace
def delete_conflict(self, conflict, partition_key, **kwargs):
# type: (Union[str, Dict[str, Any]], Any, Any) -> None
"""
Delete the specified conflict from the container.
:param conflict: The ID (name) or dict representing the conflict to be deleted.
:param partition_key: Partition key for the conflict to delete.
:param request_options: Dictionary of additional properties to be used for the request.
:param response_hook: a callable invoked with the response metadata
:raises ~azure.cosmos.errors.CosmosHttpResponseError: The conflict wasn't deleted successfully.
:raises ~azure.cosmos.errors.CosmosResourceNotFoundError: The conflict does not exist in the container.
:rtype: None
"""
request_options = build_options(kwargs)
response_hook = kwargs.pop('response_hook', None)
if partition_key:
request_options["partitionKey"] = self._set_partition_key(partition_key)
result = self.client_connection.DeleteConflict(
conflict_link=self._get_conflict_link(conflict), options=request_options, **kwargs
)
if response_hook:
response_hook(self.client_connection.last_response_headers, result)
|
[
"[email protected]"
] | |
cb2f886ed26850bfebfaf4e3a00a9e730652e300
|
cc086a96967761f520c24ce3b22bacecb673cbf2
|
/chec_operator/threads/observation.py
|
877c8afa6dcb8e097cf23a53a3504277d6791849
|
[] |
no_license
|
watsonjj/chec_operator
|
39524405b3c6a55fe7fa3e8353da5f456f76a27d
|
c537a1737a53fe996652c793c09f5a33cd03e208
|
refs/heads/master
| 2020-04-18T02:27:52.730614 | 2019-01-23T10:41:13 | 2019-01-23T10:41:13 | 167,163,531 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,722 |
py
|
import threading
from time import sleep, ctime, time
from datetime import datetime
from chec_operator.utils.enums import CameraState
class ObservingThread(threading.Thread):
def __init__(self, parent_handler, timedelta, triggerdelta):
print("Creating observation thread")
self.parent_handler = parent_handler
self.timedelta = timedelta
self.triggerdelta = triggerdelta
self.starttime = 0
self.starttrigger = 0
self.currenttimedelta = 0
self.currenttriggerdelta = 0
self.get_trigger = self.parent_handler.get_backplane_trigger_count
super(ObservingThread, self).__init__()
self._observation_interrupt = threading.Event()
self.observation_reached_end = False
self.running = False
self.lock = threading.Lock()
def _check_time(self):
if self.timedelta:
self.currenttimedelta = datetime.now() - self.starttime
return self.currenttimedelta >= self.timedelta
else:
return False
def _check_trigger(self):
if self.triggerdelta:
self.currenttriggerdelta = self.get_trigger() - self.starttrigger
return self.currenttriggerdelta >= self.triggerdelta
else:
return False
def observation_ended(self):
return self._observation_interrupt.isSet()
def interrupt_observation(self):
if self.lock.acquire(False):
print("[WARNING] Interrupting observation thread!")
self._observation_interrupt.set()
self.join()
def run(self):
self.running = True
self.starttime = datetime.now()
self.starttrigger = self.get_trigger()
print("[INFO] Starting observation thread, "
"start time = {}, timedelta = {} s, triggerdelta = {}"
.format(ctime(time()), self.timedelta, self.triggerdelta))
while not self.observation_ended():
if self._check_time() or self._check_trigger():
self._finish_run()
break
self.running = False
print("Observation Ended")
def _finish_run(self):
if self.lock.acquire(False):
print("[INFO] Observation thread complete, "
"end time = {}, duration = {}, triggers {} (end) {} (actual)"
.format(ctime(time()), self.currenttimedelta,
self.currenttriggerdelta,
self.get_trigger() - self.starttrigger))
self.observation_reached_end = True
self.parent_handler.go_to_state(CameraState.READY)
def wait_for_end(self):
self.join()
print("Observation Ended")
|
[
"[email protected]"
] | |
f3d5dcd2e5f655280d986d7d5e685dfb3b524cc2
|
06604399c457d6ec05fa5d5ae458632e2606ec98
|
/torch/utils/_sympy/functions.py
|
3c78e1bebb50e8e34e979cab147e57e371f418bb
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
yncxcw/pytorch
|
6f262f7613caef4c2ce18c85662db9adc6a2a81a
|
a3b72ee354031004edd9b951d0efcdd4508fd578
|
refs/heads/master
| 2023-07-20T21:38:00.718093 | 2023-07-13T03:54:17 | 2023-07-13T03:54:17 | 234,432,318 | 0 | 0 |
NOASSERTION
| 2020-01-16T23:34:42 | 2020-01-16T23:34:41 | null |
UTF-8
|
Python
| false | false | 5,787 |
py
|
import sympy
from sympy.core.logic import fuzzy_and, fuzzy_or
__all__ = ["FloorDiv", "ModularIndexing", "CleanDiv", "CeilDiv", "LShift", "RShift"]
class FloorDiv(sympy.Function):
"""
We maintain this so that:
1. We can use divisibility guards to simplify FloorDiv(a, b) to a / b.
2. Printing out the expression is nicer (compared to say, representing a//b as (a - a % b) / b)
"""
nargs = (2,)
precedence = 50 # precedence of mul # noqa: F811
# Default return type for SymPy assumptions.
# https://docs.sympy.org/latest/guides/assumptions.html#implementing-assumptions-handlers
is_real = True
@property
def base(self):
return self.args[0]
@property
def divisor(self):
return self.args[1]
def _sympystr(self, printer):
base = printer.parenthesize(self.base, self.precedence)
divisor = printer.parenthesize(self.divisor, self.precedence)
return f"({base}//{divisor})"
# SymPy assumptions based on argument types.
def _eval_is_real(self):
return fuzzy_or([self.base.is_real, self.divisor.is_real])
def _eval_is_integer(self):
return fuzzy_and([self.base.is_integer, self.divisor.is_integer])
# Automatic evaluation.
# https://docs.sympy.org/latest/guides/custom-functions.html#best-practices-for-eval
@classmethod
def eval(cls, base, divisor):
def check_supported_type(x):
if (x.is_integer is False and x.is_real is False and x.is_complex) or x.is_Boolean:
raise TypeError(
f"unsupported operand type(s) for //: "
f"'{type(base).__name__}' and '{type(divisor).__name__}'"
f", expected integer or real")
check_supported_type(base)
check_supported_type(divisor)
# We don't provide the same error message as in Python because SymPy
# makes it difficult to check the types.
if divisor.is_zero:
raise ZeroDivisionError("division by zero")
if base.is_zero:
return sympy.S.Zero
if base.is_integer and divisor == 1:
return base
if base.is_real and divisor == 1:
return sympy.floor(base)
if isinstance(base, sympy.Integer) and isinstance(divisor, sympy.Integer):
return base // divisor
if isinstance(base, (sympy.Integer, sympy.Float)) and isinstance(divisor, (sympy.Integer, sympy.Float)):
return sympy.floor(base / divisor)
if isinstance(base, FloorDiv):
return FloorDiv(base.args[0], base.args[1] * divisor)
if isinstance(base, sympy.Add):
for a in base.args:
gcd = sympy.gcd(a, divisor)
if gcd == divisor:
return FloorDiv(base - a, divisor) + a / gcd
gcd = sympy.gcd(base, divisor)
if gcd != 1:
return FloorDiv(
sympy.simplify(base / gcd), sympy.simplify(divisor / gcd)
)
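# Illustrative sketch (editor's addition, not in the original file): a few of
# the simplifications FloorDiv.eval performs, assuming a standard sympy install.
def _demo_floordiv():
    x = sympy.Symbol("x", integer=True)
    assert FloorDiv(x, 1) == x                                # divisor 1 is a no-op
    assert FloorDiv(sympy.Integer(7), sympy.Integer(2)) == 3  # concrete integers fold
    assert FloorDiv(FloorDiv(x, 2), 3) == FloorDiv(x, 6)      # nested divisions collapse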
class ModularIndexing(sympy.Function):
"""
ModularIndexing(a, b, c) => (a // b) % c
"""
nargs = (3,)
is_integer = True
@classmethod
def eval(cls, base, divisor, modulus):
if base == 0 or modulus == 1:
return sympy.Integer(0)
if (
isinstance(base, sympy.Integer)
and isinstance(divisor, sympy.Integer)
and isinstance(modulus, sympy.Integer)
):
return (base // divisor) % modulus
if divisor != 1:
gcd = sympy.gcd(base, divisor)
if gcd != 1:
return ModularIndexing(
sympy.simplify(base / gcd), sympy.simplify(divisor / gcd), modulus
)
if isinstance(base, sympy.Add):
new_terms = []
all_positive = True
for term in base.args:
if sympy.gcd(term, modulus * divisor) != modulus * divisor:
if (isinstance(term, sympy.Integer) and term < 0) or (
isinstance(term, sympy.Mul)
and isinstance(term.args[0], sympy.Integer)
and term.args[0] < 0
):
# workaround for https://github.com/openai/triton/issues/619,
# if there are negative terms, // produces wrong result
# TODO if https://github.com/openai/triton/issues/619 is fixed
# this optimization would become valid
all_positive = False
break
else:
new_terms.append(term)
if len(new_terms) != len(base.args) and all_positive:
return ModularIndexing(sum(new_terms), divisor, modulus)
if isinstance(base, FloorDiv):
return ModularIndexing(base.args[0], base.args[1] * divisor, modulus)
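# Illustrative sketch (editor's addition): for concrete Integers, ModularIndexing
# folds exactly as the docstring states, (a // b) % c.
def _demo_modular_indexing():
    assert ModularIndexing(sympy.Integer(10), sympy.Integer(3), sympy.Integer(2)) == (10 // 3) % 2
    assert ModularIndexing(sympy.Integer(5), sympy.Integer(3), sympy.Integer(1)) == 0  # modulus 1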
class CleanDiv(FloorDiv):
"""
Div where we can assume no rounding.
This is to enable future optimizations.
"""
pass
class CeilDiv(sympy.Function):
"""
Div used in indexing that rounds up.
"""
is_integer = True
def __new__(cls, base, divisor):
if sympy.gcd(base, divisor) == divisor:
return CleanDiv(base, divisor)
else:
return FloorDiv(base + (divisor - 1), divisor)
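# Illustrative sketch (editor's addition): CeilDiv rewrites itself on
# construction, so instances are never actually of type CeilDiv.
def _demo_ceildiv():
    assert CeilDiv(sympy.Integer(7), sympy.Integer(2)) == 4  # FloorDiv(7 + 1, 2)
    assert CeilDiv(sympy.Integer(8), sympy.Integer(2)) == 4  # exact division -> CleanDiv(8, 2)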
class LShift(sympy.Function):
@classmethod
def eval(cls, base, shift):
if shift < 0:
raise ValueError('negative shift count')
return base * 2 ** shift
class RShift(sympy.Function):
@classmethod
def eval(cls, base, shift):
if shift < 0:
raise ValueError('negative shift count')
return base // 2 ** shift
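# Illustrative sketch (editor's addition): LShift/RShift mirror Python's
# << and >> operators for non-negative shift counts.
def _demo_shifts():
    assert LShift(sympy.Integer(3), sympy.Integer(2)) == 12  # 3 << 2
    assert RShift(sympy.Integer(12), sympy.Integer(2)) == 3  # 12 >> 2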
|
[
"[email protected]"
] | |
af2729e0f3c3c35ad20460334df67ddb78436aec
|
6635686859b272d291d0ba3520ccd03cdc80a349
|
/DT/threadingtext.py
|
5b21c27fdb9a81a7ecb35c0e0d9c9ebe52c19d32
|
[] |
no_license
|
yangrencong/web_spiders
|
ac15c491f60e489000e5312c999f02e6c4fdafdf
|
69fdc6eeb5ad19283690c056064f8853e0256445
|
refs/heads/master
| 2020-03-28T18:45:50.800667 | 2018-10-26T02:50:16 | 2018-10-26T02:50:16 | 148,908,630 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 995 |
py
|
#!/usr/bin/env python
# -*- coding=utf8 -*-
"""
# Author: Mr.yang
# Created Time : 2018/10/10 Wednesday 12:26:07
# File Name: threadingtext.py
# Description:
# Editortool: vim8.0
"""
import threading
import time
class myThread(threading.Thread):
    def __init__(self, name, delay):
threading.Thread.__init__(self)
self.name = name
self.delay = delay
def run(self):
print("Starting " + self.name)
print_time(self.name ,self.delay)
print("Exiting " + self.name)
def print_time(threadName, delay):
counter = 0
while counter < 3:
time.sleep(delay)
        print(threadName, time.ctime())
counter += 1
threads = []
# Create the new threads
thread1 = myThread("Thread-1", 1)
thread2 = myThread("Thread-2", 2)
# Start the new threads
thread1.start()
thread2.start()
# Add the threads to the thread list
threads.append(thread1)
threads.append(thread2)
# Wait for all threads to finish
for t in threads:
t.join()
print("Exiting main thread")
|
[
"[email protected]"
] | |
a8b8d378ab3f6f1387d52577aa1bf5431858cd0c
|
c41471781f65d38d9010450b6c9e17f2346a551b
|
/openstack-dashboard/templates/icehouse/local_settings.py
|
f45295ea2a3762091554852d5a4f73c472022cc5
|
[
"Apache-2.0"
] |
permissive
|
juanarturovargas/openstack-juju
|
b6854e2feea615404c053e9c754e4d7997c8a6a5
|
21b1aef8aa51c3c32cb1efd1b8cad7865c4d40a0
|
refs/heads/master
| 2022-12-13T15:31:53.383963 | 2017-05-05T19:18:55 | 2017-05-05T19:18:55 | 90,163,436 | 0 | 1 |
NOASSERTION
| 2022-11-20T08:41:15 | 2017-05-03T15:17:34 |
Python
|
UTF-8
|
Python
| false | false | 17,009 |
py
|
import os
from django.utils.translation import ugettext_lazy as _
{% if use_syslog %}
from logging.handlers import SysLogHandler
{% endif %}
from openstack_dashboard import exceptions
DEBUG = {{ debug }}
TEMPLATE_DEBUG = DEBUG
# Required for Django 1.5.
# If horizon is running in production (DEBUG is False), set this
# with the list of host/domain names that the application can serve.
# For more information see:
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
#ALLOWED_HOSTS = ['horizon.example.com', ]
# Set SSL proxy settings:
# For Django 1.4+ pass this header from the proxy after terminating the SSL,
# and don't forget to strip it from the client's request.
# For more information see:
# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# If Horizon is being served through SSL, then uncomment the following two
# settings to better secure the cookies from security exploits
#CSRF_COOKIE_SECURE = True
#SESSION_COOKIE_SECURE = True
# Overrides for OpenStack API versions. Use this setting to force the
# OpenStack dashboard to use a specific API version for a given service API.
# NOTE: The version should be formatted as it appears in the URL for the
# service API. For example, The identity service APIs have inconsistent
# use of the decimal point, so valid options would be "2.0" or "3".
# OPENSTACK_API_VERSIONS = {
# "identity": 3
# }
# Set this to True if running on multi-domain model. When this is enabled, it
# will require the user to enter the Domain name in addition to username for login.
# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
# Overrides the default domain used when running on single-domain model
# with Keystone V3. All entities will be created in the default domain.
# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
# Set Console type:
# valid options would be "AUTO", "VNC" or "SPICE"
# CONSOLE_TYPE = "AUTO"
# Default OpenStack Dashboard configuration.
HORIZON_CONFIG = {
'dashboards': ('project', 'admin', 'settings',),
'default_dashboard': 'project',
'user_home': 'openstack_dashboard.views.get_user_home',
'ajax_queue_limit': 10,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'help_url': "http://docs.openstack.org",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
}
# Specify a regular expression to validate user passwords.
# HORIZON_CONFIG["password_validator"] = {
# "regex": '.*',
# "help_text": _("Your password does not meet the requirements.")
# }
# Disable simplified floating IP address management for deployments with
# multiple floating IP pools or complex network requirements.
# HORIZON_CONFIG["simple_ip_management"] = False
# Turn off browser autocompletion for the login form if so desired.
# HORIZON_CONFIG["password_autocomplete"] = "off"
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
# Set custom secret key:
# You can either set it to a specific value or you can let horizon generate a
# default secret key that is unique on this machine, i.e. regardless of the
# number of Python WSGI workers (if used behind Apache+mod_wsgi). However, there
# may be situations where you would want to set this explicitly, e.g. when
# multiple dashboard instances are distributed on different machines (usually
# behind a load-balancer). Either you have to make sure that a session gets all
# requests routed to the same dashboard instance or you set the same SECRET_KEY
# for all of them.
SECRET_KEY = "{{ secret }}"
# We recommend you use memcached for development; otherwise after every reload
# of the django development server, you will have to log in again. To use
# memcached set CACHES to something like
CACHES = {
'default': {
'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION' : '127.0.0.1:11211',
}
}
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# Configure these for your outgoing email host
# EMAIL_HOST = 'smtp.my-company.com'
# EMAIL_PORT = 25
# EMAIL_HOST_USER = 'djangomail'
# EMAIL_HOST_PASSWORD = 'top-secret!'
# For multiple regions uncomment this configuration, and add (endpoint, title).
# AVAILABLE_REGIONS = [
# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
# ]
{% if regions|length > 1 -%}
AVAILABLE_REGIONS = [
{% for region in regions -%}
('{{ region.endpoint }}', '{{ region.title }}'),
{% endfor -%}
]
{% endif -%}
OPENSTACK_HOST = "{{ service_host }}"
OPENSTACK_KEYSTONE_URL = "{{ service_protocol }}://%s:{{ service_port }}/v2.0" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ default_role }}"
# Disable SSL certificate checks (useful for self-signed certificates):
# OPENSTACK_SSL_NO_VERIFY = True
# The CA certificate to use to verify SSL connections
# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
# capabilities of the auth backend for Keystone.
# If Keystone has been configured to use LDAP as the auth backend then set
# can_edit_user to False and name to 'ldap'.
#
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True
}
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': False,
'can_set_password': False,
}
# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
# services provided by neutron. Options currently available are load
# balancer service, security groups, quotas.
OPENSTACK_NEUTRON_NETWORK = {
'enable_lb': {{ neutron_network_lb }},
'enable_quotas': True,
'enable_security_group': True,
'enable_firewall': {{ neutron_network_firewall }},
'enable_vpn': {{ neutron_network_vpn }},
# The profile_support option is used to detect if an external router can be
# configured via the dashboard. When using specific plugins the
# profile_support can be turned on if needed.
#'profile_support': None,
#'profile_support': 'cisco', # Example of value set to support Cisco
{% if support_profile -%}
'profile_support': '{{ support_profile }}',
{% else -%}
'profile_support': None,
{% endif -%}
}
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
OPENSTACK_IMAGE_BACKEND = {
'image_formats': [
('', ''),
('aki', _('AKI - Amazon Kernel Image')),
('ami', _('AMI - Amazon Machine Image')),
('ari', _('ARI - Amazon Ramdisk Image')),
('iso', _('ISO - Optical Disk Image')),
('qcow2', _('QCOW2 - QEMU Emulator')),
('raw', _('Raw')),
('vdi', _('VDI')),
('vhd', _('VHD')),
('vmdk', _('VMDK'))
]
}
# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
# image custom property attributes that appear on image detail pages.
IMAGE_CUSTOM_PROPERTY_TITLES = {
"architecture": _("Architecture"),
"kernel_id": _("Kernel ID"),
"ramdisk_id": _("Ramdisk ID"),
"image_state": _("Euca2ools state"),
"project_id": _("Project ID"),
"image_type": _("Image Type")
}
# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is 'publicURL'.
#OPENSTACK_ENDPOINT_TYPE = "publicURL"
{% if primary_endpoint -%}
OPENSTACK_ENDPOINT_TYPE = "{{ primary_endpoint }}"
{% endif -%}
# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is None. This
# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
#SECONDARY_ENDPOINT_TYPE = "publicURL"
{% if secondary_endpoint -%}
SECONDARY_ENDPOINT_TYPE = "{{ secondary_endpoint }}"
{% endif -%}
# The number of objects (Swift containers/objects or images) to display
# on a single page before providing a paging element (a "more" link)
# to paginate results.
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
# The timezone of the server. This should correspond with the timezone
# of your entire OpenStack installation, and hopefully be in UTC.
TIME_ZONE = "UTC"
# When launching an instance, the menu of available flavors is
# sorted by RAM usage, ascending. Provide a callback method here
# (and/or a flag for reverse sort) for the sorted() method if you'd
# like a different behaviour. For more info, see
# http://docs.python.org/2/library/functions.html#sorted
# CREATE_INSTANCE_FLAVOR_SORT = {
# 'key': my_awesome_callback_method,
# 'reverse': False,
# }
# The Horizon Policy Enforcement engine uses these values to load per service
# policy rule files. The content of these files should match the files the
# OpenStack services are using to determine role based access control in the
# target installation.
# Path to directory containing policy.json files
#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
# Map of local copy of service policy files
#POLICY_FILES = {
# 'identity': 'keystone_policy.json',
# 'compute': 'nova_policy.json'
#}
# Trove user and database extension support. By default support for
# creating users and databases on database instances is turned on.
# To disable these extensions set the permission here to something
# unusable such as ["!"].
# TROVE_ADD_USER_PERMS = []
# TROVE_ADD_DATABASE_PERMS = []
LOGGING = {
'version': 1,
# When set to True this will disable all logging except
# for loggers specified in this configuration dictionary. Note that
# if nothing is specified here and disable_existing_loggers is True,
# django.db.backends will still log unless it is disabled explicitly.
'disable_existing_loggers': False,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
# Set the level to "DEBUG" for verbose output logging.
'level': 'INFO',
'class': 'logging.StreamHandler',
},
{% if use_syslog %}
'syslog': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
}
{% endif %}
},
'loggers': {
# Logging from django.db.backends is VERY verbose, send to null
# by default.
'django.db.backends': {
'handlers': ['null'],
'propagate': False,
},
'requests': {
'handlers': ['null'],
'propagate': False,
},
'horizon': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'openstack_dashboard': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'openstack_auth': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'novaclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'cinderclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'keystoneclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'glanceclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'heatclient': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
},
'nose.plugins.manager': {
{% if use_syslog %}
'handlers': ['syslog'],
{% else %}
'handlers': ['console'],
{% endif %}
'propagate': False,
}
}
}
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': 'ALL TCP',
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'all_udp': {
'name': 'ALL UDP',
'ip_protocol': 'udp',
'from_port': '1',
'to_port': '65535',
},
'all_icmp': {
'name': 'ALL ICMP',
'ip_protocol': 'icmp',
'from_port': '-1',
'to_port': '-1',
},
'ssh': {
'name': 'SSH',
'ip_protocol': 'tcp',
'from_port': '22',
'to_port': '22',
},
'smtp': {
'name': 'SMTP',
'ip_protocol': 'tcp',
'from_port': '25',
'to_port': '25',
},
'dns': {
'name': 'DNS',
'ip_protocol': 'tcp',
'from_port': '53',
'to_port': '53',
},
'http': {
'name': 'HTTP',
'ip_protocol': 'tcp',
'from_port': '80',
'to_port': '80',
},
'pop3': {
'name': 'POP3',
'ip_protocol': 'tcp',
'from_port': '110',
'to_port': '110',
},
'imap': {
'name': 'IMAP',
'ip_protocol': 'tcp',
'from_port': '143',
'to_port': '143',
},
'ldap': {
'name': 'LDAP',
'ip_protocol': 'tcp',
'from_port': '389',
'to_port': '389',
},
'https': {
'name': 'HTTPS',
'ip_protocol': 'tcp',
'from_port': '443',
'to_port': '443',
},
'smtps': {
'name': 'SMTPS',
'ip_protocol': 'tcp',
'from_port': '465',
'to_port': '465',
},
'imaps': {
'name': 'IMAPS',
'ip_protocol': 'tcp',
'from_port': '993',
'to_port': '993',
},
'pop3s': {
'name': 'POP3S',
'ip_protocol': 'tcp',
'from_port': '995',
'to_port': '995',
},
'ms_sql': {
'name': 'MS SQL',
'ip_protocol': 'tcp',
        'from_port': '1433',
        'to_port': '1433',
},
'mysql': {
'name': 'MYSQL',
'ip_protocol': 'tcp',
'from_port': '3306',
'to_port': '3306',
},
'rdp': {
'name': 'RDP',
'ip_protocol': 'tcp',
'from_port': '3389',
'to_port': '3389',
},
}
FLAVOR_EXTRA_KEYS = {
'flavor_keys': [
('quota:read_bytes_sec', _('Quota: Read bytes')),
('quota:write_bytes_sec', _('Quota: Write bytes')),
('quota:cpu_quota', _('Quota: CPU')),
('quota:cpu_period', _('Quota: CPU period')),
('quota:inbound_average', _('Quota: Inbound average')),
('quota:outbound_average', _('Quota: Outbound average')),
]
}
{% if ubuntu_theme %}
# Enable the Ubuntu theme if it is present.
try:
from ubuntu_theme import *
except ImportError:
pass
{% endif %}
# Default Ubuntu apache configuration uses /horizon as the application root.
# Configure auth redirects here accordingly.
{% if webroot == "/" %}
LOGIN_URL='/auth/login/'
LOGOUT_URL='/auth/logout/'
{% else %}
LOGIN_URL='{{ webroot }}/auth/login/'
LOGOUT_URL='{{ webroot }}/auth/logout/'
{% endif %}
LOGIN_REDIRECT_URL='{{ webroot }}'
# The Ubuntu package includes pre-compressed JS and compiled CSS to allow
# offline compression by default. To enable online compression, install
# the node-less package and enable the following option.
COMPRESS_OFFLINE = {{ compress_offline }}
# By default, validation of the HTTP Host header is disabled. Production
# installations should have this set accordingly. For more information
# see https://docs.djangoproject.com/en/dev/ref/settings/.
ALLOWED_HOSTS = '*'
{% if password_retrieve %}
OPENSTACK_ENABLE_PASSWORD_RETRIEVE = True
{% endif %}
{{ settings|join('\n\n') }}
|
[
"kataguruma1"
] |
kataguruma1
|
7fc024f18bdc5289a4cad605dbc8a2f6fa792e74
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/contrib/eager/python/tfe.py
|
c441ab87be7e0aebadefe92023f89bfd67ff471e
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 129 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:9b7bd976495c4645582fde2d7fcf488a311648b43813cff249462fccfa19224a
size 5928
|
[
"github@cuba12345"
] |
github@cuba12345
|
4353deb50a51a18cfc392b8d5fada6467c849fe1
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/worklink_write_3/domain_associate.py
|
622afc66bafaf4062a1575d617c77e954bc7ee2e
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,279 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_three_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/worklink/associate-domain.html
if __name__ == '__main__':
"""
describe-domain : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/worklink/describe-domain.html
disassociate-domain : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/worklink/disassociate-domain.html
list-domains : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/worklink/list-domains.html
"""
parameter_display_string = """
# fleet-arn : The Amazon Resource Name (ARN) of the fleet.
# domain-name : The fully qualified domain name (FQDN).
# acm-certificate-arn : The ARN of an issued ACM certificate that is valid for the domain being associated.
"""
add_option_dict = {}
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_three_parameter("worklink", "associate-domain", "fleet-arn", "domain-name", "acm-certificate-arn", add_option_dict)
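# Illustrative note (editor's addition): the wrapped operation corresponds to
# the raw AWS CLI call documented at the URL above, i.e.
#   aws worklink associate-domain --fleet-arn <arn> \
#       --domain-name <fqdn> --acm-certificate-arn <certificate-arn>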
|
[
"[email protected]"
] | |
9adc81d26ca9708c7ee07b95c8795d117a6c05e9
|
987a82368d3a15b618ff999f28dc16b89e50f675
|
/plaso/parsers/winreg_plugins/shutdown.py
|
f14cb1abcb7795e44c6e63f3728e75b94987c6ff
|
[
"Apache-2.0"
] |
permissive
|
arunthirukkonda/plaso
|
185b30ab4ec90fcc2d280b3c89c521c9eef7b7ab
|
846fc2fce715e1f78b11f375f6fe4e11b5c284ba
|
refs/heads/master
| 2021-08-30T15:21:12.267584 | 2017-12-18T12:33:08 | 2017-12-18T12:33:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,893 |
py
|
# -*- coding: utf-8 -*-
"""Windows Registry plugin for parsing the last shutdown time of a system."""
from __future__ import unicode_literals
import construct
from dfdatetime import filetime as dfdatetime_filetime
from dfdatetime import semantic_time as dfdatetime_semantic_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import interface
__author__ = 'Preston Miller, dpmforensics.com, github.com/prmiller91'
class ShutdownWindowsRegistryEventData(events.EventData):
"""Shutdown Windows Registry event data.
Attributes:
key_path (str): Windows Registry key path.
value_name (str): name of the Windows Registry value.
"""
DATA_TYPE = 'windows:registry:shutdown'
def __init__(self):
"""Initializes event data."""
super(ShutdownWindowsRegistryEventData, self).__init__(
data_type=self.DATA_TYPE)
self.key_path = None
self.value_name = None
class ShutdownPlugin(interface.WindowsRegistryPlugin):
"""Windows Registry plugin for parsing the last shutdown time of a system."""
NAME = 'windows_shutdown'
DESCRIPTION = 'Parser for ShutdownTime Registry value.'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Control\\Windows')])
_UINT64_STRUCT = construct.ULInt64('value')
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a ShutdownTime Windows Registry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
shutdown_value = registry_key.GetValueByName('ShutdownTime')
if not shutdown_value:
return
# Directly parse the Windows Registry value data in case it is defined
# as binary data.
try:
timestamp = self._UINT64_STRUCT.parse(shutdown_value.data)
except construct.FieldError as exception:
timestamp = None
parser_mediator.ProduceExtractionError(
'unable to determine shutdown timestamp with error: {0!s}'.format(
exception))
if not timestamp:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event_data = ShutdownWindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = shutdown_value.offset
event_data.value_name = shutdown_value.name
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_SHUTDOWN)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg.WinRegistryParser.RegisterPlugin(ShutdownPlugin)
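# Illustrative sketch (editor's addition): how the 8-byte ShutdownTime value is
# decoded, assuming construct 2.x where ULInt64 is available. The FILETIME value
# below is arbitrary (a count of 100ns intervals since 1601-01-01).
def _demo_parse_shutdown_value():
    import struct
    timestamp = 130958160000000000
    data = struct.pack('<Q', timestamp)
    assert ShutdownPlugin._UINT64_STRUCT.parse(data) == timestamp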
|
[
"[email protected]"
] | |
d5aa6095ffe361c6c24f7e7ace9e878dcd34a356
|
8a452b71e3942d762fc2e86e49e72eac951b7eba
|
/leetcode/editor/en/[1065]Index Pairs of a String.py
|
9596550ca2c48f4cb14e3df379385e19b37fe19c
|
[] |
no_license
|
tainenko/Leetcode2019
|
7bea3a6545f97c678a176b93d6622f1f87e0f0df
|
8595b04cf5a024c2cd8a97f750d890a818568401
|
refs/heads/master
| 2023-08-02T18:10:59.542292 | 2023-08-02T17:25:49 | 2023-08-02T17:25:49 | 178,761,023 | 5 | 0 | null | 2019-08-27T10:59:12 | 2019-04-01T01:04:21 |
JavaScript
|
UTF-8
|
Python
| false | false | 1,478 |
py
|
# Given a string text and an array of strings words, return an array of all
# index pairs [i, j] so that the substring text[i...j] is in words.
#
# Return the pairs [i, j] in sorted order (i.e., sort them by their first
# coordinate, and in case of ties sort them by their second coordinate).
#
#
# Example 1:
#
#
# Input: text = "thestoryofleetcodeandme", words = ["story","fleet","leetcode"]
# Output: [[3,7],[9,13],[10,17]]
#
#
# Example 2:
#
#
# Input: text = "ababa", words = ["aba","ab"]
# Output: [[0,1],[0,2],[2,3],[2,4]]
# Explanation: Notice that matches can overlap, see "aba" is found in [0,2] and
# [2,4].
#
#
#
# Constraints:
#
#
# 1 <= text.length <= 100
# 1 <= words.length <= 20
# 1 <= words[i].length <= 50
# text and words[i] consist of lowercase English letters.
# All the strings of words are unique.
#
# Related Topics Array String Trie Sorting 👍 203 👎 73
# leetcode submit region begin(Prohibit modification and deletion)
from typing import List
class Solution:
def indexPairs(self, text: str, words: List[str]) -> List[List[int]]:
res = []
words.sort(key=lambda x: len(x))
for i in range(len(text)):
for word in words:
if i + len(word) > len(text):
continue
if word == text[i:i + len(word)]:
res.append([i, i + len(word) - 1])
return res
# leetcode submit region end(Prohibit modification and deletion)
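if __name__ == "__main__":
    # Editor's addition: sanity check against the prompt's second example.
    assert Solution().indexPairs("ababa", ["aba", "ab"]) == [[0, 1], [0, 2], [2, 3], [2, 4]]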
|
[
"[email protected]"
] | |
d4d00147e745d9e951765b3fc1fd6c50c016f113
|
3c9011b549dd06b6344c6235ed22b9dd483365d1
|
/Agenda/contatos/migrations/0003_contato_foto.py
|
097090b85a3f8c6636e29e52edc33d1acc2ee0e7
|
[] |
no_license
|
joaoo-vittor/estudo-python
|
1411f4c3620bbc5f6b7c674a096cae8f90f0db8d
|
5562d823dd574d7df49fddca87a1fbd319356969
|
refs/heads/master
| 2023-05-31T17:59:16.752835 | 2021-06-25T04:54:56 | 2021-06-25T04:54:56 | 292,372,669 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 402 |
py
|
# Generated by Django 3.2 on 2021-05-16 01:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contatos', '0002_contato_mostrar'),
]
operations = [
migrations.AddField(
model_name='contato',
name='foto',
field=models.ImageField(blank=True, upload_to='fotos/%Y/%m'),
),
]
|
[
"[email protected]"
] | |
02922e6762aefcb1ca578359e971f39a28df7916
|
d0eb582894eff3c44e3de4bd50f571f9d9ab3a02
|
/venv/lib/python3.7/site-packages/tox/config.py
|
4d5e02be935189dbd4ec6eaf1666cc27c16ee562
|
[
"MIT"
] |
permissive
|
tdle94/app-store-scrapper
|
159187ef3825213d40425215dd9c9806b415769e
|
ed75880bac0c9ef685b2c1bf57a6997901abface
|
refs/heads/master
| 2022-12-20T21:10:59.621305 | 2020-10-28T00:32:21 | 2020-10-28T00:32:21 | 247,291,364 | 1 | 2 |
MIT
| 2022-12-08T03:53:08 | 2020-03-14T14:25:44 |
Python
|
UTF-8
|
Python
| false | false | 57,366 |
py
|
from __future__ import print_function
import argparse
import itertools
import os
import random
import re
import shlex
import string
import sys
import warnings
from collections import OrderedDict
from fnmatch import fnmatchcase
from subprocess import list2cmdline
import pkg_resources
import pluggy
import py
import toml
import tox
from tox.constants import INFO
from tox.interpreters import Interpreters, NoInterpreterInfo
hookimpl = tox.hookimpl
"""DEPRECATED - REMOVE - this is left for compatibility with plugins importing this from here.
Instead create a hookimpl in your code with:
import pluggy
hookimpl = pluggy.HookimplMarker("tox")
"""
default_factors = tox.PYTHON.DEFAULT_FACTORS
"""DEPRECATED MOVE - please update to new location."""
def get_plugin_manager(plugins=()):
# initialize plugin manager
import tox.venv
pm = pluggy.PluginManager("tox")
pm.add_hookspecs(tox.hookspecs)
pm.register(tox.config)
pm.register(tox.interpreters)
pm.register(tox.venv)
pm.register(tox.session)
from tox import package
pm.register(package)
pm.load_setuptools_entrypoints("tox")
for plugin in plugins:
pm.register(plugin)
pm.check_pending()
return pm
class Parser:
"""Command line and ini-parser control object."""
def __init__(self):
self.argparser = argparse.ArgumentParser(description="tox options", add_help=False)
self._testenv_attr = []
def add_argument(self, *args, **kwargs):
""" add argument to command line parser. This takes the
same arguments that ``argparse.ArgumentParser.add_argument``.
"""
return self.argparser.add_argument(*args, **kwargs)
def add_testenv_attribute(self, name, type, help, default=None, postprocess=None):
""" add an ini-file variable for "testenv" section.
Types are specified as strings like "bool", "line-list", "string", "argv", "path",
"argvlist".
The ``postprocess`` function will be called for each testenv
like ``postprocess(testenv_config=testenv_config, value=value)``
where ``value`` is the value as read from the ini (or the default value)
and ``testenv_config`` is a :py:class:`tox.config.TestenvConfig` instance
which will receive all ini-variables as object attributes.
Any postprocess function must return a value which will then be set
as the final value in the testenv section.
"""
self._testenv_attr.append(VenvAttribute(name, type, default, help, postprocess))
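    # Illustrative sketch (editor's addition): a plugin would typically call
    # add_testenv_attribute from its tox_addoption hook, e.g.
    #
    #   @tox.hookimpl
    #   def tox_addoption(parser):
    #       parser.add_testenv_attribute(
    #           name="my_flag", type="bool", default=False,
    #           help="flag added by a (hypothetical) plugin")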
def add_testenv_attribute_obj(self, obj):
""" add an ini-file variable as an object.
This works as the ``add_testenv_attribute`` function but expects
"name", "type", "help", and "postprocess" attributes on the object.
"""
assert hasattr(obj, "name")
assert hasattr(obj, "type")
assert hasattr(obj, "help")
assert hasattr(obj, "postprocess")
self._testenv_attr.append(obj)
def parse_cli(self, args):
return self.argparser.parse_args(args)
def _format_help(self):
return self.argparser.format_help()
class VenvAttribute:
def __init__(self, name, type, default, help, postprocess):
self.name = name
self.type = type
self.default = default
self.help = help
self.postprocess = postprocess
class DepOption:
name = "deps"
type = "line-list"
help = "each line specifies a dependency in pip/setuptools format."
default = ()
def postprocess(self, testenv_config, value):
deps = []
config = testenv_config.config
for depline in value:
m = re.match(r":(\w+):\s*(\S+)", depline)
if m:
iname, name = m.groups()
ixserver = config.indexserver[iname]
else:
name = depline.strip()
ixserver = None
# we need to process options, in case they contain a space,
# as the subprocess call to pip install will otherwise fail.
# in case of a short option, we remove the space
for option in tox.PIP.INSTALL_SHORT_OPTIONS_ARGUMENT:
if name.startswith(option):
name = "{}{}".format(option, name[len(option) :].strip())
# in case of a long option, we add an equal sign
for option in tox.PIP.INSTALL_LONG_OPTIONS_ARGUMENT:
name_start = "{} ".format(option)
if name.startswith(name_start):
name = "{}={}".format(option, name[len(option) :].strip())
name = self._replace_forced_dep(name, config)
deps.append(DepConfig(name, ixserver))
return deps
def _replace_forced_dep(self, name, config):
"""Override given dependency config name. Take ``--force-dep-version`` option into account.
:param name: dep config, for example ["pkg==1.0", "other==2.0"].
:param config: ``Config`` instance
:return: the new dependency that should be used for virtual environments
"""
if not config.option.force_dep:
return name
for forced_dep in config.option.force_dep:
if self._is_same_dep(forced_dep, name):
return forced_dep
return name
@classmethod
def _is_same_dep(cls, dep1, dep2):
"""Definitions are the same if they refer to the same package, even if versions differ."""
dep1_name = pkg_resources.Requirement.parse(dep1).project_name
try:
dep2_name = pkg_resources.Requirement.parse(dep2).project_name
except pkg_resources.RequirementParseError:
# we couldn't parse a version, probably a URL
return False
return dep1_name == dep2_name
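    # Illustrative (editor's addition): _is_same_dep compares project names
    # only, so differing version pins still count as the same dependency:
    #   DepOption._is_same_dep("pytest==3.0", "pytest<2.7")   -> True
    #   DepOption._is_same_dep("pytest==3.0", "django>=1.6")  -> False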
class PosargsOption:
name = "args_are_paths"
type = "bool"
default = True
help = "treat positional args in commands as paths"
def postprocess(self, testenv_config, value):
config = testenv_config.config
args = config.option.args
if args:
if value:
args = []
for arg in config.option.args:
if arg and not os.path.isabs(arg):
origpath = os.path.join(config.invocationcwd.strpath, arg)
if os.path.exists(origpath):
arg = os.path.relpath(origpath, testenv_config.changedir.strpath)
args.append(arg)
testenv_config._reader.addsubstitutions(args)
return value
class InstallcmdOption:
name = "install_command"
type = "argv"
default = "python -m pip install {opts} {packages}"
help = "install command for dependencies and package under test."
def postprocess(self, testenv_config, value):
if "{packages}" not in value:
raise tox.exception.ConfigError(
"'install_command' must contain '{packages}' substitution"
)
return value
def parseconfig(args, plugins=()):
"""Parse the configuration file and create a Config object.
:param plugins:
:param list[str] args: list of arguments.
:rtype: :class:`Config`
:raise SystemExit: toxinit file is not found
"""
pm = get_plugin_manager(plugins)
config, option = parse_cli(args, pm)
for config_file in propose_configs(option.configfile):
config_type = config_file.basename
content = None
if config_type == "pyproject.toml":
toml_content = get_py_project_toml(config_file)
try:
content = toml_content["tool"]["tox"]["legacy_tox_ini"]
except KeyError:
continue
ParseIni(config, config_file, content)
pm.hook.tox_configure(config=config) # post process config object
break
else:
msg = "tox config file (either {}) not found"
candidates = ", ".join(INFO.CONFIG_CANDIDATES)
feedback(msg.format(candidates), sysexit=not (option.help or option.helpini))
return config
def get_py_project_toml(path):
with open(str(path)) as file_handler:
config_data = toml.load(file_handler)
return config_data
def propose_configs(cli_config_file):
from_folder = py.path.local()
if cli_config_file is not None:
if os.path.isfile(cli_config_file):
yield py.path.local(cli_config_file)
return
if os.path.isdir(cli_config_file):
from_folder = py.path.local(cli_config_file)
else:
print(
"ERROR: {} is neither file or directory".format(cli_config_file), file=sys.stderr
)
return
for basename in INFO.CONFIG_CANDIDATES:
if from_folder.join(basename).isfile():
yield from_folder.join(basename)
for path in from_folder.parts(reverse=True):
ini_path = path.join(basename)
if ini_path.check():
yield ini_path
def parse_cli(args, pm):
parser = Parser()
pm.hook.tox_addoption(parser=parser)
option = parser.parse_cli(args)
if option.version:
print(get_version_info(pm))
raise SystemExit(0)
interpreters = Interpreters(hook=pm.hook)
config = Config(pluginmanager=pm, option=option, interpreters=interpreters, parser=parser)
return config, option
def feedback(msg, sysexit=False):
print("ERROR: {}".format(msg), file=sys.stderr)
if sysexit:
raise SystemExit(1)
def get_version_info(pm):
out = ["{} imported from {}".format(tox.__version__, tox.__file__)]
plugin_dist_info = pm.list_plugin_distinfo()
if plugin_dist_info:
out.append("registered plugins:")
for mod, egg_info in plugin_dist_info:
source = getattr(mod, "__file__", repr(mod))
out.append(" {}-{} at {}".format(egg_info.project_name, egg_info.version, source))
return "\n".join(out)
class SetenvDict(object):
_DUMMY = object()
def __init__(self, definitions, reader):
self.definitions = definitions
self.reader = reader
self.resolved = {}
self._lookupstack = []
def __repr__(self):
return "{}: {}".format(self.__class__.__name__, self.definitions)
def __contains__(self, name):
return name in self.definitions
def get(self, name, default=None):
try:
return self.resolved[name]
except KeyError:
try:
if name in self._lookupstack:
raise KeyError(name)
val = self.definitions[name]
except KeyError:
return os.environ.get(name, default)
self._lookupstack.append(name)
try:
self.resolved[name] = res = self.reader._replace(val)
finally:
self._lookupstack.pop()
return res
def __getitem__(self, name):
x = self.get(name, self._DUMMY)
if x is self._DUMMY:
raise KeyError(name)
return x
def keys(self):
return self.definitions.keys()
def __setitem__(self, name, value):
self.definitions[name] = value
self.resolved[name] = value
@tox.hookimpl
def tox_addoption(parser):
parser.add_argument(
"--version",
action="store_true",
dest="version",
help="report version information to stdout.",
)
parser.add_argument(
"-h", "--help", action="store_true", dest="help", help="show help about options"
)
parser.add_argument(
"--help-ini", "--hi", action="store_true", dest="helpini", help="show help about ini-names"
)
parser.add_argument(
"-v",
action="count",
dest="verbose_level",
default=0,
help="increase verbosity of reporting output."
"-vv mode turns off output redirection for package installation, "
"above level two verbosity flags are passed through to pip (with two less level)",
)
parser.add_argument(
"-q",
action="count",
dest="quiet_level",
default=0,
help="progressively silence reporting output.",
)
parser.add_argument(
"--showconfig",
action="store_true",
help="show configuration information for all environments. ",
)
parser.add_argument(
"-l",
"--listenvs",
action="store_true",
dest="listenvs",
help="show list of test environments (with description if verbose)",
)
parser.add_argument(
"-a",
"--listenvs-all",
action="store_true",
dest="listenvs_all",
help="show list of all defined environments (with description if verbose)",
)
parser.add_argument(
"-c",
action="store",
default=None,
dest="configfile",
help="config file name or directory with 'tox.ini' file.",
)
parser.add_argument(
"-e",
action="append",
dest="env",
metavar="envlist",
help="work against specified environments (ALL selects all).",
)
parser.add_argument(
"--notest", action="store_true", dest="notest", help="skip invoking test commands."
)
parser.add_argument(
"--sdistonly",
action="store_true",
dest="sdistonly",
help="only perform the sdist packaging activity.",
)
parser.add_argument(
"--parallel--safe-build",
action="store_true",
dest="parallel_safe_build",
help="(deprecated) ensure two tox builds can run in parallel "
"(uses a lock file in the tox workdir with .lock extension)",
)
parser.add_argument(
"--installpkg",
action="store",
default=None,
metavar="PATH",
help="use specified package for installation into venv, instead of creating an sdist.",
)
parser.add_argument(
"--develop",
action="store_true",
dest="develop",
help="install package in the venv using 'setup.py develop' via 'pip -e .'",
)
parser.add_argument(
"-i",
"--index-url",
action="append",
dest="indexurl",
metavar="URL",
help="set indexserver url (if URL is of form name=url set the "
"url for the 'name' indexserver, specifically)",
)
parser.add_argument(
"--pre",
action="store_true",
dest="pre",
help="install pre-releases and development versions of dependencies. "
"This will pass the --pre option to install_command "
"(pip by default).",
)
parser.add_argument(
"-r",
"--recreate",
action="store_true",
dest="recreate",
help="force recreation of virtual environments",
)
parser.add_argument(
"--result-json",
action="store",
dest="resultjson",
metavar="PATH",
help="write a json file with detailed information "
"about all commands and results involved.",
)
# We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED.
parser.add_argument(
"--hashseed",
action="store",
metavar="SEED",
default=None,
help="set PYTHONHASHSEED to SEED before running commands. "
"Defaults to a random integer in the range [1, 4294967295] "
"([1, 1024] on Windows). "
"Passing 'noset' suppresses this behavior.",
)
parser.add_argument(
"--force-dep",
action="append",
metavar="REQ",
default=None,
help="Forces a certain version of one of the dependencies "
"when configuring the virtual environment. REQ Examples "
"'pytest<2.7' or 'django>=1.6'.",
)
parser.add_argument(
"--sitepackages",
action="store_true",
help="override sitepackages setting to True in all envs",
)
parser.add_argument(
"--alwayscopy", action="store_true", help="override alwayscopy setting to True in all envs"
)
cli_skip_missing_interpreter(parser)
parser.add_argument(
"--workdir",
action="store",
dest="workdir",
metavar="PATH",
default=None,
help="tox working directory",
)
parser.add_argument(
"args", nargs="*", help="additional arguments available to command positional substitution"
)
parser.add_testenv_attribute(
name="envdir",
type="path",
default="{toxworkdir}/{envname}",
help="set venv directory -- be very careful when changing this as tox "
"will remove this directory when recreating an environment",
)
# add various core venv interpreter attributes
def setenv(testenv_config, value):
setenv = value
config = testenv_config.config
if "PYTHONHASHSEED" not in setenv and config.hashseed is not None:
setenv["PYTHONHASHSEED"] = config.hashseed
setenv["TOX_ENV_NAME"] = str(testenv_config.envname)
setenv["TOX_ENV_DIR"] = str(testenv_config.envdir)
return setenv
parser.add_testenv_attribute(
name="setenv",
type="dict_setenv",
postprocess=setenv,
help="list of X=Y lines with environment variable settings",
)
def basepython_default(testenv_config, value):
"""either user set or proposed from the factor name
in both cases we check that the factor name implied python version and the resolved
python interpreter version match up; if they don't we warn, unless ignore base
        python conflict is set, in which case the factor name implied version is forced
"""
for factor in testenv_config.factors:
if factor in tox.PYTHON.DEFAULT_FACTORS:
implied_python = tox.PYTHON.DEFAULT_FACTORS[factor]
break
else:
implied_python, factor = None, None
if testenv_config.config.ignore_basepython_conflict and implied_python is not None:
return implied_python
proposed_python = (implied_python or sys.executable) if value is None else str(value)
if implied_python is not None and implied_python != proposed_python:
testenv_config.basepython = proposed_python
implied_version = tox.PYTHON.PY_FACTORS_RE.match(factor).group(2)
python_info_for_proposed = testenv_config.python_info
if not isinstance(python_info_for_proposed, NoInterpreterInfo):
proposed_version = "".join(
str(i) for i in python_info_for_proposed.version_info[0:2]
)
if implied_version != proposed_version:
# TODO(stephenfin): Raise an exception here in tox 4.0
warnings.warn(
"conflicting basepython version (set {}, should be {}) for env '{}';"
"resolve conflict or set ignore_basepython_conflict".format(
proposed_version, implied_version, testenv_config.envname
)
)
return proposed_python
parser.add_testenv_attribute(
name="basepython",
type="string",
default=None,
postprocess=basepython_default,
help="executable name or path of interpreter used to create a virtual test environment.",
)
def merge_description(testenv_config, value):
"""the reader by default joins generated description with new line,
replace new line with space"""
return value.replace("\n", " ")
parser.add_testenv_attribute(
name="description",
type="string",
default="",
postprocess=merge_description,
help="short description of this environment",
)
parser.add_testenv_attribute(
name="envtmpdir", type="path", default="{envdir}/tmp", help="venv temporary directory"
)
parser.add_testenv_attribute(
name="envlogdir", type="path", default="{envdir}/log", help="venv log directory"
)
parser.add_testenv_attribute(
name="downloadcache",
type="string",
default=None,
help="(ignored) has no effect anymore, pip-8 uses local caching by default",
)
parser.add_testenv_attribute(
name="changedir",
type="path",
default="{toxinidir}",
help="directory to change to when running commands",
)
parser.add_testenv_attribute_obj(PosargsOption())
parser.add_testenv_attribute(
name="skip_install",
type="bool",
default=False,
help="Do not install the current package. This can be used when you need the virtualenv "
"management but do not want to install the current package",
)
parser.add_testenv_attribute(
name="ignore_errors",
type="bool",
default=False,
help="if set to True all commands will be executed irrespective of their result error "
"status.",
)
def recreate(testenv_config, value):
if testenv_config.config.option.recreate:
return True
return value
parser.add_testenv_attribute(
name="recreate",
type="bool",
default=False,
postprocess=recreate,
help="always recreate this test environment.",
)
def passenv(testenv_config, value):
# Flatten the list to deal with space-separated values.
value = list(itertools.chain.from_iterable([x.split(" ") for x in value]))
passenv = {"PATH", "PIP_INDEX_URL", "LANG", "LANGUAGE", "LD_LIBRARY_PATH", "TOX_WORK_DIR"}
# read in global passenv settings
p = os.environ.get("TOX_TESTENV_PASSENV", None)
if p is not None:
env_values = [x for x in p.split() if x]
value.extend(env_values)
# we ensure that tmp directory settings are passed on
# we could also set it to the per-venv "envtmpdir"
# but this leads to very long paths when run with jenkins
# so we just pass it on by default for now.
if tox.INFO.IS_WIN:
passenv.add("SYSTEMDRIVE") # needed for pip6
passenv.add("SYSTEMROOT") # needed for python's crypto module
passenv.add("PATHEXT") # needed for discovering executables
passenv.add("COMSPEC") # needed for distutils cygwincompiler
passenv.add("TEMP")
passenv.add("TMP")
# for `multiprocessing.cpu_count()` on Windows (prior to Python 3.4).
passenv.add("NUMBER_OF_PROCESSORS")
passenv.add("PROCESSOR_ARCHITECTURE") # platform.machine()
passenv.add("USERPROFILE") # needed for `os.path.expanduser()`
passenv.add("MSYSTEM") # fixes #429
else:
passenv.add("TMPDIR")
for spec in value:
for name in os.environ:
if fnmatchcase(name.upper(), spec.upper()):
passenv.add(name)
return passenv
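    # Illustrative (editor's addition): with passenv = ["SSH_*"], every
    # invocation-environment variable whose upper-cased name matches the
    # pattern (e.g. SSH_AUTH_SOCK) is passed through via fnmatchcase above.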
parser.add_testenv_attribute(
name="passenv",
type="line-list",
postprocess=passenv,
help="environment variables needed during executing test commands (taken from invocation "
"environment). Note that tox always passes through some basic environment variables "
"which are needed for basic functioning of the Python system. See --showconfig for the "
"eventual passenv setting.",
)
parser.add_testenv_attribute(
name="whitelist_externals",
type="line-list",
help="each lines specifies a path or basename for which tox will not warn "
"about it coming from outside the test environment.",
)
parser.add_testenv_attribute(
name="platform",
type="string",
default=".*",
help="regular expression which must match against ``sys.platform``. "
"otherwise testenv will be skipped.",
)
def sitepackages(testenv_config, value):
return testenv_config.config.option.sitepackages or value
def alwayscopy(testenv_config, value):
return testenv_config.config.option.alwayscopy or value
parser.add_testenv_attribute(
name="sitepackages",
type="bool",
default=False,
postprocess=sitepackages,
help="Set to ``True`` if you want to create virtual environments that also "
"have access to globally installed packages.",
)
parser.add_testenv_attribute(
name="alwayscopy",
type="bool",
default=False,
postprocess=alwayscopy,
help="Set to ``True`` if you want virtualenv to always copy files rather "
"than symlinking.",
)
def pip_pre(testenv_config, value):
return testenv_config.config.option.pre or value
parser.add_testenv_attribute(
name="pip_pre",
type="bool",
default=False,
postprocess=pip_pre,
help="If ``True``, adds ``--pre`` to the ``opts`` passed to the install command. ",
)
def develop(testenv_config, value):
option = testenv_config.config.option
return not option.installpkg and (value or option.develop)
parser.add_testenv_attribute(
name="usedevelop",
type="bool",
postprocess=develop,
default=False,
help="install package in develop/editable mode",
)
parser.add_testenv_attribute_obj(InstallcmdOption())
parser.add_testenv_attribute(
name="list_dependencies_command",
type="argv",
default="python -m pip freeze",
help="list dependencies for a virtual environment",
)
parser.add_testenv_attribute_obj(DepOption())
parser.add_testenv_attribute(
name="commands",
type="argvlist",
default="",
help="each line specifies a test command and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_pre",
type="argvlist",
default="",
help="each line specifies a setup command action and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_post",
type="argvlist",
default="",
help="each line specifies a teardown command and can use substitution.",
)
parser.add_testenv_attribute(
"ignore_outcome",
type="bool",
default=False,
help="if set to True a failing result of this testenv will not make "
"tox fail, only a warning will be produced",
)
parser.add_testenv_attribute(
"extras",
type="line-list",
help="list of extras to install with the source distribution or develop install",
)
def cli_skip_missing_interpreter(parser):
class SkipMissingInterpreterAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
value = "true" if values is None else values
if value not in ("config", "true", "false"):
raise argparse.ArgumentTypeError("value must be config, true or false")
setattr(namespace, self.dest, value)
parser.add_argument(
"--skip-missing-interpreters",
default="config",
metavar="val",
nargs="?",
action=SkipMissingInterpreterAction,
help="don't fail tests for missing interpreters: {config,true,false} choice",
)
class Config(object):
"""Global Tox config object."""
def __init__(self, pluginmanager, option, interpreters, parser):
self.envconfigs = OrderedDict()
"""Mapping envname -> envconfig"""
self.invocationcwd = py.path.local()
self.interpreters = interpreters
self.pluginmanager = pluginmanager
self.option = option
self._parser = parser
self._testenv_attr = parser._testenv_attr
"""option namespace containing all parsed command line options"""
@property
def homedir(self):
homedir = get_homedir()
if homedir is None:
homedir = self.toxinidir # FIXME XXX good idea?
return homedir
class TestenvConfig:
"""Testenv Configuration object.
In addition to some core attributes/properties this config object holds all
per-testenv ini attributes as attributes, see "tox --help-ini" for an overview.
"""
def __init__(self, envname, config, factors, reader):
#: test environment name
self.envname = envname
#: global tox config object
self.config = config
#: set of factors
self.factors = factors
self._reader = reader
self.missing_subs = []
"""Holds substitutions that could not be resolved.
Pre 2.8.1 missing substitutions crashed with a ConfigError although this would not be a
problem if the env is not part of the current testrun. So we need to remember this and
check later when the testenv is actually run and crash only then.
"""
def get_envbindir(self):
"""Path to directory where scripts/binaries reside."""
if tox.INFO.IS_WIN and "jython" not in self.basepython and "pypy" not in self.basepython:
return self.envdir.join("Scripts")
else:
return self.envdir.join("bin")
@property
def envbindir(self):
return self.get_envbindir()
@property
def envpython(self):
"""Path to python executable."""
return self.get_envpython()
def get_envpython(self):
""" path to python/jython executable. """
if "jython" in str(self.basepython):
name = "jython"
else:
name = "python"
return self.envbindir.join(name)
def get_envsitepackagesdir(self):
"""Return sitepackagesdir of the virtualenv environment.
NOTE: Only available during execution, not during parsing.
"""
x = self.config.interpreters.get_sitepackagesdir(info=self.python_info, envdir=self.envdir)
return x
@property
def python_info(self):
"""Return sitepackagesdir of the virtualenv environment."""
return self.config.interpreters.get_info(envconfig=self)
def getsupportedinterpreter(self):
if tox.INFO.IS_WIN and self.basepython and "jython" in self.basepython:
raise tox.exception.UnsupportedInterpreter(
"Jython/Windows does not support installing scripts"
)
info = self.config.interpreters.get_info(envconfig=self)
if not info.executable:
raise tox.exception.InterpreterNotFound(self.basepython)
if not info.version_info:
raise tox.exception.InvocationError(
"Failed to get version_info for {}: {}".format(info.name, info.err)
)
return info.executable
testenvprefix = "testenv:"
def get_homedir():
try:
return py.path.local._gethomedir()
except Exception:
return None
def make_hashseed():
max_seed = 4294967295
if tox.INFO.IS_WIN:
max_seed = 1024
return str(random.randint(1, max_seed))
class ParseIni(object):
def __init__(self, config, ini_path, ini_data): # noqa
config.toxinipath = ini_path
config.toxinidir = config.toxinipath.dirpath()
self._cfg = py.iniconfig.IniConfig(config.toxinipath, ini_data)
config._cfg = self._cfg
self.config = config
prefix = "tox" if ini_path.basename == "setup.cfg" else None
context_name = getcontextname()
if context_name == "jenkins":
reader = SectionReader(
"tox:jenkins", self._cfg, prefix=prefix, fallbacksections=["tox"]
)
dist_share_default = "{toxworkdir}/distshare"
elif not context_name:
reader = SectionReader("tox", self._cfg, prefix=prefix)
dist_share_default = "{homedir}/.tox/distshare"
else:
raise ValueError("invalid context")
if config.option.hashseed is None:
hash_seed = make_hashseed()
elif config.option.hashseed == "noset":
hash_seed = None
else:
hash_seed = config.option.hashseed
config.hashseed = hash_seed
reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir)
# As older versions of tox may have bugs or incompatibilities that
# prevent parsing of tox.ini this must be the first thing checked.
config.minversion = reader.getstring("minversion", None)
if config.minversion:
tox_version = pkg_resources.parse_version(tox.__version__)
config_min_version = pkg_resources.parse_version(self.config.minversion)
if config_min_version > tox_version:
raise tox.exception.MinVersionError(
"tox version is {}, required is at least {}".format(
tox.__version__, self.config.minversion
)
)
self.ensure_requires_satisfied(reader.getlist("requires"))
if config.option.workdir is None:
config.toxworkdir = reader.getpath("toxworkdir", "{toxinidir}/.tox")
else:
config.toxworkdir = config.toxinidir.join(config.option.workdir, abs=True)
if config.option.skip_missing_interpreters == "config":
val = reader.getbool("skip_missing_interpreters", False)
config.option.skip_missing_interpreters = "true" if val else "false"
config.ignore_basepython_conflict = reader.getbool("ignore_basepython_conflict", False)
# determine indexserver dictionary
config.indexserver = {"default": IndexServerConfig("default")}
prefix = "indexserver"
for line in reader.getlist(prefix):
name, url = map(lambda x: x.strip(), line.split("=", 1))
config.indexserver[name] = IndexServerConfig(name, url)
override = False
if config.option.indexurl:
for url_def in config.option.indexurl:
m = re.match(r"\W*(\w+)=(\S+)", url_def)
if m is None:
url = url_def
name = "default"
else:
name, url = m.groups()
if not url:
url = None
if name != "ALL":
config.indexserver[name].url = url
else:
override = url
# let ALL override all existing entries
if override:
for name in config.indexserver:
config.indexserver[name] = IndexServerConfig(name, override)
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", dist_share_default)
config.temp_dir = reader.getpath("temp_dir", "{toxworkdir}/.tmp")
reader.addsubstitutions(distshare=config.distshare)
config.sdistsrc = reader.getpath("sdistsrc", None)
config.setupdir = reader.getpath("setupdir", "{toxinidir}")
config.logdir = config.toxworkdir.join("log")
self.parse_build_isolation(config, reader)
config.envlist, all_envs = self._getenvdata(reader, config)
# factors used in config or predefined
known_factors = self._list_section_factors("testenv")
known_factors.update({"py", "python"})
# factors stated in config envlist
stated_envlist = reader.getstring("envlist", replace=False)
if stated_envlist:
for env in _split_env(stated_envlist):
known_factors.update(env.split("-"))
# configure testenvs
for name in all_envs:
section = "{}{}".format(testenvprefix, name)
factors = set(name.split("-"))
if (
section in self._cfg
or factors <= known_factors
or all(
tox.PYTHON.PY_FACTORS_RE.match(factor) for factor in factors - known_factors
)
):
config.envconfigs[name] = self.make_envconfig(name, section, reader._subs, config)
all_develop = all(
name in config.envconfigs and config.envconfigs[name].usedevelop
for name in config.envlist
)
config.skipsdist = reader.getbool("skipsdist", all_develop)
def parse_build_isolation(self, config, reader):
config.isolated_build = reader.getbool("isolated_build", False)
config.isolated_build_env = reader.getstring("isolated_build_env", ".package")
if config.isolated_build is True:
name = config.isolated_build_env
if name not in config.envconfigs:
config.envconfigs[name] = self.make_envconfig(
name, "{}{}".format(testenvprefix, name), reader._subs, config
)
@staticmethod
def ensure_requires_satisfied(specified):
missing_requirements = []
for s in specified:
try:
pkg_resources.get_distribution(s)
except pkg_resources.RequirementParseError:
raise
except Exception:
missing_requirements.append(str(pkg_resources.Requirement(s)))
if missing_requirements:
raise tox.exception.MissingRequirement(
"Packages {} need to be installed alongside tox in {}".format(
", ".join(missing_requirements), sys.executable
)
)
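# Illustrative ini block exercising the check above (an assumed example,
# not taken from the tox docs): each spec is resolved via pkg_resources and
# every distribution that cannot be found is reported in a single
# MissingRequirement error.
#
#   [tox]
#   requires =
#       setuptools >= 30.0.0
#       tox-venv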
def _list_section_factors(self, section):
factors = set()
if section in self._cfg:
for _, value in self._cfg[section].items():
exprs = re.findall(r"^([\w{}\.!,-]+)\:\s+", value, re.M)
factors.update(*mapcat(_split_factor_expr_all, exprs))
return factors
def make_envconfig(self, name, section, subs, config, replace=True):
factors = set(name.split("-"))
reader = SectionReader(section, self._cfg, fallbacksections=["testenv"], factors=factors)
tc = TestenvConfig(name, config, factors, reader)
reader.addsubstitutions(
envname=name,
envbindir=tc.get_envbindir,
envsitepackagesdir=tc.get_envsitepackagesdir,
envpython=tc.get_envpython,
**subs
)
for env_attr in config._testenv_attr:
atype = env_attr.type
try:
if atype in ("bool", "path", "string", "dict", "dict_setenv", "argv", "argvlist"):
meth = getattr(reader, "get{}".format(atype))
res = meth(env_attr.name, env_attr.default, replace=replace)
elif atype == "space-separated-list":
res = reader.getlist(env_attr.name, sep=" ")
elif atype == "line-list":
res = reader.getlist(env_attr.name, sep="\n")
else:
raise ValueError("unknown type {!r}".format(atype))
if env_attr.postprocess:
res = env_attr.postprocess(testenv_config=tc, value=res)
except tox.exception.MissingSubstitution as e:
tc.missing_subs.append(e.name)
res = e.FLAG
setattr(tc, env_attr.name, res)
if atype in ("path", "string"):
reader.addsubstitutions(**{env_attr.name: res})
return tc
def _getenvdata(self, reader, config):
candidates = (
self.config.option.env,
os.environ.get("TOXENV"),
reader.getstring("envlist", replace=False),
)
env_str = next((i for i in candidates if i), [])
env_list = _split_env(env_str)
# collect section envs
all_envs = OrderedDict((i, None) for i in env_list)
if "ALL" in all_envs:
all_envs.pop("ALL")
for section in self._cfg:
if section.name.startswith(testenvprefix):
all_envs[section.name[len(testenvprefix) :]] = None
if not all_envs:
all_envs["python"] = None
package_env = config.isolated_build_env
if config.isolated_build is True and package_env in all_envs:
all_envs.pop(package_env)
if not env_list or "ALL" in env_list:
env_list = list(all_envs.keys())
if config.isolated_build is True and package_env in env_list:
msg = "isolated_build_env {} cannot be part of envlist".format(package_env)
raise tox.exception.ConfigError(msg)
all_env_list = list(all_envs.keys())
return env_list, all_env_list
def _split_env(env):
"""if handed a list, action="append" was used for -e """
if not isinstance(env, list):
env = [e.split("#", 1)[0].strip() for e in env.split("\n")]
env = ",".join([e for e in env if e])
env = [env]
return mapcat(_expand_envstr, env)
def _is_negated_factor(factor):
return factor.startswith("!")
def _base_factor_name(factor):
return factor[1:] if _is_negated_factor(factor) else factor
def _split_factor_expr(expr):
def split_single(e):
raw = e.split("-")
included = {_base_factor_name(factor) for factor in raw if not _is_negated_factor(factor)}
excluded = {_base_factor_name(factor) for factor in raw if _is_negated_factor(factor)}
return included, excluded
partial_envs = _expand_envstr(expr)
return [split_single(e) for e in partial_envs]
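# Illustrative expansion (assumed example, derived from the code above):
#   _split_factor_expr("py{27,36}-!lint")
#   -> [({'py27'}, {'lint'}), ({'py36'}, {'lint'})]
# i.e. one (included_factors, excluded_factors) pair per expanded alternative.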
def _split_factor_expr_all(expr):
partial_envs = _expand_envstr(expr)
return [{_base_factor_name(factor) for factor in e.split("-")} for e in partial_envs]
def _expand_envstr(envstr):
# split by commas not in groups
tokens = re.split(r"((?:\{[^}]+\})+)|,", envstr)
envlist = ["".join(g).strip() for k, g in itertools.groupby(tokens, key=bool) if k]
def expand(env):
tokens = re.split(r"\{([^}]+)\}", env)
parts = [re.sub(r"\s+", "", token).split(",") for token in tokens]
return ["".join(variant) for variant in itertools.product(*parts)]
return mapcat(expand, envlist)
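# Illustrative expansion (assumed example): commas inside braces form a
# cartesian product while commas outside separate environments, so
#   _expand_envstr("py{27,36}-django{111,20}, lint")
#   -> ['py27-django111', 'py27-django20', 'py36-django111', 'py36-django20', 'lint']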
def mapcat(f, seq):
return list(itertools.chain.from_iterable(map(f, seq)))
class DepConfig:
def __init__(self, name, indexserver=None):
self.name = name
self.indexserver = indexserver
def __str__(self):
if self.indexserver:
if self.indexserver.name == "default":
return self.name
return ":{}:{}".format(self.indexserver.name, self.name)
return str(self.name)
__repr__ = __str__
class IndexServerConfig:
def __init__(self, name, url=None):
self.name = name
self.url = url
is_section_substitution = re.compile(r"{\[[^{}\s]+\]\S+?}").match
"""Check value matches substitution form of referencing value from other section.
E.g. {[base]commands}
"""
class SectionReader:
def __init__(self, section_name, cfgparser, fallbacksections=None, factors=(), prefix=None):
if prefix is None:
self.section_name = section_name
else:
self.section_name = "{}:{}".format(prefix, section_name)
self._cfg = cfgparser
self.fallbacksections = fallbacksections or []
self.factors = factors
self._subs = {}
self._subststack = []
self._setenv = None
def get_environ_value(self, name):
if self._setenv is None:
return os.environ.get(name)
return self._setenv.get(name)
def addsubstitutions(self, _posargs=None, **kw):
self._subs.update(kw)
if _posargs:
self.posargs = _posargs
def getpath(self, name, defaultpath, replace=True):
path = self.getstring(name, defaultpath, replace=replace)
if path is not None:
toxinidir = self._subs["toxinidir"]
return toxinidir.join(path, abs=True)
def getlist(self, name, sep="\n"):
s = self.getstring(name, None)
if s is None:
return []
return [x.strip() for x in s.split(sep) if x.strip()]
def getdict(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace)
return self._getdict(value, default=default, sep=sep, replace=replace)
def getdict_setenv(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace, crossonly=True)
definitions = self._getdict(value, default=default, sep=sep, replace=replace)
self._setenv = SetenvDict(definitions, reader=self)
return self._setenv
def _getdict(self, value, default, sep, replace=True):
if value is None or not replace:
return default or {}
d = {}
for line in value.split(sep):
if line.strip():
name, rest = line.split("=", 1)
d[name.strip()] = rest.strip()
return d
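# Illustrative input (assumed example): a setenv-style block such as
#   "PIP_INDEX_URL = https://example.org/simple\nVERBOSE = 1"
# parses to {'PIP_INDEX_URL': 'https://example.org/simple', 'VERBOSE': '1'}.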
def getbool(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, bool):
if s.lower() == "true":
s = True
elif s.lower() == "false":
s = False
else:
raise tox.exception.ConfigError(
"{}: boolean value {!r} needs to be 'True' or 'False'".format(name, s)
)
return s
def getargvlist(self, name, default="", replace=True):
s = self.getstring(name, default, replace=False)
return _ArgvlistReader.getargvlist(self, s, replace=replace)
def getargv(self, name, default="", replace=True):
return self.getargvlist(name, default, replace=replace)[0]
def getstring(self, name, default=None, replace=True, crossonly=False):
x = None
for s in [self.section_name] + self.fallbacksections:
try:
x = self._cfg[s][name]
break
except KeyError:
continue
if x is None:
x = default
else:
# Factors must be applied before unwrapping
# dependencies, otherwise the substitution process can
# break. Once they are unwrapped, we apply factors again
# for the newly revealed dependencies.
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
return x
def _replace_if_needed(self, x, name, replace, crossonly):
if replace and x and hasattr(x, "replace"):
x = self._replace(x, name=name, crossonly=crossonly)
return x
def _apply_factors(self, s):
def factor_line(line):
m = re.search(r"^([\w{}\.!,-]+)\:\s+(.+)", line)
if not m:
return line
expr, line = m.groups()
if any(
included <= self.factors and not any(x in self.factors for x in excluded)
for included, excluded in _split_factor_expr(expr)
):
return line
lines = s.strip().splitlines()
return "\n".join(filter(None, map(factor_line, lines)))
def _replace(self, value, name=None, section_name=None, crossonly=False):
if "{" not in value:
return value
section_name = section_name if section_name else self.section_name
self._subststack.append((section_name, name))
try:
replaced = Replacer(self, crossonly=crossonly).do_replace(value)
assert self._subststack.pop() == (section_name, name)
except tox.exception.MissingSubstitution:
if not section_name.startswith(testenvprefix):
raise tox.exception.ConfigError(
"substitution env:{!r}: unknown or recursive definition in"
" section {!r}.".format(value, section_name)
)
raise
return replaced
class Replacer:
RE_ITEM_REF = re.compile(
r"""
(?<!\\)[{]
(?:(?P<sub_type>[^[:{}]+):)? # optional sub_type for special rules
(?P<substitution_value>(?:\[[^,{}]*\])?[^:,{}]*) # substitution key
(?::(?P<default_value>[^{}]*))? # default value
[}]
""",
re.VERBOSE,
)
def __init__(self, reader, crossonly=False):
self.reader = reader
self.crossonly = crossonly
def do_replace(self, value):
"""
Recursively expand substitutions starting from the innermost expression
"""
def substitute_once(x):
return self.RE_ITEM_REF.sub(self._replace_match, x)
expanded = substitute_once(value)
while expanded != value: # substitution found
value = expanded
expanded = substitute_once(value)
return expanded
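# Illustrative trace (assumed values): with reader._subs == {'toxinidir': '/proj'}
# and CACHE unset, "{env:CACHE:{toxinidir}/.cache}" expands innermost-first:
# pass 1 yields "{env:CACHE:/proj/.cache}", pass 2 applies the env default,
# and the loop stops at the fixed point "/proj/.cache".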
def _replace_match(self, match):
g = match.groupdict()
sub_value = g["substitution_value"]
if self.crossonly:
if sub_value.startswith("["):
return self._substitute_from_other_section(sub_value)
# in crossonly we return all other hits verbatim
start, end = match.span()
return match.string[start:end]
# special case: "{:}" (all groups empty) expands to os.pathsep
if not any(g.values()):
return os.pathsep
# special case: opts and packages. Leave {opts} and
# {packages} intact, they are replaced manually in
# _venv.VirtualEnv.run_install_command.
if sub_value in ("opts", "packages"):
return "{{{}}}".format(sub_value)
try:
sub_type = g["sub_type"]
except KeyError:
raise tox.exception.ConfigError(
"Malformed substitution; no substitution type provided"
)
if sub_type == "env":
return self._replace_env(match)
if sub_type == "tty":
if is_interactive():
return match.group("substitution_value")
return match.group("default_value")
if sub_type is not None:
raise tox.exception.ConfigError(
"No support for the {} substitution type".format(sub_type)
)
return self._replace_substitution(match)
def _replace_env(self, match):
key = match.group("substitution_value")
if not key:
raise tox.exception.ConfigError("env: requires an environment variable name")
default = match.group("default_value")
value = self.reader.get_environ_value(key)
if value is not None:
return value
if default is not None:
return default
raise tox.exception.MissingSubstitution(key)
def _substitute_from_other_section(self, key):
if key.startswith("[") and "]" in key:
i = key.find("]")
section, item = key[1:i], key[i + 1 :]
cfg = self.reader._cfg
if section in cfg and item in cfg[section]:
if (section, item) in self.reader._subststack:
raise ValueError(
"{} already in {}".format((section, item), self.reader._subststack)
)
x = str(cfg[section][item])
return self.reader._replace(
x, name=item, section_name=section, crossonly=self.crossonly
)
raise tox.exception.ConfigError("substitution key {!r} not found".format(key))
def _replace_substitution(self, match):
sub_key = match.group("substitution_value")
val = self.reader._subs.get(sub_key, None)
if val is None:
val = self._substitute_from_other_section(sub_key)
if callable(val):
val = val()
return str(val)
def is_interactive():
return sys.stdin.isatty()
class _ArgvlistReader:
@classmethod
def getargvlist(cls, reader, value, replace=True):
"""Parse ``commands`` argvlist multiline string.
:param SectionReader reader: reader to be used.
:param str value: Content stored by key.
:rtype: list[list[str]]
:raise :class:`tox.exception.ConfigError`:
line-continuation ends nowhere while resolving for specified section
"""
commands = []
current_command = ""
for line in value.splitlines():
line = line.rstrip()
if not line:
continue
if line.endswith("\\"):
current_command += " {}".format(line[:-1])
continue
current_command += line
if is_section_substitution(current_command):
replaced = reader._replace(current_command, crossonly=True)
commands.extend(cls.getargvlist(reader, replaced))
else:
commands.append(cls.processcommand(reader, current_command, replace))
current_command = ""
else:
if current_command:
raise tox.exception.ConfigError(
"line-continuation ends nowhere while resolving for [{}] {}".format(
reader.section_name, "commands"
)
)
return commands
@classmethod
def processcommand(cls, reader, command, replace=True):
posargs = getattr(reader, "posargs", "")
posargs_string = list2cmdline([x for x in posargs if x])
# Iterate through each word of the command substituting as
# appropriate to construct the new command string. This
# string is then broken up into exec argv components using
# shlex.
if replace:
newcommand = ""
for word in CommandParser(command).words():
if word == "{posargs}" or word == "[]":
newcommand += posargs_string
continue
elif word.startswith("{posargs:") and word.endswith("}"):
if posargs:
newcommand += posargs_string
continue
else:
word = word[9:-1]
new_arg = ""
new_word = reader._replace(word)
new_word = reader._replace(new_word)
new_word = new_word.replace("\\{", "{").replace("\\}", "}")
new_arg += new_word
newcommand += new_arg
else:
newcommand = command
# Construct shlex object that will not escape any values,
# use all values as is in argv.
shlexer = shlex.shlex(newcommand, posix=True)
shlexer.whitespace_split = True
shlexer.escape = ""
return list(shlexer)
class CommandParser(object):
class State(object):
def __init__(self):
self.word = ""
self.depth = 0
self.yield_words = []
def __init__(self, command):
self.command = command
def words(self):
ps = CommandParser.State()
def word_has_ended():
return (
(
cur_char in string.whitespace
and ps.word
and ps.word[-1] not in string.whitespace
)
or (cur_char == "{" and ps.depth == 0 and not ps.word.endswith("\\"))
or (ps.depth == 0 and ps.word and ps.word[-1] == "}")
or (cur_char not in string.whitespace and ps.word and ps.word.strip() == "")
)
def yield_this_word():
yieldword = ps.word
ps.word = ""
if yieldword:
ps.yield_words.append(yieldword)
def yield_if_word_ended():
if word_has_ended():
yield_this_word()
def accumulate():
ps.word += cur_char
def push_substitution():
ps.depth += 1
def pop_substitution():
ps.depth -= 1
for cur_char in self.command:
if cur_char in string.whitespace:
if ps.depth == 0:
yield_if_word_ended()
accumulate()
elif cur_char == "{":
yield_if_word_ended()
accumulate()
push_substitution()
elif cur_char == "}":
accumulate()
pop_substitution()
else:
yield_if_word_ended()
accumulate()
if ps.word.strip():
yield_this_word()
return ps.yield_words
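# Illustrative split (assumed example): substitution expressions survive as
# single words so they can be replaced before shlex-splitting, and runs of
# whitespace are kept as separate tokens:
#   CommandParser("pytest {posargs} -x").words()
#   -> ['pytest', ' ', '{posargs}', ' ', '-x']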
def getcontextname():
if any(env in os.environ for env in ["JENKINS_URL", "HUDSON_URL"]):
return "jenkins"
return None
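# Illustrative tie-in (inferred from ParseIni above, not from the docs): when
# JENKINS_URL or HUDSON_URL is exported, settings are read from [tox:jenkins]
# with the plain [tox] section as fallback, and distshare defaults to
# "{toxworkdir}/distshare" instead of "{homedir}/.tox/distshare".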
|
[
"[email protected]"
] | |
44a7c1943eb7b978a98e5947ac8a6925fbb719a5
|
5f27bc1a0460a078f6fe33a544f494a5dff7f452
|
/script/puzzle_test_backup_D_20_1026/D_20_1020_vrepMoveit_jaco1.py
|
9c282275c0404bbd785b9da6c406c8b27613031f
|
[] |
no_license
|
A-Why-not-fork-repositories-Good-Luck/arm_move
|
3e381f0310265f47da14beaac136c358fb318f92
|
e2e6182cfd93df1935bd3b8e9158134964dc44fa
|
refs/heads/master
| 2023-03-15T18:37:17.337770 | 2020-11-18T06:46:06 | 2020-11-18T06:46:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 32,748 |
py
|
#!/usr/bin/env python
GRID_SIZE = 0.01
G2P_SIZE = 100
import rospy
import numpy as np
import tf
import matplotlib.pyplot as plt
import copy
import time
import D_20_1020_custom_function as CUF
import D_20_1020_client_function as CLF
from D_20_1020_VFHplus_change_radius import influence
from D_20_1020_envClass4altest import EnvInfo as EI
from D_20_1020_envClass4altest import CanInfo as CI
from arm_move.srv._box_info_srv import *
from arm_move.srv._arm_move_srv import *
from arm_move.srv._work_start_srv import *
from arm_move.srv._att_hand_box_srv import *
from arm_move.srv._arm_goalJoint_srv import *
import timeit
def go_home():
# 2020.08.05 SH
move_group_name = 'panda_arm'
home_joint = [-0.7912285295667355, -1.7449968666946676, 1.6255344777637362, -2.9980328554805484, 1.552371742049853, 1.345932931635115, 0.8050298552807971]
CLF.move_joints_client_rad(move_group_name, home_joint)
def go_ready():
# 2020.08.05 SH
move_group_name = 'panda_arm'
home_joint = [-1.6238, -1.6078, -0.2229, -2.6057, 1.4646, 1.4325, -0.2159]
CLF.move_joints_client_rad(move_group_name, home_joint)
def hand_open():
# 2020.08.05 SH
CLF.panda_gripper_open()
def pick_and_place(env, pick_pose, pick_object_name, place_pose):
print"\tPICK AND PLACE ACTION => rearrange", pick_object_name
env.pick(env.obs_pos, pick_pose, place_pose)
CLF.att_box_client('hand', pick_object_name)
env.go_ready()
env.place(env.obs_pos, place_pose)#, vrep_env.get_current_joint(joint_names_jaco))
CLF.det_box_client(pick_object_name, [0, 0, 0], [0, 0, 0, 0], [0, 0, 0], 'red')
CLF.add_mesh_client(pick_object_name, [place_pose[0], place_pose[1], 0.605], [0.0, 0.0, 0.0, 0.0], [0.001, 0.001, 0.001])
env.go_ready()
print"\tEND PICK AND PLACE ACTION"
#
# # ret_pick_pose = env.pick(env.obs_pos, pick_pose, place_pose)
# env.move_to([[ret_pick_pose[0][0] - 0.03, ret_pick_pose[0][1], ret_pick_pose[0][2]], ret_pick_pose[1]])
#
# env.move_to([[ret_pick_pose[0][0] + 0.05, ret_pick_pose[0][1], ret_pick_pose[0][2]], ret_pick_pose[1]])
#
#
# env.pre_place(env.obs_pos, place_pose, vrep_env.get_current_joint(joint_names_jaco))
# ret_place_pose = env.place(env.obs_pos, place_pose, vrep_env.get_current_joint(joint_names_jaco))
# env.move_to([[ret_place_pose[0][0] - 0.1, ret_place_pose[0][1], ret_place_pose[0][2]], ret_place_pose[1]])
#
# CLF.det_box_client(pick_object_name, [env.object_z, -sel_can_pos[1], sel_can_pos[0]], obstacle_info[env.ore_order[0]][1], obstacle_info[env.ore_order[0]][2], 'blue')
#
# env.move_to([[ret_place_pose[0][0] + 0.1, ret_place_pose[0][1], ret_place_pose[0][2]], ret_place_pose[1]])
# # CLF.add_box_client(obstacle_name[env.ore_order[0]], [env.object_z, -sel_can_pos[1], sel_can_pos[0]], obstacle_info[env.ore_order[0]][1], obstacle_info[env.ore_order[0]][2], 'blue')
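# Note on the frame convention used below (inferred from the code, not
# documented upstream): planner coordinates (x, y) are sent to Rviz/MoveIt as
# (y, -x, z), e.g. an object at planner (px, py) is added with
# CLF.add_mesh_client(name, [py, -px, obj_z], ...); ws_cen applies the inverse
# mapping (-rviz_y, rviz_x) to convert back to planner coordinates.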
def test_algorithm(method, data_in):
    # method
    #   "where" : icra2020 "where to relocate?"
    #   "far"   : relocate to the valid spot farthest from the target
    #   "deep"  : relocate to the valid spot farthest from the robot
    #   "mine"  : among spots with the fewest blockers (min b), farthest from the target
go_ready()
hand_open()
print "start with method:", method
print "\n***STEP 1*** : env setting"
obj_h = -0.0
obj_z = 0.605 + obj_h  # + obj_h/2.0
target_name = ['target']
target_info = []
target_info.append([[data_in[0][1], -data_in[0][0], obj_z], [0, 0, 0, 0], [0.001, 0.001, 0.001]]) # for the add_mesh
# target_info.append([[data_in[0][0], data_in[0][1], obj_z], [0, 0, 0, 0], [0.06, 0.06, 0.12]]) # for the add_box
# target_info[i][0][2] = target_info[i][0][2] + 0.04
# target_info[i][2][2] = target_info[i][2][2] + 0.08
# obstacle_name = []
# for i in range(len(data_in[1])):
obstacle_name = [str(i).zfill(2) for i in range(len(data_in[1]))]
# obstacle_name.append('obstacle'+str(i))
# print obstacle_name
# obstacle_name = ['obstacle0', 'obstacle1', 'obstacle2', 'obstacle3', 'obstacle4', 'obstacle5', 'obstacle6', 'obstacle7', 'obstacle8']
obstacle_info = []
# [[obj_pos.x, obj_pos.y, obj_pos.z], [obj_ori_q.x, obj_ori_q.y, obj_ori_q.z, obj_ori_q.w], [obj_scale.x, obj_scale.y, obj_scale.z]]
for i in range(len(obstacle_name)):
obstacle_info.append([[data_in[1][i][1], -data_in[1][i][0], obj_z], [0, 0, 0, 0], [0.001, 0.001, 0.001]]) # for the add_mesh
# obstacle_info.append([[data_in[1][i][0], data_in[1][i][1], obj_z], [0, 0, 0, 0], [0.06, 0.06, 0.12]]) # for the add_box
# obstacle_info[i][0][2] = obstacle_info[i][0][2] + 0.04
# obstacle_info[i][2][2] = obstacle_info[i][2][2] + 0.08
print "\tNo. of obstacles:", len(obstacle_name)
env_name = ['shelf_gazebo']  # 2020.10.21 puzzle test; previously also 'Jaco_base', 'table_ls', 'table_rs', 'table_us', 'table_bs'
env_info = []
base_position = [0.8637, 0, 0.0 + obj_h]
base_quaternion = [0, 0, 0, 1]
base_scale = [0.001, 0.001, 0.001]
CLF.add_mesh_client('shelf_gazebo', base_position, base_quaternion, base_scale)
ws_pos = [0.8637+0.5*0.45+0.03, 0.0, 0.0 + obj_h]
ws_rot = [0.0, 0.0, 0.0, 0.0]
ws_scale = [0.45, 0.91, 0.0]
env_info.append([ws_pos, ws_rot, ws_scale])
# for i in range(len(env_name)):
# env_info.append(vrep_env.get_object_info(env_name[i]))
# if i > 1:
# env_info[i][2][0] = env_info[i][2][0]+0.01
# env_info[i][2][1] = env_info[i][2][1]+0.01
# env_info[i][2][2] = env_info[i][2][2]+0.01
for i in range(len(obstacle_info)):
CLF.add_mesh_client(obstacle_name[i], obstacle_info[i][0], obstacle_info[i][1], obstacle_info[i][2])
# CLF.add_box_client(obstacle_name[i], obstacle_info[i][0], obstacle_info[i][1], obstacle_info[i][2], 'red')
for i in range(len(target_info)):
CLF.add_mesh_client(target_name[i], target_info[i][0], target_info[i][1], target_info[i][2])
# CLF.add_box_client(target_name[i], target_info[i][0], target_info[i][1], target_info[i][2], 'green')
# for i in range(len(env_info)):
# # CLF.add_mesh_client(env_name[i], env_info[i][0], env_info[i][1], env_info[i][2])
# CLF.add_box_client(env_name[i], env_info[i][0], env_info[i][1], env_info[i][2], 'gray')
ws = env_info[0]
# print"ws info", env_info[0]
ws_w = int(round(ws[2][0]*100)) # x-axes in Rviz
ws_d = int(round(ws[2][1]*100)) # y-axes in Rviz
print "\tRviz ws width, depth:", ws_w, ws_d
# GRID_SIZE = 0.01
ws_zero_pos = [round(ws[0][0] - ws[2][0]*0.5, 2), round(ws[0][1] - ws[2][1]*0.5, 2)]
print "\tRviz ws cen pos:", ws[0]
print "\tRviz ws, zero pos:", ws_zero_pos
# ws_w, ws_d = 100, 100 # get table size in the v-rep
ws_cen = [-ws[0][1], ws[0][0]]
rob_pos = [0.0, 0.0]
OBJ_R = 0.035
env = EI(rob_pos, ws_w, ws_d, ws_cen, ws_zero_pos, grid_size=GRID_SIZE, wall_r=OBJ_R)
env.set_env(obstacle_name, obstacle_info, target_name, target_info)
env.update_env(env.obs_pos, env.obs_grid)
print "\trearrangement order:", env.ore_order
if len(env.ore_order) == 0:
print "end rearrangement"
pick_and_place(env, env.tar_pos, 'target', env.tar_pos)
time.sleep(1)
# CUF.draw_grid_info(env.grid_ori)
# plt.show()
space_err = 0
rearr_cnt = 0
# env.get_env(obs_r, tar_r, min_ore)
algorithm_start = timeit.default_timer()
env.get_max_can(env.grid_ori, bt_num=1, trial_num=1000) # We get "grid_max_can", "can_grid"
# env.get_env_case1()
# env.get_max_can_case1()
'''
Make object info!
Type : target, obstacle, candidate
Info : pos, grid, A, BT, b, ORC, ORE
'''
can_info = []
for i in range(len(env.can_pos)):
can_info.append((CI('candidate', env.can_pos[i], env.can_grid[i])))
# check env info got right
# if 1:
# print "\n# of obstacles", len(env.obs_pos), "\n# of candidates", len(env.can_pos)
'''
GET candidates info
'''
t_ore_order = copy.deepcopy(env.ore_order)
# for i in range(len(can_info)):
# print "can", i, ":", can_info[i].pos
# CUF.draw_grid_info(env.grid_ori)
# CUF.draw_grid_info(env.grid_del)
# CUF.draw_grid_info(env.grid_max_can)
# for c_i in range(len(can_info)):
# plt.text(can_info[c_i].grid[0], can_info[c_i].grid[1], 'Can' + str(c_i), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
# plt.show()
# NOTE: the method argument is overridden here; the last assignment wins,
# so the run below always uses the 'deep' strategy.
method = 'deep'
while len(env.ore_order): # this while loop is for the algorithm
print"\n***STEP 2*** REARRANGE ORDER => :", env.ore_order
print"\tCheck C.A"
# Check C.A : just next step
t_can_info = []
in_can_info = copy.deepcopy(can_info)
in_obs_pos = copy.deepcopy(env.obs_pos)
in_obs_pos.remove(env.obs_pos[env.ore_order[0]])
CLF.del_box_client(obstacle_name[env.ore_order[0]])
t_can_info.append(env.get_can_A(in_can_info, in_obs_pos, env.tar_pos))
CLF.add_mesh_client(obstacle_name[env.ore_order[0]], obstacle_info[env.ore_order[0]][0], obstacle_info[env.ore_order[0]][1], obstacle_info[env.ore_order[0]][2])
# CLF.add_box_client(obstacle_name[env.ore_order[0]], obstacle_info[env.ore_order[0]][0], obstacle_info[env.ore_order[0]][1], obstacle_info[env.ore_order[0]][2], 'red')
# Check C.BT
in_can_info = copy.deepcopy(t_can_info[0])
in_can_info = env.init_BT(in_can_info) # init the BT value of candidates to '0'
in_obs_pos = copy.deepcopy(env.obs_pos)
for ore_i in range(len(env.ore_order)): # after rearrange all ORE
in_obs_pos.remove(env.obs_pos[env.ore_order[ore_i]])
CLF.del_box_client(obstacle_name[env.ore_order[ore_i]])
t_can_info[0] = env.get_can_BT(in_can_info, in_obs_pos, env.tar_pos)
for ore_i in range(len(env.ore_order)):
CLF.add_mesh_client(obstacle_name[env.ore_order[ore_i]], obstacle_info[env.ore_order[ore_i]][0], obstacle_info[env.ore_order[ore_i]][1], obstacle_info[env.ore_order[ore_i]][2])
# CLF.add_box_client(obstacle_name[env.ore_order[ore_i]], obstacle_info[env.ore_order[ore_i]][0], obstacle_info[env.ore_order[ore_i]][1], obstacle_info[env.ore_order[ore_i]][2], 'red')
# Check C.BO : BO : other ORE, just before target
in_can_info = copy.deepcopy(t_can_info[0])
in_obs_pos = copy.deepcopy(env.obs_pos)
for ore_i in range(len(env.ore_order)): # after rearrange all ORE
in_obs_pos.remove(env.obs_pos[env.ore_order[ore_i]])
CLF.del_box_client(obstacle_name[env.ore_order[ore_i]])
for j in range(len(env.ore_order)):  # check other ORE just before the target
    if j > 0:  # only ORE after the obstacle relocated this step (was: j > i, with i stale from an earlier loop)
        t_can_info[0] = env.get_can_BT(in_can_info, in_obs_pos, env.obs_pos[env.ore_order[j]])
for ore_i in range(len(env.ore_order)):
CLF.add_mesh_client(obstacle_name[env.ore_order[ore_i]], obstacle_info[env.ore_order[ore_i]][0], obstacle_info[env.ore_order[ore_i]][1], obstacle_info[env.ore_order[ore_i]][2])
# CLF.add_box_client(obstacle_name[env.ore_order[ore_i]], obstacle_info[env.ore_order[ore_i]][0], obstacle_info[env.ore_order[ore_i]][1], obstacle_info[env.ore_order[ore_i]][2], 'red')
s_v = []
s_v_index = []
for i in range(1):
in_can_info = copy.deepcopy(t_can_info[i])
ret_can, ret_index = env.get_cf(in_can_info)
s_v.append(ret_can)
s_v_index.append(ret_index)
# print "\n step", i, " has # of cf pos:", len(t_cf[i]), "index", t_cf_index[i]
print"\n***STEP 3*** : find valid candidates"
print "\ts_v:", len(s_v[0]), "\n\ts_v_index:", len(s_v_index[0])
# for i in range(len(s_v[0])):
# print "s_v index:", [i], s_v_index[0][i]
# See the feasibile candidate
# for i in range(len(t_cf[0])):
# print "\n Our Cf pos:", i, t_cf[0][i].pos
# See if this case if case0 or case1
# print "t_cf:", t_cf, "order", env.ore_order
if len(s_v[0]) >= len(env.ore_order):
print "\n\tenough candidate spots"
t_b = []
for i in range(1):
in_obs_pos = copy.deepcopy(env.obs_pos)
for ore_i in range(i + 1):
in_obs_pos.remove(env.obs_pos[env.ore_order[ore_i]])
t_b.append(env.get_cf_b(s_v[i], in_obs_pos))
# print "\n step", i, " has cf b:", t_b[i]
# draw_figs = 1
# if draw_figs == 1:
# for c_i in range(len(can_info)):
# plt.text(can_info[c_i].grid[0], can_info[c_i].grid[1], 'Can' + str(c_i), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
#
# for step_i in range(1):
# step_grid = copy.deepcopy(env.grid_act)
# step_obs_grid = copy.deepcopy(env.obs_grid)
# for ore_i in range(step_i + 1):
# step_obs_grid.remove(env.obs_grid[env.ore_order[ore_i]])
# for i in range(len(step_obs_grid)):
# step_grid = CUF.obstacle_circle(step_grid, [round(step_obs_grid[i][0], 2), round(step_obs_grid[i][1], 2), env.obs_r[i]], 2)
# for ci in range(len(can_info)):
# xi, yi = can_info[ci].grid
# step_grid = CUF.obstacle_circle(step_grid, [xi, yi, 0.04], 30)
#
# step_grid = CUF.obstacle_circle(step_grid, [env.tar_grid[0], env.tar_grid[1], tar_r], 4) # target
#
# for cf_i in range(len(t_b[step_i])):
# xi = (t_cf[step_i][cf_i].pos[0] - env.ws_zero[0]) * G2P_SIZE
# yi = (t_cf[step_i][cf_i].pos[1] - env.ws_zero[1]) * G2P_SIZE
# step_grid = CUF.obstacle_circle(step_grid, [xi, yi, 0.04], 3)
#
# CUF.draw_grid_info(step_grid)
#
# for cf_i in range(len(t_b[step_i])):
# xi = (t_cf[step_i][cf_i].pos[0] - env.ws_zero[0]) * G2P_SIZE
# yi = (t_cf[step_i][cf_i].pos[1] - env.ws_zero[1]) * G2P_SIZE
# plt.text(xi, yi, 'b=' + str(t_b[step_i][cf_i]), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for ci in range(len(t_can_info[step_i])):
# plt.text(t_can_info[step_i][ci].grid[0], t_can_info[step_i][ci].grid[1] - 2.0, '[A, BT] :' + str([t_can_info[step_i][ci].A, t_can_info[step_i][ci].BT]), fontsize=10, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
# plt.title('step' + str(step_i) + " obs: " + str(env.ore_order[step_i]) + " rearranged")
elif len(s_v[0]) < len(env.ore_order):
print "\n\tnot enough candidate spots"
# print "Since we meet condition: N(CF) < N(ORE) by", len(t_cf[0]), "<", len(env.ore_order), ",\nwe have to remove additional obstacles."
## step1 : "get t_cp", check candidates which have A = 0 and BT = 0
## This means that a candidate is not reachable and it does not block the target object
# Check A for this environment state
in_can_info = copy.deepcopy(can_info)
in_obs_pos = copy.deepcopy(env.obs_pos)
t_can_add = copy.deepcopy(env.get_can_A(in_can_info, in_obs_pos, env.tar_pos))
s_e = [] # s_e: extra candidate spots
in_can_info = copy.deepcopy(t_can_add)
ret_can, ret_index = env.get_cp(in_can_info)
print "\t# of OR'", len(ret_can)
t_s_e = ret_can
t_s_e_index = ret_index
# print "t_cp:", len(t_cp), "index", t_cp_index
# for i in range(len(t_cp)):
# print "\n Our Cp:", i, t_cp[i].pos
if len(t_s_e) == 0:
print "\tno possible extra candidate exist"
space_err = 1
break
# step2 : check c_ore for each cp and pick min of it
t_s_r = [] # s_r: candidate spot relocate plan
in_can_info = copy.deepcopy(t_s_e)
# tmp_order_time_start = timeit.default_timer()
# tmp_order_time_start2 = time.clock()
t_s_r = env.get_c_ore(in_can_info)
# tmp_order_time_end = timeit.default_timer()
# tmp_order_time_end2 = time.clock()
# order_time = order_time + tmp_order_time_end - tmp_order_time_start
# order_time2 = order_time2 + tmp_order_time_end2 - tmp_order_time_start2
# order_cnt = order_cnt + 100 * len(t_s_e)
# print "\n"
# for i in range(len(t_cp)):
# print "cp", t_cp[i].pos, "\nc_ore", c_ore[i]
s_r = []
s_e_index = []
print "\n"
for i in range(len(t_s_e)):
print "can", t_s_e_index[i], "grid:", t_s_e[i].grid, ", s_r:", t_s_r[i]
for i in range(len(t_s_e)):
if t_s_r[i] != []:
s_e.append(t_s_e[i])
s_r.append(t_s_r[i])
s_e_index.append(t_s_e_index[i])
# tmp_se = copy.deepcopy(s_e)
# tmp_sr = copy.deepcopy(s_r)
# emp_sr = []
# for i in range(len(s_e)):
# if s_r[i] == []:
# print "remove empty s_e", i
# emp_sr.append(i)
#
# print "tmp se:", tmp_se, "\ntmp sr", tmp_sr
# for i in range(len(emp_sr)):
#
# print "tmp_se[emp_sr[i]]", tmp_se[emp_sr[i]].pos
# print "tmp_sr[emp_sr[i]]", tmp_sr[emp_sr[i]]
# s_e.remove(tmp_se[emp_sr[i]])
# s_r.remove(tmp_sr[emp_sr[i]])
while len(s_e):
print "# of s_e:", len(s_e), s_r
print "\n"
for i in range(len(s_e)):
print "can", s_e_index[i], "pos:", s_e[i].pos, ", s_r:", s_r[i]
min_s_r = CUF.min_len_list(s_r)
print "\nmin sr:", min_s_r
#
# print "picked ci index:", t_cp.index(t_cp[c_ore.index(min_c_ore)])
# print "picked ci address:", copy.deepcopy(t_cp[c_ore.index(min_c_ore)]).pos
cp = copy.deepcopy(s_e[s_r.index(min_s_r)])
# print "selected cp pos", cp.pos
## step3 : "get t_cf", check candidates which have A = 1 and BT' = 0
## Check A for this environment state T' is t_cp_i
in_can_info = copy.deepcopy(can_info)
in_obs_pos = copy.deepcopy(env.obs_pos)
in_tar_pos = copy.deepcopy(cp.pos)
t_can_add = copy.deepcopy(env.get_can_A(in_can_info, in_obs_pos, env.tar_pos))
# Check C.BT for this environment state
in_can_info = copy.deepcopy(t_can_add)
in_can_info = env.init_BT(in_can_info) # init the BT value of candidates to '0'
in_obs_pos = copy.deepcopy(env.obs_pos)
sorted_min_s_r = copy.deepcopy(min_s_r)
sorted_min_s_r.sort(reverse=True)
print "sorted min_s_r:", sorted_min_s_r
if sorted_min_s_r[0] == len(env.obs_pos):  # if OR' contains the target o_t, discard this s_e
print "o_t is in OR'"
s_e.remove(s_e[s_r.index(min_s_r)])
s_e_index.remove(s_e_index[s_r.index(min_s_r)])
s_r.remove(s_r[s_r.index(min_s_r)])
else:
for ore_i in range(len(min_s_r)): # after rearrange all OR'
in_obs_pos.remove(in_obs_pos[sorted_min_s_r[ore_i]])
CLF.del_box_client(obstacle_name[sorted_min_s_r[ore_i]])
in_tar_pos = copy.deepcopy(cp.pos)
t_can_add = env.get_can_BT(in_can_info, in_obs_pos, in_tar_pos)
for ore_i in range(len(min_s_r)): # after rearrange all OR'
CLF.add_box_client(obstacle_name[sorted_min_s_r[ore_i]], obstacle_info[sorted_min_s_r[ore_i]][0], obstacle_info[sorted_min_s_r[ore_i]][1], obstacle_info[sorted_min_s_r[ore_i]][2], 'red')
# for i in range(len(t_can_add)):
# print "can", i, "A:", t_can_add[i].A, "B:", t_can_add[i].BT
s_e_v = []
s_v_index = []
in_can_info = copy.deepcopy(t_can_add)
ret_can, ret_index = env.get_cf(in_can_info)
s_e_v.append(ret_can)
s_v_index.append(ret_index)
print "s_e_v: ", s_e_v
for i in range(len(s_e_v[0])):
print s_e_v[0][i].grid
if len(s_e_v[0]) >= len(min_s_r) - 1:
print "this se is possible"
if len(min_s_r) == 1:
print "only one move needed"
# t_can_info = []
# for i in range(len(env.ore_order)):
# in_can_info = copy.deepcopy(can_info)
# in_obs_pos = copy.deepcopy(env.obs_pos)
# for ore_i in range(i + 1):
# if min_s_r[0] != env.ore_order[ore_i]:
# in_obs_pos.remove(env.obs_pos[env.ore_order[ore_i]])
# in_obs_pos.remove(env.obs_pos[min_s_r[0]])
# t_can_info.append(env.get_can_A(in_can_info, in_obs_pos, env.tar_pos))
s_v = [[s_e[s_r.index(min_s_r)]]]
s_v_index = [[s_e_index[s_r.index(min_s_r)]]]
# print "se v:", s_v, s_v[0], s_v[0][0], s_v[0][0].pos
# for i in range(len(env.ore_order)):
# add_can_info = copy.deepcopy(t_can_info[i])
# ret_can, ret_index = env.get_cf(add_can_info)
# s_v.append(ret_can)
# s_v_index.append(ret_index)
t_b = [[0]]
# for i in range(1):
# in_obs_pos = copy.deepcopy(env.obs_pos)
# for ore_i in range(i+1):
# in_obs_pos.remove(env.obs_pos[env.ore_order[ore_i]])
# t_b.append(env.get_cf_b(s_v[i], in_obs_pos))
# # print "\n step", i, " has cf b:", t_b[i]
break  # break out of the s_e loop
else:
t_b = []
in_obs_pos = copy.deepcopy(env.obs_pos)
for ore_i in range(1):
in_obs_pos.remove(env.obs_pos[min_s_r[ore_i]])
t_b.append(env.get_cf_b(s_e_v[0], in_obs_pos))
s_v[0] = s_e_v[0]
break  # break out of the s_e loop
else: # s_e[s_r.index(min_s_r)]
print "\nremove",
print "s_e:", s_e
print "s_r:", s_r
print "s_e_index:", s_e_index
s_e.remove(s_e[s_r.index(min_s_r)])
s_e_index.remove(s_e_index[s_r.index(min_s_r)])
s_r.remove(s_r[s_r.index(min_s_r)])
if len(s_e) == 0:
# print "no possible extra candidate exist"
break
env.ore_order = min_s_r
# draw_figs = 1
# if draw_figs == 1:
# for c_i in range(len(can_info)):
# plt.text(can_info[c_i].grid[0], can_info[c_i].grid[1], 'Can' + str(c_i), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
#
# step_i = 0
# step_grid = copy.deepcopy(env.grid_act)
# step_obs_grid = copy.deepcopy(env.obs_grid)
# step_obs_grid.remove(env.obs_grid[env.ore_order[0]])
# for i in range(len(step_obs_grid)):
# # print "i:", i, "step_obs_grid [i]:", step_obs_grid[i]
# step_grid = CUF.obstacle_circle(step_grid, [round(step_obs_grid[i][0], 2), round(step_obs_grid[i][1], 2), env.obs_r[i]], 2)
# for ci in range(len(can_info)):
# xi, yi = can_info[ci].grid
# step_grid = CUF.obstacle_circle(step_grid, [xi, yi, 0.04], 30)
#
# step_grid = CUF.obstacle_circle(step_grid, [env.tar_grid[0], env.tar_grid[1], tar_r], 4) # target
#
# for cf_i in range(len(t_b[step_i])):
# xi = (t_cf[step_i][cf_i].pos[0] - env.ws_zero[0]) * G2P_SIZE
# yi = (t_cf[step_i][cf_i].pos[1] - env.ws_zero[1]) * G2P_SIZE
# step_grid = CUF.obstacle_circle(step_grid, [xi, yi, 0.04], 3)
#
# CUF.draw_grid_info(step_grid)
#
# for cf_i in range(len(t_b[step_i])):
# xi = (t_cf[step_i][cf_i].pos[0] - env.ws_zero[0]) * G2P_SIZE
# yi = (t_cf[step_i][cf_i].pos[1] - env.ws_zero[1]) * G2P_SIZE
# plt.text(xi, yi, 'b=' + str(t_b[step_i][cf_i]), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for ci in range(len(t_can_info[step_i])):
# plt.text(t_can_info[step_i][ci].grid[0], t_can_info[step_i][ci].grid[1] - 2.0, '[A, BT] :' + str([t_can_info[step_i][ci].A, t_can_info[step_i][ci].BT]), fontsize=10, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
# plt.title('step' + str(step_i) + " obs: " + str(env.ore_order[step_i]) + " rearranged")
if space_err:
print "no possible extra candidate exist"
break
# move obstacle to can(min(b))
# print "s_v", s_v
# print "s_v[0]", s_v[0]
# print "s_v[0][0]", s_v[0][0]
# print "s_v[0][0].pos", s_v[0][0].pos
print "\tt_b[0]", t_b[0]
find_b = copy.deepcopy(t_b[0])
# print "move to c_", find_b.index(min(find_b))
if method == 'far':
t_sel_can_index = [i for i in range(len(find_b))]
elif method == 'deep':
t_sel_can_index = [i for i in range(len(find_b))]
elif method == 'mine':
t_sel_can_index = [i for i in range(len(find_b)) if find_b[i] == min(find_b)]
t_sel_can_dist = []
# print "\ntar grid: ", env.tar_grid
# print "\ntar pos: ", env.tar_pos
print "\tt sel can index", t_sel_can_index
for i in range(len(t_sel_can_index)):
# print "t_cf grid x,y:", t_sel_can_index[i], t_cf[0][t_sel_can_index[i]].grid[0], t_cf[0][t_sel_can_index[i]].grid[1]
# print "t_cf pos x,y:", t_sel_can_index[i], s_v[0][t_sel_can_index[i]].pos[0], s_v[0][t_sel_can_index[i]].pos[1]
if method == 'deep':
t_sel_can_dist.append(np.sqrt((env.rob_pos[0] - s_v[0][t_sel_can_index[i]].pos[0]) ** 2 + (env.rob_pos[1] - s_v[0][t_sel_can_index[i]].pos[1]) ** 2))
else:
t_sel_can_dist.append(np.sqrt((env.tar_pos[0] - s_v[0][t_sel_can_index[i]].pos[0]) ** 2 + (env.tar_pos[1] - s_v[0][t_sel_can_index[i]].pos[1]) ** 2))
# print "t sel can dist", t_sel_can_dist
sel_can_index = t_sel_can_index[t_sel_can_dist.index(max(t_sel_can_dist))]
# print "sel can index", sel_can_index
sel_can_pos = can_info[s_v_index[0][sel_can_index]].pos
sel_can_grid = can_info[s_v_index[0][sel_can_index]].grid
sel_obs_pos = env.obs_pos[env.ore_order[0]]
sel_obs_grid = env.obs_grid[env.ore_order[0]]
env.obs_pos[env.ore_order[0]] = sel_can_pos
env.obs_grid[env.ore_order[0]] = sel_can_grid
can_info[s_v_index[0][sel_can_index]].pos = sel_obs_pos
can_info[s_v_index[0][sel_can_index]].grid = sel_obs_grid
# tmp_order_time_start = timeit.default_timer()
# tmp_order_time_start2 = time.clock()
# env.pick_n_place()
# CLF.add_box_client(obstacle_name[env.ore_order[0]], [env.object_z, -sel_can_pos[1], sel_can_pos[0]], obstacle_info[env.ore_order[0]][1], obstacle_info[env.ore_order[0]][2], 'blue')
pick_and_place(env, sel_obs_pos, obstacle_name[env.ore_order[0]], env.obs_pos[env.ore_order[0]])
# time.sleep(1)
# obstacle_info = []
# for i in range(len(obstacle_name)):
# obstacle_info.append(vrep_env.get_object_info(obstacle_name[i]))
# # obstacle_info[i][0][2] = obstacle_info[i][0][2] + 0.04
# # obstacle_info[i][2][2] = obstacle_info[i][2][2] + 0.08
# for i in range(len(obstacle_info)):
# CLF.add_box_client(obstacle_name[i], obstacle_info[i][0], obstacle_info[i][1], obstacle_info[i][2], 'red')
# env.set_env(obstacle_name, obstacle_info, target_name, target_info)
# home_joint = [3.1415927410125732, 4.537856101989746, 5.93411922454834, -0.6108652353286743, 1.7453292608261108, -0.5235987901687622]
#
# CLF.move_joints_client_rad('arm', home_joint)
env.update_env(env.obs_pos, env.obs_grid)
# tmp_order_time_end = timeit.default_timer()
# order_time = order_time + tmp_order_time_end - tmp_order_time_start
# order_time2 = order_time2 + tmp_order_time_end2 - tmp_order_time_start2
# order_cnt = order_cnt + 1
rearr_cnt = rearr_cnt + 1
if env.order_error_flag == 0:
print "\nretry for another environment"
space_err = 1
break
print "after move order is:", env.ore_order
# CUF.draw_grid_info(env.grid_ori)
# for c_i in range(len(can_info)):
# plt.text(can_info[c_i].grid[0], can_info[c_i].grid[1], 'Can' + str(c_i), fontsize=20, ha='center', bbox=dict(facecolor='pink', alpha=0.8))
# for o_i in range(len(env.obs_grid)):
# plt.text(env.obs_grid[o_i][0], env.obs_grid[o_i][1], 'Obs' + str(o_i), fontsize=20, ha='center', bbox=dict(facecolor='red', alpha=0.8))
if len(env.ore_order) == 0:
print "end rearrangement"
pick_and_place(env, env.tar_pos, 'target', env.tar_pos)
time.sleep(1)
# plt.title('rearrangement finished')
break
# else:
# plt.title('after rearrngement')
# plt.show()
# final pick of the target; note this repeats the pick already performed
# above whenever env.ore_order was empty (before or during the loop)
pick_and_place(env, env.tar_pos, 'target', env.tar_pos)
time.sleep(1)
algorithm_end = timeit.default_timer()
tot_time = algorithm_end - algorithm_start
print "tot time:", tot_time
if __name__ == "__main__":
X = ['0.03', '-0.01', '0.36', '0.30', '-0.19', '-0.05', '-0.29', '0.22', '0.19', '0.14', '-0.12']
Y = ['1.22', '1.11', '1.04', '1.17', '1.06', '1.31', '1.17', '1.31', '1.06', '1.19', '1.13']
data_in = []
data_in.append([-0.17, 1.22])
obs_list = []
for i in range(len(X)):
obs_list.append([float(X[i]), float(Y[i])])
data_in.append(obs_list)
# print "data:", data_in
method = "where"
test_algorithm(method, data_in)
|
[
"[email protected]"
] | |
00afe15515e8406d7267839d7d8a4be3bccea3fa
|
1dbbb05b30d27c6419b9f34eea3b9a47f92582a0
|
/parlai/zoo/sea/bart_base.py
|
44e3581dd73c1b7ad168a64f76a5a09e3c7c18f6
|
[
"MIT"
] |
permissive
|
facebookresearch/ParlAI
|
815334323d0ebef51bf9837336fe3eef6fe1655d
|
e1d899edfb92471552bae153f59ad30aa7fca468
|
refs/heads/main
| 2023-08-31T22:20:45.918129 | 2023-08-14T19:39:56 | 2023-08-14T19:39:56 | 89,266,735 | 10,943 | 2,395 |
MIT
| 2023-09-13T23:07:40 | 2017-04-24T17:10:44 |
Python
|
UTF-8
|
Python
| false | false | 741 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Vanilla BART-Large 400m parameter model with no retrieval.
"""
from parlai.core.build_data import built, download_models, get_model_dir
import os
import os.path
def download(datapath):
ddir = os.path.join(get_model_dir(datapath), 'sea')
model_type = 'bart_base'
version = 'v1.0'
if not built(os.path.join(ddir, model_type), version):
opt = {'datapath': datapath, 'model_type': model_type}
fnames = [f'model_{version}.tgz']
download_models(opt, fnames, 'sea', version=version, use_model_type=True)
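# Illustrative call (assumed datapath, not part of this module):
#   download('/tmp/parlai_data')
# fetches model_v1.0.tgz and unpacks it under
# <get_model_dir>/sea/bart_base/, marking the directory as built for v1.0.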
|
[
"[email protected]"
] | |
d9d33fe8b116cb7dc30454d84a7a1097e2401020
|
ad1e55b9a67c798cf4b4ce41c76b26977f8b4e8d
|
/vendor-local/celery/tests/test_utils/test_datastructures.py
|
c06e4d732e8b0cdfa46db88f55237d3206f51bd9
|
[
"BSD-3-Clause"
] |
permissive
|
kumar303/rockit
|
7a6ac84bb8c37e5f3b65d7dcecf9b9c549902cf5
|
fc347b5b143835ddd77fd0c1ea4e6f2007a21972
|
refs/heads/master
| 2021-01-10T19:51:30.638073 | 2020-07-26T19:00:37 | 2020-07-26T19:00:37 | 4,219,328 | 0 | 2 |
BSD-3-Clause
| 2020-07-26T19:00:38 | 2012-05-03T22:03:24 |
Python
|
UTF-8
|
Python
| false | false | 8,435 |
py
|
from __future__ import absolute_import
from __future__ import with_statement
import sys
from celery.datastructures import (ExceptionInfo, LRUCache, LimitedSet,
AttributeDict, DictAttribute,
ConfigurationView, DependencyGraph)
from celery.tests.utils import Case, WhateverIO
class Object(object):
pass
class test_DictAttribute(Case):
def test_get_set(self):
x = DictAttribute(Object())
x["foo"] = "The quick brown fox"
self.assertEqual(x["foo"], "The quick brown fox")
self.assertEqual(x["foo"], x.obj.foo)
self.assertEqual(x.get("foo"), "The quick brown fox")
self.assertIsNone(x.get("bar"))
with self.assertRaises(KeyError):
x["bar"]
def test_setdefault(self):
x = DictAttribute(Object())
self.assertEqual(x.setdefault("foo", "NEW"), "NEW")
self.assertEqual(x.setdefault("foo", "XYZ"), "NEW")
def test_contains(self):
x = DictAttribute(Object())
x["foo"] = 1
self.assertIn("foo", x)
self.assertNotIn("bar", x)
def test_items(self):
obj = Object()
obj.attr1 = 1
x = DictAttribute(obj)
x["attr2"] = 2
self.assertDictEqual(dict(x.iteritems()),
dict(attr1=1, attr2=2))
self.assertDictEqual(dict(x.items()),
dict(attr1=1, attr2=2))
class test_ConfigurationView(Case):
def setUp(self):
self.view = ConfigurationView({"changed_key": 1,
"both": 2},
[{"default_key": 1,
"both": 1}])
def test_setdefault(self):
self.assertEqual(self.view.setdefault("both", 36), 2)
self.assertEqual(self.view.setdefault("new", 36), 36)
def test_get(self):
self.assertEqual(self.view.get("both"), 2)
sp = object()
self.assertIs(self.view.get("nonexisting", sp), sp)
def test_update(self):
changes = dict(self.view.changes)
self.view.update(a=1, b=2, c=3)
self.assertDictEqual(self.view.changes,
dict(changes, a=1, b=2, c=3))
def test_contains(self):
self.assertIn("changed_key", self.view)
self.assertIn("default_key", self.view)
self.assertNotIn("new", self.view)
def test_repr(self):
self.assertIn("changed_key", repr(self.view))
self.assertIn("default_key", repr(self.view))
def test_iter(self):
expected = {"changed_key": 1,
"default_key": 1,
"both": 2}
self.assertDictEqual(dict(self.view.items()), expected)
self.assertItemsEqual(list(iter(self.view)),
expected.keys())
self.assertItemsEqual(self.view.keys(), expected.keys())
self.assertItemsEqual(self.view.values(), expected.values())
class test_ExceptionInfo(Case):
def test_exception_info(self):
try:
raise LookupError("The quick brown fox jumps...")
except LookupError:
exc_info = sys.exc_info()
einfo = ExceptionInfo(exc_info)
self.assertEqual(str(einfo), einfo.traceback)
self.assertIsInstance(einfo.exception, LookupError)
self.assertTupleEqual(einfo.exception.args,
("The quick brown fox jumps...", ))
self.assertTrue(einfo.traceback)
r = repr(einfo)
self.assertTrue(r)
class test_LimitedSet(Case):
def test_add(self):
s = LimitedSet(maxlen=2)
s.add("foo")
s.add("bar")
for n in "foo", "bar":
self.assertIn(n, s)
s.add("baz")
for n in "bar", "baz":
self.assertIn(n, s)
self.assertNotIn("foo", s)
def test_iter(self):
s = LimitedSet(maxlen=2)
items = "foo", "bar"
for item in items:
s.add(item)
l = list(iter(s))
for item in items:
self.assertIn(item, l)
def test_repr(self):
s = LimitedSet(maxlen=2)
items = "foo", "bar"
for item in items:
s.add(item)
self.assertIn("LimitedSet(", repr(s))
def test_clear(self):
s = LimitedSet(maxlen=2)
s.add("foo")
s.add("bar")
self.assertEqual(len(s), 2)
s.clear()
self.assertFalse(s)
def test_update(self):
s1 = LimitedSet(maxlen=2)
s1.add("foo")
s1.add("bar")
s2 = LimitedSet(maxlen=2)
s2.update(s1)
self.assertItemsEqual(list(s2), ["foo", "bar"])
s2.update(["bla"])
self.assertItemsEqual(list(s2), ["bla", "bar"])
s2.update(["do", "re"])
self.assertItemsEqual(list(s2), ["do", "re"])
def test_as_dict(self):
s = LimitedSet(maxlen=2)
s.add("foo")
self.assertIsInstance(s.as_dict(), dict)
class test_LRUCache(Case):
def test_expires(self):
limit = 100
x = LRUCache(limit=limit)
slots = list(xrange(limit * 2))
for i in slots:
x[i] = i
self.assertListEqual(x.keys(), list(slots[limit:]))
def test_least_recently_used(self):
x = LRUCache(3)
x[1], x[2], x[3] = 1, 2, 3
self.assertEqual(x.keys(), [1, 2, 3])
x[4], x[5] = 4, 5
self.assertEqual(x.keys(), [3, 4, 5])
# access 3, which makes it the last used key.
x[3]
x[6] = 6
self.assertEqual(x.keys(), [5, 3, 6])
x[7] = 7
self.assertEqual(x.keys(), [3, 6, 7])
def assertSafeIter(self, method, interval=0.01, size=10000):
from threading import Thread, Event
from time import sleep
x = LRUCache(size)
x.update(zip(xrange(size), xrange(size)))
class Burglar(Thread):
def __init__(self, cache):
self.cache = cache
self._is_shutdown = Event()
self._is_stopped = Event()
Thread.__init__(self)
def run(self):
while not self._is_shutdown.isSet():
try:
self.cache.data.popitem(last=False)
except KeyError:
break
self._is_stopped.set()
def stop(self):
self._is_shutdown.set()
self._is_stopped.wait()
self.join(1e10)
burglar = Burglar(x)
burglar.start()
try:
for _ in getattr(x, method)():
sleep(0.0001)
finally:
burglar.stop()
def test_safe_to_remove_while_iteritems(self):
self.assertSafeIter("iteritems")
def test_safe_to_remove_while_keys(self):
self.assertSafeIter("keys")
def test_safe_to_remove_while_itervalues(self):
self.assertSafeIter("itervalues")
def test_items(self):
c = LRUCache()
c.update(a=1, b=2, c=3)
self.assertTrue(c.items())
class test_AttributeDict(Case):
def test_getattr__setattr(self):
x = AttributeDict({"foo": "bar"})
self.assertEqual(x["foo"], "bar")
with self.assertRaises(AttributeError):
x.bar
x.bar = "foo"
self.assertEqual(x["bar"], "foo")
class test_DependencyGraph(Case):
def graph1(self):
return DependencyGraph([
("A", []),
("B", []),
("C", ["A"]),
("D", ["C", "B"]),
])
def test_repr(self):
self.assertTrue(repr(self.graph1()))
def test_topsort(self):
order = self.graph1().topsort()
# C must start before D
self.assertLess(order.index("C"), order.index("D"))
# and B must start before D
self.assertLess(order.index("B"), order.index("D"))
# and A must start before C
self.assertLess(order.index("A"), order.index("C"))
def test_edges(self):
self.assertListEqual(list(self.graph1().edges()),
["C", "D"])
def test_items(self):
self.assertDictEqual(dict(self.graph1().items()),
{"A": [], "B": [],
"C": ["A"], "D": ["C", "B"]})
def test_to_dot(self):
s = WhateverIO()
self.graph1().to_dot(s)
self.assertTrue(s.getvalue())
|
[
"[email protected]"
] | |
2fe4cec6defc2e66ddc4db17511c536f84514dd1
|
ee6acbd5fcd0fcd16230e96a4a539de41a02c97e
|
/operators/special-resource-operator/python/pulumi_pulumi_kubernetes_crds_operators_special_resource_operator/sro/v1alpha1/__init__.py
|
7e6f12156a258138619de35e038acddf3d969e0c
|
[
"Apache-2.0"
] |
permissive
|
isabella232/pulumi-kubernetes-crds
|
777e78137aaf6525a44b61a02dccf91bf0d87a14
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
refs/heads/master
| 2023-03-15T04:29:16.039753 | 2020-12-30T19:35:54 | 2020-12-30T19:35:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 275 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .SpecialResource import *
from ._inputs import *
from . import outputs
|
[
"[email protected]"
] | |
5a1a215fc88b1c2d5c7a9729d348862c15461931
|
b64687833bbbd206d871e5b20c73e5bf363c4995
|
/crocs.py
|
2ea505fc464101c7b928b4bbcbb3e5e9cd5a0f07
|
[
"Apache-2.0"
] |
permissive
|
barkinet/crocs
|
462225eee0975c9240ec25ca1275e0f9dc991e00
|
7ab44d1eb45aac7b24ab64601255d9fb38049040
|
refs/heads/master
| 2020-12-02T09:12:18.988446 | 2017-07-09T20:09:52 | 2017-07-09T20:09:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,547 |
py
|
from random import choice, randint
from string import printable
import re
class RegexStr(object):
def __init__(self, value):
self.value = value
def invalid_data(self):
pass
def valid_data(self):
return self.value
def __str__(self):
return re.escape(self.value)
class RegexOperator(object):
# It may be interesting to have a base class Pattern that implements
# the methods shared by Group, Include and Exclude, since these all
# accept multiple arguments.
def __init__(self):
pass
def invalid_data(self):
pass
def valid_data(self):
pass
def encargs(self, args):
return [RegexStr(ind) if isinstance(ind, str) else ind
for ind in args]
def encstr(self, regex):
regex = RegexStr(regex) if isinstance(
regex, str) else regex
return regex
def test(self):
regex = str(self)
data = self.valid_data()
# It has to be search in order to work with ConsumeNext.
strc = re.search(regex, data)
print 'Regex:', regex
print 'Input:', data
print 'Group dict:', strc.groupdict()
print 'Group 0:', strc.group(0)
print 'Groups:', strc.groups()
def join(self):
return ''.join(map(lambda ind: str(ind), self.args))
def __str__(self):
pass
class NamedGroup(RegexOperator):
"""
Named groups.
(?P<name>...)
"""
def __init__(self, name, *args):
self.args = self.encargs(args)
self.name = name
def invalid_data(self):
pass
def valid_data(self):
return ''.join(map(lambda ind: \
ind.valid_data(), self.args))
def __str__(self):
return '(?P<%s>%s)' % (self.name, self.join())
class Group(RegexOperator):
"""
A normal group.
(abc).
"""
def __init__(self, *args):
self.args = self.encargs(args)
def invalid_data(self):
pass
def valid_data(self):
return ''.join(map(lambda ind: \
ind.valid_data(), self.args))
def __str__(self):
return '(%s)' % self.join()
class Times(RegexOperator):
"""
Match from min to max times, e.g. a{1,3}.
Note: the * and + quantifiers are emulated by
Times(regex, 0) and Times(regex, 1), respectively.
"""
TEST_MAX = 10
def __init__(self, regex, min=0, max=''):
self.regex = self.encstr(regex)
self.min = min
self.max = max
def invalid_data(self):
pass
def valid_data(self):
count = randint(self.min, self.max
if self.max else self.TEST_MAX)
data = ''.join((self.regex.valid_data()
for ind in xrange(count)))
return data
def __str__(self):
return '%s{%s,%s}' % (self.regex,
self.min, self.max)
class ConsumeNext(RegexOperator):
"""
Lookbehind assertion.
(?<=...)
"""
def __init__(self, regex0, regex1):
self.regex0 = self.encstr(regex0)
self.regex1 = self.encstr(regex1)
def invalid_data(self):
pass
def valid_data(self):
return '%s%s' % (self.regex0.valid_data(),
self.regex1.valid_data())
def __str__(self):
return '(?<=%s)%s' % (self.regex0, self.regex1)
class ConsumeBack(RegexOperator):
"""
Lookahead assertion.
(?=...)
"""
def __init__(self, regex0, regex1):
self.regex0 = self.encstr(regex0)
self.regex1 = self.encstr(regex1)
def invalid_data(self):
pass
def valid_data(self):
return '%s%s' % (self.regex0.valid_data(),
self.regex1.valid_data())
def __str__(self):
return '%s(?=%s)' % (self.regex0, self.regex1)
class Seq(RegexOperator):
def __init__(self, start, end):
self.start = start
self.end = end
# include the end character so samples match the regex range semantics
self.seq = [chr(ind) for ind in xrange(
ord(self.start), ord(self.end) + 1)]
def valid_data(self):
return ''.join(self.seq)
def __str__(self):
return '%s-%s' % (self.start, self.end)
class Include(RegexOperator):
"""
Sets.
[abc]
"""
def __init__(self, *args):
self.args = self.encargs(args)
def invalid_data(self):
pass
def valid_data(self):
chars = ''.join(map(lambda ind: \
ind.valid_data(), self.args))
char = choice(chars)
return char
def __str__(self):
return '[%s]' % self.join()
class Exclude(RegexOperator):
"""
Excluding.
[^abc]
"""
def __init__(self, *args):
self.args = self.encargs(args)
def invalid_data(self):
pass
def valid_data(self):
chars = ''.join(map(lambda ind: \
ind.valid_data(), self.args))
data = filter(lambda ind: \
not ind in chars, printable)
return choice(data)
def __str__(self):
return '[^%s]' % self.join()
class X(RegexOperator):
"""
The dot.
.
"""
TOKEN = '.'
def __init__(self):
pass
def invalid_data(self):
return ''
def valid_data(self):
char = choice(printable)
return char
def __str__(self):
return self.TOKEN
class Pattern(RegexOperator):
"""
Setup a pattern.
"""
def __init__(self, *args):
self.args = self.encargs(args)
def invalid_data(self):
pass
def valid_data(self):
return ''.join(map(lambda ind: \
ind.valid_data(), self.args))
def __str__(self):
return self.join()
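# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# Assuming the classes above behave as documented, this builds a small
# pattern and exercises test(), which prints the generated regex, a random
# valid input, and the match groups.
if __name__ == '__main__':
    demo = Pattern(
        RegexStr('name: '),
        NamedGroup('word', Times(Include(Seq('a', 'z')), min=1, max=8)))
    demo.test()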
|
[
"ioliveira.id.uff.br"
] |
ioliveira.id.uff.br
|
e1918394a57db35a46a6856e38ebedd667af34e4
|
d21112887ed1ec675b7b519cc991fc47bfa11735
|
/SaleML_PreDjango/Predicting/urls.py
|
fc440ad929f7717a684452088ecfe3d8b3a0a1bb
|
[] |
no_license
|
SydNS/DjangoML-model
|
8c9ab65075b896ff129a872b087cdcd9dfc87e83
|
c15474b136d592e182e707f6a73269685c3e62ad
|
refs/heads/master
| 2023-03-02T13:27:33.809869 | 2021-02-06T09:57:34 | 2021-02-06T09:57:34 | 336,550,706 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 196 |
py
|
from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include
from . import views  # required by the URL pattern below (assumes a views.py in this app)
urlpatterns = [
path('products', views.product_describe_view, name='product_add'),
]
|
[
"[email protected]"
] | |
a97d0b7b5c266a837d5caf3fefb00339c7d845dc
|
8fcae139173f216eba1eaa01fd055e647d13fd4e
|
/.history/scraper_20191220144406.py
|
d08dde54a93939a73cdb07eb8e08d72519375f5e
|
[] |
no_license
|
EnriqueGalindo/backend-web-scraper
|
68fdea5430a0ffb69cc7fb0e0d9bcce525147e53
|
895d032f4528d88d68719838a45dae4078ebcc82
|
refs/heads/master
| 2020-11-27T14:02:59.989697 | 2019-12-21T19:47:34 | 2019-12-21T19:47:34 | 229,475,085 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,693 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module docstring: One line description of what your program does.
There should be a blank line in between description above, and this
more detailed description. In this section you should put any caveats,
environment variable expectations, gotchas, and other notes about running
the program. Author tag (below) helps instructors keep track of who
wrote what, when grading.
"""
__author__ = "Enrique Galindo"
# Imports go at the top of your file, after the module docstring.
# One module per import line. These are for example only.
import sys
import requests
import re
def main(args):
"""Main function is declared as standalone, for testability"""
url = args[0]
response = requests.get(url)
response.raise_for_status()
url_list = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', response.text)
    regex_email = r'''(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])'''
regex_phone = r'''(1?\W*([2-9][0-8][0-9])\W*([2-9][0-9]{2})\W*([0-9]{4})(\se?x?t?(\d*))?)'''
email_list = set(re.findall(regex_email, response.text))
    phone_list = set(re.findall(regex_phone, response.text))
print(email_list)
if __name__ == '__main__':
"""Docstring goes here"""
main(sys.argv[1:])
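# Hedged usage note (editor's addition): the script expects the target URL as
# its single positional argument, e.g.
#   python scraper.py https://example.com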
|
[
"[email protected]"
] | |
1c0a243fae087ba9520b940a1940a5458e5d1a61
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_95/2103.py
|
a414f3ccb3b1e3142cf256e7d173e37982b1e31b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,026 |
py
|
#! /usr/bin/env python
import operator
from sys import stdin
in1 = "ejp mysljylc kd kxveddknmc re jsicpdrysirbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcdde kr kd eoya kw aej tysr re ujdr lkgc jvzq"
out1 = "our language is impossible to understandthere are twenty six factorial possibilitiesso it is okay if you want to just give upqz"
sample = "ejp mysljylc kd kxveddknmc re jsicpdrysi"
def getInput():
raw = stdin.readlines()
for x in range(0, len(raw)):
raw[x] = raw[x].replace('\n', '')
return raw
def makeMap(input_str, output_str):
mymap = {}
for x,y in zip(input_str, output_str):
if(x != " "):
mymap[x] = y
return mymap
def googler2english(input_str):
mymap = makeMap(in1, out1)
ret_str = ""
for x in input_str:
if x != ' ':
ret_str = ret_str + mymap[x]
else:
ret_str = ret_str + " "
return ret_str
def main():
myinput = getInput()
bound = int(myinput[0])
for x in range(1, bound + 1):
print "Case #%d: %s" % (x, googler2english(myinput[x]))
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
5e1e1a8a01e9a4132bd94ac4745a7070a47d4718
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r8/Gen/DecFiles/options/22114002.py
|
7953c9feeb8c8d3d7f7c9d855b04a94363e3a510
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 752 |
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/22114002.py generated: Fri, 27 Mar 2015 15:48:15
#
# Event Type: 22114002
#
# ASCII decay Descriptor: [D0 -> pi+ pi- mu+ mu-]cc
#
from Configurables import Generation
Generation().EventType = 22114002
Generation().SampleGenerationTool = "SignalPlain"
from Configurables import SignalPlain
Generation().addTool( SignalPlain )
Generation().SignalPlain.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/D0_pipimumu=DecProdCut.dec"
Generation().SignalPlain.CutTool = "DaughtersInLHCb"
Generation().SignalPlain.SignalPIDList = [ 421,-421 ]
|
[
"[email protected]"
] | |
1565de3abac24dee338faefa2cd4b094f35f2ecd
|
7963f09b4002249e73496c6cbf271fd6921b3d22
|
/emulator_6502/instructions/sbc.py
|
31e549a6575d30d766fd1bf37990f233dd92938b
|
[] |
no_license
|
thales-angelino/py6502emulator
|
6df908fc02f29b41fad550c8b773723a7b63c414
|
1cea28489d51d77d2dec731ab98a6fe8a515a2a8
|
refs/heads/master
| 2023-03-19T14:46:17.393466 | 2021-03-08T04:10:45 | 2021-03-08T04:10:45 | 345,754,473 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,553 |
py
|
SBC_IMMEDIATE_OPCODE = 0xe9
SBC_ZEROPAGE_OPCODE = 0xe5
SBC_ZEROPAGEX_OPCODE = 0xf5
SBC_ABSOLUTE_OPCODE = 0xed
SBC_ABSOLUTEX_OPCODE = 0xfd
SBC_ABSOLUTEY_OPCODE = 0xf9
SBC_INDIRECTX_OPCODE = 0xe1
SBC_INDIRECTY_OPCODE = 0xf1
class SBCImmediate(object):
def __init__(self):
super(SBCImmediate, self).__init__()
def run(self, cpu):
byte_r = cpu.immediate()
print("SBC memory byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status Carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCZeroPage(object):
"""SBC Zero Page instruction"""
def __init__(self):
super(SBCZeroPage, self).__init__()
def run(self, cpu):
byte_r = cpu.zero_page()
print("SBC zero page byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status Carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCZeroPageX(object):
"""SBC Zero Page X instruction"""
def __init__(self):
super(SBCZeroPageX, self).__init__()
def run(self, cpu):
byte_r = cpu.zero_page_x()
print("SBC zero page X byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCAbsolute(object):
"""SBC absolute instruction"""
def __init__(self):
super(SBCAbsolute, self).__init__()
def run(self, cpu):
byte_r = cpu.absolute()
print("SBC absolute byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCAbsoluteX(object):
"""SBC absolute X instruction"""
def __init__(self):
super(SBCAbsoluteX, self).__init__()
def run(self, cpu):
byte_r = cpu.absolute_x()
print("SBC absolute x byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCAbsoluteY(object):
"""SBC absolute Y instruction"""
def __init__(self):
super(SBCAbsoluteY, self).__init__()
def run(self, cpu):
byte_r = cpu.absolute_y()
print("SBC absolute Y byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCIndirectX(object):
"""SBC indirect X instruction"""
def __init__(self):
super(SBCIndirectX, self).__init__()
def run(self, cpu):
byte_r = cpu.indirect_x()
print("SBC indirect X byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
class SBCIndirectY(object):
"""SBC Indirect Y instruction"""
def __init__(self):
super(SBCIndirectY, self).__init__()
def run(self, cpu):
byte_r = cpu.indirect_y()
print("SBC indirect Y byte read: %s" % hex(byte_r))
print("SBC register A read: %s" % hex(cpu.a))
print("SBC processor status Carry read: %s" % hex(cpu.processor_status['carry']))
cpu.sbc(byte_r)
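# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# The opcode constants above pair naturally with instruction instances in a
# dispatch table; `cpu` is assumed to expose the addressing-mode methods and
# sbc() used by the run() methods.
SBC_DISPATCH = {
    SBC_IMMEDIATE_OPCODE: SBCImmediate(),
    SBC_ZEROPAGE_OPCODE: SBCZeroPage(),
    SBC_ZEROPAGEX_OPCODE: SBCZeroPageX(),
    SBC_ABSOLUTE_OPCODE: SBCAbsolute(),
    SBC_ABSOLUTEX_OPCODE: SBCAbsoluteX(),
    SBC_ABSOLUTEY_OPCODE: SBCAbsoluteY(),
    SBC_INDIRECTX_OPCODE: SBCIndirectX(),
    SBC_INDIRECTY_OPCODE: SBCIndirectY(),
}
# e.g. SBC_DISPATCH[opcode].run(cpu)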
|
[
"[email protected]"
] | |
a0adbf0801f319434a3785fe01f994198732a1a1
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2864/60618/317011.py
|
a6f56f689fbf65afed239fae41b9105f1e30fc28
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,548 |
py
|
class Solution:
def find(self, n, data):
if n == 1:
return data[0]
re = 0
d = dict()
for i in range(n):
d[data[i]] = data.count(data[i])
            # These two lines are worth studying!
            sorted_key_list = sorted(d, reverse=True)  # keys from largest to smallest
sorted_dict = sorted(d.items(), key=lambda x: x[0], reverse=True)
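            # For example, d = {3: 2, 1: 5} gives sorted_key_list == [3, 1]
            # and sorted_dict == [(3, 2), (1, 5)].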
k = sorted_key_list
dd = []
for item in sorted_dict:
dd.append(item[1])
i = 0
while i < len(k):
            if k[i] == k[i+1] + 1:  # next value is adjacent, so taking this one forfeits it
if dd[i] * k[i] > dd[i + 1] * k[i + 1]:
re += dd[i] * k[i]
i += 1
if i == len(k) - 1:
break
if i == len(k) - 2:
re += dd[i + 1] * k[i + 1]
break
else:
re += dd[i + 1] * k[i + 1]
i += 1
if i == len(k) - 1:
break
if k[i] == k[i+1]+1:
i += 1
i += 1
            else:  # next value is not adjacent, so it can still be taken
re += dd[i] * k[i]
i += 1
if i == len(k) - 1:
re += dd[i] * k[i]
break
return re
if __name__ == '__main__':
n = int(input())
data = [int(a) for a in input().split()]
s = Solution()
re = s.find(n, data)
print(re)
|
[
"[email protected]"
] | |
2018328ca867ccc87ff04f9b378c17cac512df31
|
747f759311d404af31c0f80029e88098193f6269
|
/addons/sale_payment/sale_payment.py
|
807c9c55bec7e7ec51aae95ef58e50ca79e28d7c
|
[] |
no_license
|
sgeerish/sirr_production
|
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
|
1081f3a5ff8864a31b2dcd89406fac076a908e78
|
refs/heads/master
| 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 66 |
py
|
/home/openerp/production/extra-addons/sale_payment/sale_payment.py
|
[
"[email protected]"
] | |
07d35f66b6c9afb14eb49238a555cec69a67ef2c
|
5dc77586e3e0f9de1f032fd2ca68494d8e58928f
|
/contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_geometry_to_be_within_shape.py
|
4fb1ffed1fe1ffab5fafb06d1d68eb5f5526a0d0
|
[
"Apache-2.0"
] |
permissive
|
great-expectations/great_expectations
|
dd7c22e6277d6b08bee3ff38a015e6e8cd434df6
|
b0290e2fd2aa05aec6d7d8871b91cb4478e9501d
|
refs/heads/develop
| 2023-09-04T09:30:26.395518 | 2023-09-02T00:00:13 | 2023-09-02T00:00:13 | 103,071,520 | 8,931 | 1,535 |
Apache-2.0
| 2023-09-14T19:57:16 | 2017-09-11T00:18:46 |
Python
|
UTF-8
|
Python
| false | false | 12,685 |
py
|
from typing import Optional
import pandas as pd
import pygeos as geos
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesGeometryWithinShape(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.geometry.within_shape"
condition_value_keys = ("shape", "shape_format", "column_shape_format", "properly")
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
shape = kwargs.get("shape")
shape_format = kwargs.get("shape_format")
column_shape_format = kwargs.get("column_shape_format")
properly = kwargs.get("properly")
# Check that shape is given and given in the correct format
if shape is not None:
try:
if shape_format == "wkt":
shape_ref = geos.from_wkt(shape)
elif shape_format == "wkb":
shape_ref = geos.from_wkb(shape)
elif shape_format == "geojson":
shape_ref = geos.from_geojson(shape)
else:
raise NotImplementedError(
"Shape constructor method not implemented. Must be in WKT, WKB, or GeoJSON format."
)
except Exception:
raise Exception("A valid reference shape was not given.")
else:
raise Exception("A shape must be provided for this method.")
# Load the column into a pygeos Geometry vector from numpy array (Series not supported).
if column_shape_format == "wkt":
shape_test = geos.from_wkt(column.to_numpy(), on_invalid="ignore")
elif column_shape_format == "wkb":
shape_test = geos.from_wkb(column.to_numpy(), on_invalid="ignore")
else:
raise NotImplementedError("Column values shape format not implemented.")
# Allow for an array of reference shapes to be provided. Return a union of all the shapes in the array (Polygon or Multipolygon)
shape_ref = geos.union_all(shape_ref)
# Prepare the geometries
geos.prepare(shape_ref)
geos.prepare(shape_test)
if properly:
return pd.Series(geos.contains_properly(shape_ref, shape_test))
else:
return pd.Series(geos.contains(shape_ref, shape_test))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesGeometryToBeWithinShape(ColumnMapExpectation):
"""Expect that column values as geometries are within a given reference shape.
expect_column_values_geometry_to_be_within_shape is a \
[Column Map Expectation](https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations).
Args:
column (str): \
The column name. \
            Column values must be provided in WKT or WKB format, which are common formats for GIS databases. \
            WKT can be accessed through the ST_AsText() function, and WKB through ST_AsBinary(), in queries for PostGIS and MSSQL.
Keyword Args:
shape (str or list of str): \
The reference geometry
shape_format (str): \
Geometry format for 'shape' string(s). Can be provided as 'Well Known Text' (WKT), 'Well Known Binary' (WKB), or as GeoJSON. \
Must be one of: [wkt, wkb, geojson]. Default: wkt
column_shape_format (str): \
            Geometry format for 'column'. Column values must be provided in WKT or WKB format, which are common formats for GIS databases. \
            WKT can be accessed through the ST_AsText() function, and WKB through ST_AsBinary(), in queries for PostGIS and MSSQL.
properly (boolean): \
Whether the 'column' values should be properly within in the reference 'shape'. \
The method allows for shapes to be 'properly contained' within the reference, meaning no points of a given geometry can touch the boundary of the reference. \
See the pygeos docs for reference. Default: False
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Notes:
* Convention is (X Y Z) for points, which would map to (Longitude Latitude Elevation) for geospatial cases.
* Any convention can be followed as long as the test and reference shapes are consistent.
* The reference shape allows for an array, but will union (merge) all the shapes into 1 and check the contains condition.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"points_only": [
"POINT(1 1)",
"POINT(2 2)",
"POINT(6 4)",
"POINT(3 9)",
"POINT(8 9.999)",
],
"points_and_lines": [
"POINT(1 1)",
"POINT(2 2)",
"POINT(6 4)",
"POINT(3 9)",
"LINESTRING(5 5, 8 10)",
],
},
"tests": [
{
"title": "positive_test_with_points",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "points_only",
"shape": "POLYGON ((0 0, 0 10, 10 10, 10 0, 0 0))",
"shape_format": "wkt",
"properly": False,
},
"out": {
"success": True,
},
},
{
"title": "positive_test_with_points_and_lines",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "points_and_lines",
"shape": "POLYGON ((0 0, 0 10, 10 10, 10 0, 0 0))",
"shape_format": "wkt",
"properly": False,
},
"out": {
"success": True,
},
},
{
"title": "positive_test_with_points_wkb_reference_shape",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "points_only",
"shape": "010300000001000000050000000000000000000000000000000000000000000000000000000000000000002440000000000000244000000000000024400000000000002440000000000000000000000000000000000000000000000000",
"shape_format": "wkb",
"properly": False,
},
"out": {
"success": True,
},
},
{
"title": "positive_test_with_points_geojson_reference_shape",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "points_only",
"shape": '{"type":"Polygon","coordinates":[[[0.0,0.0],[0.0,10.0],[10.0,10.0],[10.0,0.0],[0.0,0.0]]]}',
"shape_format": "geojson",
"properly": False,
},
"out": {
"success": True,
},
},
{
"title": "negative_test_with_points",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "points_only",
"shape": "POLYGON ((0 0, 0 7.5, 7.5 7.5, 7.5 0, 0 0))",
"shape_format": "wkt",
"properly": True,
},
"out": {
"success": False,
},
},
{
"title": "negative_test_with_points_and_lines_not_properly_contained",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "points_and_lines",
"shape": "POLYGON ((0 0, 0 10, 10 10, 10 0, 0 0))",
"shape_format": "wkt",
"properly": True,
"mostly": 1,
},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.geometry.within_shape"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"mostly",
"shape",
"shape_format",
"column_shape_format",
"properly",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {
"mostly": 1,
"shape_format": "wkt",
"column_shape_format": "wkt",
"properly": False,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration)
configuration = configuration or self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
return True
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": [
"geospatial",
"hackathon-2022",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@pjdobson", # Don't forget to add your github handle here!
],
"requirements": ["pygeos"],
}
if __name__ == "__main__":
ExpectColumnValuesGeometryToBeWithinShape().print_diagnostic_checklist()
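# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# Importing this module registers the expectation, after which it is exposed
# on validators under its snake_case name. A minimal sketch, assuming an
# existing Great Expectations validator `validator` over a table containing
# the WKT column from the examples above:
#
#   validator.expect_column_values_geometry_to_be_within_shape(
#       column="points_only",
#       shape="POLYGON ((0 0, 0 10, 10 10, 10 0, 0 0))",
#       shape_format="wkt",
#       properly=False,
#   )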
|
[
"[email protected]"
] | |
b248f7b6e4a7f92757f0a8c13236f489a28b112f
|
ace30d0a4b1452171123c46eb0f917e106a70225
|
/filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/oslo_privsep/tests/test_comm.py
|
72f7aefceab898b5a143045d7aa771a0fc759a9f
|
[
"Python-2.0"
] |
permissive
|
juancarlosdiaztorres/Ansible-OpenStack
|
e98aa8c1c59b0c0040c05df292964520dd796f71
|
c01951b33e278de9e769c2d0609c0be61d2cb26b
|
refs/heads/master
| 2022-11-21T18:08:21.948330 | 2018-10-15T11:39:20 | 2018-10-15T11:39:20 | 152,568,204 | 0 | 3 | null | 2022-11-19T17:38:49 | 2018-10-11T09:45:48 |
Python
|
UTF-8
|
Python
| false | false | 2,698 |
py
|
# Copyright 2015 Rackspace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from oslotest import base
from oslo_privsep import comm
class BufSock(object):
def __init__(self):
self.readpos = 0
self.buf = six.BytesIO()
def recv(self, bufsize):
if self.buf.closed:
return b''
self.buf.seek(self.readpos, 0)
data = self.buf.read(bufsize)
self.readpos += len(data)
return data
def sendall(self, data):
self.buf.seek(0, 2)
self.buf.write(data)
def shutdown(self, _flag):
self.buf.close()
class TestSerialization(base.BaseTestCase):
def setUp(self):
super(TestSerialization, self).setUp()
sock = BufSock()
self.input = comm.Serializer(sock)
self.output = iter(comm.Deserializer(sock))
def send(self, data):
self.input.send(data)
return next(self.output)
def assertSendable(self, value):
self.assertEqual(value, self.send(value))
def test_none(self):
self.assertSendable(None)
def test_bool(self):
self.assertSendable(True)
self.assertSendable(False)
def test_int(self):
self.assertSendable(42)
self.assertSendable(-84)
def test_bytes(self):
data = b'\x00\x01\x02\xfd\xfe\xff'
self.assertSendable(data)
def test_unicode(self):
data = u'\u4e09\u9df9'
self.assertSendable(data)
def test_tuple(self):
self.assertSendable((1, 'foo'))
def test_list(self):
# NB! currently lists get converted to tuples by serialization.
self.assertEqual((1, 'foo'), self.send([1, 'foo']))
def test_dict(self):
self.assertSendable(
{
'a': 'b',
1: 2,
None: None,
(1, 2): (3, 4),
}
)
def test_badobj(self):
class UnknownClass(object):
pass
obj = UnknownClass()
self.assertRaises(TypeError, self.send, obj)
def test_eof(self):
self.input.close()
self.assertRaises(StopIteration, next, self.output)
|
[
"[email protected]"
] | |
a7f52a070ab9786932134e6185e25c4294abacda
|
bfc25f1ad7bfe061b57cfab82aba9d0af1453491
|
/data/external/repositories_2to3/113677/KaggleBillionWordImputation-master/scripts/test_to_train.py
|
d6a8b8242d2e01d61592d440427057247ee7db57
|
[
"MIT"
] |
permissive
|
Keesiu/meta-kaggle
|
77d134620ebce530d183467202cf45639d9c6ff2
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
refs/heads/master
| 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 284 |
py
|
#!/usr/bin/env python
'''Convert test file format to train file format'''
import sys
if __name__ == '__main__':
header = sys.stdin.readline()
for line in sys.stdin:
i, sentence = line.rstrip().split(',', 1)
print(sentence[1:-1].replace('""', '"'))
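# Hedged usage note (editor's addition): the script filters stdin to stdout,
# e.g.
#   python test_to_train.py < test.csv > train.txt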
|
[
"[email protected]"
] | |
7914eab270311d6a94213bb0d0fa5edfa4c36fb0
|
863d32f9adc6890600a7a114574be66e80dc4ec7
|
/models/seg_model.py
|
0e3d6fddf9a0d4b5e475694ffe2eb863038fda1d
|
[] |
no_license
|
dsl2009/dsl_instance
|
9e60dc36a3106a9500a9486208533c2eb23578ae
|
ca299c16feaf58eadfd21f282bf681194b6c118f
|
refs/heads/master
| 2020-04-24T15:18:08.246023 | 2019-07-26T08:38:19 | 2019-07-26T08:38:19 | 172,060,432 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,584 |
py
|
from models import resnet
import torch
from torch import nn
from torch.nn import functional as F
from layer import renet
class SegModel(nn.Module):
def __init__(self):
super(SegModel, self).__init__()
self.cnn = resnet.resnet50(pretrained=False)
self.cov1 = nn.Sequential(
nn.Conv2d(2048, 512, kernel_size=1, stride=1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
)
self.cov2 = nn.Sequential(
nn.Conv2d(768, 256, kernel_size=3,padding=1, stride=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU()
)
self.cov3 = nn.Sequential(
nn.Conv2d(320, 64, kernel_size=3,padding=1, stride=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU()
)
self.seg = nn.Conv2d(64, 1, kernel_size=3,padding=1, stride=1, bias=False)
self.edge = nn.Conv2d(64, 1, kernel_size=3, padding=1, stride=1, bias=False)
def forward(self, img):
x1, x2, x3 = self.cnn(img)
x3 = self.cov1(x3)
x3_up = F.interpolate(x3,scale_factor=2, mode='bilinear')
x2 = torch.cat([x3_up, x2],dim =1)
x2 = self.cov2(x2)
x2_up = F.interpolate(x2,scale_factor=2, mode='bilinear')
x1 = torch.cat([x2_up, x1],dim =1)
x1 = self.cov3(x1)
x0 = F.interpolate(x1,scale_factor=2, mode='bilinear')
seg = self.seg(x0)
edge = self.edge(x0)
return seg,edge
if __name__ == '__main__':
x = torch.randn(2,3,256,256).cuda()
md = SegModel().cuda()
md(x)
|
[
"dsl"
] |
dsl
|
34c4d58dbc00a029cccf06bca3604352c7a3dc0b
|
833e9e3b34b271aa2522471bd0b281b892adff78
|
/backend/forms.py
|
9f1014a729fa0d32ce2cc205096f506180fa41c4
|
[] |
no_license
|
emilte/case
|
b3fcd869468e093ec754980824c6b155f283caa7
|
35eadb05bdd224f845353a952c9aa18b03d95591
|
refs/heads/master
| 2021-06-27T13:19:32.550253 | 2019-11-24T23:21:36 | 2019-11-24T23:21:36 | 223,599,299 | 0 | 0 | null | 2021-03-19T08:42:52 | 2019-11-23T14:10:19 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,377 |
py
|
from django import forms
from urllib import request
from captcha.fields import ReCaptchaField
from django.conf import settings
def between(x, a, b):
return x >= a and x <= b
class Info(forms.Form):
applicant = forms.CharField(initial="emil", required=True, widget=forms.HiddenInput)
name = forms.CharField(initial="Emil Telstad", required=True, min_length=2)
email = forms.EmailField(initial="[email protected]", required=True)
phone = forms.IntegerField(initial="41325358", required=True)
areacode = forms.CharField(initial="7051", required=False, min_length=4, max_length=4)
comment = forms.CharField(required=False, widget=forms.Textarea)
captcha = ReCaptchaField(
public_key=settings.RECAPTCHA_PUBLIC_KEY,
private_key=settings.RECAPTCHA_PRIVATE_KEY,
)
required_css_class = 'required'
def __init__(self, *args, **kwargs):
        super(Info, self).__init__(*args, **kwargs)  # super(type(self), ...) recurses infinitely in subclasses
for field in self.fields.values():
field.widget.attrs.update({'class': 'form-control'})
self.fields['name'].widget.attrs.update({'placeholder': 'Ola Nordmann'})
self.fields['email'].widget.attrs.update({'placeholder': '[email protected]'})
self.fields['phone'].widget.attrs.update({'placeholder': '12345678'})
self.fields['areacode'].widget.attrs.update({'placeholder': '1234'})
def clean_phone(self):
data = self.cleaned_data['phone']
if between(data, 40000000, 49999999) or between(data, 90000000, 99999999):
return data
raise forms.ValidationError("Invalid Norwegian phone number")
def clean_areacode(self):
data = self.cleaned_data['areacode']
if not data: # Areacode is not required
return data
        try: int(data)
        except ValueError: raise forms.ValidationError("Areacodes contain only digits (0-9)")
if len(data) != 4:
raise forms.ValidationError("Norwegian areacodes contain exactly 4 digits")
resource = request.urlopen("https://www.bring.no/postnummerregister-ansi.txt")
encode = resource.headers.get_content_charset()
for line in resource:
line = line.decode(encode)
n = line.split('\t')[0]
if int(n) == int(data):
return data
raise forms.ValidationError("Areacode does not exist")
|
[
"[email protected]"
] | |
87cb6e36d3ce8f25552e58055a81a96c81d016d0
|
9994911f0ff388c92c21ca8178eec2d3af57082d
|
/teamup/cli.py
|
8379e8bc873e2b905aca6bd2f170758de61ca15c
|
[
"MIT"
] |
permissive
|
BruceEckel/TeamUp
|
2809b36b8946b51bf96fcc113ef24ef02508f3c9
|
23e29301b462c329ad17253b4d4fb7f56fb7881b
|
refs/heads/master
| 2023-01-05T19:06:21.010258 | 2022-12-26T23:30:44 | 2022-12-26T23:30:44 | 127,565,232 | 7 | 1 |
MIT
| 2022-12-26T23:30:45 | 2018-03-31T19:42:07 |
Python
|
UTF-8
|
Python
| false | false | 1,527 |
py
|
# -*- coding: utf-8 -*-
"""
Combine people for group activities
"""
from pathlib import Path
import os, sys
import click
import webbrowser
from teamup.pairings import Pairings
from teamup.PersistentLoopCounter import PersistentLoopCounter
attendees = Path("Attendees.txt")
html = Path() / "html"
@click.group()
@click.version_option()
def main():
"""
Generates and displays all combinations of 2-person teams using a
round-robin algorithm. Requires an Attendees.txt file containing
one name per line. Remove the 'html' directory to restart.
"""
def display(index):
pairing = html / f"pairing{index}.html"
assert pairing.exists()
webbrowser.open_new_tab(pairing)
@main.command()
def current():
"""
Show current teams
"""
if not attendees.exists():
print("Attendees.txt not found")
sys.exit(1)
pairings = Pairings.from_file(Path("Attendees.txt"))
if not html.exists():
pairings.create_html_files()
PersistentLoopCounter.create(html, pairings.bound)
display(PersistentLoopCounter.get(html).index())
@main.command()
def next():
"""
Moves to next team grouping and shows
"""
if not html.exists():
print("No 'html' directory, first run 'teamup current'")
sys.exit(1)
display(PersistentLoopCounter.get(html).next())
# @main.command()
# def clean():
# """
# Erases the 'html' directory
# """
# if html.exists():
# html.unlink()
if __name__ == "__main__":
main()
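# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# click commands can be exercised in-process with click's test runner;
# this assumes an Attendees.txt file in the working directory:
#
#   from click.testing import CliRunner
#   result = CliRunner().invoke(main, ["current"])
#   assert result.exit_code == 0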
|
[
"[email protected]"
] | |
1c5daec5e4fda16f1120b32e7f9d688b02254b60
|
85a9ffeccb64f6159adbd164ff98edf4ac315e33
|
/pysnmp-with-texts/IB-DHCPONE-MIB.py
|
aea222e97e72ae77fa4c45e1500e93446cf69240
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
agustinhenze/mibs.snmplabs.com
|
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
refs/heads/master
| 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 |
Apache-2.0
| 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null |
UTF-8
|
Python
| false | false | 11,349 |
py
|
#
# PySNMP MIB module IB-DHCPONE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IB-DHCPONE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:50:35 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
IbString, IbIpAddr, ibDHCPOne = mibBuilder.importSymbols("IB-SMI-MIB", "IbString", "IbIpAddr", "ibDHCPOne")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, enterprises, Gauge32, ModuleIdentity, IpAddress, Integer32, Counter32, ObjectIdentity, TimeTicks, MibIdentifier, Unsigned32, iso, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "enterprises", "Gauge32", "ModuleIdentity", "IpAddress", "Integer32", "Counter32", "ObjectIdentity", "TimeTicks", "MibIdentifier", "Unsigned32", "iso", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ibDhcpModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1))
ibDhcpModule.setRevisions(('2010-03-23 00:00', '2008-02-14 00:00', '2005-01-10 00:00', '2004-05-21 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ibDhcpModule.setRevisionsDescriptions(('Fixed smilint errors', 'change ibDHCPSubnetPercentUsed syntax', 'Added copyright', 'Creation of the MIB file',))
if mibBuilder.loadTexts: ibDhcpModule.setLastUpdated('201003230000Z')
if mibBuilder.loadTexts: ibDhcpModule.setOrganization('Infoblox')
if mibBuilder.loadTexts: ibDhcpModule.setContactInfo('See IB-SMI-MIB for information.')
if mibBuilder.loadTexts: ibDhcpModule.setDescription('This file defines the Infoblox DHCP One MIB.')
ibDHCPSubnetTable = MibTable((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 1), )
if mibBuilder.loadTexts: ibDHCPSubnetTable.setStatus('current')
if mibBuilder.loadTexts: ibDHCPSubnetTable.setDescription('A table of DHCP Subnet statistics.')
ibDHCPSubnetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 1, 1), ).setIndexNames((0, "IB-DHCPONE-MIB", "ibDHCPSubnetNetworkAddress"))
if mibBuilder.loadTexts: ibDHCPSubnetEntry.setStatus('current')
if mibBuilder.loadTexts: ibDHCPSubnetEntry.setDescription('A conceptual row of the ibDHCPSubnetEntry containing info about a particular network using DHCP.')
ibDHCPSubnetNetworkAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 1, 1, 1), IbIpAddr()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPSubnetNetworkAddress.setStatus('current')
if mibBuilder.loadTexts: ibDHCPSubnetNetworkAddress.setDescription('DHCP Subnet in IpAddress format. A subnetwork may have many ranges for lease.')
ibDHCPSubnetNetworkMask = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 1, 1, 2), IbIpAddr()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPSubnetNetworkMask.setStatus('current')
if mibBuilder.loadTexts: ibDHCPSubnetNetworkMask.setDescription('DHCP Subnet mask in IpAddress format.')
ibDHCPSubnetPercentUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPSubnetPercentUsed.setStatus('current')
if mibBuilder.loadTexts: ibDHCPSubnetPercentUsed.setDescription('Percentage of dynamic DHCP addresses for the subnet leased out at this time. Fixed addresses are always counted as leased for this calculation if the fixed addresses are within ranges of leases.')
ibDHCPStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3))
ibDhcpTotalNoOfDiscovers = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfDiscovers.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfDiscovers.setDescription('This variable indicates the number of discovery messages received')
ibDhcpTotalNoOfRequests = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfRequests.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfRequests.setDescription('This variable indicates the number of requests received')
ibDhcpTotalNoOfReleases = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfReleases.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfReleases.setDescription('This variable indicates the number of releases received')
ibDhcpTotalNoOfOffers = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfOffers.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfOffers.setDescription('This variable indicates the number of offers sent')
ibDhcpTotalNoOfAcks = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfAcks.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfAcks.setDescription('This variable indicates the number of acks sent')
ibDhcpTotalNoOfNacks = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfNacks.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfNacks.setDescription('This variable indicates the number of nacks sent')
ibDhcpTotalNoOfDeclines = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfDeclines.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfDeclines.setDescription('This variable indicates the number of declines received')
ibDhcpTotalNoOfInforms = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfInforms.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfInforms.setDescription('This variable indicates the number of informs received')
ibDhcpTotalNoOfOthers = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 3, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpTotalNoOfOthers.setStatus('current')
if mibBuilder.loadTexts: ibDhcpTotalNoOfOthers.setDescription('This variable indicates the number of other messages received')
ibDhcpDeferredQueueSize = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDhcpDeferredQueueSize.setStatus('current')
if mibBuilder.loadTexts: ibDhcpDeferredQueueSize.setDescription('The size of deferred dynamic DNS update queue')
ibDHCPDDNSStats = MibIdentifier((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5))
ibDHCPDDNSAvgLatency5 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency5.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency5.setDescription('Average Latencies (in microseconds) for DHCPD dynamic DNS updates during the last 5 minutes')
ibDHCPDDNSAvgLatency15 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency15.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency15.setDescription('Average Latencies (in microseconds) for DHCPD dynamic DNS updates during the last 15 minutes')
ibDHCPDDNSAvgLatency60 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency60.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency60.setDescription('Average Latencies (in microseconds) for DHCPD dynamic DNS updates during the last 60 minutes')
ibDHCPDDNSAvgLatency1440 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency1440.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSAvgLatency1440.setDescription('Average Latencies (in microseconds) for DHCPD dynamic DNS updates during the last 1 day')
ibDHCPDDNSTimeoutCount5 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount5.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount5.setDescription('The number of timeout DHCPD dynamic DDNS updates during the last 5 minutes')
ibDHCPDDNSTimeoutCount15 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount15.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount15.setDescription('The number of timeout DHCPD dynamic DDNS updates during the last 15 minutes')
ibDHCPDDNSTimeoutCount60 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount60.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount60.setDescription('The number of timeout DHCPD dynamic DDNS updates during the last 60 minutes')
ibDHCPDDNSTimeoutCount1440 = MibScalar((1, 3, 6, 1, 4, 1, 7779, 3, 1, 1, 4, 1, 5, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount1440.setStatus('current')
if mibBuilder.loadTexts: ibDHCPDDNSTimeoutCount1440.setDescription('The number of timeout DHCPD dynamic DDNS updates during the last 1 day')
mibBuilder.exportSymbols("IB-DHCPONE-MIB", ibDhcpTotalNoOfAcks=ibDhcpTotalNoOfAcks, ibDhcpTotalNoOfOthers=ibDhcpTotalNoOfOthers, ibDHCPSubnetNetworkAddress=ibDHCPSubnetNetworkAddress, ibDHCPDDNSAvgLatency5=ibDHCPDDNSAvgLatency5, ibDhcpTotalNoOfReleases=ibDhcpTotalNoOfReleases, ibDhcpTotalNoOfInforms=ibDhcpTotalNoOfInforms, ibDHCPDDNSTimeoutCount5=ibDHCPDDNSTimeoutCount5, ibDhcpTotalNoOfOffers=ibDhcpTotalNoOfOffers, ibDhcpTotalNoOfRequests=ibDhcpTotalNoOfRequests, ibDHCPSubnetTable=ibDHCPSubnetTable, ibDHCPStatistics=ibDHCPStatistics, ibDHCPDDNSAvgLatency60=ibDHCPDDNSAvgLatency60, ibDhcpModule=ibDhcpModule, ibDhcpTotalNoOfDiscovers=ibDhcpTotalNoOfDiscovers, ibDHCPDDNSTimeoutCount60=ibDHCPDDNSTimeoutCount60, ibDHCPDDNSAvgLatency15=ibDHCPDDNSAvgLatency15, ibDHCPDDNSTimeoutCount15=ibDHCPDDNSTimeoutCount15, ibDHCPDDNSStats=ibDHCPDDNSStats, ibDhcpTotalNoOfDeclines=ibDhcpTotalNoOfDeclines, ibDHCPSubnetNetworkMask=ibDHCPSubnetNetworkMask, ibDhcpTotalNoOfNacks=ibDhcpTotalNoOfNacks, ibDHCPSubnetEntry=ibDHCPSubnetEntry, ibDHCPSubnetPercentUsed=ibDHCPSubnetPercentUsed, ibDhcpDeferredQueueSize=ibDhcpDeferredQueueSize, PYSNMP_MODULE_ID=ibDhcpModule, ibDHCPDDNSTimeoutCount1440=ibDHCPDDNSTimeoutCount1440, ibDHCPDDNSAvgLatency1440=ibDHCPDDNSAvgLatency1440)
|
[
"[email protected]"
] | |
d614a2dc512cfe4f235594be6aaf24e0db8ac8fd
|
bc8b9ca228fb90ce3e0aefd53b135cdd68329caa
|
/telethon/events/chataction.py
|
2927c8d0f0b65e052e3609fbb2fae46a45097883
|
[
"MIT"
] |
permissive
|
huangdehui2013/Telethon
|
1147ce9acba4db087efa39514a7cab6276becb92
|
dd954b8fbd1957844c8e241183764c3ced7698a9
|
refs/heads/master
| 2020-03-16T18:49:25.989083 | 2018-05-10T07:44:25 | 2018-05-10T07:44:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,657 |
py
|
from .common import EventBuilder, EventCommon, name_inner_event
from .. import utils
from ..tl import types, functions
@name_inner_event
class ChatAction(EventBuilder):
"""
Represents an action in a chat (such as user joined, left, or new pin).
"""
def build(self, update):
if isinstance(update, types.UpdateChannelPinnedMessage) and update.id == 0:
# Telegram does not always send
# UpdateChannelPinnedMessage for new pins
# but always for unpin, with update.id = 0
event = ChatAction.Event(types.PeerChannel(update.channel_id),
unpin=True)
elif isinstance(update, types.UpdateChatParticipantAdd):
event = ChatAction.Event(types.PeerChat(update.chat_id),
added_by=update.inviter_id or True,
users=update.user_id)
elif isinstance(update, types.UpdateChatParticipantDelete):
event = ChatAction.Event(types.PeerChat(update.chat_id),
kicked_by=True,
users=update.user_id)
elif (isinstance(update, (
types.UpdateNewMessage, types.UpdateNewChannelMessage))
and isinstance(update.message, types.MessageService)):
msg = update.message
action = update.message.action
if isinstance(action, types.MessageActionChatJoinedByLink):
event = ChatAction.Event(msg,
added_by=True,
users=msg.from_id)
elif isinstance(action, types.MessageActionChatAddUser):
event = ChatAction.Event(msg,
added_by=msg.from_id or True,
users=action.users)
elif isinstance(action, types.MessageActionChatDeleteUser):
event = ChatAction.Event(msg,
kicked_by=msg.from_id or True,
users=action.user_id)
elif isinstance(action, types.MessageActionChatCreate):
event = ChatAction.Event(msg,
users=action.users,
created=True,
new_title=action.title)
elif isinstance(action, types.MessageActionChannelCreate):
event = ChatAction.Event(msg,
created=True,
users=msg.from_id,
new_title=action.title)
elif isinstance(action, types.MessageActionChatEditTitle):
event = ChatAction.Event(msg,
users=msg.from_id,
new_title=action.title)
elif isinstance(action, types.MessageActionChatEditPhoto):
event = ChatAction.Event(msg,
users=msg.from_id,
new_photo=action.photo)
elif isinstance(action, types.MessageActionChatDeletePhoto):
event = ChatAction.Event(msg,
users=msg.from_id,
new_photo=True)
elif isinstance(action, types.MessageActionPinMessage):
# Telegram always sends this service message for new pins
event = ChatAction.Event(msg,
users=msg.from_id,
new_pin=msg.reply_to_msg_id)
else:
return
else:
return
event._entities = update._entities
return self._filter_event(event)
class Event(EventCommon):
"""
Represents the event of a new chat action.
Members:
action_message (`MessageAction <https://lonamiwebs.github.io/Telethon/types/message_action.html>`_):
The message invoked by this Chat Action.
new_pin (`bool`):
``True`` if there is a new pin.
new_photo (`bool`):
``True`` if there's a new chat photo (or it was removed).
photo (:tl:`Photo`, optional):
The new photo (or ``None`` if it was removed).
user_added (`bool`):
``True`` if the user was added by some other.
user_joined (`bool`):
``True`` if the user joined on their own.
user_left (`bool`):
``True`` if the user left on their own.
user_kicked (`bool`):
``True`` if the user was kicked by some other.
created (`bool`, optional):
``True`` if this chat was just created.
new_title (`str`, optional):
The new title string for the chat, if applicable.
unpin (`bool`):
``True`` if the existing pin gets unpinned.
"""
def __init__(self, where, new_pin=None, new_photo=None,
added_by=None, kicked_by=None, created=None,
users=None, new_title=None, unpin=None):
if isinstance(where, types.MessageService):
self.action_message = where
where = where.to_id
else:
self.action_message = None
super().__init__(chat_peer=where, msg_id=new_pin)
self.new_pin = isinstance(new_pin, int)
self._pinned_message = new_pin
self.new_photo = new_photo is not None
self.photo = \
new_photo if isinstance(new_photo, types.Photo) else None
self._added_by = None
self._kicked_by = None
self.user_added, self.user_joined, self.user_left,\
self.user_kicked, self.unpin = (False, False, False, False, False)
if added_by is True:
self.user_joined = True
elif added_by:
self.user_added = True
self._added_by = added_by
if kicked_by is True:
self.user_left = True
elif kicked_by:
self.user_kicked = True
self._kicked_by = kicked_by
self.created = bool(created)
self._user_peers = users if isinstance(users, list) else [users]
self._users = None
self._input_users = None
self.new_title = new_title
self.unpin = unpin
def respond(self, *args, **kwargs):
"""
Responds to the chat action message (not as a reply).
Shorthand for ``client.send_message(event.chat, ...)``.
"""
return self._client.send_message(self.input_chat, *args, **kwargs)
def reply(self, *args, **kwargs):
"""
Replies to the chat action message (as a reply). Shorthand for
``client.send_message(event.chat, ..., reply_to=event.message.id)``.
Has the same effect as ``.respond()`` if there is no message.
"""
if not self.action_message:
return self.respond(*args, **kwargs)
kwargs['reply_to'] = self.action_message.id
return self._client.send_message(self.input_chat, *args, **kwargs)
def delete(self, *args, **kwargs):
"""
            Deletes the chat action message. You're responsible for checking
            whether you have the permission to do so, or for catching the
            resulting error otherwise. This is a shorthand for
``client.delete_messages(event.chat, event.message, ...)``.
Does nothing if no message action triggered this event.
"""
if self.action_message:
return self._client.delete_messages(self.input_chat,
[self.action_message],
*args, **kwargs)
@property
def pinned_message(self):
"""
If ``new_pin`` is ``True``, this returns the (:tl:`Message`)
object that was pinned.
"""
if self._pinned_message == 0:
return None
if isinstance(self._pinned_message, int) and self.input_chat:
r = self._client(functions.channels.GetMessagesRequest(
self._input_chat, [self._pinned_message]
))
try:
self._pinned_message = next(
x for x in r.messages
if isinstance(x, types.Message)
and x.id == self._pinned_message
)
except StopIteration:
pass
if isinstance(self._pinned_message, types.Message):
return self._pinned_message
@property
def added_by(self):
"""
The user who added ``users``, if applicable (``None`` otherwise).
"""
            if self._added_by and not isinstance(self._added_by, types.User):
                # resolve through the cached entities first; keep the original
                # peer around so the fallback lookup doesn't receive None
                added_by = self._entities.get(utils.get_peer_id(self._added_by))
                if not added_by:
                    added_by = self._client.get_entity(self._added_by)
                self._added_by = added_by
            return self._added_by
@property
def kicked_by(self):
"""
The user who kicked ``users``, if applicable (``None`` otherwise).
"""
            if self._kicked_by and not isinstance(self._kicked_by, types.User):
                # same pattern as added_by: don't overwrite the peer before
                # the fallback lookup needs it
                kicked_by = self._entities.get(utils.get_peer_id(self._kicked_by))
                if not kicked_by:
                    kicked_by = self._client.get_entity(self._kicked_by)
                self._kicked_by = kicked_by
            return self._kicked_by
@property
def user(self):
"""
The first user that takes part in this action (e.g. joined).
Might be ``None`` if the information can't be retrieved or
there is no user taking part.
"""
if self.users:
return self._users[0]
@property
def input_user(self):
"""
Input version of the ``self.user`` property.
"""
if self.input_users:
return self._input_users[0]
@property
def user_id(self):
"""
Returns the marked signed ID of the first user, if any.
"""
if self._user_peers:
return utils.get_peer_id(self._user_peers[0])
@property
def users(self):
"""
A list of users that take part in this action (e.g. joined).
Might be empty if the information can't be retrieved or there
are no users taking part.
"""
if not self._user_peers:
return []
if self._users is None:
have, missing = [], []
for peer in self._user_peers:
user = self._entities.get(utils.get_peer_id(peer))
if user:
have.append(user)
else:
missing.append(peer)
try:
missing = self._client.get_entity(missing)
except (TypeError, ValueError):
missing = []
self._users = have + missing
return self._users
@property
def input_users(self):
"""
Input version of the ``self.users`` property.
"""
if self._input_users is None and self._user_peers:
self._input_users = []
for peer in self._user_peers:
try:
self._input_users.append(self._client.get_input_entity(
peer
))
except (TypeError, ValueError):
pass
return self._input_users
@property
def user_ids(self):
"""
Returns the marked signed ID of the users, if any.
"""
if self._user_peers:
return [utils.get_peer_id(u) for u in self._user_peers]
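# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# Typical handler registration, assuming a connected TelegramClient `client`
# from this same (pre-asyncio) Telethon version:
#
#   from telethon import events
#
#   @client.on(events.ChatAction)
#   def handler(event):
#       if event.user_joined or event.user_added:
#           event.reply('Welcome!')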
|
[
"[email protected]"
] | |
9770331cc4ed8b9caba652786a87ec8aced75466
|
e94c7bd97d8b8b3b2945d357521bd346e66d5d75
|
/test/lmp/script/gen_txt/test_signature.py
|
1a75301a671acbdfbd9ac9ea870cb204b57d9bc1
|
[
"Beerware"
] |
permissive
|
ProFatXuanAll/language-model-playground
|
4d34eacdc9536c57746d6325d71ebad0d329080e
|
ec4442a0cee988a4412fb90b757c87749b70282b
|
refs/heads/main
| 2023-02-19T16:21:06.926421 | 2022-09-25T13:35:01 | 2022-09-25T13:35:01 | 202,471,099 | 11 | 26 |
NOASSERTION
| 2023-02-16T06:39:40 | 2019-08-15T03:57:23 |
Python
|
UTF-8
|
Python
| false | false | 1,040 |
py
|
"""Test :py:mod:`lmp.script.gen_txt` signatures."""
import argparse
import inspect
from inspect import Parameter, Signature
from typing import List
import lmp.script.gen_txt
def test_module_method() -> None:
"""Ensure module methods' signatures."""
assert hasattr(lmp.script.gen_txt, 'parse_args')
assert inspect.isfunction(lmp.script.gen_txt.parse_args)
assert inspect.signature(lmp.script.gen_txt.parse_args) == Signature(
parameters=[
Parameter(
annotation=List[str],
default=Parameter.empty,
kind=Parameter.POSITIONAL_OR_KEYWORD,
name='argv',
),
],
return_annotation=argparse.Namespace,
)
assert hasattr(lmp.script.gen_txt, 'main')
assert inspect.isfunction(lmp.script.gen_txt.main)
assert inspect.signature(lmp.script.gen_txt.main) == Signature(
parameters=[
Parameter(
annotation=List[str],
default=Parameter.empty,
kind=Parameter.POSITIONAL_OR_KEYWORD,
name='argv',
),
],
return_annotation=None,
)
|
[
"[email protected]"
] | |
9e6666d6b99be4eaa286ee65de5946bc52dde225
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_managed_instance_vulnerability_assessments_operations.py
|
b81f57346b44a5fb1b5e3a63d654f6f168e9144d
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 |
MIT
| 2022-07-19T08:05:23 | 2018-11-16T22:15:30 |
Python
|
UTF-8
|
Python
| false | false | 21,802 |
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._managed_instance_vulnerability_assessments_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_instance_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedInstanceVulnerabilityAssessmentsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.sql.aio.SqlManagementClient`'s
:attr:`managed_instance_vulnerability_assessments` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self,
resource_group_name: str,
managed_instance_name: str,
vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
**kwargs: Any
) -> _models.ManagedInstanceVulnerabilityAssessment:
"""Gets the managed instance's vulnerability assessment.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance for which the vulnerability
assessment is defined. Required.
:type managed_instance_name: str
:param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
Required.
:type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedInstanceVulnerabilityAssessment or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
cls: ClsType[_models.ManagedInstanceVulnerabilityAssessment] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
vulnerability_assessment_name=vulnerability_assessment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ManagedInstanceVulnerabilityAssessment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}"
}
@overload
async def create_or_update(
self,
resource_group_name: str,
managed_instance_name: str,
vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
parameters: _models.ManagedInstanceVulnerabilityAssessment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedInstanceVulnerabilityAssessment:
"""Creates or updates the managed instance's vulnerability assessment. Learn more about setting
SQL vulnerability assessment with managed identity -
https://docs.microsoft.com/azure/azure-sql/database/sql-database-vulnerability-assessment-storage.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance for which the vulnerability
assessment is defined. Required.
:type managed_instance_name: str
:param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
Required.
:type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
:param parameters: The requested resource. Required.
:type parameters: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedInstanceVulnerabilityAssessment or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
managed_instance_name: str,
vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedInstanceVulnerabilityAssessment:
"""Creates or updates the managed instance's vulnerability assessment. Learn more about setting
SQL vulnerability assessment with managed identity -
https://docs.microsoft.com/azure/azure-sql/database/sql-database-vulnerability-assessment-storage.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance for which the vulnerability
assessment is defined. Required.
:type managed_instance_name: str
:param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
Required.
:type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
:param parameters: The requested resource. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedInstanceVulnerabilityAssessment or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
managed_instance_name: str,
vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
parameters: Union[_models.ManagedInstanceVulnerabilityAssessment, IO],
**kwargs: Any
) -> _models.ManagedInstanceVulnerabilityAssessment:
"""Creates or updates the managed instance's vulnerability assessment. Learn more about setting
SQL vulnerability assessment with managed identity -
https://docs.microsoft.com/azure/azure-sql/database/sql-database-vulnerability-assessment-storage.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance for which the vulnerability
assessment is defined. Required.
:type managed_instance_name: str
:param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
Required.
:type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
        :param parameters: The requested resource. Is either a model type or an IO type. Required.
:type parameters: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedInstanceVulnerabilityAssessment or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.ManagedInstanceVulnerabilityAssessment] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "ManagedInstanceVulnerabilityAssessment")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
vulnerability_assessment_name=vulnerability_assessment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("ManagedInstanceVulnerabilityAssessment", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("ManagedInstanceVulnerabilityAssessment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}"
}
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
managed_instance_name: str,
vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
**kwargs: Any
) -> None:
"""Removes the managed instance's vulnerability assessment.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance for which the vulnerability
assessment is defined. Required.
:type managed_instance_name: str
:param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
Required.
:type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
vulnerability_assessment_name=vulnerability_assessment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}"
}
@distributed_trace
def list_by_instance(
self, resource_group_name: str, managed_instance_name: str, **kwargs: Any
) -> AsyncIterable["_models.ManagedInstanceVulnerabilityAssessment"]:
"""Gets the managed instance's vulnerability assessment policies.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance for which the vulnerability
         assessments are defined. Required.
:type managed_instance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedInstanceVulnerabilityAssessment or the
result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.ManagedInstanceVulnerabilityAssessment]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
cls: ClsType[_models.ManagedInstanceVulnerabilityAssessmentListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_instance_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_instance.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedInstanceVulnerabilityAssessmentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_instance.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/vulnerabilityAssessments"
}
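# --- Usage sketch (illustrative; not part of the generated client) --------
# A minimal, hedged example of driving these operations through the async
# SqlManagementClient.  The resource-group and instance names below are
# placeholders, and the azure-identity package is assumed to be installed
# for the credential.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.sql.aio import SqlManagementClient
    async def _show_assessments() -> None:
        async with DefaultAzureCredential() as credential:
            async with SqlManagementClient(credential, "<subscription-id>") as client:
                ops = client.managed_instance_vulnerability_assessments
                # Page through every vulnerability assessment on the instance.
                async for assessment in ops.list_by_instance(
                    resource_group_name="my-rg", managed_instance_name="my-mi"
                ):
                    print(assessment.name)
    asyncio.run(_show_assessments())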
|
[
"[email protected]"
] | |
68b259649181c54eea9faebc337711ab016af534
|
5c4289608693609de3d755674cba53b77cbc4c69
|
/Python_Study/2课堂练习/Python基础班/06_名片管理系统/cards_main.py
|
32a8e9caa251e2f2c3000e3de1f3a1e6e5ad5bcf
|
[
"Apache-2.0"
] |
permissive
|
vipliujunjie/HouseCore
|
95892e632f840f22715d08467d6610195d562261
|
e9fa5ebc048cbede7823ac59a011a554bddf8674
|
refs/heads/master
| 2021-02-05T13:09:43.962224 | 2020-02-28T14:46:26 | 2020-02-28T14:46:26 | 243,783,276 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,298 |
py
|
#! /Library/Frameworks/Python.framework/Versions/3.7/bin/python3
import cards_tools
# Infinite loop: the user decides when to exit.
while True:
    # TODO(刘俊杰) show the feature menu
    cards_tools.show_menu()
    action_str = input("Please enter the action to perform: ")
    print("You selected action [%s]" % action_str)
    # 1, 2, 3 are card operations
    if action_str in ["1", "2", "3"]:  # membership test against the allowed choices
        # add a new card
        if action_str == "1":
            cards_tools.new_card()
        # show all cards
        if action_str == "2":
            cards_tools.show_all()
        # search for a card
        if action_str == "3":
            cards_tools.search_card()
    # 0 exits the system
    elif action_str == "0":
        # While developing, if you don't want to write a branch body yet,
        # the pass keyword can serve as a placeholder that keeps the code
        # structure valid; at run time pass does nothing.
        print("\nWelcome back to the Card Management System")
        break
    # any other input prompts the user that it is invalid
    else:
        print("Invalid input, please choose again")
|
[
"[email protected]"
] | |
230c05c7d30324adcb69a3442767523215dea7ec
|
a56252fda5c9e42eff04792c6e16e413ad51ba1a
|
/resources/usr/local/lib/python2.7/dist-packages/sklearn/metrics/cluster/supervised.py
|
31d1a45b74047c04f16b5e95a5fec55fca7b256f
|
[
"Apache-2.0"
] |
permissive
|
edawson/parliament2
|
4231e692565dbecf99d09148e75c00750e6797c4
|
2632aa3484ef64c9539c4885026b705b737f6d1e
|
refs/heads/master
| 2021-06-21T23:13:29.482239 | 2020-12-07T21:10:08 | 2020-12-07T21:10:08 | 150,246,745 | 0 | 0 |
Apache-2.0
| 2019-09-11T03:22:55 | 2018-09-25T10:21:03 |
Python
|
UTF-8
|
Python
| false | false | 26,696 |
py
|
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from ...utils.fixes import unique
from .expected_mutual_info_fast import expected_mutual_information
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = unique(labels_true, return_inverse=True)
clusters, cluster_idx = unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
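# For example, two four-sample labelings yield a 2x2 count matrix:
#
#   >>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
#   array([[1, 1],
#          [0, 2]])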
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari: float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
    Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate, given the knowledge of the ground
    truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://acl.ldc.upenn.edu/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
    Clusters that include samples from different classes do not make for a
    homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://acl.ldc.upenn.edu/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://acl.ldc.upenn.edu/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
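# Worked example: identical two-cluster labelings of four samples give
# MI = log(2) (natural log), i.e. the entropy of either labeling:
#
#   >>> mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])  # doctest: +ELLIPSIS
#   0.69...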
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
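# For example, a balanced two-class labeling has entropy log(2):
#
#   >>> entropy([0, 0, 1, 1])  # doctest: +ELLIPSIS
#   0.69...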
|
[
"[email protected]"
] | |
468ec6b362681d9a3018b5f0182ef31622ef30b1
|
1b0a729f6e20c542a6370785a49c181c0675e334
|
/main.py
|
35fb3f77ad0ea393411e9e0c57d85315d85bd310
|
[] |
no_license
|
fans656/mint-dev
|
68125c4b41ab64b20d54a2b19e8bf0179dc4636b
|
408f6f055670b15a3f3ee9c9ec086b1090cce372
|
refs/heads/master
| 2021-05-04T11:43:44.740116 | 2016-09-07T13:43:44 | 2016-09-07T13:43:44 | 45,515,119 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 239 |
py
|
from mint import *
from mint.protocols.test import Retransmit
# Three hosts and a switch; the third argument to link() is presumably a
# per-link parameter such as the delay.
a, b, c = Host(), Host(), Host()
s = Switch()
link(a, s.tips[0], 1)
link(b, s.tips[1], 2)
#link(c, s.tips[2], 3)
# Layer a retransmission protocol onto host a, then send a message.
a += Retransmit()
a.send('hi')
#b.send('me').at(5)
start()
|
[
"[email protected]"
] | |
ad42586e96c02a379336285a2bc1b60cb0230dec
|
5e6d8b9989247801718dd1f10009f0f7f54c1eb4
|
/sdk/python/pulumi_azure_native/containerinstance/v20180401/container_group.py
|
393f32a489204ca350e64cfea46921dc0a2db827
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
vivimouret29/pulumi-azure-native
|
d238a8f91688c9bf09d745a7280b9bf2dd6d44e0
|
1cbd988bcb2aa75a83e220cb5abeb805d6484fce
|
refs/heads/master
| 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 21,452 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ContainerGroupArgs', 'ContainerGroup']
@pulumi.input_type
class ContainerGroupArgs:
def __init__(__self__, *,
containers: pulumi.Input[Sequence[pulumi.Input['ContainerArgs']]],
os_type: pulumi.Input[Union[str, 'OperatingSystemTypes']],
resource_group_name: pulumi.Input[str],
container_group_name: Optional[pulumi.Input[str]] = None,
image_registry_credentials: Optional[pulumi.Input[Sequence[pulumi.Input['ImageRegistryCredentialArgs']]]] = None,
ip_address: Optional[pulumi.Input['IpAddressArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
restart_policy: Optional[pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input['VolumeArgs']]]] = None):
"""
The set of arguments for constructing a ContainerGroup resource.
:param pulumi.Input[Sequence[pulumi.Input['ContainerArgs']]] containers: The containers within the container group.
:param pulumi.Input[Union[str, 'OperatingSystemTypes']] os_type: The operating system type required by the containers in the container group.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] container_group_name: The name of the container group.
        :param pulumi.Input[Sequence[pulumi.Input['ImageRegistryCredentialArgs']]] image_registry_credentials: The image registry credentials from which the container group is created.
:param pulumi.Input['IpAddressArgs'] ip_address: The IP address type of the container group.
:param pulumi.Input[str] location: The resource location.
:param pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']] restart_policy: Restart policy for all containers within the container group.
- `Always` Always restart
- `OnFailure` Restart on failure
- `Never` Never restart
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The resource tags.
:param pulumi.Input[Sequence[pulumi.Input['VolumeArgs']]] volumes: The list of volumes that can be mounted by containers in this container group.
"""
pulumi.set(__self__, "containers", containers)
pulumi.set(__self__, "os_type", os_type)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if container_group_name is not None:
pulumi.set(__self__, "container_group_name", container_group_name)
if image_registry_credentials is not None:
pulumi.set(__self__, "image_registry_credentials", image_registry_credentials)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if location is not None:
pulumi.set(__self__, "location", location)
if restart_policy is not None:
pulumi.set(__self__, "restart_policy", restart_policy)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if volumes is not None:
pulumi.set(__self__, "volumes", volumes)
@property
@pulumi.getter
def containers(self) -> pulumi.Input[Sequence[pulumi.Input['ContainerArgs']]]:
"""
The containers within the container group.
"""
return pulumi.get(self, "containers")
@containers.setter
def containers(self, value: pulumi.Input[Sequence[pulumi.Input['ContainerArgs']]]):
pulumi.set(self, "containers", value)
@property
@pulumi.getter(name="osType")
def os_type(self) -> pulumi.Input[Union[str, 'OperatingSystemTypes']]:
"""
The operating system type required by the containers in the container group.
"""
return pulumi.get(self, "os_type")
@os_type.setter
def os_type(self, value: pulumi.Input[Union[str, 'OperatingSystemTypes']]):
pulumi.set(self, "os_type", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="containerGroupName")
def container_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the container group.
"""
return pulumi.get(self, "container_group_name")
@container_group_name.setter
def container_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_group_name", value)
@property
@pulumi.getter(name="imageRegistryCredentials")
def image_registry_credentials(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ImageRegistryCredentialArgs']]]]:
"""
        The image registry credentials from which the container group is created.
"""
return pulumi.get(self, "image_registry_credentials")
@image_registry_credentials.setter
def image_registry_credentials(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ImageRegistryCredentialArgs']]]]):
pulumi.set(self, "image_registry_credentials", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[pulumi.Input['IpAddressArgs']]:
"""
The IP address type of the container group.
"""
return pulumi.get(self, "ip_address")
@ip_address.setter
def ip_address(self, value: Optional[pulumi.Input['IpAddressArgs']]):
pulumi.set(self, "ip_address", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="restartPolicy")
def restart_policy(self) -> Optional[pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']]]:
"""
Restart policy for all containers within the container group.
- `Always` Always restart
- `OnFailure` Restart on failure
- `Never` Never restart
"""
return pulumi.get(self, "restart_policy")
@restart_policy.setter
def restart_policy(self, value: Optional[pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']]]):
pulumi.set(self, "restart_policy", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VolumeArgs']]]]:
"""
The list of volumes that can be mounted by containers in this container group.
"""
return pulumi.get(self, "volumes")
@volumes.setter
def volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VolumeArgs']]]]):
pulumi.set(self, "volumes", value)
class ContainerGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_group_name: Optional[pulumi.Input[str]] = None,
containers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerArgs']]]]] = None,
image_registry_credentials: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ImageRegistryCredentialArgs']]]]] = None,
ip_address: Optional[pulumi.Input[pulumi.InputType['IpAddressArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
os_type: Optional[pulumi.Input[Union[str, 'OperatingSystemTypes']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restart_policy: Optional[pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VolumeArgs']]]]] = None,
__props__=None):
"""
A container group.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] container_group_name: The name of the container group.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerArgs']]]] containers: The containers within the container group.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ImageRegistryCredentialArgs']]]] image_registry_credentials: The image registry credentials from which the container group is created.
:param pulumi.Input[pulumi.InputType['IpAddressArgs']] ip_address: The IP address type of the container group.
:param pulumi.Input[str] location: The resource location.
:param pulumi.Input[Union[str, 'OperatingSystemTypes']] os_type: The operating system type required by the containers in the container group.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']] restart_policy: Restart policy for all containers within the container group.
- `Always` Always restart
- `OnFailure` Restart on failure
- `Never` Never restart
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The resource tags.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VolumeArgs']]]] volumes: The list of volumes that can be mounted by containers in this container group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ContainerGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A container group.
:param str resource_name: The name of the resource.
:param ContainerGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ContainerGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
container_group_name: Optional[pulumi.Input[str]] = None,
containers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ContainerArgs']]]]] = None,
image_registry_credentials: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ImageRegistryCredentialArgs']]]]] = None,
ip_address: Optional[pulumi.Input[pulumi.InputType['IpAddressArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
os_type: Optional[pulumi.Input[Union[str, 'OperatingSystemTypes']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
restart_policy: Optional[pulumi.Input[Union[str, 'ContainerGroupRestartPolicy']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VolumeArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ContainerGroupArgs.__new__(ContainerGroupArgs)
__props__.__dict__["container_group_name"] = container_group_name
if containers is None and not opts.urn:
raise TypeError("Missing required property 'containers'")
__props__.__dict__["containers"] = containers
__props__.__dict__["image_registry_credentials"] = image_registry_credentials
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["location"] = location
if os_type is None and not opts.urn:
raise TypeError("Missing required property 'os_type'")
__props__.__dict__["os_type"] = os_type
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["restart_policy"] = restart_policy
__props__.__dict__["tags"] = tags
__props__.__dict__["volumes"] = volumes
__props__.__dict__["instance_view"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerinstance/v20180401:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20170801preview:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20170801preview:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20171001preview:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20171001preview:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20171201preview:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20171201preview:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20180201preview:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20180201preview:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20180601:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20180601:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20180901:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20180901:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20181001:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20181001:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20191201:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20191201:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20201101:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20201101:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20210301:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20210301:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20210701:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20210701:ContainerGroup"), pulumi.Alias(type_="azure-native:containerinstance/v20210901:ContainerGroup"), pulumi.Alias(type_="azure-nextgen:containerinstance/v20210901:ContainerGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ContainerGroup, __self__).__init__(
'azure-native:containerinstance/v20180401:ContainerGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ContainerGroup':
"""
Get an existing ContainerGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ContainerGroupArgs.__new__(ContainerGroupArgs)
__props__.__dict__["containers"] = None
__props__.__dict__["image_registry_credentials"] = None
__props__.__dict__["instance_view"] = None
__props__.__dict__["ip_address"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["os_type"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["restart_policy"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["volumes"] = None
return ContainerGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def containers(self) -> pulumi.Output[Sequence['outputs.ContainerResponse']]:
"""
The containers within the container group.
"""
return pulumi.get(self, "containers")
@property
@pulumi.getter(name="imageRegistryCredentials")
def image_registry_credentials(self) -> pulumi.Output[Optional[Sequence['outputs.ImageRegistryCredentialResponse']]]:
"""
        The image registry credentials from which the container group is created.
"""
return pulumi.get(self, "image_registry_credentials")
@property
@pulumi.getter(name="instanceView")
def instance_view(self) -> pulumi.Output['outputs.ContainerGroupResponseInstanceView']:
"""
The instance view of the container group. Only valid in response.
"""
return pulumi.get(self, "instance_view")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[Optional['outputs.IpAddressResponse']]:
"""
The IP address type of the container group.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="osType")
def os_type(self) -> pulumi.Output[str]:
"""
The operating system type required by the containers in the container group.
"""
return pulumi.get(self, "os_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the container group. This only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="restartPolicy")
def restart_policy(self) -> pulumi.Output[Optional[str]]:
"""
Restart policy for all containers within the container group.
- `Always` Always restart
- `OnFailure` Restart on failure
- `Never` Never restart
"""
return pulumi.get(self, "restart_policy")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def volumes(self) -> pulumi.Output[Optional[Sequence['outputs.VolumeResponse']]]:
"""
The list of volumes that can be mounted by containers in this container group.
"""
return pulumi.get(self, "volumes")
|
[
"[email protected]"
] | |
17edec3a0cbd5397bc360dc2289f7aa23fef2f2b
|
02122ec38633c178ced34d8a027addc919b4c200
|
/Nutrients/api/urls.py
|
757826e0b86fe90b0ab82e9e332d35f5dd0ee419
|
[] |
no_license
|
SIBU99/serverCVKM
|
07907b3c416892bcc432b9317506927112750a93
|
8182f2274216016a15a2a98ea5a31d7e05222ed5
|
refs/heads/master
| 2023-01-12T10:19:54.966211 | 2020-11-10T08:33:41 | 2020-11-10T08:33:41 | 311,407,784 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 182 |
py
|
from django.urls import path
from .views import NutrientExamination
urlpatterns = [
path("nutrient-examination/", NutrientExamination.as_view(), name="nutrient-examination"),
]
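For context, a plausible shape of the view this route wires up (the real NutrientExamination lives in .views and is not shown in this file; the logic below is an illustrative assumption, not the project's code):

from rest_framework.views import APIView
from rest_framework.response import Response

class NutrientExamination(APIView):
    def post(self, request):
        # Placeholder: echo the submitted payload back to the caller.
        return Response({"received": request.data})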
|
[
"[email protected]"
] | |
2f42da8393cd536ef56b1a0bef15efe947177b66
|
d83118503614bb83ad8edb72dda7f449a1226f8b
|
/src/dprj/platinumegg/app/cabaret/views/mgr/model_edit/trade_shop.py
|
d402834b28b5ad1f8056bc5d4ec9eec808d29ae6
|
[] |
no_license
|
hitandaway100/caba
|
686fe4390e182e158cd9714c90024a082deb8c69
|
492bf477ac00c380f2b2758c86b46aa7e58bbad9
|
refs/heads/master
| 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,156 |
py
|
# -*- coding: utf-8 -*-
from platinumegg.app.cabaret.views.mgr.model_edit import AdminModelEditHandler,\
AppModelForm, ModelEditValidError, AppModelChoiceField
from defines import Defines
from platinumegg.app.cabaret.util.api import BackendApi
from platinumegg.app.cabaret.models.TradeShop import TradeShopMaster, TradeShopItemMaster
from platinumegg.app.cabaret.models.Schedule import ScheduleMaster
class Handler(AdminModelEditHandler):
"""マスターデータの操作.
"""
class Form(AppModelForm):
class Meta:
model = TradeShopMaster
exclude = (
Defines.MASTER_EDITTIME_COLUMN,
)
        schedule = AppModelChoiceField(ScheduleMaster, required=False, label=u'Period')
def setting_property(self):
        self.MODEL_LABEL = u'Trade Shop'
def valid_insert(self, master):
self.__valid_master(master)
def valid_update(self, master):
self.__valid_master(master)
def __valid_master(self, master):
model_mgr = self.getModelMgr()
self.__check_schedule(model_mgr, master)
        self.__check_trade_shop_item_master_ids(model_mgr, master)
model_mgr.write_all()
def __check_schedule(self, model_mgr, master):
model = model_mgr.get_model(ScheduleMaster, master.schedule)
if model is None:
            raise ModelEditValidError(u'A non-existent ID was specified for the schedule. id=%d' % master.id)
    def __check_trade_shop_item_master_ids(self, model_mgr, master):
        if not isinstance(master.trade_shop_item_master_ids, list):
            raise ModelEditValidError(u'The JSON in trade_shop_item_master_ids is corrupted. id=%d' % master.id)
for trade_shop_item_master_id in master.trade_shop_item_master_ids:
model = model_mgr.get_model(TradeShopItemMaster, trade_shop_item_master_id)
if model is None:
                raise ModelEditValidError(u'An id specified in trade_shop_item_master_ids does not exist in TradeShopItemMaster. id=%d' % master.id)
def main(request):
return Handler.run(request)
|
[
"[email protected]"
] | |
47b2fcaa1e74c97b42be077420a4335f38b24f8d
|
a7ff1ba9437204454c6b8639e99b007393c64118
|
/synapse/tools/aha/enroll.py
|
a643a485268842bbc531afab92dd9b5e8bf84112
|
[
"Apache-2.0"
] |
permissive
|
vishalbelsare/synapse
|
67013933db31ac71a4074b08a46b129774f63e47
|
a418b1354b2f94e32644ede612c271a6c362ccae
|
refs/heads/master
| 2023-09-01T10:45:34.439767 | 2022-05-13T21:07:20 | 2022-05-13T21:07:20 | 164,022,574 | 0 | 0 |
Apache-2.0
| 2022-05-15T07:45:07 | 2019-01-03T21:01:32 |
Python
|
UTF-8
|
Python
| false | false | 2,609 |
py
|
import os
import sys
import asyncio
import argparse
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.output as s_output
import synapse.lib.certdir as s_certdir
descr = '''
Use a one-time use key to initialize your AHA user environment.
Examples:
    python -m synapse.tools.aha.enroll tcp://aha.loop.vertex.link:27272/b751e6c3e6fc2dad7a28d67e315e1874
'''
async def main(argv, outp=s_output.stdout):
pars = argparse.ArgumentParser(prog='provision', description=descr)
pars.add_argument('onceurl', help='The one-time use AHA user enrollment URL.')
opts = pars.parse_args(argv)
async with s_telepath.withTeleEnv():
certpath = s_common.getSynDir('certs')
yamlpath = s_common.getSynPath('telepath.yaml')
teleyaml = s_common.yamlload(yamlpath)
if teleyaml is None:
teleyaml = {}
teleyaml.setdefault('version', 1)
teleyaml.setdefault('aha:servers', ())
s_common.gendir(certpath)
certdir = s_certdir.CertDir(path=certpath)
async with await s_telepath.openurl(opts.onceurl) as prov:
userinfo = await prov.getUserInfo()
ahaurls = userinfo.get('aha:urls')
ahauser = userinfo.get('aha:user')
ahanetw = userinfo.get('aha:network')
username = f'{ahauser}@{ahanetw}'
capath = certdir.getCaCertPath(ahanetw)
if capath is not None:
                os.unlink(capath)
byts = await prov.getCaCert()
capath = certdir.saveCaCertByts(byts)
outp.printf(f'Saved CA certificate: {capath}')
keypath = certdir.getUserKeyPath(username)
if keypath is not None:
                os.unlink(keypath)
crtpath = certdir.getUserCertPath(username)
if crtpath is not None:
                os.unlink(crtpath)
xcsr = certdir.genUserCsr(username)
byts = await prov.signUserCsr(xcsr)
crtpath = certdir.saveUserCertByts(byts)
outp.printf(f'Saved user certificate: {crtpath}')
ahaurls = s_telepath.modurl(ahaurls, user=ahauser)
if ahaurls not in teleyaml.get('aha:servers'):
outp.printf('Updating known AHA servers')
servers = list(teleyaml.get('aha:servers'))
servers.append(ahaurls)
teleyaml['aha:servers'] = servers
s_common.yamlsave(teleyaml, yamlpath)
if __name__ == '__main__': # pragma: no cover
sys.exit(asyncio.run(main(sys.argv[1:])))
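For reference, a sketch of the telepath.yaml this tool writes, assuming one enrolled user on one AHA network (hostname, port, and user are illustrative):

version: 1
aha:servers:
- ssl://[email protected]:27272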
|
[
"[email protected]"
] | |
ace388a41b74682d643ef7c6c7176d8cf1f6b831
|
3a5d8cdc7ac14c389fd9426f3f39c3b1dc906dda
|
/nautobot/extras/tests/test_jobs.py
|
e04668889b1dffc9a3853d2e190027a5f793514f
|
[
"Apache-2.0"
] |
permissive
|
nammie-punshine/nautobot
|
f3cdb9d269c37a74706c105d237b883650f10465
|
d6227b211ad89f25233a8791937cd75092421c8a
|
refs/heads/main
| 2023-03-08T10:51:29.437859 | 2021-02-24T20:44:32 | 2021-02-24T20:44:32 | 342,080,836 | 0 | 0 |
Apache-2.0
| 2021-02-25T01:01:36 | 2021-02-25T01:01:36 | null |
UTF-8
|
Python
| false | false | 1,970 |
py
|
import os
import uuid
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from nautobot.extras.choices import JobResultStatusChoices
from nautobot.extras.jobs import get_job, run_job
from nautobot.extras.models import JobResult
from nautobot.utilities.testing import TestCase
class JobTest(TestCase):
"""
Test basic jobs to ensure importing works.
"""
def test_job_pass(self):
"""
Job test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_pass"
name = "TestPass"
job_class = get_job(f"local/{module}/{name}")
job_content_type = ContentType.objects.get(app_label="extras", model="job")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result=job_result)
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
def test_job_fail(self):
"""
Job test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_fail"
name = "TestFail"
job_class = get_job(f"local/{module}/{name}")
job_content_type = ContentType.objects.get(app_label="extras", model="job")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result=job_result)
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)
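For context, one plausible shape of a dummy job module such as extras/tests/dummy_jobs/test_pass.py (the real fixture ships with nautobot; this body is an illustrative assumption):

from nautobot.extras.jobs import Job

class TestPass(Job):
    """
    Job with pass result.
    """
    def run(self, data, commit):
        pass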
|
[
"[email protected]"
] | |
0829499a37fc13ac636386433fe887068436789a
|
b8ab0e1ac2634741a05e5fef583585b597a6cdcf
|
/wsltools/utils/faker/providers/date_time/fil_PH/__init__.py
|
42a736439193745ecd672678cc198a9d48ef49e4
|
[
"MIT"
] |
permissive
|
Symbo1/wsltools
|
be99716eac93bfc270a5ef0e47769290827fc0c4
|
0b6e536fc85c707a1c81f0296c4e91ca835396a1
|
refs/heads/master
| 2022-11-06T16:07:50.645753 | 2020-06-30T13:08:00 | 2020-06-30T13:08:00 | 256,140,035 | 425 | 34 |
MIT
| 2020-04-16T14:10:45 | 2020-04-16T07:22:21 |
Python
|
UTF-8
|
Python
| false | false | 829 |
py
|
from .. import Provider as DateTimeProvider
class Provider(DateTimeProvider):
"""Provider for datetimes for fil_PH locale"""
DAY_NAMES = {
'0': 'Linggo',
'1': 'Lunes',
'2': 'Martes',
'3': 'Miyerkules',
'4': 'Huwebes',
'5': 'Biyernes',
'6': 'Sabado',
}
MONTH_NAMES = {
'01': 'Enero',
'02': 'Pebrero',
'03': 'Marso',
'04': 'Abril',
'05': 'Mayo',
'06': 'Hunyo',
'07': 'Hulyo',
'08': 'Agosto',
'09': 'Setyembre',
'10': 'Oktubre',
'11': 'Nobyembre',
'12': 'Disyembre',
}
def day_of_week(self):
day = self.date('%w')
return self.DAY_NAMES[day]
def month_name(self):
month = self.month()
return self.MONTH_NAMES[month]
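A short usage sketch (assuming the provider is registered under the fil_PH locale, as in upstream Faker):

from faker import Faker

fake = Faker("fil_PH")
print(fake.day_of_week())  # e.g. 'Lunes'
print(fake.month_name())   # e.g. 'Enero'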
|
[
"[email protected]"
] | |
b3cffcaaac0bef8d65f8fdbae1aa31e4b48f15ed
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/FiltersAndTransformers/Scripts/JoinIfSingleElementOnly/JoinIfSingleElementOnly.py
|
c91e49454d83bdef53b8f6eeabbd9dcc16b073fc
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 |
MIT
| 2023-09-14T20:55:24 | 2016-06-06T12:17:02 |
Python
|
UTF-8
|
Python
| false | false | 466 |
py
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def return_first_element_if_single(value):
res = value
if isinstance(value, list):
if len(value) == 1:
res = value[0]
return res
def main(): # pragma: no cover
value = demisto.args()["value"]
res = return_first_element_if_single(value)
demisto.results(res)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
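A quick behavior sketch of the transformer (values illustrative):

return_first_element_if_single(["only"])      # -> "only"
return_first_element_if_single(["a", "b"])    # -> ["a", "b"]
return_first_element_if_single("plain")       # -> "plain"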
|
[
"[email protected]"
] | |
8d80bf9946e1f8e66795f476eeb0e9382bf7ca7d
|
0c70dcec22a090e70b1f20613ea6e0a64fd9a037
|
/GPS卫星位置的计算/venv/Lib/site-packages/pandas/core/arrays/boolean.py
|
071e1ce42914a78c713b3948405141a7734c0ce1
|
[
"MIT"
] |
permissive
|
payiz-asj/Gis
|
82c1096d830878f62c7a0d5dfb6630d4e4744764
|
3d315fed93e2ab850b836ddfd7a67f5618969d10
|
refs/heads/main
| 2023-06-27T15:25:17.301154 | 2021-08-03T10:02:58 | 2021-08-03T10:02:58 | 392,269,853 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 25,132 |
py
|
import numbers
from typing import TYPE_CHECKING, List, Tuple, Type, Union
import warnings
import numpy as np
from pandas._libs import lib, missing as libmissing
from pandas._typing import ArrayLike
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_bool_dtype,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import ops
from .masked import BaseMaskedArray, BaseMaskedDtype
if TYPE_CHECKING:
import pyarrow # noqa: F401
@register_extension_dtype
class BooleanDtype(BaseMaskedDtype):
"""
Extension dtype for boolean data.
.. versionadded:: 1.0.0
.. warning::
BooleanDtype is considered experimental. The implementation and
parts of the API may change without warning.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> pd.BooleanDtype()
BooleanDtype
"""
name = "boolean"
@property
def type(self) -> Type[np.bool_]:
return np.bool_
@property
def kind(self) -> str:
return "b"
@property
def numpy_dtype(self) -> np.dtype:
return np.dtype("bool")
@classmethod
def construct_array_type(cls) -> Type["BooleanArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return BooleanArray
def __repr__(self) -> str:
return "BooleanDtype"
@property
def _is_boolean(self) -> bool:
return True
@property
def _is_numeric(self) -> bool:
return True
def __from_arrow__(
self, array: Union["pyarrow.Array", "pyarrow.ChunkedArray"]
) -> "BooleanArray":
"""
Construct BooleanArray from pyarrow Array/ChunkedArray.
"""
import pyarrow # noqa: F811
if isinstance(array, pyarrow.Array):
chunks = [array]
else:
# pyarrow.ChunkedArray
chunks = array.chunks
results = []
for arr in chunks:
# TODO should optimize this without going through object array
bool_arr = BooleanArray._from_sequence(np.array(arr))
results.append(bool_arr)
return BooleanArray._concat_same_type(results)
def coerce_to_array(
values, mask=None, copy: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask.
Parameters
----------
values : 1D list-like
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
if isinstance(values, BooleanArray):
if mask is not None:
raise ValueError("cannot pass mask for BooleanArray input")
values, mask = values._data, values._mask
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
mask_values = None
if isinstance(values, np.ndarray) and values.dtype == np.bool_:
if copy:
values = values.copy()
elif isinstance(values, np.ndarray) and is_numeric_dtype(values.dtype):
mask_values = isna(values)
values_bool = np.zeros(len(values), dtype=bool)
values_bool[~mask_values] = values[~mask_values].astype(bool)
if not np.all(
values_bool[~mask_values].astype(values.dtype) == values[~mask_values]
):
raise TypeError("Need to pass bool-like values")
values = values_bool
else:
values_object = np.asarray(values, dtype=object)
inferred_dtype = lib.infer_dtype(values_object, skipna=True)
integer_like = ("floating", "integer", "mixed-integer-float")
if inferred_dtype not in ("boolean", "empty") + integer_like:
raise TypeError("Need to pass bool-like values")
mask_values = isna(values_object)
values = np.zeros(len(values), dtype=bool)
values[~mask_values] = values_object[~mask_values].astype(bool)
# if the values were integer-like, validate it were actually 0/1's
if inferred_dtype in integer_like:
if not np.all(
values[~mask_values].astype(float)
== values_object[~mask_values].astype(float)
):
raise TypeError("Need to pass bool-like values")
if mask is None and mask_values is None:
mask = np.zeros(len(values), dtype=bool)
elif mask is None:
mask = mask_values
else:
if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:
if mask_values is not None:
mask = mask | mask_values
else:
if copy:
mask = mask.copy()
else:
mask = np.array(mask, dtype=bool)
if mask_values is not None:
mask = mask | mask_values
if not values.ndim == 1:
raise ValueError("values must be a 1D list-like")
if not mask.ndim == 1:
raise ValueError("mask must be a 1D list-like")
return values, mask
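# Behavior sketch (comments only; values illustrative):
#   coerce_to_array([True, False, None])
#   -> (array([ True, False, False]), array([False, False,  True]))
# Missing entries are filled with False in the data array and flagged
# True in the mask array.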
class BooleanArray(BaseMaskedArray):
"""
Array of boolean (True/False) data with missing values.
This is a pandas Extension array for boolean data, under the hood
represented by 2 numpy arrays: a boolean array with the data and
a boolean array with the mask (True indicating missing).
BooleanArray implements Kleene logic (sometimes called three-value
logic) for logical operations. See :ref:`boolean.kleene` for more.
    To construct a BooleanArray from generic array-like input, use
:func:`pandas.array` specifying ``dtype="boolean"`` (see examples
below).
.. versionadded:: 1.0.0
.. warning::
BooleanArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : numpy.ndarray
A 1-d boolean-dtype array with the data.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values (True
indicates missing).
copy : bool, default False
Whether to copy the `values` and `mask` arrays.
Attributes
----------
None
Methods
-------
None
Returns
-------
BooleanArray
Examples
--------
    Create a BooleanArray with :func:`pandas.array`:
>>> pd.array([True, False, None], dtype="boolean")
<BooleanArray>
[True, False, <NA>]
Length: 3, dtype: boolean
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = False
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
raise TypeError(
"values should be boolean numpy array. Use "
"the 'pd.array' function instead"
)
self._dtype = BooleanDtype()
super().__init__(values, mask, copy=copy)
@property
def dtype(self) -> BooleanDtype:
return self._dtype
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "BooleanArray":
if dtype:
assert dtype == "boolean"
values, mask = coerce_to_array(scalars, copy=copy)
return BooleanArray(values, mask)
@classmethod
def _from_sequence_of_strings(
cls, strings: List[str], dtype=None, copy: bool = False
) -> "BooleanArray":
def map_string(s):
if isna(s):
return s
elif s in ["True", "TRUE", "true", "1", "1.0"]:
return True
elif s in ["False", "FALSE", "false", "0", "0.0"]:
return False
else:
raise ValueError(f"{s} cannot be cast to bool")
scalars = [map_string(x) for x in strings]
return cls._from_sequence(scalars, dtype, copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs):
# For BooleanArray inputs, we apply the ufunc to ._data
# and mask the result.
if method == "reduce":
# Not clear how to handle missing values in reductions. Raise.
raise NotImplementedError("The 'reduce' method is not supported.")
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (BooleanArray,)):
return NotImplemented
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
mask = np.zeros(len(self), dtype=bool)
inputs2 = []
for x in inputs:
if isinstance(x, BooleanArray):
mask |= x._mask
inputs2.append(x._data)
else:
inputs2.append(x)
def reconstruct(x):
# we don't worry about scalar `x` here, since we
# raise for reduce up above.
if is_bool_dtype(x.dtype):
m = mask.copy()
return BooleanArray(x, m)
else:
x[mask] = np.nan
return x
result = getattr(ufunc, method)(*inputs2, **kwargs)
if isinstance(result, tuple):
            return tuple(reconstruct(x) for x in result)
else:
return reconstruct(result)
def _coerce_to_array(self, value) -> Tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value)
def astype(self, dtype, copy: bool = True) -> ArrayLike:
"""
Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
ndarray or ExtensionArray
NumPy ndarray, BooleanArray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
            if incompatible type with a BooleanDtype, equivalent of same_kind
casting
"""
from pandas.core.arrays.string_ import StringDtype
dtype = pandas_dtype(dtype)
if isinstance(dtype, BooleanDtype):
values, mask = coerce_to_array(self, copy=copy)
return BooleanArray(values, mask, copy=False)
elif isinstance(dtype, StringDtype):
return dtype.construct_array_type()._from_sequence(self, copy=False)
if is_bool_dtype(dtype):
# astype_nansafe converts np.nan to True
if self._hasna:
raise ValueError("cannot convert float NaN to bool")
else:
return self._data.astype(dtype, copy=copy)
if is_extension_array_dtype(dtype) and is_integer_dtype(dtype):
from pandas.core.arrays import IntegerArray
return IntegerArray(
self._data.astype(dtype.numpy_dtype), self._mask.copy(), copy=False
)
# for integer, error if there are missing values
if is_integer_dtype(dtype):
if self._hasna:
raise ValueError("cannot convert NA to integer")
# for float dtype, ensure we use np.nan before casting (numpy cannot
# deal with pd.NA)
na_value = self._na_value
if is_float_dtype(dtype):
na_value = np.nan
# coerce
return self.to_numpy(dtype=dtype, na_value=na_value, copy=False)
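    # Usage sketch (comments only; values illustrative): with missing
    # values present, nullable targets keep the mask while float targets
    # receive np.nan, e.g.
    #   pd.array([True, False, None], dtype="boolean").astype("Int64")
    #   -> <IntegerArray> [1, 0, <NA>]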
def _values_for_argsort(self) -> np.ndarray:
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort
"""
data = self._data.copy()
data[self._mask] = -1
return data
def any(self, skipna: bool = True, **kwargs):
"""
Return whether any element is True.
Returns False unless there is at least one element that is True.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be False, as for an empty array.
If `skipna` is False, the result will still be True if there is
at least one element that is True, otherwise NA will be returned
if there are NA's present.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.any : Numpy version of this method.
BooleanArray.all : Return whether all elements are True.
Examples
--------
The result indicates whether any element is True (and by default
skips NAs):
>>> pd.array([True, False, True]).any()
True
>>> pd.array([True, False, pd.NA]).any()
True
>>> pd.array([False, False, pd.NA]).any()
False
>>> pd.array([], dtype="boolean").any()
False
>>> pd.array([pd.NA], dtype="boolean").any()
False
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, False, pd.NA]).any(skipna=False)
True
>>> pd.array([False, False, pd.NA]).any(skipna=False)
<NA>
"""
kwargs.pop("axis", None)
nv.validate_any((), kwargs)
values = self._data.copy()
np.putmask(values, self._mask, False)
result = values.any()
if skipna:
return result
else:
if result or len(self) == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
def all(self, skipna: bool = True, **kwargs):
"""
Return whether all elements are True.
Returns True unless there is at least one element that is False.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be True, as for an empty array.
If `skipna` is False, the result will still be False if there is
at least one element that is False, otherwise NA will be returned
if there are NA's present.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.all : Numpy version of this method.
BooleanArray.any : Return whether any element is True.
Examples
--------
        The result indicates whether all elements are True (and by default
        skips NAs):
>>> pd.array([True, True, pd.NA]).all()
True
>>> pd.array([True, False, pd.NA]).all()
False
>>> pd.array([], dtype="boolean").all()
True
>>> pd.array([pd.NA], dtype="boolean").all()
True
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, True, pd.NA]).all(skipna=False)
<NA>
>>> pd.array([True, False, pd.NA]).all(skipna=False)
False
"""
kwargs.pop("axis", None)
nv.validate_all((), kwargs)
values = self._data.copy()
np.putmask(values, self._mask, True)
result = values.all()
if skipna:
return result
else:
if not result or len(self) == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
@classmethod
def _create_logical_method(cls, op):
def logical_method(self, other):
if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
other = lib.item_from_zerodim(other)
other_is_booleanarray = isinstance(other, BooleanArray)
other_is_scalar = lib.is_scalar(other)
mask = None
if other_is_booleanarray:
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other, dtype="bool")
if other.ndim > 1:
raise NotImplementedError(
"can only perform ops with 1-d structures"
)
other, mask = coerce_to_array(other, copy=False)
elif isinstance(other, np.bool_):
other = other.item()
if other_is_scalar and not (other is libmissing.NA or lib.is_bool(other)):
raise TypeError(
"'other' should be pandas.NA or a bool. "
f"Got {type(other).__name__} instead."
)
if not other_is_scalar and len(self) != len(other):
raise ValueError("Lengths must match to compare")
if op.__name__ in {"or_", "ror_"}:
result, mask = ops.kleene_or(self._data, other, self._mask, mask)
elif op.__name__ in {"and_", "rand_"}:
result, mask = ops.kleene_and(self._data, other, self._mask, mask)
elif op.__name__ in {"xor", "rxor"}:
result, mask = ops.kleene_xor(self._data, other, self._mask, mask)
return BooleanArray(result, mask)
name = f"__{op.__name__}__"
return set_function_name(logical_method, name, cls)
@classmethod
def _create_comparison_method(cls, op):
def cmp_method(self, other):
from pandas.arrays import IntegerArray
if isinstance(
other, (ABCDataFrame, ABCSeries, ABCIndexClass, IntegerArray)
):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
other = lib.item_from_zerodim(other)
mask = None
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError(
"can only perform ops with 1-d structures"
)
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
if other is libmissing.NA:
# numpy does not handle pd.NA well as "other" scalar (it returns
# a scalar False instead of an array)
result = np.zeros_like(self._data)
mask = np.ones_like(self._data)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all="ignore"):
result = op(self._data, other)
# nans propagate
if mask is None:
mask = self._mask.copy()
else:
mask = self._mask | mask
return BooleanArray(result, mask, copy=False)
name = f"__{op.__name__}"
return set_function_name(cmp_method, name, cls)
def _reduce(self, name: str, skipna: bool = True, **kwargs):
if name in {"any", "all"}:
return getattr(self, name)(skipna=skipna, **kwargs)
return super()._reduce(name, skipna, **kwargs)
def _maybe_mask_result(self, result, mask, other, op_name: str):
"""
Parameters
----------
result : array-like
mask : array-like bool
other : scalar or array-like
op_name : str
"""
# if we have a float operand we are by-definition
# a float result
# or our op is a divide
if (is_float_dtype(other) or is_float(other)) or (
op_name in ["rtruediv", "truediv"]
):
result[mask] = np.nan
return result
if is_bool_dtype(result):
return BooleanArray(result, mask, copy=False)
elif is_integer_dtype(result):
from pandas.core.arrays import IntegerArray
return IntegerArray(result, mask, copy=False)
else:
result[mask] = np.nan
return result
@classmethod
def _create_arithmetic_method(cls, op):
op_name = op.__name__
def boolean_arithmetic_method(self, other):
if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
# Rely on pandas to unbox and dispatch to us.
return NotImplemented
other = lib.item_from_zerodim(other)
mask = None
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError(
"can only perform ops with 1-d structures"
)
if len(self) != len(other):
raise ValueError("Lengths must match")
# nans propagate
if mask is None:
mask = self._mask
if other is libmissing.NA:
mask |= True
else:
mask = self._mask | mask
if other is libmissing.NA:
# if other is NA, the result will be all NA and we can't run the
# actual op, so we need to choose the resulting dtype manually
if op_name in {"floordiv", "rfloordiv", "mod", "rmod", "pow", "rpow"}:
dtype = "int8"
else:
dtype = "bool"
result = np.zeros(len(self._data), dtype=dtype)
else:
with np.errstate(all="ignore"):
result = op(self._data, other)
# divmod returns a tuple
if op_name == "divmod":
div, mod = result
return (
self._maybe_mask_result(div, mask, other, "floordiv"),
self._maybe_mask_result(mod, mask, other, "mod"),
)
return self._maybe_mask_result(result, mask, other, op_name)
name = f"__{op_name}__"
return set_function_name(boolean_arithmetic_method, name, cls)
BooleanArray._add_logical_ops()
BooleanArray._add_comparison_ops()
BooleanArray._add_arithmetic_ops()
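A Kleene-logic usage sketch for the logical ops wired up above (values illustrative):

>>> import pandas as pd
>>> a = pd.array([True, False, None], dtype="boolean")
>>> b = pd.array([False, None, None], dtype="boolean")
>>> a | b
<BooleanArray>
[True, <NA>, <NA>]
Length: 3, dtype: boolean
>>> a & b
<BooleanArray>
[False, False, <NA>]
Length: 3, dtype: boolean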
|
[
"[email protected]"
] | |
40d836471602038f8e490438807b48014491d9e2
|
df97d5b25d40b54e0714ed9c0a6dd7a579011e2e
|
/mikadocms/flikr_grabber.py
|
966050a532ec3be0269d2f1bc60375d21d2ae39b
|
[] |
no_license
|
mikadosoftware/mikadoCMS
|
90ac1910b06f32bc3e808d1df656ba38a30e781c
|
7bb1ca4f66b74d4529a601540e1bf469f44d3b01
|
refs/heads/master
| 2021-01-17T00:20:34.489198 | 2018-06-13T15:27:53 | 2018-06-13T15:27:53 | 8,103,422 | 0 | 0 | null | 2013-05-03T23:07:59 | 2013-02-08T23:27:27 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,740 |
py
|
#!/usr/bin/env python
#! -*- coding: utf-8 -*-
### Copyright Paul Brian 2013
# This program is licensed, without under the terms of the
# GNU General Public License version 2 (or later). Please see
# LICENSE.txt for details
###
"""
:author: [email protected] <Paul Brian>
Flikr.com provides a useful outlet for using photographs on
a website with minimal cost, and importantly, fuss.
1. visit http://www.flickr.com/search/advanced/
Search for a photo (by tag / text) but click "creative commons"
and "commercial" use.
2. Find the right photo URL
3. run ``python flickr_grabber.py <URL>``
4. I will grab the page and make a best guess as to the original photo
URL
5.
"""
import requests
from bs4 import BeautifulSoup
import sys
from bookmaker import lib
import conf
from optparse import OptionParser
import logging
import webbrowser
import urllib
import os
import pprint
lgr = logging.getLogger(__name__)
class myError(Exception):
pass
#########
PHOTO_STORE = "./photos"
testurl = "http://www.flickr.com/photos/comedynose/4230176889/"
def extract_photo_url(url):
r = requests.get(url)
soup = BeautifulSoup(r.text)
likelicandidate = soup.find(property='og:image')
resultstr = """
From page %s
We have likely candidate of
%s
or these:
"""
resultstr = resultstr % (url, str(likelicandidate))
for imgtag in soup.find_all("img"):
resultstr += str(imgtag)
return (likelicandidate, resultstr)
def get_photo(url):
"""
"""
tgt = os.path.join(PHOTO_STORE, os.path.basename(url))
urllib.urlretrieve(url, tgt)
#########
def parse_args():
parser = OptionParser()
parser.add_option("--config", dest="confpath",
help="path to ini file")
parser.add_option("--flikrpage", dest="flikrpage",
help="url to embedded photo")
parser.add_option("--flikrphoto", dest="flikrphoto",
help="url to stadnalone photo (mutually xlusive with glikrpage")
(options, args) = parser.parse_args()
return (options, args)
def main(opts, args):
"""
"""
if opts.confpath:
confd = conf.get_config(opts.confpath)
lgr.debug(pprint.pformat(confd))
else:
confd = {}
if opts.flikrpage:
likelicandidate, resultstr = extract_photo_url(opts.flikrpage)
print likelicandidate
print resultstr
if opts.flikrphoto:
get_photo(opts.flikrphoto)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
opts, args = parse_args()
try:
main(opts, args)
except Exception, e:
print "We can trap a lot up here"
raise e
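# Usage sketch (illustrative invocations; the photo URLs are placeholders):
#   python flikr_grabber.py --flikrpage http://www.flickr.com/photos/comedynose/4230176889/
#   python flikr_grabber.py --flikrphoto http://farm3.staticflickr.com/1234/example.jpg
# The first prints the best-guess original photo URL for a Flickr page;
# the second downloads a standalone photo into ./photos/.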
|
[
"[email protected]"
] | |
5dfb79becde51feb01c67400ff548446d6963775
|
0cb38adedbe3a5192076de420e1aa0fd10ae3311
|
/return_merchandise_authorizations/admin.py
|
213dea63a59221b56ba699e6a457f59ff5076d67
|
[] |
no_license
|
fogcitymarathoner/rma
|
73ada816b98f068b6c00b2e1fcf39461259453fa
|
133d6026f99820d0702f0578b8a3b4574671f888
|
refs/heads/master
| 2021-01-11T00:32:47.797673 | 2016-10-10T18:34:54 | 2016-10-10T18:35:11 | 70,516,821 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 756 |
py
|
from django.contrib import admin
from return_merchandise_authorizations.models import Rma
from return_merchandise_authorizations.models import Item
from return_merchandise_authorizations.models import RmaAttachment
class ItemInline(admin.TabularInline):
model = Item
class AttachInline(admin.TabularInline):
model = RmaAttachment
class RmaAdmin(admin.ModelAdmin):
list_display = ('date', 'customer', 'case_number', 'reference_number', 'address')
search_fields = ('case_number', 'reference_number', 'address', 'issue')
inlines = [
ItemInline,
AttachInline
]
#
admin.site.register(Rma, RmaAdmin)
class ItemAdmin(admin.ModelAdmin):
list_display = ('note', 'quantity')
#
admin.site.register(Item, ItemAdmin)
|
[
"[email protected]"
] | |
2e2f74124954a3985bfb08d9d40e0bc56bc5fff2
|
6e373b40393fb56be4437c37b9bfd218841333a8
|
/Level_6/Lecture_9/enroll/forms.py
|
a24e95e08208751aa12e95e489b7e6bdfa3638eb
|
[] |
no_license
|
mahto4you/Django-Framework
|
6e56ac21fc76b6d0352f004a5969f9d4331defe4
|
ee38453d9eceea93e2c5f3cb6895eb0dce24dc2b
|
refs/heads/master
| 2023-01-22T01:39:21.734613 | 2020-12-04T03:01:17 | 2020-12-04T03:01:17 | 318,383,854 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 659 |
py
|
from django.contrib.auth.models import User
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
class SignUpForm(UserCreationForm):
password2 = forms.CharField(label='Confirm Password (again)', widget=forms.PasswordInput)
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email']
labels ={'email':'Email'}
class EditUserProfileForm(UserChangeForm):
password = None
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email', 'date_joined', 'last_login', 'is_active']
labels = {'email':'Email'}
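A minimal view-side usage sketch (the view and template names are illustrative assumptions, not part of this file):

from django.shortcuts import redirect, render
from .forms import SignUpForm

def sign_up(request):
    form = SignUpForm(request.POST or None)
    if form.is_valid():
        form.save()  # creates the User with the validated password
        return redirect("login")
    return render(request, "enroll/signup.html", {"form": form})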
|
[
"[email protected]"
] | |
bb69649a492b5bb2e5ee249630dca2d8b04e8c78
|
8f1996c1b5a0211474c7fa287be7dc20a517f5f0
|
/batch/batch/cloud/driver.py
|
96349e4c4d578c9209d5ffabef4590256096a62d
|
[
"MIT"
] |
permissive
|
johnc1231/hail
|
9568d6effe05e68dcc7bf398cb32df11bec061be
|
3dcaa0e31c297e8452ebfcbeda5db859cd3f6dc7
|
refs/heads/main
| 2022-04-27T10:51:09.554544 | 2022-02-08T20:05:49 | 2022-02-08T20:05:49 | 78,463,138 | 0 | 0 |
MIT
| 2022-03-01T15:55:25 | 2017-01-09T19:52:45 |
Python
|
UTF-8
|
Python
| false | false | 936 |
py
|
from hailtop import aiotools
from gear import Database
from gear.cloud_config import get_global_config
from ..inst_coll_config import InstanceCollectionConfigs
from ..driver.driver import CloudDriver
from .azure.driver.driver import AzureDriver
from .gcp.driver.driver import GCPDriver
async def get_cloud_driver(
app,
db: Database,
machine_name_prefix: str,
namespace: str,
inst_coll_configs: InstanceCollectionConfigs,
credentials_file: str,
task_manager: aiotools.BackgroundTaskManager,
) -> CloudDriver:
cloud = get_global_config()['cloud']
if cloud == 'azure':
return await AzureDriver.create(
app, db, machine_name_prefix, namespace, inst_coll_configs, credentials_file, task_manager
)
assert cloud == 'gcp', cloud
return await GCPDriver.create(
app, db, machine_name_prefix, namespace, inst_coll_configs, credentials_file, task_manager
)
|
[
"[email protected]"
] | |
7796231c8f937912e9ccd9dd1399da035526bee6
|
55c0254b9889235844ca2fcfa5b80e6aedeb4841
|
/Book_app/wsgi.py
|
ea116599419347d50d5b310f5c940541109e1334
|
[] |
no_license
|
AKSHAY-KR99/book_project
|
a75761a40c544fe4ad38ebcdd01b9d524e5f8ea8
|
019b316ec97395ac080be86333d7902b7c590271
|
refs/heads/master
| 2023-05-30T05:09:12.888518 | 2021-06-15T11:03:47 | 2021-06-15T11:03:47 | 377,130,492 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 393 |
py
|
"""
WSGI config for Book_app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Book_app.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] |