blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
daeecd30d81c8cbdc6492acdc2b0ed1ec95cae6e | c42142003122bc8172b00ccee0e733417d06fde0 | /webstore/cart/models.py | 187e1966774582896c01e0a3d38b75a74f863be0 | [] | no_license | sloniewski/django_webstore | 53fbe52b7284220a106b7d96abcc06308e1d1b23 | 76b46396b6915e21d65e1ad0fbc8786d6f15b122 | refs/heads/master | 2021-10-10T16:32:09.942377 | 2019-01-13T22:37:44 | 2019-01-13T22:37:44 | 115,758,610 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,923 | py | from decimal import Decimal, getcontext
from django.db import models
from django.db.models import Sum
from django.contrib.auth import get_user_model
from django.utils.functional import cached_property
from webstore.product.models import Product
User = get_user_model()
class CartManager(models.Manager):
def get_for_session(self, request):
session = request.session.session_key
cart = self.get_queryset().filter(
session=session).first()
return cart
class CartItem(models.Model):
"""
Reference table for m2m relation cart -> product.
Stores additional information about quantity.
"""
cart = models.ForeignKey(
'Cart',
on_delete=models.CASCADE,
)
product = models.ForeignKey(
Product,
on_delete=models.CASCADE,
)
quantity = models.PositiveIntegerField()
class Meta:
unique_together = [
('cart', 'product')
]
@property
def price(self):
return self.product.actual_price
@property
def weight(self):
return self.quantity * self.product.weight
def add_qty(self, qty):
self.quantity += qty
self.save()
def remove_qty(self, qty):
if self.quantity <= qty:
self.delete()
return None
self.quantity -= qty
self.save()
return self
@property
def value(self):
"""
Returns value of order-line.
"""
getcontext().prec = 4
# Order of multiplication is important, to call __mul__ of Cash class
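        # Illustrative sketch (assuming actual_price is such a Cash-like object):
        #   price * self.quantity  calls Cash.__mul__ and stays a Cash value, while
        #   self.quantity * price  would try int.__mul__ first and only fall back to
        #   Cash.__rmul__ if that is defined.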
price = self.product.actual_price
if price:
return price * self.quantity
return 0
class Cart(models.Model):
"""
Cart representation, has unique reference to session_key.
Does not store items, cart items are m2m relation to cart & product
"""
objects = CartManager()
session = models.CharField(
max_length=255,
unique=True
)
product = models.ManyToManyField(
Product,
through=CartItem,
)
created = models.DateTimeField(
auto_now_add=True,
)
def _get_item(self, item):
item = CartItem.objects.get(
product_id=item,
cart=self,
)
return item
def add_item(self, item, qty):
try:
cart_item = self._get_item(item)
cart_item.add_qty(qty)
except CartItem.DoesNotExist:
cart_item = CartItem.objects.create(
product_id=item,
cart=self,
quantity=qty,
)
return cart_item
def remove_item(self, item, qty):
try:
cart_item = self._get_item(item)
item = cart_item.remove_qty(qty)
if item is None:
return None
return item
except CartItem.DoesNotExist:
pass
def delete_item(self, item):
try:
cart_item = self._get_item(item)
cart_item.delete()
return True
except CartItem.DoesNotExist:
return True
@property
def item_count(self):
item_count = self.cartitem_set.aggregate(
Sum('quantity'))['quantity__sum']
if item_count is None:
return 0
return item_count
def get_items(self):
return self.cartitem_set.all().select_related('product')
@property
def value(self):
value = 0
# TODO should be aggregate
for item in self.cartitem_set.filter(quantity__gte=1):
value += item.value
return value
@property
def items(self):
return self.cartitem_set.all().select_related('product')
@cached_property
def weight(self):
weight = 0
for item in self.items:
weight += (item.product.weight * item.quantity)
return weight
| [
"[email protected]"
] | |
a66986685895e6214469af6309e06d1c7e0e0654 | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/Django-1.11/tests/urlpatterns_reverse/test_localeregexprovider.py | 401e9a1ad03f556c6b498ceba120ce6877d00bb2 | [
"BSD-3-Clause",
"Python-2.0",
"Apache-2.0"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 2,478 | py | from __future__ import unicode_literals
import os
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase, mock, override_settings
from django.urls import LocaleRegexProvider
from django.urls.resolvers import LocaleRegexDescriptor
from django.utils import translation
from django.utils._os import upath
here = os.path.dirname(upath(os.path.abspath(__file__)))
@override_settings(LOCALE_PATHS=[os.path.join(here, 'translations', 'locale')])
class LocaleRegexProviderTests(SimpleTestCase):
def setUp(self):
translation.trans_real._translations = {}
def tearDown(self):
translation.trans_real._translations = {}
def test_translated_regex_compiled_per_language(self):
provider = LocaleRegexProvider(translation.gettext_lazy('^foo/$'))
with translation.override('de'):
de_compiled = provider.regex
# compiled only once per language
error = AssertionError('tried to compile url regex twice for the same language')
with mock.patch('django.urls.resolvers.re.compile', side_effect=error):
de_compiled_2 = provider.regex
with translation.override('fr'):
fr_compiled = provider.regex
self.assertEqual(fr_compiled.pattern, '^foo-fr/$')
self.assertEqual(de_compiled.pattern, '^foo-de/$')
self.assertEqual(de_compiled, de_compiled_2)
def test_nontranslated_regex_compiled_once(self):
provider = LocaleRegexProvider('^foo/$')
with translation.override('de'):
de_compiled = provider.regex
with translation.override('fr'):
# compiled only once, regardless of language
error = AssertionError('tried to compile non-translated url regex twice')
with mock.patch('django.urls.resolvers.re.compile', side_effect=error):
fr_compiled = provider.regex
self.assertEqual(de_compiled.pattern, '^foo/$')
self.assertEqual(fr_compiled.pattern, '^foo/$')
def test_regex_compile_error(self):
"""Regex errors are re-raised as ImproperlyConfigured."""
provider = LocaleRegexProvider('*')
msg = '"*" is not a valid regular expression: nothing to repeat'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
provider.regex
def test_access_locale_regex_descriptor(self):
self.assertIsInstance(LocaleRegexProvider.regex, LocaleRegexDescriptor)
| [
"[email protected]"
] | |
4750561ea9d7788e3f16bfbe1c96adc5a4de2664 | 4d892dc51e2dda0fcce246ac608fc4e0ce98c52b | /FirstStepsInPython/Basics/Lab2 Conditional Statements/06.AreaOfFigures.py | 1f9ae6a548d4270bf0e1a3d28da1d38717d4e372 | [
"MIT"
] | permissive | inovei6un/SoftUni-Studies-1 | 510088ce65e2907c2755a15e427fd156909157f0 | 3837c2ea0cd782d3f79353e61945c08a53cd4a95 | refs/heads/main | 2023-08-14T16:44:15.823962 | 2021-10-03T17:30:48 | 2021-10-03T17:30:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | from math import pi
figure = input()
a = str("square")
b = str("rectangle")
c = str("circle")
d = str("triangle")
if figure == a:
side = float(input())
print(side * side)
elif figure == b:
side_a = float(input())
side_b = float(input())
print(side_a * side_b)
elif figure == c:
rad = float(input())
print(pi * rad * rad)
elif figure == d:
side_a = float(input())
side_b = float(input())
print((side_a * side_b) / 2)
| [
"[email protected]"
] | |
71464c227b83bec13b1cda37a74689e9e64c894d | 7b5c1352e1a4fb8352161cc135bfd1225a633828 | /2017-cvr-tencent-final/src/ffm_gbdt/evaluate.py | eff22cd6da75a597c9be09fbed629e6051ba6cfe | [] | no_license | zgcgreat/2017-cvr-tencent | b7f54ae8df55fbb30f2430f695a148844982aa3a | fe79d0756bbf862d45e63e35b7c28da8396bcbda | refs/heads/master | 2021-04-03T08:32:33.651705 | 2018-07-17T08:36:53 | 2018-07-17T08:36:53 | 124,724,199 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,248 | py | # _*_ coding: utf-8 _*_
import sys
from csv import DictReader
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
data_path = sys.argv[1]
result_path = sys.argv[2]
label_path = data_path + 'validation.csv'
predict_path = result_path + 'submission.csv'
label_reader = DictReader(open(label_path))
predict_reader = DictReader(open(predict_path))
count = 0
y_true = []
y_pred = []
y_scores = []
for t, row in enumerate(label_reader):
predict = predict_reader.__next__()
actual = float(row['Label'])
predicted = float(predict['Predicted'])
y_true.append(actual)
y_scores.append(predicted)
    # Predictions at or above the threshold are treated as clicks
if (predicted >= 0.5):
y_pred.append(1)
else:
y_pred.append(0)
count += 1
# Compute performance metrics
auc = roc_auc_score(y_true, y_scores)
logloss = log_loss(y_true, y_pred)
accuracy = accuracy_score(y_true, y_pred)
precision = precision_score(y_true, y_pred)
recall = recall_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
print('Accuracy: {0} Precision: {1} Recall: {2} F1-Measure: {3}\n'.format(accuracy, precision, recall, f1))
print('logloss: {0} auc: {1}\n'.format(logloss, auc))
result = open(result_path + 'details.txt', 'w')
result.write('------------------------------------------------------------\n\n')
result.write('Total instances: {count}\n\n\nValidation File: {vafile}\n\nPrediction file: {prefile}\n\n'
.format(count=count, vafile=label_path, prefile=predict_path))
result.write(
'Accuracy: {0}\n\nPrecision: {1}\n\nRecall: {2}\n\nF1-Measure: {3}\n\n'.format(accuracy, precision, recall, f1))
result.write('logloss: {0}\n\nauc: {1}\n\n'.format(logloss, auc))
result.write('-------------------------------------------------------------\n\n')
result.close()
# Write the results to a CSV table
statistics = open(result_path + 'result.csv', 'w')
statistics.writelines('Accuracy,Precision,Recall,F1-Measure,Logloss,AUC\n')
statistics.writelines('{0},{1},{2},{3},{4},{5}'.format(accuracy, precision, recall, f1, logloss, auc))
statistics.close()
| [
"[email protected]"
] | |
ac538fcd79a7e716accd2aa0b73d989b81b002af | 12123592a54c4f292ed6a8df4bcc0df33e082206 | /py3/pgms/sec4/Circle.py | fa62fb9969f45e1a99a84b59831899e078e263fc | [] | no_license | alvinooo/advpython | b44b7322915f832c8dce72fe63ae6ac7c99ef3d4 | df95e06fd7ba11b0d2329f4b113863a9c866fbae | refs/heads/master | 2021-01-23T01:17:22.487514 | 2017-05-30T17:51:47 | 2017-05-30T17:51:47 | 92,860,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # Circle.py - Circle module
import math
class Circle(object):
def __init__(self, radius=1):
self.__radius = radius
def getRadius(self): return self.__radius
def circum(self): return 2 * math.pi * self.__radius
def area(self): return math.pi * self.__radius ** 2
def __str__(self):
return "Circle: %s" %(self.__radius)
| [
"[email protected]"
] | |
c5ea92fa5595fba5d666432acdda222cf54fe4cb | 5fe72bb13baf3649058ebe11aa86ad4fc56c69ed | /hard-gists/1f66e4d58074d64c8268/snippet.py | a669f0ac17f9e738398c8f61266bb602ed43eb70 | [
"Apache-2.0"
] | permissive | dockerizeme/dockerizeme | 8825fed45ff0ce8fb1dbe34959237e8048900a29 | 408f3fa3d36542d8fc1236ba1cac804de6f14b0c | refs/heads/master | 2022-12-10T09:30:51.029846 | 2020-09-02T13:34:49 | 2020-09-02T13:34:49 | 144,501,661 | 24 | 20 | Apache-2.0 | 2022-11-21T12:34:29 | 2018-08-12T21:21:04 | Python | UTF-8 | Python | false | false | 1,223 | py | import argparse
import urllib.request
import os
import img2pdf
from os import walk
from os.path import join
from bs4 import BeautifulSoup
work_dir = os.path.dirname(__file__)
def download_images(url):
html = urllib.request.urlopen(url).read()
soup = BeautifulSoup(html)
title = 'pdf_images' # soup.title.string
images = soup.findAll('img', {'class': 'slide_image'})
for image in images:
image_url = image.get('data-full').split('?')[0]
command = 'wget %s -P %s --quiet' % (image_url, title)
os.system(command)
convert_pdf(title)
def convert_pdf(url):
f = []
for (dirpath, dirnames, filenames) in walk(join(work_dir, url)):
f.extend(filenames)
break
f = ["%s/%s" % (url, x) for x in f]
print("Making pdf")
pdf_bytes = img2pdf.convert(f, dpi=300, x=None, y=None)
doc = open('presentation.pdf', 'wb')
doc.write(pdf_bytes)
doc.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("url", type=str,
help="download an slideshare presentation given the url")
args = parser.parse_args()
download_images(args.url)
os.system('rm -r pdf_images')
| [
"[email protected]"
] | |
4120f2826dcf9ed8b34f5ccdbaa5e04098ba005c | 1677eaad65da601a3ac34bd6648c973ffd23c5a9 | /test/test_recipients_api.py | 3649e01a51dd5da0c43ad32857deb08372c0acba | [] | no_license | jeffkynaston/sdk-spike-python | dc557cc1557387f8a126cd8e546201d141de535e | f9c65f578abb801ffe5389b2680f9c6ed1fcebd3 | refs/heads/main | 2023-07-10T00:58:13.864373 | 2021-08-05T21:38:07 | 2021-08-05T21:38:07 | 393,175,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | """
Plastiq Public API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import unittest
import openapi_client
from openapi_client.api.recipients_api import RecipientsApi # noqa: E501
class TestRecipientsApi(unittest.TestCase):
"""RecipientsApi unit test stubs"""
def setUp(self):
self.api = RecipientsApi() # noqa: E501
def tearDown(self):
pass
def test_recipients_get(self):
"""Test case for recipients_get
Retrieve a paginated list of Recipients by query parameter(s) # noqa: E501
"""
pass
def test_recipients_id_delete(self):
"""Test case for recipients_id_delete
Delete a Recipient # noqa: E501
"""
pass
def test_recipients_id_get(self):
"""Test case for recipients_id_get
Retrieve a Recipient # noqa: E501
"""
pass
def test_recipients_id_patch(self):
"""Test case for recipients_id_patch
Update a Recipient # noqa: E501
"""
pass
def test_recipients_post(self):
"""Test case for recipients_post
Create a Recipient # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
bdfab790143c4ba126b8efec958c5486207c0a99 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf.0/gsn-edf_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=93/params.py | 278fd438e6fdf9246abe78754f18f7426f4fa985 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.543786',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 93,
'utils': 'uni-medium-3'}
| [
"[email protected]"
] | |
5e1f0040a2f5d440f1866df03195c490ee95d2ed | 927f1d546c840fa04702a1769b58f0f47dc48f64 | /backend/royal_cake_26283/settings.py | 3458119c3d63241ae113e4b4a87dc77ee0b1251c | [] | no_license | crowdbotics-apps/royal-cake-26283 | f27ad5b4fcd0293feb99d6462d62b7f6a5bd27c0 | 2059ddf36c16c6b6e9219142c5a9dab0987082a1 | refs/heads/master | 2023-04-18T19:12:15.375384 | 2021-05-06T21:49:04 | 2021-05-06T21:49:04 | 365,045,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,114 | py | """
Django settings for royal_cake_26283 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'royal_cake_26283.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'royal_cake_26283.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
2f71ff7ba4d2e8f7e6ddba2ab05056646a76a884 | e669b3fe7da2698da4ce02e98325ce154d2aa546 | /swaps/utils/api_signature_v2.py | 63401bc3ab76d3f913b31820716826a8895d2d7b | [
"Apache-2.0"
] | permissive | marcellinamichie291/cash_carry_leveraged_futures_arbitrageur | 0834a911fdd6c9f1462f6f2f59926f715fc51461 | 1120ebfb487ce4987fe70e6645b36e0d7ce041ec | refs/heads/main | 2023-03-16T18:35:28.730554 | 2020-12-04T07:46:13 | 2020-12-04T07:46:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | import base64
import hashlib
import hmac
import datetime
from urllib import parse
import urllib.parse
from swaps.exception.huobi_api_exception import HuobiApiException
def create_signature_v2(api_key, secret_key, method, url, builder):
if api_key is None or secret_key is None or api_key == "" or secret_key == "":
raise HuobiApiException(HuobiApiException.KEY_MISSING, "API key and secret key are required")
timestamp = utc_now()
builder.put_url("accessKey", api_key)
builder.put_url("signatureVersion", "2.1")
builder.put_url("signatureMethod", "HmacSHA256")
builder.put_url("timestamp", timestamp)
host = urllib.parse.urlparse(url).hostname
path = urllib.parse.urlparse(url).path
    # Sort the parameters by key:
keys = sorted(builder.param_map.keys())
    # Join them with '&':
qs0 = '&'.join(['%s=%s' % (key, parse.quote(builder.param_map[key], safe='')) for key in keys])
    # Concatenate the request method, host, path and query string, each separated by '\n':
payload0 = '%s\n%s\n%s\n%s' % (method, host, path, qs0)
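    # Illustrative example of the string that gets signed (host and path are made up):
    #   GET\napi.example.com\n/ws\naccessKey=...&signatureMethod=HmacSHA256&signatureVersion=2.1&timestamp=2021-01-01T00%3A00%3A00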
dig = hmac.new(secret_key.encode('utf-8'), msg=payload0.encode('utf-8'), digestmod=hashlib.sha256).digest()
    # Base64-encode the HMAC digest
s = base64.b64encode(dig).decode()
builder.put_url("signature", s)
builder.put_url("authType", "api")
params = {
"accessKey": api_key,
"signatureVersion": "2.1",
"signatureMethod": "HmacSHA256",
"timestamp": timestamp,
"signature":s,
"authType":"api"
}
builder.put_url("action", "req")
builder.put_url("ch", "auth")
builder.put_url("params", params)
"""
# for test
ret_maps = {
"action": "req",
"ch": "auth",
"params" : params
}
return json.dumps(ret_maps)
"""
def utc_now():
return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
| [
"[email protected]"
] | |
0014e1e799b36fa9daf5f3be780340dca0a2ac61 | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/legacy_test/test_sparse_isnan_op.py | b807e6ba624452c55d74e668f661008d194d7a44 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 3,150 | py | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
class TestSparseIsnan(unittest.TestCase):
"""
Test the API paddle.sparse.isnan on some sparse tensors.
x: sparse tensor, out: sparse tensor
"""
def to_sparse(self, x, format):
if format == 'coo':
return x.detach().to_sparse_coo(sparse_dim=x.ndim)
elif format == 'csr':
return x.detach().to_sparse_csr()
def check_result(self, x_shape, format, data_type="float32"):
raw_inp = np.random.randint(-100, 100, x_shape)
mask = np.random.randint(0, 2, x_shape)
inp_x = (raw_inp * mask).astype(data_type)
inp_x[inp_x > 0] = np.nan
np_out = np.isnan(inp_x[inp_x != 0])
dense_x = paddle.to_tensor(inp_x)
sp_x = self.to_sparse(dense_x, format)
sp_out = paddle.sparse.isnan(sp_x)
sp_out_values = sp_out.values().numpy()
np.testing.assert_allclose(np_out, sp_out_values, rtol=1e-05)
def test_isnan_shape(self):
self.check_result([20], 'coo')
self.check_result([4, 5], 'coo')
self.check_result([4, 5], 'csr')
self.check_result([8, 16, 32], 'coo')
self.check_result([8, 16, 32], 'csr')
def test_isnan_dtype(self):
self.check_result([4, 5], 'coo', "float32")
self.check_result([4, 5], 'csr', "float32")
self.check_result([8, 16, 32], 'coo', "float64")
self.check_result([8, 16, 32], 'csr', "float64")
class TestStatic(unittest.TestCase):
def test(self):
paddle.enable_static()
indices = paddle.static.data(
name='indices', shape=[2, 3], dtype='int32'
)
values = paddle.static.data(name='values', shape=[3], dtype='float32')
dense_shape = [3, 3]
sp_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape)
sp_y = paddle.sparse.isnan(sp_x)
out = sp_y.to_dense()
exe = paddle.static.Executor()
indices_data = [[0, 1, 2], [1, 2, 0]]
values_data = np.array([1.0, float("nan"), 3.0]).astype('float32')
fetch = exe.run(
feed={'indices': indices_data, 'values': values_data},
fetch_list=[out],
return_numpy=True,
)
correct_out = np.array(
[[False, False, False], [False, False, True], [False, False, False]]
).astype('float32')
np.testing.assert_allclose(correct_out, fetch[0], rtol=1e-5)
paddle.disable_static()
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
8cb3839fefedd518307f93a56d09d5034b97b681 | 472661f4a0094ce070ed9702da1d2e3e55f7cbe7 | /data/io/convert_data_to_tfrecord_voc2012.py | e8bf49e6fc5c81a9acab30099d5e87774e780eee | [
"MIT"
] | permissive | hasan-mh-aziz/RetinaNet_Tensorflow | 917612d4d58308b8c8444a650e4c43eef291c722 | d5d1103243816506f96d36f41f1fb0b56eeefcc1 | refs/heads/master | 2020-07-31T01:45:20.002881 | 2019-05-26T11:00:56 | 2019-05-26T11:00:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,125 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import sys
sys.path.append('../../')
import xml.etree.cElementTree as ET
import numpy as np
import tensorflow as tf
import glob
import cv2
from tqdm import tqdm
from libs.label_name_dict.label_dict import *
from help_utils.tools import *
tf.app.flags.DEFINE_string('VOC_dir', '/data/VOC2012/VOCdevkit/VOC2012/', 'Voc dir')
tf.app.flags.DEFINE_string('xml_dir', 'Annotations', 'xml dir')
tf.app.flags.DEFINE_string('image_dir', 'JPEGImages', 'image dir')
tf.app.flags.DEFINE_string('save_name', 'train2012', 'save name')
tf.app.flags.DEFINE_string('save_dir', '../tfrecord/', 'save name')
tf.app.flags.DEFINE_string('img_format', '.jpg', 'format of image')
tf.app.flags.DEFINE_string('dataset', 'pascal', 'dataset')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def read_xml_gtbox_and_label(xml_path):
"""
:param xml_path: the path of voc xml
:return: a list contains gtboxes and labels, shape is [num_of_gtboxes, 5],
and has [xmin, ymin, xmax, ymax, label] in a per row
"""
tree = ET.parse(xml_path)
root = tree.getroot()
img_width = None
img_height = None
box_list = []
for child_of_root in root:
# if child_of_root.tag == 'filename':
# assert child_of_root.text == xml_path.split('/')[-1].split('.')[0] \
# + FLAGS.img_format, 'xml_name and img_name cannot match'
if child_of_root.tag == 'size':
for child_item in child_of_root:
if child_item.tag == 'width':
img_width = int(child_item.text)
if child_item.tag == 'height':
img_height = int(child_item.text)
if child_of_root.tag == 'object':
label = None
for child_item in child_of_root:
if child_item.tag == 'name':
label = NAME_LABEL_MAP[child_item.text]
if child_item.tag == 'bndbox':
tmp_box = [0, 0, 0, 0]
for node in child_item:
if node.tag == 'xmin':
tmp_box[0] = int(node.text)
if node.tag == 'ymin':
tmp_box[1] = int(node.text)
if node.tag == 'xmax':
tmp_box[2] = int(node.text)
if node.tag == 'ymax':
tmp_box[3] = int(node.text)
assert label is not None, 'label is none, error'
tmp_box.append(label)
box_list.append(tmp_box)
gtbox_label = np.array(box_list, dtype=np.int32)
return img_height, img_width, gtbox_label
def convert_pascal_to_tfrecord():
xml_path = FLAGS.VOC_dir + FLAGS.xml_dir
image_path = FLAGS.VOC_dir + FLAGS.image_dir
save_path = FLAGS.save_dir + FLAGS.dataset + '_' + FLAGS.save_name + '.tfrecord'
mkdir(FLAGS.save_dir)
# writer_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
# writer = tf.python_io.TFRecordWriter(path=save_path, options=writer_options)
writer = tf.python_io.TFRecordWriter(path=save_path)
fr = open('/data/VOC2012/VOCdevkit/VOC2012/ImageSets/Main/trainval.txt', 'r')
lines = fr.readlines()
real_cnt = 0
pbar = tqdm(glob.glob(xml_path + '/*.xml'))
for xml in pbar:
xml = xml.replace('\\', '/')
tmp = xml.split('/')[-1].split('.')[0] + "\n"
if tmp not in lines:
continue
img_name = xml.split('/')[-1].split('.')[0] + FLAGS.img_format
img_path = image_path + '/' + img_name
if not os.path.exists(img_path):
print('{} is not exist!'.format(img_path))
continue
img_height, img_width, gtbox_label = read_xml_gtbox_and_label(xml)
# img = np.array(Image.open(img_path))
img = cv2.imread(img_path)[:, :, ::-1]
feature = tf.train.Features(feature={
# do not need encode() in linux
'img_name': _bytes_feature(img_name.encode()),
# 'img_name': _bytes_feature(img_name),
'img_height': _int64_feature(img_height),
'img_width': _int64_feature(img_width),
'img': _bytes_feature(img.tostring()),
'gtboxes_and_label': _bytes_feature(gtbox_label.tostring()),
'num_objects': _int64_feature(gtbox_label.shape[0])
})
example = tf.train.Example(features=feature)
writer.write(example.SerializeToString())
real_cnt += 1
pbar.set_description("Conversion progress")
print('\nConversion is complete! {} images.'.format(real_cnt))
if __name__ == '__main__':
# xml_path = '../data/dataset/VOCdevkit/VOC2007/Annotations/000005.xml'
# read_xml_gtbox_and_label(xml_path)
convert_pascal_to_tfrecord()
| [
"[email protected]"
] | |
1f07aa448ad1e6d68b20ec4e9f8479fc2df38a6e | ea04557e60fa600a19a2a47da78b0407cf7b3e17 | /cms/cms/doctype/module_menu/test_module_menu.py | 98c73da43b1ce3e1454a24e9870fea7408ac2018 | [
"MIT"
] | permissive | Nirchains/f-cms | ea5b5d09e492a0c3d6691b90454b01720894fc03 | 8cefaad087994ca3dad0b1c5fadb250904cdd2cb | refs/heads/master | 2021-07-19T06:26:10.804498 | 2020-02-10T12:02:00 | 2020-02-10T12:02:00 | 167,004,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Pedro Antonio Fernández Gómez and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestModulemenu(unittest.TestCase):
pass
| [
"[email protected]"
] | |
5f302d2892630ccf906162ace52dc3190c01f300 | 9c84c32da8e8e3efbeb1dfaa0f21fcdee20d8254 | /simplebook/wsgi.py | f123373d3a628355d76bcfd3986ec46a1cf85a06 | [] | no_license | zhujingxiu/simplebook | a53917e3883cf2441be451096ecf03870b8bdb4d | 393735e89011cdb47ae5855997924b9b1d7d706b | refs/heads/master | 2020-03-18T11:43:27.607548 | 2018-05-24T09:01:43 | 2018-05-24T09:01:43 | 134,688,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | """
WSGI config for simplebook project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simplebook.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
1722fd154551780247f251986b64cf22acbcd063 | 15c9450e30742cfaad5d5ce88e86ff29749af975 | /training/isotropic/train_auto_2stage.py | 6f02f81ae96c2a869392b0a50364775ef77882e9 | [
"BSD-2-Clause"
] | permissive | constantinpape/CNNectome | 4e4ed3987c7934c3f378f0758c5c545b4ea1ed54 | 102758cabd4bf9c149b9867709b0a8bea9222438 | refs/heads/master | 2021-04-15T13:10:17.213845 | 2018-03-09T20:47:21 | 2018-03-09T20:47:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,230 | py | from __future__ import print_function
from gunpowder import *
from gunpowder.tensorflow import *
from training.gunpowder_wrappers import prepare_h5source
import malis
import os
import math
import json
import tensorflow as tf
def train_until(max_iteration, data_sources):
if tf.train.latest_checkpoint('.'):
trained_until = int(tf.train.latest_checkpoint('.').split('_')[-1])
else:
trained_until = 0
if trained_until >= max_iteration:
return
data_providers = []
fib25_dir = "/groups/saalfeld/home/funkej/workspace/projects/caffe/run/fib25/01_data/train"
if 'fib25h5' in data_sources:
for volume_name in ("tstvol-520-1", "tstvol-520-2", "trvol-250-1", "trvol-250-2"):
h5_source = Hdf5Source(os.path.join(fib25_dir, volume_name + '.hdf'),
datasets={VolumeTypes.RAW: 'volumes/raw',
VolumeTypes.GT_LABELS: 'volumes/labels/neuron_ids',
VolumeTypes.GT_MASK: 'volumes/labels/mask', },
volume_specs={
VolumeTypes.GT_MASK: VolumeSpec(interpolatable=False)
})
data_providers.append(h5_source)
fib19_dir = "/groups/saalfeld/saalfeldlab/larissa/fib19"
if 'fib19h5' in data_sources:
for volume_name in ("trvol-250", "trvol-600"):
h5_source = prepare_h5source(fib19_dir, volume_name)
data_providers.append(h5_source)
with open('net_io_names.json', 'r') as f:
net_io_names = json.load(f)
register_volume_type('RAW')
#register_volume_type('ALPHA_MASK')
register_volume_type('GT_LABELS')
register_volume_type('GT_MASK')
register_volume_type('GT_AFFINITIES')
#register_volume_type('GT_AFFINITIES_MASK')
register_volume_type('GT_SCALE')
register_volume_type('PREDICTED_AFFS_1')
register_volume_type('PREDICTED_AFFS_2')
register_volume_type('LOSS_GRADIENT_1')
register_volume_type('LOSS_GRADIENT_2')
voxel_size = Coordinate((8, 8, 8))
input_size = Coordinate((220,)*3) * voxel_size
output_1_size = Coordinate((132,)*3) * voxel_size
output_2_size = Coordinate((44,)*3) * voxel_size
#input_size = Coordinate((66, 228, 228))*(40,4,4)
#output_1_size = Coordinate((38, 140, 140))*(40,4,4)
#output_2_size = Coordinate((10, 52, 52))*(40,4,4)
request = BatchRequest()
request.add(VolumeTypes.RAW, input_size)
request.add(VolumeTypes.GT_LABELS, output_1_size)
request.add(VolumeTypes.GT_MASK, output_1_size)
request.add(VolumeTypes.GT_AFFINITIES, output_1_size)
#request.add(VolumeTypes.GT_AFFINITIES_MASK, output_1_size)
request.add(VolumeTypes.GT_SCALE, output_1_size)
snapshot_request = BatchRequest()
snapshot_request.add(VolumeTypes.RAW, input_size) # just to center the rest correctly
snapshot_request.add(VolumeTypes.PREDICTED_AFFS_1, output_1_size)
snapshot_request.add(VolumeTypes.PREDICTED_AFFS_2, output_2_size)
snapshot_request.add(VolumeTypes.LOSS_GRADIENT_1, output_1_size)
snapshot_request.add(VolumeTypes.LOSS_GRADIENT_2, output_2_size)
data_sources = tuple(
provider +
Normalize() +
Pad(
{
VolumeTypes.RAW: Coordinate((100, 100, 100)) * voxel_size,
VolumeTypes.GT_MASK: Coordinate((100, 100, 100)) * voxel_size
}
) +
RandomLocation() +
Reject()
for provider in data_providers
)
train_pipeline = (
data_sources +
RandomProvider() +
ElasticAugment([40, 40, 40], [2, 2, 2], [0, math.pi/2.0], prob_slip=0.01, prob_shift=0.05, max_misalign=1,
subsample=8) +
SimpleAugment() +
IntensityAugment(0.9, 1.1, -0.1, 0.1) +
IntensityScaleShift(2, -1) +
ZeroOutConstSections()+
GrowBoundary(steps=2) +
SplitAndRenumberSegmentationLabels() +
AddGtAffinities(
malis.mknhood3d()) +
BalanceLabels({
VolumeTypes.GT_AFFINITIES: VolumeTypes.GT_SCALE
},
{
VolumeTypes.GT_AFFINITIES: VolumeTypes.GT_MASK
})+
PreCache(
cache_size=40,
num_workers=10) +
Train(
'wnet',
optimizer=net_io_names['optimizer'],
loss=net_io_names['loss'],
summary=net_io_names['summary'],
log_dir='.log',
inputs={
net_io_names['raw']: VolumeTypes.RAW,
net_io_names['gt_affs_1']: VolumeTypes.GT_AFFINITIES,
net_io_names['loss_weights_1']: VolumeTypes.GT_SCALE,
},
outputs={
net_io_names['affs_1']: VolumeTypes.PREDICTED_AFFS_1,
net_io_names['affs_2']: VolumeTypes.PREDICTED_AFFS_2
},
gradients={
net_io_names['affs_1']: VolumeTypes.LOSS_GRADIENT_1,
net_io_names['affs_2']: VolumeTypes.LOSS_GRADIENT_2
}) +
IntensityScaleShift(0.5, 0.5) +
Snapshot({
VolumeTypes.RAW: 'volumes/raw',
VolumeTypes.GT_LABELS: 'volumes/labels/neuron_ids',
VolumeTypes.GT_AFFINITIES: 'volumes/labels/affinities',
VolumeTypes.PREDICTED_AFFS_1: 'volumes/labels/pred_affinities_1',
VolumeTypes.PREDICTED_AFFS_2: 'volumes/labels/pred_affinities_2',
VolumeTypes.LOSS_GRADIENT_1: 'volumes/loss_gradient_1',
VolumeTypes.LOSS_GRADIENT_2: 'volumes/loss_gradient_2',
},
every=500,
output_filename='batch_{iteration}.hdf',
additional_request=snapshot_request) +
PrintProfilingStats(every=1000)
)
print("Starting training...")
with build(train_pipeline) as b:
for i in range(max_iteration - trained_until):
b.request_batch(request)
print("Training finished")
if __name__ == "__main__":
set_verbose(False)
data_sources = ['fib25h5']
max_iteration = 400000
train_until(max_iteration, data_sources)
| [
"[email protected]"
] | |
4df487dabf6fc57d7a08692f301529248de7184c | bdff6688cee79226723fbcf9980c3757a55651b7 | /algorithms/implementation/library_fine.py | c8265f6a9940b9b3618fc90b84b5b0ccf7ae488c | [] | no_license | kruthar/hackerrank | 1f151203c8f26c033585f30d2cf69a2b22dcaf71 | ef81b2aa41a678ad6b0692f933f438a62b1d6b64 | refs/heads/master | 2016-08-10T07:15:19.165058 | 2016-02-26T17:48:58 | 2016-02-26T17:48:58 | 49,286,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | import sys
import datetime
actual = map(int, sys.stdin.next().strip().split(" "))
expected = map(int, sys.stdin.next().strip().split(" "))
actual_date = datetime.date(actual[2], actual[1], actual[0])
expected_date = datetime.date(expected[2], expected[1], expected[0])
diff = actual_date.toordinal() - expected_date.toordinal()
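# Fine schedule (HackerRank "Library Fine"): 0 if returned on or before the due date,
# 15 Hackos per day when late within the same month, 500 per month when late within
# the same year, and a flat 10000 when returned in a later year.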
if diff <= 0:
print 0
elif actual_date.year == expected_date.year:
if actual_date.month == expected_date.month:
print 15 * diff
else:
print 500 * (actual_date.month - expected_date.month)
else:
print 10000 | [
"[email protected]"
] | |
98f5338c30e8cc19a51612f76bb8ba0ad89b8674 | fb54704d4a6f9475f42b85d8c470e3425b37dcae | /medium/ex402.py | 70e030c095d74e75664d2cab7759e72aad5eef4e | [] | no_license | ziyuan-shen/leetcode_algorithm_python_solution | b2784071a94b04e687fd536b57e8d5a9ec1a4c05 | 920b65db80031fad45d495431eda8d3fb4ef06e5 | refs/heads/master | 2021-06-27T05:19:47.774044 | 2021-02-04T09:47:30 | 2021-02-04T09:47:30 | 210,991,299 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | from collections import deque
class Solution:
def removeKdigits(self, num: str, k: int) -> str:
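        # Greedy sliding-window minimum: each output digit is the smallest digit whose
        # index still leaves enough characters to its right; a non-decreasing deque of
        # candidate indices keeps that window minimum at the front.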
if len(num) == k:
return "0"
q = deque([0])
for i in range(1, k + 1):
while q and num[i] < num[q[-1]]:
q.pop()
q.append(i)
ans = str(num[q[0]])
q.popleft()
for i in range(k + 1, len(num)):
while q and num[i] < num[q[-1]]:
q.pop()
q.append(i)
ans += str(num[q[0]])
q.popleft()
return str(int(ans)) | [
"[email protected]"
] | |
0a3de2a8f7bae927e858e34b21072ebc092e0e49 | 84d469877bbc3b84d3a21d92e12c688177c38715 | /tests/ppg1_compatibility_layer/test_job_gen_jobs.py | ad404a91e02e8f0e43701842b0a26aa244baa56f | [
"MIT"
] | permissive | iCodeIN/pypipegraph2 | e6c459822acf23e67fefdfe9ee2f40e03bef132a | 16f5d3a471b3dae71fafca98f63c4c80882dc20a | refs/heads/main | 2023-08-11T01:21:47.232920 | 2021-09-16T07:09:46 | 2021-09-16T07:09:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,938 | py | """
The MIT License (MIT)
Copyright (c) 2012, Florian Finkernagel <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import pytest
import pypipegraph as ppg
from .shared import write, assertRaises, read, append, writeappend, Dummy
shu = None
@pytest.mark.usefixtures("ppg1_compatibility_test")
class TestJobGeneratingJob:
def test_basic(self):
def gen():
ppg.FileGeneratingJob("out/A", lambda: write("out/A", "A"))
ppg.FileGeneratingJob("out/B", lambda: write("out/B", "B"))
ppg.FileGeneratingJob("out/C", lambda: write("out/C", "C"))
ppg.JobGeneratingJob("genjob", gen)
ppg.run_pipegraph()
assert read("out/A") == "A"
assert read("out/B") == "B"
assert read("out/C") == "C"
@pytest.mark.skip # we only support 1, all, or almost all cores now.
def test_raises_if_needs_more_cores_than_we_have(self):
def gen():
jobA = ppg.FileGeneratingJob("out/A", lambda: write("out/A", "A"))
jobA.cores_needed = 20000
j = ppg.JobGeneratingJob("genjob", gen)
try:
ppg.run_pipegraph()
raise ValueError("should not be reached")
except ppg.RuntimeError:
pass
assert not (os.path.exists("out/A")) # since the gen job crashed
jobGenerated = ppg.util.global_pipegraph.jobs["out/A"]
assert jobGenerated.failed
assert jobGenerated.error_reason == "Needed to much memory/cores"
@pytest.mark.skip # we don't support ram limits
def test_raises_if_needs_more_ram_than_we_have(self):
def gen():
jobA = ppg.FileGeneratingJob("out/A", lambda: write("out/A", "A"))
jobA.memory_needed = 1024 * 1024 * 1024 * 1024
ppg.JobGeneratingJob("genjob", gen)
try:
ppg.run_pipegraph()
raise ValueError("should not be reached")
except ppg.RuntimeError:
pass
assert not (os.path.exists("out/A")) # since the gen job crashed
jobGenerated = ppg.util.global_pipegraph.jobs["out/A"]
assert jobGenerated.failed
assert jobGenerated.error_reason == "Needed to much memory/cores"
@pytest.mark.skip # we don't support ram limits
def test_with_memory_needed(self):
jobA = ppg.FileGeneratingJob("out/A", lambda: write("out/A", "A"))
jobA.memory_needed = 1024
ppg.run_pipegraph()
assert os.path.exists("out/A") # since the gen job crashed
def test_injecting_multiple_stages(self):
def gen():
def genB():
def genC():
ppg.FileGeneratingJob("out/D", lambda: write("out/D", "D"))
ppg.JobGeneratingJob("C", genC)
ppg.JobGeneratingJob("B", genB)
ppg.JobGeneratingJob("A", gen)
ppg.run_pipegraph()
assert read("out/D") == "D"
def test_generated_job_depending_on_each_other_one_of_them_is_Invariant(
self, ppg1_compatibility_test
):
# basic idea. You have jobgen A,
# it not only creates filegenB, but also ParameterDependencyC that A depends on
# does that work
def gen():
jobB = ppg.FileGeneratingJob("out/B", lambda: write("out/B", "B"))
jobB.ignore_code_changes()
jobC = ppg.ParameterInvariant("C", ("ccc",))
jobB.depends_on(jobC)
ppg.JobGeneratingJob("A", gen)
ppg.run_pipegraph()
assert read("out/B") == "B"
ppg1_compatibility_test.new_pipegraph()
def gen2():
jobB = ppg.FileGeneratingJob("out/B", lambda: write("out/B", "C"))
jobB.ignore_code_changes()
jobC = ppg.ParameterInvariant("C", ("ccc",))
jobB.depends_on(jobC)
ppg.JobGeneratingJob("A", gen2)
ppg.run_pipegraph()
assert read("out/B") == "B" # no rerun
ppg1_compatibility_test.new_pipegraph()
def gen3():
jobB = ppg.FileGeneratingJob("out/B", lambda: write("out/B", "C"))
jobB.ignore_code_changes()
jobCX = ppg.ParameterInvariant("C", ("DDD",))
jobB.depends_on(jobCX)
ppg.JobGeneratingJob("A", gen3)
ppg.run_pipegraph()
assert read("out/B") == "C" # did get rerun
def test_generated_job_depending_on_job_that_cant_have_finished(
self, ppg1_compatibility_test
):
# basic idea. You have jobgen A, and filegen B.
# filegenB depends on jobgenA.
# jobGenA created C depends on filegenB
# Perhaps add a filegen D that's independand of jobGenA, but C also deps on D
def a():
jobB = ppg.FileGeneratingJob("out/B", lambda: write("out/B", "B"))
def genA():
jobC = ppg.FileGeneratingJob("out/C", lambda: write("out/C", "C"))
jobC.depends_on(jobB)
jobA = ppg.JobGeneratingJob("A", genA)
jobB.depends_on(jobA)
ppg.run_pipegraph()
assert read("out/B") == "B"
assert read("out/C") == "C"
def b():
jobB = ppg.FileGeneratingJob("out/B", lambda: write("out/B", "B"))
jobD = ppg.FileGeneratingJob("out/D", lambda: write("out/D", "D"))
def genA():
jobC = ppg.FileGeneratingJob("out/C", lambda: write("out/C", "C"))
jobC.depends_on(jobB)
jobC.depends_on(jobD)
jobA = ppg.JobGeneratingJob("A", genA)
jobB.depends_on(jobA)
ppg.run_pipegraph()
assert read("out/B") == "B"
assert read("out/C") == "C"
a()
ppg1_compatibility_test.new_pipegraph()
b()
def test_generated_job_depending_on_each_other(self):
# basic idea. You have jobgen A,
# it not only creates filegenB, but also filegenC that depends on B
# does that work
def gen():
jobB = ppg.FileGeneratingJob("out/B", lambda: write("out/B", "B"))
jobC = ppg.FileGeneratingJob("out/C", lambda: write("out/C", read("out/B")))
jobC.depends_on(jobB)
ppg.JobGeneratingJob("A", gen)
ppg.run_pipegraph()
assert read("out/B") == "B"
assert read("out/C") == "B"
def test_generated_job_depending_on_each_other_one_of_them_is_loading(self):
# basic idea. You have jobgen A,
# it not only creates filegenB, but also DataloadingC that depends on B
# does that work
def gen():
def load():
global shu
shu = "123"
def do_write():
global shu
write("out/A", shu)
dl = ppg.DataLoadingJob("dl", load)
jobB = ppg.FileGeneratingJob("out/A", do_write)
jobB.depends_on(dl)
ppg.JobGeneratingJob("gen", gen)
ppg.run_pipegraph()
assert read("out/A") == "123"
def test_passing_non_function(self):
def inner():
ppg.JobGeneratingJob("out/a", "shu")
#assertRaises(ValueError, inner)
assertRaises(TypeError, inner)
def test_passing_non_string_as_jobid(self):
def inner():
ppg.JobGeneratingJob(5, lambda: 1)
assertRaises(TypeError, inner)
def test_generated_jobs_that_can_not_run_right_away_because_of_dataloading_do_not_crash(
self,
):
o = Dummy()
existing_dl = ppg.AttributeLoadingJob("a", o, "a", lambda: "Ashu")
def gen():
new_dl = ppg.AttributeLoadingJob("b", o, "b", lambda: "Bshu")
fg_a = ppg.FileGeneratingJob("out/C", lambda: write("out/C", o.a))
fg_b = ppg.FileGeneratingJob("out/D", lambda: write("out/D", o.b))
fg_a.depends_on(existing_dl)
fg_b.depends_on(new_dl)
ppg.JobGeneratingJob("E", gen)
ppg.run_pipegraph()
assert read("out/C") == "Ashu"
assert read("out/D") == "Bshu"
#ppg2 name no longer applies
def test_filegen_invalidated_jobgen_created_filegen_later_also_invalidated(
self, ppg1_compatibility_test
):
a = ppg.FileGeneratingJob("out/A", lambda: writeappend("out/A", "out/Ac", "A"))
p = ppg.ParameterInvariant("p", "p")
a.depends_on(p)
def gen():
c = ppg.FileGeneratingJob(
"out/C", lambda: writeappend("out/C", "out/Cx", "C")
)
c.depends_on(a)
ppg.JobGeneratingJob("b", gen)
ppg.run_pipegraph()
assert read("out/A") == "A"
assert read("out/Ac") == "A"
assert read("out/C") == "C"
assert read("out/Cx") == "C"
ppg1_compatibility_test.new_pipegraph()
a = ppg.FileGeneratingJob("out/A", lambda: writeappend("out/A", "out/Ac", "A"))
p = ppg.ParameterInvariant("p", "p2")
a.depends_on(p)
ppg.JobGeneratingJob("b", gen)
ppg.run_pipegraph()
assert read("out/Ac") == "AA"
assert read("out/Cx") == "C" # ppg2 - not rebuild. out/A did not chaneg!
@pytest.mark.skip # ppg2: no longer forbidden.
def test_raises_if_generating_within_dataload(self):
ppg.util.global_pipegraph.quiet = False
write_job = ppg.FileGeneratingJob("out/A", lambda: write("out/A", "aa"))
def load():
ppg.FileGeneratingJob("out/B", lambda: write("out/B", "aa"))
dl = ppg.DataLoadingJob("load_data", load)
write_job.depends_on(dl)
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "Trying to add new jobs to running pipeline" in str(dl.exception)
def test_ignored_if_generating_within_filegenerating(self):
write_job = ppg.FileGeneratingJob("out/A", lambda: write("out/A", "aa"))
def load():
ppg.FileGeneratingJob("out/B", lambda: write("out/B", "aa"))
write("out/C", "c")
dl = ppg.FileGeneratingJob("out/C", load)
write_job.depends_on(dl)
ppg.run_pipegraph()
assert read("out/C") == "c"
@pytest.mark.skip # you may muck up the graph as you wish in ppg2. We don't look at it
# till we run it again
def test_jobgenerating_is_not_dependency_injection(self):
old = ppg.FileGeneratingJob("out/D", lambda: write("out/D", "D"))
def gen():
write("out/E", "E")
p = ppg.FileGeneratingJob("out/C", lambda: write("out/C", "C"))
old.depends_on(p)
j = ppg.JobGeneratingJob("genjob", gen)
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert isinstance(j.exception, ppg.JobContractError)
assert read("out/E") == "E"
assert not os.path.exists("out/C") # that job never makes it to the pipeline
assert read("out/D") == "D"
def test_invalidation(self, ppg1_compatibility_test):
def gen():
ppg.FileGeneratingJob("out/D", lambda: write("out/D", "D"))
ppg.JobGeneratingJob("A", gen)
ppg.run_pipegraph()
assert read("out/D") == "D"
ppg1_compatibility_test.new_pipegraph()
def gen():
ppg.FileGeneratingJob("out/D", lambda: write("out/D", "E"))
ppg.JobGeneratingJob("A", gen)
ppg.run_pipegraph()
assert read("out/D") == "E"
def test_invalidation_multiple_stages(self, ppg1_compatibility_test):
counter = [0]
def count():
counter[0] += 1
return str(counter[0])
def gen():
def genB():
def genC():
count()
ppg.FileGeneratingJob("out/D", lambda: write("out/D", "D"))
ppg.JobGeneratingJob("C", genC)
ppg.JobGeneratingJob("B", genB)
ppg.JobGeneratingJob("A", gen)
ppg.run_pipegraph()
assert read("out/D") == "D"
assert counter[0] == 1
ppg1_compatibility_test.new_pipegraph()
ppg.JobGeneratingJob("A", gen)
ppg.run_pipegraph()
assert read("out/D") == "D"
assert counter[0] == 2
ppg1_compatibility_test.new_pipegraph()
def gen():
def genB():
def genC():
count()
ppg.FileGeneratingJob("out/D", lambda: write("out/D", "E"))
ppg.JobGeneratingJob("C", genC)
ppg.JobGeneratingJob("B", genB)
ppg.JobGeneratingJob("A", gen)
ppg.run_pipegraph()
assert read("out/D") == "E"
assert counter[0] == 3
@pytest.mark.skip # dependency injections are gone.
@pytest.mark.usefixtures("ppg1_compatibility_test")
class TestDependencyInjectionJob:
def test_basic(self, ppg1_compatibility_test):
        # TODO: there is a problem with this approach. The AttributeLoadingJob
        # references different objects, since it gets pickled alongside the method,
# and depickled again, and then it's not the same object anymore,
# so the FileGeneratingJob and the AttributeLoadingJob in this test
# reference different objects.
# I'm not sure how to handle this right now though.
# I have an idea: Do JobGraphModifyingJobs in each worker, and send back just the
# dependency data (and new job name).
# that way, we can still execute on any worker, and all the pointers should be
# right.
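        # (Illustration of the underlying issue, not part of the test:)
        #   pickle.loads(pickle.dumps(o)) is o  ->  False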
ppg1_compatibility_test.new_pipegraph()
o = Dummy()
of = "out/A"
def do_write():
# logging.info("Accessing dummy (o) %i in pid %s" % (id(o), os.getpid()))
write(of, o.A + o.B)
job = ppg.FileGeneratingJob(of, do_write)
def generate_deps():
def load_a():
# logging.info('executing load A')
return "A"
def load_b():
# logging.info('executing load B')
return "B"
# logging.info("Creating dl on %i in pid %s" % (id(o), os.getpid()))
dlA = ppg.AttributeLoadingJob("dlA", o, "A", load_a)
# logging.info("created dlA")
dlB = ppg.AttributeLoadingJob("dlB", o, "B", load_b)
job.depends_on(dlA)
job.depends_on(dlB)
return [dlA, dlB]
gen_job = ppg.DependencyInjectionJob("C", generate_deps)
job.depends_on(gen_job)
ppg.run_pipegraph()
assert read(of) == "AB"
def test_raises_on_non_dependend_job_injection(self):
o = Dummy()
of = "out/A"
def do_write():
write(of, o.A + o.B)
job = ppg.FileGeneratingJob(of, do_write)
jobD = ppg.FileGeneratingJob("out/D", lambda: write("out/D", "D"))
def generate_deps():
def load_a():
return "A"
def load_b():
return "B"
dlA = ppg.AttributeLoadingJob("dlA", o, "A", load_a)
dlB = ppg.AttributeLoadingJob("dlB", o, "B", load_b)
job.depends_on(dlA)
jobD.depends_on(dlB) # this line must raise
gen_job = ppg.DependencyInjectionJob("C", generate_deps)
job.depends_on(gen_job)
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert not (os.path.exists(of)) # since the gen job crashed
assert os.path.exists(
"out/D"
) # since it has no relation to the gen job actually...
assert isinstance(gen_job.exception, ppg.JobContractError)
assert "was not dependand on the " in str(gen_job.exception)
def test_raises_on_non_dependend_job_injection2(self):
o = Dummy()
of = "out/A"
def do_write():
write(of, o.A + o.B)
job = ppg.FileGeneratingJob(of, do_write)
ppg.FileGeneratingJob("out/D", lambda: write("out/D", "D"))
def generate_deps():
def load_a():
return "A"
def load_b():
return "B"
dlA = ppg.AttributeLoadingJob("dlA", o, "A", load_a)
ppg.AttributeLoadingJob("dlB", o, "B", load_b)
job.depends_on(dlA)
# let's not do anything with dlA
gen_job = ppg.DependencyInjectionJob("C", generate_deps)
job.depends_on(gen_job)
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert not (os.path.exists(of)) # since the gen job crashed
assert os.path.exists(
"out/D"
) # since it has no relation to the gen job actually...
assert isinstance(gen_job.exception, ppg.JobContractError)
assert "case 1" in str(gen_job.exception)
def test_raises_on_non_dependend_job_injection2_can_be_ignored(self):
o = Dummy()
of = "out/A"
def do_write():
write(of, o.A) # + o.B - but B is not in the dependency chain!
job = ppg.FileGeneratingJob(of, do_write)
ppg.FileGeneratingJob("out/D", lambda: write("out/D", "D"))
def generate_deps():
def load_a():
return "A"
def load_b():
return "B"
dlA = ppg.AttributeLoadingJob("dlA", o, "A", load_a)
ppg.AttributeLoadingJob("dlB", o, "B", load_b)
job.depends_on(dlA)
# let's not do anything with dlA
gen_job = ppg.DependencyInjectionJob(
"C", generate_deps, check_for_dependency_injections=False
)
job.depends_on(gen_job)
ppg.run_pipegraph()
        assert os.path.exists(of)  # the graph ran through - the dependency-injection check was disabled
def test_injecting_filegenerating_job(self):
of = "out/A"
def do_write():
write(of, read("out/B"))
job = ppg.FileGeneratingJob(of, do_write)
def generate_dep():
def write_B():
write("out/B", "B")
inner_job = ppg.FileGeneratingJob("out/B", write_B)
job.depends_on(inner_job)
job_gen = ppg.DependencyInjectionJob("gen_job", generate_dep)
job.depends_on(job_gen)
ppg.run_pipegraph()
assert read("out/A") == "B"
def test_passing_non_function(self):
def inner():
ppg.DependencyInjectionJob("out/a", "shu")
assertRaises(ValueError, inner)
def test_passing_non_string_as_jobid(self):
def inner():
ppg.DependencyInjectionJob(5, lambda: 1)
assertRaises(TypeError, inner)
def test_injecting_into_data_loading_does_not_retrigger(self, ppg1_compatibility_test):
o = Dummy()
def do_write():
append("out/A", o.a + o.b)
append("out/B", "X")
def dl_a():
o.a = "A"
def do_run():
of = "out/A"
def inject():
def dl_b():
o.b = "B"
job_dl_b = ppg.DataLoadingJob("ob", dl_b)
job_dl.depends_on(job_dl_b)
job_fg = ppg.FileGeneratingJob(of, do_write)
job_dl = ppg.DataLoadingJob("oa", dl_a)
job_fg.depends_on(job_dl)
job_inject = ppg.DependencyInjectionJob("inject", inject)
job_dl.depends_on(job_inject)
ppg.run_pipegraph()
do_run()
assert read("out/A") == "AB"
assert read("out/B") == "X"
ppg1_compatibility_test.new_pipegraph()
do_run()
assert read("out/A") == "AB" # same data
assert read("out/B") == "X" # no rerun!
# now let's test if a change triggers the rerun
def do_run2():
of = "out/A"
def inject():
def dl_b():
o.b = "C" # so this dl has changed...
job_dl_b = ppg.DataLoadingJob("ob", dl_b)
job_dl.depends_on(job_dl_b)
job_fg = ppg.FileGeneratingJob(of, do_write)
job_dl = ppg.DataLoadingJob("oa", dl_a)
job_fg.depends_on(job_dl)
job_inject = ppg.DependencyInjectionJob("inject", inject)
job_dl.depends_on(job_inject)
ppg.run_pipegraph()
ppg1_compatibility_test.new_pipegraph()
do_run2()
assert read("out/A") == "AC" # same data
assert read("out/B") == "XX" # one rerun...
def test_generated_job_depends_on_failing_job(self, ppg1_compatibility_test):
# import logging
# ppg1_compatibility_test.new_pipegraph(log_file="debug.log", log_level=logging.DEBUG)
def fn_a():
raise ValueError()
def fn_b():
c = ppg.FileGeneratingJob("c", lambda: write("c", read("a")))
c.depends_on(a)
return [c]
a = ppg.FileGeneratingJob("a", fn_a)
b = ppg.JobGeneratingJob("b", fn_b)
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert isinstance(a.exception, ValueError)
assert a.error_reason == "Exception"
assert b.error_reason == "no error"
assert ppg.util.global_pipegraph.jobs["c"].error_reason == "Indirect"
def test_generated_job_depends_on_failing_job_inverse(self, ppg1_compatibility_test):
# import logging
# ppg1_compatibility_test.new_pipegraph(log_file="debug.log", log_level=logging.DEBUG)
def fn_a():
raise ValueError()
def fn_b():
c = ppg.FileGeneratingJob("c", lambda: write("c", read("a")))
c.depends_on(a)
return [c]
        # note: order swapped relative to the previous test
b = ppg.JobGeneratingJob("b", fn_b)
a = ppg.FileGeneratingJob("a", fn_a)
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert isinstance(a.exception, ValueError)
assert a.error_reason == "Exception"
assert b.error_reason == "no error"
assert ppg.util.global_pipegraph.jobs["c"].error_reason == "Indirect"
| [
"[email protected]"
] | |
d0b2fdbcbc1ba73c2421753c096f41008cea2e13 | 61050d0d7f0c0a60474e4e85d30be4e5ea7c6b04 | /vnf/dom/scattering_kernels/IsotropicElasticKernel.py | 7416c4fba85409320daab5dcb70dafe5be561ba0 | [] | no_license | danse-inelastic/vnf | 8173f06f32b4a2fa2b71fddfe0fecf9c19e05e9a | be989448577f14f424aca4ce852c7198304ca57b | refs/heads/master | 2021-01-22T01:06:00.294100 | 2015-05-02T23:25:45 | 2015-05-02T23:25:45 | 34,947,878 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | # -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# kernel that scatters neutrons isotropically and elastically
from AbstractScatteringKernel import AbstractScatteringKernel as base, TableBase
class IsotropicElasticKernel(base):
# scattering_length = 1.0
def customizeLubanObjectDrawer(self, drawer):
drawer.sequence = ['properties']
# drawer.mold.sequence = ['scattering_length']
return
pass
InvBase = base.Inventory
class Inventory(InvBase):
# scattering_length = InvBase.d.float(name = 'scattering_length', default = 1.)
dbtablename = 'isotropicelastickernels'
pass
IsotropicElasticKernel.Inventory = Inventory
del Inventory
from _ import o2t
IsotropicElasticKernelTable = o2t(
IsotropicElasticKernel,
{'subclassFrom': TableBase},
)
# version
__id__ = "$Id$"
# End of file
| [
"[email protected]"
] | |
2a8d5dcb4c882246f14bbb1b9d05dfd6ac54fd4a | 1cc8ecb740cb5550016bdaf18dab8b2651945ebc | /src/common/helpers/getrequest.py | 9dfcbab4279e3a15c4ccf47d36fd363b48656c94 | [] | no_license | ShipraShalini/BidEngine | 2e1b18c9a93e5be25422e3f521d17763d718c7a7 | a6f28b8de7b0e3d8442f7a5a6ebc06b0b9c19cda | refs/heads/master | 2021-01-10T15:43:20.510985 | 2017-12-12T13:40:43 | 2017-12-12T13:40:43 | 48,623,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | import json
def read_request(request):
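    """Return (user, item_name[, amount or payload]) depending on the HTTP verb."""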
user = request.user.get_username()
if request.method == "POST" or request.method == "DELETE" :
data = json.loads(request.body)
item_name = data['item']
try:
amount = data['amount']
except KeyError:
return user, item_name
else:
return user, item_name, amount
if request.method == "GET":
item_name = request.GET.get('item', None)
return user, item_name
if request.method == "PUT":
data = json.loads(request.body)
item_name = data['item']
del data['item']
return user, item_name, data
def values(item):
return item.item_name, item.created_at, item.status, item.seller, item.min_bid, item.sold_to | [
"[email protected]"
] | |
7516b196903db3fd1f64e5811a200d7669055a8a | 5f46ffd83e844df8e4aa4d8bd495f2653a924cad | /sessions/week_2/debug_example.py | bea50406ad8b5f1c174190dd63fdca72733e3732 | [] | no_license | mvwettum/basictrack-2020-2021-2b | 33605b48a982f91ac84e19f64218b7b16b164175 | 3967efdb9b67aa07f4168f7358503a94eb1c4444 | refs/heads/master | 2023-04-03T14:50:25.915630 | 2021-04-22T15:54:30 | 2021-04-22T15:54:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | current_time_str = input("What is the current time (in hours 0-23)?")
wait_time_str = input("How many hours do you want to wait")
current_time_int = int(current_time_str)
wait_time_int = int(wait_time_str)
final_time_int = current_time_int + wait_time_int
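# Wrap around a 24-hour clock.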
final_answer = final_time_int % 24
print("The time after waiting is: ", final_answer)
| [
"[email protected]"
] | |
d3bc6232292463809ee1b24a4047b595b648e6c6 | 8ef5a09d76a11c56963f18e6a08474a1a8bafe3c | /leet_code/127. Word Ladder.py | e10d37091ad01c1102b61a505f8948cdbf7af7bf | [] | no_license | roiei/algo | 32c4677649c7666db148f6183fbfbf66c8b1969f | ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec | refs/heads/master | 2022-04-01T19:21:27.768675 | 2022-02-19T06:15:29 | 2022-02-19T06:15:29 | 169,021,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | import time
from util.util_list import *
from util.util_tree import *
import copy
import collections
from typing import List
class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
g = collections.defaultdict(list)
visited = set()
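        # Bucket words by wildcard pattern ("hot" -> "@ot", "h@t", "ho@") so that
        # words differing in exactly one letter share a bucket.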
for word in wordList:
for i in range(len(word)):
key = word[:i] + '@' + word[i + 1:]
g[key] += word,
q = [(beginWord, 1)]
diff = 0
while q:
word, diff = q.pop(0)
if word == endWord:
break
for i in range(len(word)):
key = word[:i] + '@' + word[i + 1:]
for v in g[key]:
if v in visited:
continue
q += (v, diff + 1),
visited.add(v)
return diff if word == endWord else 0
stime = time.time()
#print(5 == Solution().ladderLength("hit", "cog", ["hot","dot","dog","lot","log","cog"]))
#print(0 == Solution().ladderLength("hit", "cog", ["hot","dot","dog","lot","log"]))
print(2 == Solution().ladderLength("hot", "dot", ["hot","dot","dog"]))
print('elapse time: {} sec'.format(time.time() - stime))
| [
"[email protected]"
] | |
a8268005cae24a2d3f0293db9ac13fef78c391e0 | 1880e6a98d9c7957414392cad17cec7455ec84f6 | /player71.py | 62eb5f846a08ed9604bdd6eec8848c22d9236e2e | [] | no_license | THABUULAGANATHAN/guviprojects | 613759b96875005175db308f2dfcdecc355d7894 | 9eca3e84d227984c5e7a3a988d55674ec31dcd05 | refs/heads/master | 2022-01-16T17:09:05.390055 | 2019-07-19T12:54:57 | 2019-07-19T12:54:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | n=int(input())
li=[int(i) for i in input().split()]
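# For each adjacent pair, print the larger of the two values.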
for i in range(n-1):
    if li[i] < li[i+1]:
        print(li[i+1], end=" ")
    else:
        print(li[i], end=" ")
| [
"[email protected]"
] | |
5d60548f1170918b905f952667c82e95e24761a3 | d0df4037ac7cc1d229058ec46400bdb2c83599fb | /search_in_rotated_sorted_array_ii.py | c3f0fed03cfe683c8848c23429e7f8c015ec5b75 | [] | no_license | TechPuppies/leetcode-python | 470505b4217b54ee9e5a7f559079bf684dd4b5d1 | d3b5ef8ac49ec72213ad7d189f10a2818d7f0a89 | refs/heads/master | 2016-09-15T17:29:05.933313 | 2015-01-06T21:26:46 | 2015-01-06T21:26:46 | 25,418,174 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # coding=utf-8
# AC Rate: 30.7%
# SOURCE URL: https://oj.leetcode.com/problems/search-in-rotated-sorted-array-ii/
#
# Follow up for "Search in Rotated Sorted Array":
# What if duplicates are allowed?
# Would this affect the run-time complexity? How and why?
# Write a function to determine if a given target is in the array.
#
class Solution:
# @param A a list of integers
# @param target an integer
# @return a boolean
def search(self, A, target):
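        # Completion sketch - the original snippet stops at the signature. Standard
        # binary search on the rotated array; when duplicates hide which half is
        # sorted (A[lo] == A[mid] == A[hi]), shrink both ends by one.
        lo, hi = 0, len(A) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if A[mid] == target:
                return True
            if A[lo] == A[mid] == A[hi]:
                lo, hi = lo + 1, hi - 1
            elif A[lo] <= A[mid]:  # left half sorted
                if A[lo] <= target < A[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
            else:  # right half sorted
                if A[mid] < target <= A[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return False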
| [
"[email protected]"
] | |
e41b8aaa05fc038f29ca34d6623ee20b20eee4d9 | 8a8b0267c4db8847a898ac73ccb6e78e1744e24c | /Python_Net_Programming/pnp-ex01/sync/client.py | 828ad8fcea34b0480616031726d433815df0a484 | [] | no_license | entirelymagic/Link_Academy | 41ba890df6793924d186ea94dc8d13b0636c6679 | 844c39ff1281fae8406cd1a0dc06afd357f0bef3 | refs/heads/master | 2023-06-07T03:17:00.527924 | 2021-07-03T09:59:25 | 2021-07-03T09:59:25 | 314,755,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | import socket
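# Minimal blocking TCP client: connect, print the server greeting, send one line, print the reply.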
sClient = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sClient.connect(("localhost",8005))
print("Server said: ", sClient.recv(256).decode("utf-8"))
msg = input("msg: ")
sClient.send(bytes(msg,"utf-8"))
print("Server said: ", sClient.recv(256).decode("utf-8"))
sClient.close() | [
"[email protected]"
] | |
61dfbb15403708b218bc03aaa4373465c39378ef | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nndemocraci.py | a6706b78bd88982fd2e29382b6e4dbf0e39da248 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 513 | py | ii = [('CookGHP3.py', 15), ('MarrFDI.py', 10), ('SadlMLP.py', 2), ('WilbRLW.py', 1), ('MartHSI2.py', 2), ('MarrFDI3.py', 10), ('AdamWEP.py', 1), ('CarlTFR.py', 14), ('CookGHP2.py', 5), ('MarrFDI2.py', 24), ('LandWPA.py', 2), ('LandWPA2.py', 3), ('WadeJEB.py', 4), ('CoopJBT.py', 1), ('HogaGMM.py', 1), ('MartHRW.py', 2), ('HallFAC.py', 1), ('RoscTTI.py', 1), ('StorJCC.py', 8), ('HaliTBC.py', 1), ('WilbRLW3.py', 1), ('JacoWHI.py', 1), ('FitzRNS2.py', 2), ('MartHSI.py', 12), ('ThomWEC.py', 2), ('KeigTSS.py', 3)] | [
"[email protected]"
] | |
f7d0d94ea568a92439811da2fd1771cfd3e92a1c | 130a98632d2ab4c171503b79e455b7aa27a1dda4 | /models/research/object_detection/exporter_lib_tf2_test.py | 8e85e1124bca40957464b5c80acb6a24ea7fcc3d | [
"MIT",
"Apache-2.0"
] | permissive | aboerzel/German_License_Plate_Recognition | d7fc0314295f5cf0c9d7ae9c93a795e3ef1c5787 | 6fc53292b1d3ce3c0340ce724c2c11c77e663d27 | refs/heads/master | 2023-01-30T18:08:37.339542 | 2023-01-07T07:41:36 | 2023-01-07T07:41:36 | 245,586,430 | 34 | 12 | MIT | 2023-01-07T07:41:37 | 2020-03-07T07:16:51 | Python | UTF-8 | Python | false | false | 12,721 | py | # Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for exporter_lib_v2.py."""
from __future__ import division
import io
import os
import unittest
from absl.testing import parameterized
import numpy as np
from PIL import Image
import six
import tensorflow.compat.v2 as tf
from object_detection import exporter_lib_v2
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.protos import pipeline_pb2
from object_detection.utils import dataset_util
from object_detection.utils import tf_version
if six.PY2:
import mock # pylint: disable=g-importing-member,g-import-not-at-top
else:
from unittest import mock # pylint: disable=g-importing-member,g-import-not-at-top
class FakeModel(model.DetectionModel):
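  """Trivial DetectionModel (a single constant 1x1 conv) giving deterministic, easy-to-check outputs."""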
def __init__(self, conv_weight_scalar=1.0):
super(FakeModel, self).__init__(num_classes=2)
self._conv = tf.keras.layers.Conv2D(
filters=1, kernel_size=1, strides=(1, 1), padding='valid',
kernel_initializer=tf.keras.initializers.Constant(
value=conv_weight_scalar))
def preprocess(self, inputs):
true_image_shapes = [] # Doesn't matter for the fake model.
return tf.identity(inputs), true_image_shapes
def predict(self, preprocessed_inputs, true_image_shapes, **side_inputs):
return_dict = {'image': self._conv(preprocessed_inputs)}
if 'side_inp_1' in side_inputs:
return_dict['image'] += side_inputs['side_inp_1']
return return_dict
def postprocess(self, prediction_dict, true_image_shapes):
predict_tensor_sum = tf.reduce_sum(prediction_dict['image'])
with tf.control_dependencies(list(prediction_dict.values())):
postprocessed_tensors = {
'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]], tf.float32),
'detection_scores': predict_tensor_sum + tf.constant(
[[0.7, 0.6], [0.9, 0.0]], tf.float32),
'detection_classes': tf.constant([[0, 1],
[1, 0]], tf.float32),
'num_detections': tf.constant([2, 1], tf.float32),
}
return postprocessed_tensors
def restore_map(self, checkpoint_path, fine_tune_checkpoint_type):
pass
def restore_from_objects(self, fine_tune_checkpoint_type):
pass
def loss(self, prediction_dict, true_image_shapes):
pass
def regularization_losses(self):
pass
def updates(self):
pass
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class ExportInferenceGraphTest(tf.test.TestCase, parameterized.TestCase):
def _save_checkpoint_from_mock_model(
self, checkpoint_dir, conv_weight_scalar=6.0):
mock_model = FakeModel(conv_weight_scalar)
fake_image = tf.zeros(shape=[1, 10, 10, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
mock_model.postprocess(predictions, true_image_shapes)
ckpt = tf.train.Checkpoint(model=mock_model)
exported_checkpoint_manager = tf.train.CheckpointManager(
ckpt, checkpoint_dir, max_to_keep=1)
exported_checkpoint_manager.save(checkpoint_number=0)
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'encoded_image_string_tensor'},
{'input_type': 'tf_example'},
)
def test_export_yields_correct_directory_structure(
self, input_type='image_tensor'):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type=input_type,
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'saved_model.pb')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'variables', 'variables.index')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'saved_model', 'variables',
'variables.data-00000-of-00001')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'checkpoint', 'ckpt-0.index')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'checkpoint', 'ckpt-0.data-00000-of-00001')))
self.assertTrue(os.path.exists(os.path.join(
output_directory, 'pipeline.config')))
def get_dummy_input(self, input_type):
"""Get dummy input for the given input type."""
if input_type == 'image_tensor':
return np.zeros((1, 20, 20, 3), dtype=np.uint8)
if input_type == 'float_image_tensor':
return np.zeros((1, 20, 20, 3), dtype=np.float32)
elif input_type == 'encoded_image_string_tensor':
image = Image.new('RGB', (20, 20))
byte_io = io.BytesIO()
image.save(byte_io, 'PNG')
return [byte_io.getvalue()]
elif input_type == 'tf_example':
image_tensor = tf.zeros((20, 20, 3), dtype=tf.uint8)
encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).numpy()
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded':
dataset_util.bytes_feature(encoded_jpeg),
'image/format':
dataset_util.bytes_feature(six.b('jpeg')),
'image/source_id':
dataset_util.bytes_feature(six.b('image_id')),
})).SerializeToString()
return [example]
@parameterized.parameters(
{'input_type': 'image_tensor'},
{'input_type': 'encoded_image_string_tensor'},
{'input_type': 'tf_example'},
{'input_type': 'float_image_tensor'},
)
def test_export_saved_model_and_run_inference(
self, input_type='image_tensor'):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type=input_type,
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
image = self.get_dummy_input(input_type)
detections = detect_fn(tf.constant(image))
detection_fields = fields.DetectionResultFields
self.assertAllClose(detections[detection_fields.detection_boxes],
[[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(detections[detection_fields.detection_scores],
[[0.7, 0.6], [0.9, 0.0]])
self.assertAllClose(detections[detection_fields.detection_classes],
[[1, 2], [2, 1]])
self.assertAllClose(detections[detection_fields.num_detections], [2, 1])
@parameterized.parameters(
{'use_default_serving': True},
{'use_default_serving': False}
)
def test_export_saved_model_and_run_inference_with_side_inputs(
self, input_type='image_tensor', use_default_serving=True):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type=input_type,
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory,
use_side_inputs=True,
side_input_shapes='1/2,2',
side_input_names='side_inp_1,side_inp_2',
side_input_types='tf.float32,tf.uint8')
saved_model_path = os.path.join(output_directory, 'saved_model')
detect_fn = tf.saved_model.load(saved_model_path)
detect_fn_sig = detect_fn.signatures['serving_default']
image = tf.constant(self.get_dummy_input(input_type))
side_input_1 = np.ones((1,), dtype=np.float32)
side_input_2 = np.ones((2, 2), dtype=np.uint8)
if use_default_serving:
detections = detect_fn_sig(input_tensor=image,
side_inp_1=tf.constant(side_input_1),
side_inp_2=tf.constant(side_input_2))
else:
detections = detect_fn(image,
tf.constant(side_input_1),
tf.constant(side_input_2))
detection_fields = fields.DetectionResultFields
self.assertAllClose(detections[detection_fields.detection_boxes],
[[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.8, 0.8]],
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]]])
self.assertAllClose(detections[detection_fields.detection_scores],
[[400.7, 400.6], [400.9, 400.0]])
self.assertAllClose(detections[detection_fields.detection_classes],
[[1, 2], [2, 1]])
self.assertAllClose(detections[detection_fields.num_detections], [2, 1])
def test_export_checkpoint_and_run_inference_with_image(self):
tmp_dir = self.get_temp_dir()
self._save_checkpoint_from_mock_model(tmp_dir, conv_weight_scalar=2.0)
with mock.patch.object(
model_builder, 'build', autospec=True) as mock_builder:
mock_builder.return_value = FakeModel()
exporter_lib_v2.INPUT_BUILDER_UTIL_MAP['model_build'] = mock_builder
output_directory = os.path.join(tmp_dir, 'output')
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
exporter_lib_v2.export_inference_graph(
input_type='image_tensor',
pipeline_config=pipeline_config,
trained_checkpoint_dir=tmp_dir,
output_directory=output_directory)
mock_model = FakeModel()
ckpt = tf.compat.v2.train.Checkpoint(
model=mock_model)
checkpoint_dir = os.path.join(tmp_dir, 'output', 'checkpoint')
manager = tf.compat.v2.train.CheckpointManager(
ckpt, checkpoint_dir, max_to_keep=7)
ckpt.restore(manager.latest_checkpoint).expect_partial()
fake_image = tf.ones(shape=[1, 5, 5, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = mock_model.preprocess(fake_image)
predictions = mock_model.predict(preprocessed_inputs, true_image_shapes)
detections = mock_model.postprocess(predictions, true_image_shapes)
# 150 = conv_weight_scalar * height * width * channels = 2 * 5 * 5 * 3.
self.assertAllClose(detections['detection_scores'],
[[150 + 0.7, 150 + 0.6], [150 + 0.9, 150 + 0.0]])
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| [
"[email protected]"
] | |
9005c88fe2b68a760c0615bfdf885573b6c96618 | f24d16c0e064d1f77a09cc02217a6dfe9ee39d56 | /pipeline/data_process.py | 90837db91d24a7a228b1eb0243ba1a5a70990875 | [] | no_license | hbradlow/modelbuilder | 92ec5fd92527f6989d43212dd6ffd27abcb4738c | e537fb37f7331bf50e0ea849bc097b996dbfdbdd | refs/heads/master | 2020-05-20T00:17:37.352732 | 2012-11-13T23:44:11 | 2012-11-13T23:44:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,339 | py | import numpy as np
import scipy
import math
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
"""
An arrow object for a matplotlib 3d plot.
Code from http://stackoverflow.com/questions/11140163/python-matplotlib-plotting-a-3d-cube-a-sphere-and-a-vector
"""
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
def __repr__(self):
return self._verts3d
class Ellipse:
def __init__(self,origin=[0,0,0],radius=(0,0),angle=0):
self.origin = origin
self.radius = radius
self.angle = angle
self.axis = [0,0,1]
def __repr__(self):
return "center: " + str(self.origin) + ", radius: " + str(self.radius)
class Point:
def __init__(self,x=0,y=0,z=0,is_valid=True):
self.x = x
self.y = y
self.z = z
self.is_valid = is_valid
def set(self,l):
self.x = l[0]
self.y = l[1]
self.z = l[2]
def list(self):
return [self.x,self.y,self.z]
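# NOTE: the original snippet references a Circle class that is not defined in it;
# the minimal stand-in below (assumed interface: origin, radius, axis) keeps the
# CircleFit code runnable.
class Circle:
    def __init__(self, origin=None, radius=0.0):
        self.origin = origin if origin is not None else [0, 0, 0]
        self.radius = radius
        self.axis = [0, 0, 1]
    def __repr__(self):
        return "center: " + str(self.origin) + ", radius: " + str(self.radius)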
class CircleFit:
def __init__(self,points=[]):
self.circle = Circle()
self.points = []
for p in points:
self.points.append(Point(p[0],p[1],p[2]))
self.plane_to_xy_transform = None
self.flatten_transform = None
def process(self):
self.calculate_plane_to_xy_transform()
self.transform_data(self.plane_to_xy_transform)
self.calculate_flatten_transform()
self.transform_data(self.flatten_transform)
self.show()
self.calculate_best_fit_ellipse()
"""
self.transform_data(self.flatten_transform,inverse=True)
self.transform_data(self.plane_to_yz_transform,inverse=True)
"""
def transform_data(self,t,inverse=False):
def transform(t,v):
return np.dot(t,np.array(v)).tolist()
if inverse:
t = np.linalg.inv(np.array(t))
else:
t = np.array(t)
for (index, point) in enumerate(self.points):
self.points[index].set(transform(t,point.list()+[1]))
self.circle.origin = transform(t,self.circle.origin + [1])[0:3]
self.circle.axis = transform(t,self.circle.axis + [0])[0:3]
self.normal = transform(t,self.normal + [0])[0:3]
def best_fit_plane(self):
"""
Find the plane that best fits the set of translations
"""
def zeros(i):
return [0 for a in range(i)]
A = np.array([zeros(3) for j in range(3)])
b = np.array(zeros(3))
for point in self.points:
A = np.add(np.array([ [point.x*point.x, point.x*point.y, point.x],
[point.x*point.y, point.y*point.y, point.y],
[point.x, point.y, 1]]),A)
b = np.add(np.array([point.x*point.z,point.y*point.z,point.z]),b)
x = np.linalg.solve(A,b)
return x
def calculate_plane_to_xy_transform(self):
"""
        Calculate the transform to rotate the plane of the circle into the xy plane.
"""
def chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
x = self.best_fit_plane()
normal = [x[0],x[1],-1]
self.normal = normal
from cgkit.cgtypes import quat, mat3, slerp
axis = np.cross(np.array(normal),np.array([0,0,1]))
angle = math.acos(np.dot(np.array(normal),np.array([0,0,1]))/np.linalg.norm(normal))
q = quat()
q = q.fromAngleAxis(angle,axis.tolist())
transform = [i for i in chunks(q.toMat4().toList(rowmajor=True),4)]
self.plane_to_xy_transform = transform
return transform
def calculate_flatten_transform(self):
"""
        Calculate the transform that drops the points onto the xy plane by subtracting the mean z value.
"""
def ave(l):
return reduce(lambda x,y: x+y,l)/len(l)
a = ave([point.z for point in self.points if point.is_valid])
transform = [ [1,0,0,0],
[0,1,0,0],
[0,0,1,-a],
[0,0,0,1]]
self.flatten_transform = transform
return transform
def calculate_best_fit_ellipse(self):
"""
http://math.stackexchange.com/questions/214661/circle-least-squares-fit
"""
A = []
b = []
def f(b,*args):
det = b[1]**2 - 4*b[0]*b[2]
if det > -.1:
return 999999
total = 0
for point in self.points:
total += np.dot(np.array([point.x**2,point.x*point.y,point.y**2,point.x,point.y,1]),np.array(b))**2
return total
x = scipy.optimize.fmin(f,(1,1,1,1,1,1))
self.circle = Circle([0] + x[0].tolist()[0:2],x[0].tolist()[2])
self.circle.radius = math.sqrt(self.circle.radius + self.circle.origin[1]**2 + self.circle.origin[2]**2)
self.circle.axis = [1,0,0]
def show(self):
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d.art3d as art3d
import matplotlib.patches
fig = plt.figure("Circle Fit")
ax = fig.add_subplot(111,projection="3d",aspect=1)
x = [];y = [];z = []
for point in self.points:
x.append(point.x)
y.append(point.y)
z.append(point.z)
ax.scatter(x,y,z,color="r",s=200)
ax.auto_scale_xyz([-.5, .5], [-.5, .5], [-0, 1])
circle_axis = Arrow3D((0,self.normal[0]),(0,self.normal[1]),(0,self.normal[2]),mutation_scale=20,lw=3,arrowstyle="-|>", color="g")
ax.add_artist(circle_axis)
plt.show()
| [
"[email protected]"
] | |
9ed43354fe92a243dd52ae9e8338df41be2e2346 | 58df224689ab08c99359b1a6077d2fba3728dc61 | /lamda-ocr/merge-files/borb/toolkit/diff/pdf_diff.py | 86117069066e1f49a4c64f9246aec7c34fa2a4c9 | [] | no_license | LIT-Midas/LITHackathon | 2b286728c156d79d3f426f6d19b160a2a04690db | 7b990483dd48b91cf3ec3452b78ab67770da71af | refs/heads/main | 2023-08-13T05:22:59.373965 | 2021-08-16T01:09:49 | 2021-08-16T01:09:49 | 395,024,729 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,022 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This class was meant to perform a dictionary/array level comparison of PDF documents.
It makes it a lot easier to debug problems.
"""
import typing
from borb.io.filter.stream_decode_util import decode_stream
from borb.io.read.types import Decimal, Dictionary, List, Name, Stream
from borb.pdf.document import Document
class PDFDiff:
"""
This class was meant to perform a dictionary/array level comparison of PDF documents.
It makes it a lot easier to debug problems.
"""
def __init__(self, pdf_a: Document, pdf_b: Document):
self._document_a: Document = pdf_a
self._document_b: Document = pdf_b
self._already_compared: typing.List[int] = []
self._errors: typing.List[str] = []
def compare(self) -> None:
"""
This method compares the given PDF documents, logging any differences between them.
"""
self._compare(self._document_a, self._document_b, "", "")
@staticmethod
def _get_reference_or_none(obj) -> str:
try:
if obj.get_reference() is not None:
return "(%d 0 R)" % obj.get_reference().object_number
except:
pass
return ""
def _log_difference(self, error_msg: str) -> None:
print(error_msg)
self._errors.append(error_msg)
def _compare(self, a, b, path_to_a, path_to_b) -> None:
if id(a) in self._already_compared:
return
if id(b) in self._already_compared:
return
self._already_compared.append(id(a))
self._already_compared.append(id(b))
# check type
if a.__class__.__name__ != b.__class__.__name__:
self._log_difference(
"Class mismatch : %s %s <--> %s %s"
% (path_to_a, a.__class__.__name__, path_to_b, b.__class__.__name__)
)
if isinstance(a, Name):
if str(a) != str(b):
self._log_difference(
"Name mismatch : %s %s <--> %s %s"
% (path_to_a, str(a), path_to_b, str(b))
)
return
if isinstance(a, Decimal):
if int(a) != int(b):
self._log_difference(
"Value mismatch : %s %s <--> %s %s"
% (path_to_a, str(a), path_to_b, str(b))
)
# get references if they exist
ref_a = PDFDiff._get_reference_or_none(a)
        ref_b = PDFDiff._get_reference_or_none(b)
# compare streams
if isinstance(a, Stream):
decode_stream(a)
decode_stream(b)
if "DecodedBytes" not in a:
self._log_difference("Unable to decode Stream %s" % (path_to_a + ref_a))
if "DecodedBytes" not in b:
self._log_difference("Unable to decode Stream %s" % (path_to_b + ref_b))
dba: bytes = a["DecodedBytes"]
dbb: bytes = b["DecodedBytes"]
if len(dba) != len(dbb):
self._errors.append(
"Stream Length mismatch : %s %d <--> %s %d"
% (path_to_a + ref_a, len(a), path_to_b + ref_b, len(b))
)
else:
for i in range(0, len(dba)):
if dba[i] != dbb[i]:
self._errors.append(
"Stream content mismatch : %s %d <--> %s %d"
% (path_to_a + ref_a, i, path_to_b + ref_b, i)
)
# compare dictionary
if isinstance(a, Dictionary):
for k, v in a.items():
if k == "ID":
continue
if k == "Bytes":
continue
if k == "DecodedBytes":
continue
if isinstance(a, Stream) and k == "Length":
continue
if k not in b:
self._log_difference(
"Key absent/present mismatch : %s %s <--> %s %s"
% (path_to_a + ref_a, str(k), path_to_b + ref_b, None)
)
continue
self._compare(
a[k],
b[k],
path_to_a + "/" + str(k) + ref_a,
path_to_b + "/" + str(k) + ref_b,
)
return
# compare array
if isinstance(a, List):
if len(a) != len(b):
self._errors.append(
"Array Length mismatch : %s %d <--> %s %d"
% (path_to_a + ref_a, len(a), path_to_b + ref_b, len(b))
)
for i in range(0, min(len(a), len(b))):
self._compare(
a[i],
b[i],
path_to_a + ref_a + "/" + str(i),
path_to_b + ref_b + "/" + str(i),
)
return
| [
"[email protected]"
] | |
cf6f57b4c05d78d82369ca1c29ef07844bdec546 | 2d98f950a9bc701b360e3fd807bb07b85edabee9 | /9/认识爬虫-课件-v1/Py爬虫课件/15/example-project/example/pipelines.py | 4db113535b1e543050b79319adff4a9e45c4bba5 | [] | no_license | Ran-oops/python_notes2 | eaf3e98ee460d0d63d2bf8881cacd10916baa902 | 3a1bf86a803c716f4ef4aeec53a69ebb3662cf49 | refs/heads/master | 2020-11-30T15:40:53.850721 | 2019-12-28T05:22:49 | 2019-12-28T05:22:49 | 230,429,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/topics/item-pipeline.html
from datetime import datetime
class ExamplePipeline(object):
def process_item(self, item, spider):
item["crawled"] = datetime.utcnow() # 获取utc时间
item["spider"] = spider.name # 爬虫名称
return item
| [
"[email protected]"
] | |
81fb2fce5bc257958679a04b84d127ffaecb919c | c9afaf387faf7c478e860f4ab5f087b254b5b87f | /main.py | b864b06dab49d5e7afad121326acdff0184725b9 | [] | no_license | INNFINITEMINDS/FitzHugh-Nagumo-Neuron-Model | e3a5c7b7187fe77e575f96c955cb4253d00bf4fb | b1afb1745e3773c1ff7913a12ed98679094f0c2c | refs/heads/master | 2022-12-19T17:41:06.466021 | 2020-10-20T10:51:01 | 2020-10-20T10:51:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,582 | py | import numpy as np
from src.model import FNNeuron
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
from scipy.fft import fft
from scipy import signal
def exp1():
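    # Sweep b and I_ext over a grid and save a FitzHugh-Nagumo trajectory plot
    # for every (b, I_ext) combination.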
exp_plot_dir = 'images/exp1'
dt = 0.001
niter = int(10e4)
b = 0.25
I_ext = 0.1
fn = FNNeuron(
dt,
niter
)
v = np.random.random()
w = np.random.random()
b_ = np.arange(10)*0.1
I_ = np.arange(30)*0.1
for b in tqdm(b_):
for I_ext in I_:
fn.set_b(b)
fn.set_v(v)
fn.set_w(w)
fn.set_I_ext(I_ext)
image_name = 'v_{val_v:.4f}_w_{val_w:.4f}_b_{val_b:.4f}_dt_{val_dt:.4f}_I_ext_{val_I:.4f}_niter_{n}.png'.format(
val_v = v,
val_w = w,
val_b = b,
val_dt = dt,
val_I = I_ext,
n = niter
)
if not os.path.exists(exp_plot_dir):
os.mkdir(exp_plot_dir)
fn()
fn.plot(os.path.join(exp_plot_dir, image_name))
fn.reset()
def exp2():
exp_plot_dir = 'images/exp2'
dt = 0.001
niter = int(10e4)
b = -0.5
I_ext = 0
fn = FNNeuron(
dt,
niter
)
num_exp = 50
v = np.random.normal(0, 1, num_exp)
w = np.random.normal(0, 1, num_exp)
fig, axes = plt.subplots(1, 1,figsize = (5, 5))
for i in tqdm(range(num_exp)):
fn.set_b(b)
fn.set_v(v[i])
fn.set_w(w[i])
fn.set_I_ext(I_ext)
fn()
axes.plot(fn.v_hist, fn.w_hist)
axes.set_xlabel('voltage')
axes.set_ylabel('recovery variable')
axes.set_title('phase plot')
fn.reset()
if not os.path.exists(exp_plot_dir):
os.mkdir(exp_plot_dir)
image_name = 'case_1a_phase_plot_num_iter_{num}_b_{val_b:.4f}_dt_{val_dt:.4f}_I_ext_{val_I:.4f}_niter_{n}.png'.format(
num = num_exp,
val_b = b,
val_dt = dt,
val_I = I_ext,
n = niter
)
fig.savefig(os.path.join(exp_plot_dir, image_name))
def exp3():
exp_plot_dir = 'images/exp3'
dt = 0.001
niter = int(10e4)
b = 0
I_ext = 0
fn = FNNeuron(
dt,
niter
)
V = np.arange(-10, 20)*0.1
w = 0
for v in tqdm(V):
fn.set_b(b)
fn.set_v(v)
fn.set_w(w)
fn.set_I_ext(I_ext)
image_name = 'case_1b_v_{val_v:.4f}_w_{val_w:.4f}_b_{val_b:.4f}_dt_{val_dt:.4f}_I_ext_{val_I:.4f}_niter_{n}.png'.format(
val_v = v,
val_w = w,
val_b = b,
val_dt = dt,
val_I = I_ext,
n = niter
)
exp = 'b_{val_b:.4f}'.format(val_b=b)
dir = os.path.join(exp_plot_dir, exp)
if not os.path.exists(dir):
os.mkdir(os.path.join(dir))
fn()
fn.plot(os.path.join(dir,image_name))
fn.reset()
def exp4():
I_ext = np.arange(115, 285, 2)*0.01
b = 0.4
v = np.random.normal(0, 1)
w = np.random.normal(0, 1)
exp_plot_dir = 'images/exp4'
dt = 0.001
niter = int(10e5)
fn = FNNeuron(
dt,
niter
)
def is_periodic(samples, tol):
m = tol*max(samples)
t = (max(samples) - min(samples)) <= 0.25*max(samples)
return all(m <= d for d in samples) and t
osc_I = []
for I in I_ext:
fn.set_b(b)
fn.set_v(v)
fn.set_w(w)
fn.set_I_ext(I)
image_name = 'case_2a_v_{val_v:.4f}_w_{val_w:.4f}_b_{val_b:.4f}_dt_{val_dt:.4f}_I_ext_{val_I:.4f}_niter_{n}.png'.format(
val_v = v,
val_w = w,
val_b = b,
val_dt = dt,
val_I = I,
n = niter
)
if not os.path.exists(exp_plot_dir):
os.mkdir(os.path.join(exp_plot_dir))
fn()
peaks, _ = signal.find_peaks(fn.v_hist)
heights = [fn.v_hist[p] for p in peaks]
print('\n')
print(I)
val = is_periodic(heights[1:], 0.75)
print(val)
if val:
osc_I.append(I)
fn.plot(os.path.join(exp_plot_dir, 'phase_'+image_name))
fn.reset()
return osc_I
def exp5():
exp_plot_dir = 'images/exp5'
dt = 0.001
niter = int(10e4)
b = 0.4
I_ext = 1.55
fn = FNNeuron(
dt,
niter
)
num_exp = 50
v = np.random.normal(0, 1, num_exp)
w = np.random.normal(0, 1, num_exp)
fig, axes = plt.subplots(1, 1,figsize = (5, 5))
for i in tqdm(range(num_exp)):
fn.set_b(b)
fn.set_v(v[i])
fn.set_w(w[i])
fn.set_I_ext(I_ext)
fn()
axes.plot(fn.v_hist, fn.w_hist, 'b')
axes.set_xlabel('voltage')
axes.set_ylabel('recovery variable')
axes.set_title('phase plot')
fn.reset()
fn.set_niter(int(10e5))
fn.set_b(b)
fn.set_v(np.random.normal(0, 1))
fn.set_w(np.random.normal(0, 1))
fn.set_I_ext(0.5)
fn(True)
axes.plot(fn.v_hist, fn.w_hist, 'r')
axes.set_xlabel('voltage')
axes.set_ylabel('recovery variable')
axes.set_title('phase plot')
fn.reset()
if not os.path.exists(exp_plot_dir):
os.mkdir(exp_plot_dir)
image_name = 'case_2ba_phase_plot_num_iter_{num}_b_{val_b:.4f}_dt_{val_dt:.4f}_I_ext_{val_I:.4f}_niter_{n}.png'.format(
num = num_exp,
val_b = b,
val_dt = dt,
val_I = I_ext,
n = niter
)
fig.savefig(os.path.join(exp_plot_dir, image_name))
exp5()
#I = exp4()
#exp3()
#exp2()
#exp1()
| [
"[email protected]"
] | |
06a47fd3f3d748fea890d46dfe910447204d0544 | 0db19410e9751790af8ce4a0a9332293e379c02f | /mmpose/models/heads/heatmap_heads/__init__.py | b482216b36f61ceb66aae8974ae178a8455d5022 | [
"Apache-2.0"
] | permissive | open-mmlab/mmpose | 2c9986521d35eee35d822fb255e8e68486026d94 | 537bd8e543ab463fb55120d5caaa1ae22d6aaf06 | refs/heads/main | 2023-08-30T19:44:21.349410 | 2023-07-04T13:18:22 | 2023-07-04T13:18:22 | 278,003,645 | 4,037 | 1,171 | Apache-2.0 | 2023-09-14T09:44:55 | 2020-07-08T06:02:55 | Python | UTF-8 | Python | false | false | 373 | py | # Copyright (c) OpenMMLab. All rights reserved.
from .ae_head import AssociativeEmbeddingHead
from .cid_head import CIDHead
from .cpm_head import CPMHead
from .heatmap_head import HeatmapHead
from .mspn_head import MSPNHead
from .vipnas_head import ViPNASHead
__all__ = [
'HeatmapHead', 'CPMHead', 'MSPNHead', 'ViPNASHead',
'AssociativeEmbeddingHead', 'CIDHead'
]
| [
"[email protected]"
] | |
ded383f66dbaa90059ccca1ab9639a978ad264f9 | 78db5bc74181173f2d00bea409997a64b4682adf | /venv/lib/python3.9/site-packages/pip/_vendor/chardet/euctwprober.py | 7dbc136e80b7e704891fc4fdde70bb8b6d72ba56 | [
"MIT"
] | permissive | CiscoDevNet/meraki-code | dfe680f077ebd053a3b663f1434f648f5a91b541 | d031aab82e3fa5ce7cf57b257fef8c9a4c63d71e | refs/heads/master | 2023-05-28T18:43:28.848983 | 2022-04-11T19:45:19 | 2022-04-11T19:45:19 | 188,288,487 | 67 | 60 | MIT | 2023-05-23T00:51:58 | 2019-05-23T18:43:15 | Python | UTF-8 | Python | false | false | 1,793 | py | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTW_SM_MODEL
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
super(EUCTWProber, self).__init__()
self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
self.distribution_analyzer = EUCTWDistributionAnalysis()
self.reset()
@property
def charset_name(self):
return "EUC-TW"
@property
def language(self):
return "Taiwan"
| [
"[email protected]"
] | |
36024e8c09241fb2a5405711eeb21edb7a07e067 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /monai/handlers/confusion_matrix.py | 368aacc6cbe04f1ed17742b90b4cd21c7b41fda1 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 3,205 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from monai.handlers.ignite_metric import IgniteMetric
from monai.metrics import ConfusionMatrixMetric
from monai.metrics.utils import MetricReduction
class ConfusionMatrix(IgniteMetric):
"""
Compute confusion matrix related metrics from full size Tensor and collects average over batch, class-channels, iterations.
"""
def __init__(
self,
include_background: bool = True,
metric_name: str = "hit_rate",
output_transform: Callable = lambda x: x,
save_details: bool = True,
) -> None:
"""
Args:
include_background: whether to skip metric computation on the first channel of
the predicted output. Defaults to True.
metric_name: [``"sensitivity"``, ``"specificity"``, ``"precision"``, ``"negative predictive value"``,
``"miss rate"``, ``"fall out"``, ``"false discovery rate"``, ``"false omission rate"``,
``"prevalence threshold"``, ``"threat score"``, ``"accuracy"``, ``"balanced accuracy"``,
``"f1 score"``, ``"matthews correlation coefficient"``, ``"fowlkes mallows index"``,
``"informedness"``, ``"markedness"``]
Some of the metrics have multiple aliases (as shown in the wikipedia page aforementioned),
and you can also input those names instead.
output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
lists of `channel-first` Tensors. the form of `(y_pred, y)` is required by the `update()`.
for example: if `ignite.engine.state.output` is `{"pred": xxx, "label": xxx, "other": xxx}`,
output_transform can be `lambda x: (x["pred"], x["label"])`.
save_details: whether to save metric computation details per image, for example: TP/TN/FP/FN of every image.
default to True, will save to `engine.state.metric_details` dict with the metric name as key.
See also:
:py:meth:`monai.metrics.confusion_matrix`
"""
metric_fn = ConfusionMatrixMetric(
include_background=include_background,
metric_name=metric_name,
compute_sample=False,
reduction=MetricReduction.MEAN,
)
self.metric_name = metric_name
super().__init__(
metric_fn=metric_fn,
output_transform=output_transform,
save_details=save_details,
)
| [
"[email protected]"
] | |
1de3d74ecc579a3482423f8897072d74c418249d | e8ae11e5017507da59e2e92d423b6a1994490de4 | /env/lib/python2.7/site-packages/azure/mgmt/network/models/virtual_network.py | d04dbdddf6025303d691acc85256825d49980cec | [] | no_license | teopeurt/ansible-ubuntu-server | 613d00cea28bc6531acf4a39aeeb9cd0baa2a391 | b5b6127d2ee9723c5088443efe2ffb8ae30cfea7 | refs/heads/master | 2021-06-28T12:49:50.935753 | 2017-07-31T17:34:33 | 2017-07-31T17:34:33 | 98,912,808 | 0 | 1 | null | 2020-07-24T00:05:31 | 2017-07-31T17:32:56 | Makefile | UTF-8 | Python | false | false | 3,797 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetwork(Resource):
"""
Virtual Network resource
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource Id
:type id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param address_space: Gets or sets AddressSpace that contains an array of
IP address ranges that can be used by subnets
:type address_space: :class:`AddressSpace
<azure.mgmt.network.models.AddressSpace>`
:param dhcp_options: Gets or sets DHCPOptions that contains an array of
DNS servers available to VMs deployed in the virtual network
:type dhcp_options: :class:`DhcpOptions
<azure.mgmt.network.models.DhcpOptions>`
:param subnets: Gets or sets List of subnets in a VirtualNetwork
:type subnets: list of :class:`Subnet <azure.mgmt.network.models.Subnet>`
:param resource_guid: Gets or sets resource guid property of the
VirtualNetwork resource
:type resource_guid: str
:param provisioning_state: Gets or sets Provisioning state of the
PublicIP resource Updating/Deleting/Failed
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'},
'dhcp_options': {'key': 'properties.dhcpOptions', 'type': 'DhcpOptions'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, address_space=None, dhcp_options=None, subnets=None, resource_guid=None, provisioning_state=None, etag=None):
super(VirtualNetwork, self).__init__(id=id, location=location, tags=tags)
self.address_space = address_space
self.dhcp_options = dhcp_options
self.subnets = subnets
self.resource_guid = resource_guid
self.provisioning_state = provisioning_state
self.etag = etag
| [
"[email protected]"
] | |
1124f2ed2301c3d86e4644f9b6d42729ac140055 | 19a5937501ff40d53b69617d6b05484c2861c54b | /apps/urls.py | 212155a4226cd217323270aab6a2cd9abf0d642c | [] | no_license | pombredanne/abyss | 44319541f614669861157955b5d4059fcf3f8aad | 8de3f2438ad74ad4d2703ce0bb7ccf7672423820 | refs/heads/master | 2020-12-29T19:03:52.168087 | 2013-09-16T13:48:28 | 2013-09-16T13:48:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | from django.conf.urls import patterns, url
from apps import views
urlpatterns = patterns(
'',
url(r'^$', views.ListApp.as_view(), name='list-app'),
url(r'^create/$', views.CreateApp.as_view(), name='create-app'),
url(r'^run/$', views.Run.as_view(), name='run'),
url(r'^(?P<app_name>[\w-]+)/$',
views.AppDetail.as_view(), name='detail-app'),
url(r'^(?P<name>[\w-]+)/remove/$',
views.RemoveApp.as_view(), name='remove_app'),
url(r'^(?P<app_name>[\w-]+)/log/$', views.AppLog.as_view(),
name='app_log'),
url(r'^(?P<app_name>[\w-]+)/env/$', views.AppEnv.as_view(),
name='get-env'),
url(r'^(?P<app_name>[\w-]+)/teams/$',
views.AppTeams.as_view(), name='app-teams'),
url(r'^(?P<app_name>[\w-]+)/team/add/$',
views.AppAddTeam.as_view(), name='app-add-team'),
url(r'^(?P<app_name>[\w-]+)/units/$',
views.ChangeUnit.as_view(), name='change-units'),
)
| [
"[email protected]"
] | |
d6461e3ed1fa74dfbabee2a0c0d5db2f1b055f26 | c9287937c4d7900d311640a2b16c08c42eedfe58 | /tensorflow/python/distribute/mirrored_function_strategy.py | bbe52984d1eff41f9c4d304dfe927d7e70cfaddd | [
"Apache-2.0"
] | permissive | Purplme/tensorflow | e868e9bf59cc8eb680f1c35bf0b8615ec2b68c62 | d2d6c3f07a0b874e64a024c767deb7c9fb39b704 | refs/heads/master | 2022-11-23T23:38:00.243591 | 2020-07-16T06:20:19 | 2020-07-16T06:25:23 | 280,074,885 | 2 | 0 | Apache-2.0 | 2020-07-16T06:39:14 | 2020-07-16T06:39:13 | null | UTF-8 | Python | false | false | 7,594 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class MirroredFunctionStrategy implementing tf.distribute.Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.util import nest
_replica_index = threading.local()
_replica_id_key = object()
def _replica_id_tensor():
return ops.get_default_graph().capture_call_time_value(
closure=lambda: constant_op.constant(_replica_index.current),
spec=tensor_spec.TensorSpec((), dtypes.int32),
key=_replica_id_key)
def _in_run():
return (hasattr(_replica_index, "current") and
_replica_index.current is not None)
def _outside_run_graph():
if hasattr(_replica_index, "graph_outside_run"):
return _replica_index.graph_outside_run
else:
return None
class MirroredFunctionStrategy(distribute_lib.Strategy):
"""Mirrors vars to distribute across multiple devices and machines.
This strategy uses one replica per device and sync replication for its
multi-GPU version. Unlike `tf.distribute.MirroredStrategy`, it creates a
function for a single replica, and calls that function repeatedly instead of
recording the operations for each replica separately.
"""
def __init__(self, devices=None):
"""Create an instance of `MirroredFunctionStrategy`.
Args:
devices: a list of device strings. If `None`, all available GPUs are
used. If no GPUs are found, CPU is used.
"""
extended = MirroredFunctionExtended(self, devices)
super(MirroredFunctionStrategy, self).__init__(extended)
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
class MirroredFunctionExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of MirroredFunctionStrategy."""
def __init__(self, container_strategy, devices):
super(MirroredFunctionExtended, self).__init__(container_strategy)
if devices is None:
devices = mirrored_strategy.all_devices()
if not devices:
raise ValueError("Got an empty `devices` list. Please make sure the "
"`devices` you pass in is not empty.")
device_tuple = tuple(device_util.resolve(d) for d in devices)
assert len(set(device_tuple)) == len(device_tuple), (
"No duplicates allowed in `devices` argument: %s" % (devices,))
self._devices = device_tuple
self._retrace_functions_for_each_device = False
def _call_for_each_replica(self, fn, args, kwargs):
# For now, `fn` must be an @tf.function.
# TODO(josh11b): Relax this restriction? Main problem is if
# (a) executing eagerly, (b) `fn` not @tf.function, and
# (c) executed frequently.
assert isinstance(fn, def_function.Function)
if _outside_run_graph() is not None:
# Nested case, should just use outer function's context for things like
# the current replica index.
# TODO(josh11b): Test this case!
with MirroredFunctionReplicaContext(self._container_strategy()):
results = fn(*nest.map_structure(_unwrap_tensors, args),
**nest.map_structure(_unwrap_tensors, kwargs))
return nest.map_structure(_wrap_tensors, results)
_replica_index.graph_outside_run = ops.get_default_graph()
return_values = []
try:
with MirroredFunctionReplicaContext(self._container_strategy()):
for index, device in enumerate(self._devices):
_replica_index.current = index
with ops.device(device):
if context.executing_eagerly():
# NOTE: These functions need to execute concurrently if they
# use a collective op. This is a particular concern with eager
# execution.
with context.execution_mode(context.ASYNC):
return_values.append(
fn(*distribute_utils.select_replica(index, args),
**distribute_utils.select_replica(index, kwargs)))
else:
return_values.append(
fn(*distribute_utils.select_replica(index, args),
**distribute_utils.select_replica(index, kwargs)))
finally:
_replica_index.graph_outside_run = None
_replica_index.current = None
return distribute_utils.regroup(return_values)
def _local_results(self, val):
if isinstance(val, values.DistributedValues):
return val.values
return (val,)
class FnMergedValue(object):
def __init__(self, value):
self._value = value
def _wrap_tensors(maybe_tensor):
if isinstance(maybe_tensor, ops.Tensor): # TODO(josh11b): or composite tensor?
return FnMergedValue(maybe_tensor)
return maybe_tensor
def _unwrap_tensors(maybe_wrapped):
if isinstance(maybe_wrapped, FnMergedValue):
return maybe_wrapped._value # pylint: disable=protected-access
return maybe_wrapped
class MirroredFunctionReplicaContext(distribute_lib.ReplicaContext):
"""ReplicaContext used in MirroredFunctionStrategy."""
def __init__(self, strategy):
distribute_lib.ReplicaContext.__init__(self, strategy, None)
@property
def _replica_id_in_sync_group(self):
return _replica_id_tensor()
@_replica_id_in_sync_group.setter
def _replica_id_in_sync_group(self, value):
assert value is None
def _merge_call(self, merge_fn, args, kwargs):
# We wrap all args/kwargs with tensor values in a class that prevents them
# from being used by anything other than MirroredFunctionStrategy APIs that
# have been specifically written to recognize the wrapper and unwrap the
# values (such as extended.reduce_to/update).
# TODO(josh11b): Should these set expand_composites=True?
args = nest.map_structure(_wrap_tensors, args)
kwargs = nest.map_structure(_wrap_tensors, kwargs)
# pylint: disable=protected-access
distribution_strategy_context._push_per_thread_mode(
distribution_strategy_context._CrossReplicaThreadMode(self._strategy))
try:
results = merge_fn(self._strategy, *args, **kwargs)
finally:
distribution_strategy_context._pop_per_thread_mode()
# pylint: enable=protected-access
return nest.map_structure(_unwrap_tensors, results)
@property
def devices(self):
raise RuntimeError("Can't get the devices for the current replica.")
| [
"[email protected]"
] | |
d29f0e7212d54245a016a4c1738e9a56780420ae | f82c474bd7d5f60d976b14432a9f20d5e561ca4c | /low_level/tensors/E01_eval_tensors.py | 4c5de2fabfddd42b36d5709c324d61965ec7b948 | [] | no_license | GlassyWing/tf-learn | 1065551e27adf8a3f2b05e540e52d820e6b931d6 | 6733ac86cda430ecce13c8694c8bdfb79e8b70ad | refs/heads/master | 2020-03-27T05:52:51.466385 | 2018-09-03T12:58:54 | 2018-09-03T12:58:54 | 146,059,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | import tensorflow as tf
constant = tf.constant([1, 2, 3])
tensor = constant * constant
sess = tf.Session()
with sess.as_default():
print(tensor.eval())
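# NOTE: inside the `sess.as_default()` block, `tensor.eval()` is equivalent to
# `sess.run(tensor)`; for this graph both print the element-wise squares [1 4 9].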
| [
"[email protected]"
] | |
6734850359cb4971c5ffeac81b4d804ea15b9c6a | b8faf65ea23a2d8b119b9522a0aa182e9f51d8b1 | /vmraid/website/page_renderers/not_permitted_page.py | 7acaf0baaf57f3b618f32d9267e2b99aa555c797 | [
"MIT"
] | permissive | vmraid/vmraid | a52868c57b1999a8d648441eb9cd05815204345d | 3c2e2a952003ba7ea2cf13673b9e79e127f4166e | refs/heads/main | 2022-07-29T18:59:28.585133 | 2022-04-22T08:02:52 | 2022-04-22T08:02:52 | 372,473,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | import vmraid
from vmraid import _
from vmraid.utils import cstr
from vmraid.website.page_renderers.template_page import TemplatePage
class NotPermittedPage(TemplatePage):
def __init__(self, path=None, http_status_code=None, exception=""):
vmraid.local.message = cstr(exception)
super().__init__(path=path, http_status_code=http_status_code)
self.http_status_code = 403
def can_render(self):
return True
def render(self):
vmraid.local.message_title = _("Not Permitted")
vmraid.local.response["context"] = dict(
indicator_color="red", primary_action="/login", primary_label=_("Login"), fullpage=True
)
self.set_standard_path("message")
return super().render()
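# NOTE: render() does not use a dedicated template -- it fills the response
# context (red indicator, "Login" primary action) and reuses the standard
# "message" page via set_standard_path() before delegating to TemplatePage.render().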
| [
"[email protected]"
] | |
f1efb7f4fea5ef7e9421f004aabccd95b303d845 | b345171a3968240caf135b8b9f2780324319cb22 | /__init__.py | 98b9499ad471ceefd2c1cd32ad977467eaff0937 | [] | no_license | pytsite/plugin-auth_google | ecd8bb9b8a2d59ed1fe1eb3515d2079b4359f03b | f22c90ac560d25c839db9b94cee6339e8681f299 | refs/heads/master | 2020-06-18T21:43:35.932709 | 2019-07-12T09:41:45 | 2019-07-12T09:41:45 | 74,938,727 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | """PytSite Google Authentication Driver Plugin
"""
__author__ = 'Oleksandr Shepetko'
__email__ = '[email protected]'
__license__ = 'MIT'
# Public API
from . import _error as error
from ._api import get_client_id, get_authorization_url, get_client_secret, get_user_credentials, create_oauth2_flow
def plugin_load_wsgi():
from plugins import auth
from . import _driver
auth.register_auth_driver(_driver.Auth())
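# NOTE: the driver import happens lazily inside plugin_load_wsgi(), so the
# Google auth driver is only registered when the WSGI part of the plugin is
# loaded.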
| [
"[email protected]"
] | |
211e94253365bf19b8a5e01da1a514175bac390e | 4df948c31bde1b49c110820ecf8a38f949a78f62 | /vta/python/vta/pkg_config.py | 30b4808f5e2d8976d70677e197960aa94182db4d | [
"Apache-2.0"
] | permissive | jroesch/tvm | 40b4b8707177e3354c264ce31092721930ced376 | c2b36154778503a509a70a3b5309b201969eccab | refs/heads/master | 2021-12-19T03:38:13.732405 | 2018-10-22T16:31:59 | 2018-10-22T16:31:59 | 135,759,537 | 4 | 7 | Apache-2.0 | 2021-06-17T07:22:42 | 2018-06-01T20:15:33 | C++ | UTF-8 | Python | false | false | 2,677 | py | """VTA Package configuration module
This module is dependency free and can be used to configure package.
"""
from __future__ import absolute_import as _abs
import json
import glob
class PkgConfig(object):
"""Simple package config tool for VTA.
This is used to provide runtime specific configurations.
Parameters
----------
cfg : dict
The config dictionary
proj_root : str
Path to the project root
"""
cfg_keys = [
"TARGET",
"HW_FREQ",
"HW_CLK_TARGET",
"HW_VER",
"LOG_INP_WIDTH",
"LOG_WGT_WIDTH",
"LOG_ACC_WIDTH",
"LOG_OUT_WIDTH",
"LOG_BATCH",
"LOG_BLOCK_IN",
"LOG_BLOCK_OUT",
"LOG_UOP_BUFF_SIZE",
"LOG_INP_BUFF_SIZE",
"LOG_WGT_BUFF_SIZE",
"LOG_ACC_BUFF_SIZE",
]
def __init__(self, cfg, proj_root):
# include path
self.include_path = [
"-I%s/include" % proj_root,
"-I%s/vta/include" % proj_root,
"-I%s/3rdparty/dlpack/include" % proj_root,
"-I%s/3rdparty/dmlc-core/include" % proj_root
]
# List of source files that can be used to build a standalone library.
self.lib_source = []
self.lib_source += glob.glob("%s/vta/src/*.cc" % proj_root)
self.lib_source += glob.glob("%s/vta/src/%s/*.cc" % (proj_root, cfg["TARGET"]))
# macro keys
self.macro_defs = []
self.cfg_dict = {}
for key in self.cfg_keys:
self.macro_defs.append("-DVTA_%s=%s" % (key, str(cfg[key])))
self.cfg_dict[key] = cfg[key]
self.target = cfg["TARGET"]
if self.target == "pynq":
self.ldflags = [
"-L/usr/lib",
"-lsds_lib",
"-L/opt/python3.6/lib/python3.6/site-packages/pynq/drivers/",
"-L/opt/python3.6/lib/python3.6/site-packages/pynq/lib/",
"-l:libdma.so"]
else:
self.ldflags = []
@property
def cflags(self):
return self.include_path + self.macro_defs
@property
def cfg_json(self):
return json.dumps(self.cfg_dict, indent=2)
def same_config(self, cfg):
"""Compare if cfg is same as current config.
Parameters
----------
cfg : dict
The configuration dictionary to compare against.
Returns
-------
equal : bool
Whether the configuration is the same.
"""
for k, v in self.cfg_dict.items():
if k not in cfg:
return False
if cfg[k] != v:
return False
return True
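# Illustrative usage sketch (the cfg values are placeholders; a real cfg dict
# must contain every key listed in PkgConfig.cfg_keys):
#
#   cfg = {"TARGET": "sim", "HW_FREQ": 100, ...}   # plus the remaining cfg_keys
#   pkg = PkgConfig(cfg, proj_root="/path/to/project")
#   print(pkg.cflags)    # include paths + -DVTA_* macro definitions
#   print(pkg.cfg_json)  # the same configuration serialized as JSON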
| [
"[email protected]"
] | |
312bb054808e401ffd15a254249a682d033752f3 | c8077a0a60f0d76d12e409f751884e52ae361355 | /lib/axis/tb/test_axis_adapter_64_8.py | cba5c29b70dffe723dc804f57736f9fa1eb9eae3 | [
"MIT"
] | permissive | crasyrobot/verilog-ethernet | 4977d40b6d9eaa863fd81893e591eed91d3579ba | 2c5679ff6a7886979c433ed3bdd140431c5e3179 | refs/heads/master | 2020-04-02T22:10:02.552599 | 2018-10-24T17:59:02 | 2018-10-24T17:59:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,203 | py | #!/usr/bin/env python
"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
module = 'axis_adapter'
testbench = 'test_%s_64_8' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
INPUT_DATA_WIDTH = 64
INPUT_KEEP_ENABLE = (INPUT_DATA_WIDTH>8)
INPUT_KEEP_WIDTH = (INPUT_DATA_WIDTH/8)
OUTPUT_DATA_WIDTH = 8
OUTPUT_KEEP_ENABLE = (OUTPUT_DATA_WIDTH>8)
OUTPUT_KEEP_WIDTH = (OUTPUT_DATA_WIDTH/8)
ID_ENABLE = 1
ID_WIDTH = 8
DEST_ENABLE = 1
DEST_WIDTH = 8
USER_ENABLE = 1
USER_WIDTH = 1
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_axis_tdata = Signal(intbv(0)[INPUT_DATA_WIDTH:])
input_axis_tkeep = Signal(intbv(1)[INPUT_KEEP_WIDTH:])
input_axis_tvalid = Signal(bool(0))
input_axis_tlast = Signal(bool(0))
input_axis_tid = Signal(intbv(0)[ID_WIDTH:])
input_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
input_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
output_axis_tready = Signal(bool(0))
# Outputs
input_axis_tready = Signal(bool(0))
output_axis_tdata = Signal(intbv(0)[OUTPUT_DATA_WIDTH:])
output_axis_tkeep = Signal(intbv(1)[OUTPUT_KEEP_WIDTH:])
output_axis_tvalid = Signal(bool(0))
output_axis_tlast = Signal(bool(0))
output_axis_tid = Signal(intbv(0)[ID_WIDTH:])
output_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
output_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
# sources and sinks
source_pause = Signal(bool(0))
sink_pause = Signal(bool(0))
source = axis_ep.AXIStreamSource()
source_logic = source.create_logic(
clk,
rst,
tdata=input_axis_tdata,
tkeep=input_axis_tkeep,
tvalid=input_axis_tvalid,
tready=input_axis_tready,
tlast=input_axis_tlast,
tid=input_axis_tid,
tdest=input_axis_tdest,
tuser=input_axis_tuser,
pause=source_pause,
name='source'
)
sink = axis_ep.AXIStreamSink()
sink_logic = sink.create_logic(
clk,
rst,
tdata=output_axis_tdata,
tkeep=output_axis_tkeep,
tvalid=output_axis_tvalid,
tready=output_axis_tready,
tlast=output_axis_tlast,
tid=output_axis_tid,
tdest=output_axis_tdest,
tuser=output_axis_tuser,
pause=sink_pause,
name='sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
input_axis_tdata=input_axis_tdata,
input_axis_tkeep=input_axis_tkeep,
input_axis_tvalid=input_axis_tvalid,
input_axis_tready=input_axis_tready,
input_axis_tlast=input_axis_tlast,
input_axis_tid=input_axis_tid,
input_axis_tdest=input_axis_tdest,
input_axis_tuser=input_axis_tuser,
output_axis_tdata=output_axis_tdata,
output_axis_tkeep=output_axis_tkeep,
output_axis_tvalid=output_axis_tvalid,
output_axis_tready=output_axis_tready,
output_axis_tlast=output_axis_tlast,
output_axis_tid=output_axis_tid,
output_axis_tdest=output_axis_tdest,
output_axis_tuser=output_axis_tuser
)
@always(delay(4))
def clkgen():
clk.next = not clk
def wait_normal():
while input_axis_tvalid or output_axis_tvalid:
yield clk.posedge
def wait_pause_source():
while input_axis_tvalid or output_axis_tvalid:
source_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
source_pause.next = False
yield clk.posedge
def wait_pause_sink():
while input_axis_tvalid or output_axis_tvalid:
sink_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_pause.next = False
yield clk.posedge
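# NOTE: the three wait helpers exercise different backpressure conditions:
# wait_normal simply waits for the frame to drain, while wait_pause_source and
# wait_pause_sink toggle the source/sink pause signals every few clock cycles
# so stalls on both sides of the adapter are covered as well.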
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
for payload_len in range(1,18):
yield clk.posedge
print("test 1: test packet, length %d" % payload_len)
current_test.next = 1
test_frame = axis_ep.AXIStreamFrame(
b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(payload_len)),
id=1,
dest=1,
)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 2: back-to-back packets, length %d" % payload_len)
current_test.next = 2
test_frame1 = axis_ep.AXIStreamFrame(
b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(payload_len)),
id=2,
dest=1,
)
test_frame2 = axis_ep.AXIStreamFrame(
b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(payload_len)),
id=2,
dest=2,
)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 3: tuser assert, length %d" % payload_len)
current_test.next = 3
test_frame1 = axis_ep.AXIStreamFrame(
b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(payload_len)),
id=3,
dest=1,
last_cycle_user=1
)
test_frame2 = axis_ep.AXIStreamFrame(
b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(payload_len)),
id=3,
dest=2,
)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame1
assert rx_frame.last_cycle_user
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
assert sink.empty()
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
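# NOTE: running this file (directly or through test_bench()) shells out to
# Icarus Verilog: `iverilog` compiles the RTL plus the testbench wrapper and
# `vvp -m myhdl` runs the co-simulation, so both tools and the MyHDL VPI module
# must be available on the machine executing the test.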
| [
"[email protected]"
] | |
47ce00883a6c73232c93db6f262fb2ce48452c9c | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_private_endpoint_connections_operations.py | 9287d43a0a94aab6890f89e32398b92c812f7f9f | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 27,222 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_endpoint_connections_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_server_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.sql.aio.SqlManagementClient`'s
:attr:`private_endpoint_connections` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> _models.PrivateEndpointConnection:
"""Gets a private endpoint connection.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection. Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.PrivateEndpointConnection
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
server_name=server_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
async def _create_or_update_initial(
self,
resource_group_name: str,
server_name: str,
private_endpoint_connection_name: str,
parameters: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> Optional[_models.PrivateEndpointConnection]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.PrivateEndpointConnection]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "PrivateEndpointConnection")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
server_name=server_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
server_name: str,
private_endpoint_connection_name: str,
parameters: _models.PrivateEndpointConnection,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
"""Approve or reject a private endpoint connection with a given name.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param private_endpoint_connection_name: Required.
:type private_endpoint_connection_name: str
:param parameters: Required.
:type parameters: ~azure.mgmt.sql.models.PrivateEndpointConnection
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.sql.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
server_name: str,
private_endpoint_connection_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
"""Approve or reject a private endpoint connection with a given name.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param private_endpoint_connection_name: Required.
:type private_endpoint_connection_name: str
:param parameters: Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.sql.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
server_name: str,
private_endpoint_connection_name: str,
parameters: Union[_models.PrivateEndpointConnection, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.PrivateEndpointConnection]:
"""Approve or reject a private endpoint connection with a given name.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param private_endpoint_connection_name: Required.
:type private_endpoint_connection_name: str
:param parameters: Is either a PrivateEndpointConnection type or a IO type. Required.
:type parameters: ~azure.mgmt.sql.models.PrivateEndpointConnection or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.sql.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
private_endpoint_connection_name=private_endpoint_connection_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
server_name=server_name,
private_endpoint_connection_name=private_endpoint_connection_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a private endpoint connection with a given name.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param private_endpoint_connection_name: Required.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
server_name=server_name,
private_endpoint_connection_name=private_endpoint_connection_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"
}
@distributed_trace
def list_by_server(
self, resource_group_name: str, server_name: str, **kwargs: Any
) -> AsyncIterable["_models.PrivateEndpointConnection"]:
"""Gets all private endpoint connections on a server.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2020-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
)
cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_server.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_server.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/privateEndpointConnections"
}
| [
"[email protected]"
] | |
c28d9ea03c3a4c015e6ec2a8902dee29434d1c66 | 1ee3dc4fa096d12e409af3a298ba01f5558c62b5 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/bgpipv4evpnvpws.py | c61eb826bb2e9aed3e33f4c3c3991b22fee6679a | [
"MIT"
] | permissive | parthpower/ixnetwork_restpy | 321e64a87be0a4d990276d26f43aca9cf4d43cc9 | 73fa29796a5178c707ee4e21d90ff4dad31cc1ed | refs/heads/master | 2020-07-04T13:34:42.162458 | 2019-08-13T20:33:17 | 2019-08-13T20:33:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,071 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class BgpIPv4EvpnVpws(Base):
"""The BgpIPv4EvpnVpws class encapsulates a user managed bgpIPv4EvpnVpws node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the BgpIPv4EvpnVpws property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
The internal properties list can be managed by the user by using the add and remove methods.
"""
_SDM_NAME = 'bgpIPv4EvpnVpws'
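# Illustrative usage sketch (the parent object and multivalue helper shown are
# assumptions; only the add/find access pattern comes from the class docstring):
#
#   vpws = parent_bgp.BgpIPv4EvpnVpws.add(Name='evpn-vpws-1', Multiplier=1)
#   vpws.AdRouteLabel.Single(16)
#   existing = parent_bgp.BgpIPv4EvpnVpws.find(Name='evpn-vpws-1')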
def __init__(self, parent):
super(BgpIPv4EvpnVpws, self).__init__(parent)
@property
def BgpAsPathSegmentList(self):
"""An instance of the BgpAsPathSegmentList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpaspathsegmentlist.BgpAsPathSegmentList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpaspathsegmentlist import BgpAsPathSegmentList
return BgpAsPathSegmentList(self)
@property
def BgpClusterIdList(self):
"""An instance of the BgpClusterIdList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpclusteridlist.BgpClusterIdList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpclusteridlist import BgpClusterIdList
return BgpClusterIdList(self)
@property
def BgpCommunitiesList(self):
"""An instance of the BgpCommunitiesList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpcommunitieslist.BgpCommunitiesList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpcommunitieslist import BgpCommunitiesList
return BgpCommunitiesList(self)
@property
def BgpExportRouteTargetList(self):
"""An instance of the BgpExportRouteTargetList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpexportroutetargetlist.BgpExportRouteTargetList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpexportroutetargetlist import BgpExportRouteTargetList
return BgpExportRouteTargetList(self)
@property
def BgpExtendedCommunitiesList(self):
"""An instance of the BgpExtendedCommunitiesList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpextendedcommunitieslist.BgpExtendedCommunitiesList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpextendedcommunitieslist import BgpExtendedCommunitiesList
return BgpExtendedCommunitiesList(self)
@property
def BgpImportRouteTargetList(self):
"""An instance of the BgpImportRouteTargetList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpimportroutetargetlist.BgpImportRouteTargetList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpimportroutetargetlist import BgpImportRouteTargetList
return BgpImportRouteTargetList(self)
@property
def BgpL3VNIExportRouteTargetList(self):
"""An instance of the BgpL3VNIExportRouteTargetList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpl3vniexportroutetargetlist.BgpL3VNIExportRouteTargetList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpl3vniexportroutetargetlist import BgpL3VNIExportRouteTargetList
return BgpL3VNIExportRouteTargetList(self)
@property
def BgpL3VNIImportRouteTargetList(self):
"""An instance of the BgpL3VNIImportRouteTargetList class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpl3vniimportroutetargetlist.BgpL3VNIImportRouteTargetList)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.bgpl3vniimportroutetargetlist import BgpL3VNIImportRouteTargetList
return BgpL3VNIImportRouteTargetList(self)
@property
def BroadcastDomainV4Vpws(self):
"""An instance of the BroadcastDomainV4Vpws class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.broadcastdomainv4vpws.BroadcastDomainV4Vpws)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.broadcastdomainv4vpws import BroadcastDomainV4Vpws
return BroadcastDomainV4Vpws(self)._select()
@property
def Connector(self):
"""An instance of the Connector class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector.Connector)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector import Connector
return Connector(self)
@property
def Tag(self):
"""An instance of the Tag class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag.Tag)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag import Tag
return Tag(self)
@property
def Active(self):
"""Activate/Deactivate Configuration
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('active')
@property
def AdRouteLabel(self):
"""AD Route Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('adRouteLabel')
@property
def AdvertiseL3vniSeparately(self):
"""Advertise L3 Route Separately
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('advertiseL3vniSeparately')
@property
def AggregatorAs(self):
"""Aggregator AS
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('aggregatorAs')
@property
def AggregatorId(self):
"""Aggregator ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('aggregatorId')
@property
def AsSetMode(self):
"""AS# Set Mode
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('asSetMode')
@property
def AutoConfigOriginatingRouterIp(self):
"""If set to true, this field enables option to configure Originating router IP address automatically from BGP Router's local IP
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('autoConfigOriginatingRouterIp')
@property
def AutoConfigPMSITunnelId(self):
"""Auto Configure PMSI Tunnel ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('autoConfigPMSITunnelId')
@property
def AutoConfigureRdIpAddress(self):
"""Auto-Configure RD IP Addresses
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('autoConfigureRdIpAddress')
@property
def BMacFirstLabel(self):
"""B MAC First Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('bMacFirstLabel')
@property
def BMacSecondLabel(self):
"""B MAC Second Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('bMacSecondLabel')
@property
def ConnectedVia(self):
"""DEPRECATED List of layers this layer used to connect to the wire
Returns:
list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])
"""
return self._get_attribute('connectedVia')
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute('connectedVia', value)
@property
def Count(self):
"""DEPRECATED Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
Returns:
number
"""
return self._get_attribute('count')
@property
def DescriptiveName(self):
"""DEPRECATED Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offers more context
Returns:
str
"""
return self._get_attribute('descriptiveName')
@property
def EnableAggregatorId(self):
"""DEPRECATED Enable Aggregator ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableAggregatorId')
@property
def EnableAsPathSegments(self):
"""DEPRECATED Enable AS Path Segments
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableAsPathSegments')
@property
def EnableAtomicAggregate(self):
"""DEPRECATED Enable Atomic Aggregate
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableAtomicAggregate')
@property
def EnableBMacSecondLabel(self):
"""DEPRECATED Enable B MAC Second Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableBMacSecondLabel')
@property
def EnableCluster(self):
"""DEPRECATED Enable Cluster
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableCluster')
@property
def EnableCommunity(self):
"""DEPRECATED Enable Community
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableCommunity')
@property
def EnableExtendedCommunity(self):
"""DEPRECATED Enable Extended Community
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableExtendedCommunity')
@property
def EnableL3TargetOnlyForRouteType5(self):
"""DEPRECATED Enable L3 Target only for Route Type 5
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableL3TargetOnlyForRouteType5')
@property
def EnableL3vniTargetList(self):
"""DEPRECATED Enable L3 Target List
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableL3vniTargetList')
@property
def EnableLocalPreference(self):
"""DEPRECATED Enable Local Preference
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableLocalPreference')
@property
def EnableMultiExitDiscriminator(self):
"""DEPRECATED Enable Multi Exit
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableMultiExitDiscriminator')
@property
def EnableNextHop(self):
"""DEPRECATED Enable Next Hop
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableNextHop')
@property
def EnableOrigin(self):
"""DEPRECATED Enable Origin
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableOrigin')
@property
def EnableOriginatorId(self):
"""DEPRECATED Enable Originator ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('enableOriginatorId')
@property
def Errors(self):
"""DEPRECATED A list of errors that have occurred
Returns:
list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))
"""
return self._get_attribute('errors')
@property
def EsiType(self):
"""DEPRECATED ESI Type
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('esiType')
@property
def EsiValue(self):
"""DEPRECATED ESI Value
Returns:
list(str)
"""
return self._get_attribute('esiValue')
@property
def ImportRtListSameAsExportRtList(self):
"""DEPRECATED Import RT List Same As Export RT List
Returns:
bool
"""
return self._get_attribute('importRtListSameAsExportRtList')
@ImportRtListSameAsExportRtList.setter
def ImportRtListSameAsExportRtList(self, value):
self._set_attribute('importRtListSameAsExportRtList', value)
@property
def IncludePmsiTunnelAttribute(self):
"""DEPRECATED Include PMSI Tunnel Attribute
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('includePmsiTunnelAttribute')
@property
def Ipv4NextHop(self):
"""DEPRECATED IPv4 Next Hop
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('ipv4NextHop')
@property
def Ipv6NextHop(self):
"""DEPRECATED IPv6 Next Hop
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('ipv6NextHop')
@property
def L3vniImportRtListSameAsL3vniExportRtList(self):
"""DEPRECATED L3 Import RT List Same As L3 Export RT List
Returns:
bool
"""
return self._get_attribute('l3vniImportRtListSameAsL3vniExportRtList')
@L3vniImportRtListSameAsL3vniExportRtList.setter
def L3vniImportRtListSameAsL3vniExportRtList(self, value):
self._set_attribute('l3vniImportRtListSameAsL3vniExportRtList', value)
@property
def LocalPreference(self):
"""DEPRECATED Local Preference
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('localPreference')
@property
def MultiExitDiscriminator(self):
"""DEPRECATED Multi Exit
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('multiExitDiscriminator')
@property
def MulticastTunnelType(self):
"""DEPRECATED Multicast Tunnel Type
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('multicastTunnelType')
@property
def Multiplier(self):
"""DEPRECATED Number of layer instances per parent instance (multiplier)
Returns:
number
"""
return self._get_attribute('multiplier')
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute('multiplier', value)
@property
def Name(self):
"""DEPRECATED Name of NGPF element, guaranteed to be unique in Scenario
Returns:
str
"""
return self._get_attribute('name')
@Name.setter
def Name(self, value):
self._set_attribute('name', value)
@property
def NoOfASPathSegmentsPerRouteRange(self):
"""DEPRECATED Number Of AS Path Segments Per Route Range
Returns:
number
"""
return self._get_attribute('noOfASPathSegmentsPerRouteRange')
@NoOfASPathSegmentsPerRouteRange.setter
def NoOfASPathSegmentsPerRouteRange(self, value):
self._set_attribute('noOfASPathSegmentsPerRouteRange', value)
@property
def NoOfClusters(self):
"""DEPRECATED Number of Clusters
Returns:
number
"""
return self._get_attribute('noOfClusters')
@NoOfClusters.setter
def NoOfClusters(self, value):
self._set_attribute('noOfClusters', value)
@property
def NoOfCommunities(self):
"""DEPRECATED Number of Communities
Returns:
number
"""
return self._get_attribute('noOfCommunities')
@NoOfCommunities.setter
def NoOfCommunities(self, value):
self._set_attribute('noOfCommunities', value)
@property
def NoOfExtendedCommunity(self):
"""DEPRECATED Number of Extended Communities
Returns:
number
"""
return self._get_attribute('noOfExtendedCommunity')
@NoOfExtendedCommunity.setter
def NoOfExtendedCommunity(self, value):
self._set_attribute('noOfExtendedCommunity', value)
@property
def NumBroadcastDomainV4(self):
"""DEPRECATED The number of broadcast domain to be configured under EVI
Returns:
number
"""
return self._get_attribute('numBroadcastDomainV4')
@NumBroadcastDomainV4.setter
def NumBroadcastDomainV4(self, value):
self._set_attribute('numBroadcastDomainV4', value)
@property
def NumRtInExportRouteTargetList(self):
"""DEPRECATED Number of RTs in Export Route Target List(multiplier)
Returns:
number
"""
return self._get_attribute('numRtInExportRouteTargetList')
@NumRtInExportRouteTargetList.setter
def NumRtInExportRouteTargetList(self, value):
self._set_attribute('numRtInExportRouteTargetList', value)
@property
def NumRtInImportRouteTargetList(self):
"""DEPRECATED Number of RTs in Import Route Target List(multiplier)
Returns:
number
"""
return self._get_attribute('numRtInImportRouteTargetList')
@NumRtInImportRouteTargetList.setter
def NumRtInImportRouteTargetList(self, value):
self._set_attribute('numRtInImportRouteTargetList', value)
@property
def NumRtInL3vniExportRouteTargetList(self):
"""DEPRECATED Number of RTs in L3 Export Route Target List(multiplier)
Returns:
number
"""
return self._get_attribute('numRtInL3vniExportRouteTargetList')
@NumRtInL3vniExportRouteTargetList.setter
def NumRtInL3vniExportRouteTargetList(self, value):
self._set_attribute('numRtInL3vniExportRouteTargetList', value)
@property
def NumRtInL3vniImportRouteTargetList(self):
"""DEPRECATED Number of RTs in L3 Import Route Target List(multiplier)
Returns:
number
"""
return self._get_attribute('numRtInL3vniImportRouteTargetList')
@NumRtInL3vniImportRouteTargetList.setter
def NumRtInL3vniImportRouteTargetList(self, value):
self._set_attribute('numRtInL3vniImportRouteTargetList', value)
@property
def Origin(self):
"""DEPRECATED Origin
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('origin')
@property
def OriginatingRouterIpv4(self):
"""DEPRECATED Configures Originating Router IP address in IPv4 Address format
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('originatingRouterIpv4')
@property
def OriginatingRouterIpv6(self):
"""DEPRECATED Configures Originating Router IP address in IPv6 Address format
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('originatingRouterIpv6')
@property
def OriginatorId(self):
"""DEPRECATED Originator ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('originatorId')
@property
def OverridePeerAsSetMode(self):
"""DEPRECATED Override Peer AS# Set Mode
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('overridePeerAsSetMode')
@property
def PmsiTunnelIDv4(self):
"""DEPRECATED PMSI Tunnel ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('pmsiTunnelIDv4')
@property
def PmsiTunnelIDv6(self):
"""DEPRECATED PMSI Tunnel ID
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('pmsiTunnelIDv6')
@property
def RdEvi(self):
"""DEPRECATED RD EVI
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('rdEvi')
@property
def RdIpAddress(self):
"""DEPRECATED RD IP Addresses
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('rdIpAddress')
@property
def SessionStatus(self):
"""DEPRECATED Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
Returns:
list(str[down|notStarted|up])
"""
return self._get_attribute('sessionStatus')
@property
def SetNextHop(self):
"""DEPRECATED Set Next Hop
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('setNextHop')
@property
def SetNextHopIpType(self):
"""DEPRECATED Set Next Hop IP Type
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('setNextHopIpType')
@property
def StackedLayers(self):
"""DEPRECATED List of secondary (many to one) child layer protocols
Returns:
list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])
"""
return self._get_attribute('stackedLayers')
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute('stackedLayers', value)
@property
def StateCounts(self):
"""DEPRECATED A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
Returns:
dict(total:number,notStarted:number,down:number,up:number)
"""
return self._get_attribute('stateCounts')
@property
def Status(self):
"""DEPRECATED Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns:
str(configured|error|mixed|notStarted|started|starting|stopping)
"""
return self._get_attribute('status')
@property
def UpstreamDownstreamAssignedMplsLabel(self):
"""DEPRECATED Upstream/Downstream Assigned MPLS Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('upstreamDownstreamAssignedMplsLabel')
@property
def UseIpv4MappedIpv6Address(self):
"""DEPRECATED Use IPv4 Mapped IPv6 Address
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('useIpv4MappedIpv6Address')
@property
def UseUpstreamDownstreamAssignedMplsLabel(self):
"""DEPRECATED Use Upstream/Downstream Assigned MPLS Label
Returns:
obj(ixnetwork_restpy.multivalue.Multivalue)
"""
return self._get_attribute('useUpstreamDownstreamAssignedMplsLabel')
def update(self, ConnectedVia=None, ImportRtListSameAsExportRtList=None, L3vniImportRtListSameAsL3vniExportRtList=None, Multiplier=None, Name=None, NoOfASPathSegmentsPerRouteRange=None, NoOfClusters=None, NoOfCommunities=None, NoOfExtendedCommunity=None, NumBroadcastDomainV4=None, NumRtInExportRouteTargetList=None, NumRtInImportRouteTargetList=None, NumRtInL3vniExportRouteTargetList=None, NumRtInL3vniImportRouteTargetList=None, StackedLayers=None):
"""Updates a child instance of bgpIPv4EvpnVpws on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has the associated documentation that details the possible values for those named parameters.
Args:
ConnectedVia (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of layers this layer used to connect to the wire
ImportRtListSameAsExportRtList (bool): Import RT List Same As Export RT List
L3vniImportRtListSameAsL3vniExportRtList (bool): L3 Import RT List Same As L3 Export RT List
Multiplier (number): Number of layer instances per parent instance (multiplier)
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
NoOfASPathSegmentsPerRouteRange (number): Number Of AS Path Segments Per Route Range
NoOfClusters (number): Number of Clusters
NoOfCommunities (number): Number of Communities
NoOfExtendedCommunity (number): Number of Extended Communities
            NumBroadcastDomainV4 (number): The number of broadcast domains to be configured under EVI
NumRtInExportRouteTargetList (number): Number of RTs in Export Route Target List(multiplier)
NumRtInImportRouteTargetList (number): Number of RTs in Import Route Target List(multiplier)
NumRtInL3vniExportRouteTargetList (number): Number of RTs in L3 Export Route Target List(multiplier)
NumRtInL3vniImportRouteTargetList (number): Number of RTs in L3 Import Route Target List(multiplier)
StackedLayers (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of secondary (many to one) child layer protocols
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def add(self, ConnectedVia=None, ImportRtListSameAsExportRtList=None, L3vniImportRtListSameAsL3vniExportRtList=None, Multiplier=None, Name=None, NoOfASPathSegmentsPerRouteRange=None, NoOfClusters=None, NoOfCommunities=None, NoOfExtendedCommunity=None, NumBroadcastDomainV4=None, NumRtInExportRouteTargetList=None, NumRtInImportRouteTargetList=None, NumRtInL3vniExportRouteTargetList=None, NumRtInL3vniImportRouteTargetList=None, StackedLayers=None):
"""Adds a new bgpIPv4EvpnVpws node on the server and retrieves it in this instance.
Args:
ConnectedVia (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of layers this layer used to connect to the wire
ImportRtListSameAsExportRtList (bool): Import RT List Same As Export RT List
L3vniImportRtListSameAsL3vniExportRtList (bool): L3 Import RT List Same As L3 Export RT List
Multiplier (number): Number of layer instances per parent instance (multiplier)
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
NoOfASPathSegmentsPerRouteRange (number): Number Of AS Path Segments Per Route Range
NoOfClusters (number): Number of Clusters
NoOfCommunities (number): Number of Communities
NoOfExtendedCommunity (number): Number of Extended Communities
            NumBroadcastDomainV4 (number): The number of broadcast domains to be configured under EVI
NumRtInExportRouteTargetList (number): Number of RTs in Export Route Target List(multiplier)
NumRtInImportRouteTargetList (number): Number of RTs in Import Route Target List(multiplier)
NumRtInL3vniExportRouteTargetList (number): Number of RTs in L3 Export Route Target List(multiplier)
NumRtInL3vniImportRouteTargetList (number): Number of RTs in L3 Import Route Target List(multiplier)
StackedLayers (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of secondary (many to one) child layer protocols
Returns:
self: This instance with all currently retrieved bgpIPv4EvpnVpws data using find and the newly added bgpIPv4EvpnVpws data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the bgpIPv4EvpnVpws data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, Errors=None, EsiValue=None, ImportRtListSameAsExportRtList=None, L3vniImportRtListSameAsL3vniExportRtList=None, Multiplier=None, Name=None, NoOfASPathSegmentsPerRouteRange=None, NoOfClusters=None, NoOfCommunities=None, NoOfExtendedCommunity=None, NumBroadcastDomainV4=None, NumRtInExportRouteTargetList=None, NumRtInImportRouteTargetList=None, NumRtInL3vniExportRouteTargetList=None, NumRtInL3vniImportRouteTargetList=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves bgpIPv4EvpnVpws data from the server.
All named parameters support regex and can be used to selectively retrieve bgpIPv4EvpnVpws data from the server.
By default the find method takes no parameters and will retrieve all bgpIPv4EvpnVpws data from the server.
Args:
ConnectedVia (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of layers this layer used to connect to the wire
Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
            DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context
Errors (list(dict(arg1:str[None|/api/v1/sessions/1/ixnetwork/?deepchild=*],arg2:list[str]))): A list of errors that have occurred
EsiValue (list(str)): ESI Value
ImportRtListSameAsExportRtList (bool): Import RT List Same As Export RT List
L3vniImportRtListSameAsL3vniExportRtList (bool): L3 Import RT List Same As L3 Export RT List
Multiplier (number): Number of layer instances per parent instance (multiplier)
Name (str): Name of NGPF element, guaranteed to be unique in Scenario
NoOfASPathSegmentsPerRouteRange (number): Number Of AS Path Segments Per Route Range
NoOfClusters (number): Number of Clusters
NoOfCommunities (number): Number of Communities
NoOfExtendedCommunity (number): Number of Extended Communities
            NumBroadcastDomainV4 (number): The number of broadcast domains to be configured under EVI
NumRtInExportRouteTargetList (number): Number of RTs in Export Route Target List(multiplier)
NumRtInImportRouteTargetList (number): Number of RTs in Import Route Target List(multiplier)
NumRtInL3vniExportRouteTargetList (number): Number of RTs in L3 Export Route Target List(multiplier)
NumRtInL3vniImportRouteTargetList (number): Number of RTs in L3 Import Route Target List(multiplier)
            SessionStatus (list(str[down|notStarted|up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.
StackedLayers (list(str[None|/api/v1/sessions/1/ixnetwork/topology?deepchild=*])): List of secondary (many to one) child layer protocols
StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
Status (str(configured|error|mixed|notStarted|started|starting|stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns:
self: This instance with matching bgpIPv4EvpnVpws data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of bgpIPv4EvpnVpws data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the bgpIPv4EvpnVpws data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None, AdRouteLabel=None, AdvertiseL3vniSeparately=None, AggregatorAs=None, AggregatorId=None, AsSetMode=None, AutoConfigOriginatingRouterIp=None, AutoConfigPMSITunnelId=None, AutoConfigureRdIpAddress=None, BMacFirstLabel=None, BMacSecondLabel=None, EnableAggregatorId=None, EnableAsPathSegments=None, EnableAtomicAggregate=None, EnableBMacSecondLabel=None, EnableCluster=None, EnableCommunity=None, EnableExtendedCommunity=None, EnableL3TargetOnlyForRouteType5=None, EnableL3vniTargetList=None, EnableLocalPreference=None, EnableMultiExitDiscriminator=None, EnableNextHop=None, EnableOrigin=None, EnableOriginatorId=None, EsiType=None, IncludePmsiTunnelAttribute=None, Ipv4NextHop=None, Ipv6NextHop=None, LocalPreference=None, MultiExitDiscriminator=None, MulticastTunnelType=None, Origin=None, OriginatingRouterIpv4=None, OriginatingRouterIpv6=None, OriginatorId=None, OverridePeerAsSetMode=None, PmsiTunnelIDv4=None, PmsiTunnelIDv6=None, RdEvi=None, RdIpAddress=None, SetNextHop=None, SetNextHopIpType=None, UpstreamDownstreamAssignedMplsLabel=None, UseIpv4MappedIpv6Address=None, UseUpstreamDownstreamAssignedMplsLabel=None):
"""Base class infrastructure that gets a list of bgpIPv4EvpnVpws device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args:
PortNames (str): optional regex of port names
Active (str): optional regex of active
AdRouteLabel (str): optional regex of adRouteLabel
AdvertiseL3vniSeparately (str): optional regex of advertiseL3vniSeparately
AggregatorAs (str): optional regex of aggregatorAs
AggregatorId (str): optional regex of aggregatorId
AsSetMode (str): optional regex of asSetMode
AutoConfigOriginatingRouterIp (str): optional regex of autoConfigOriginatingRouterIp
AutoConfigPMSITunnelId (str): optional regex of autoConfigPMSITunnelId
AutoConfigureRdIpAddress (str): optional regex of autoConfigureRdIpAddress
BMacFirstLabel (str): optional regex of bMacFirstLabel
BMacSecondLabel (str): optional regex of bMacSecondLabel
EnableAggregatorId (str): optional regex of enableAggregatorId
EnableAsPathSegments (str): optional regex of enableAsPathSegments
EnableAtomicAggregate (str): optional regex of enableAtomicAggregate
EnableBMacSecondLabel (str): optional regex of enableBMacSecondLabel
EnableCluster (str): optional regex of enableCluster
EnableCommunity (str): optional regex of enableCommunity
EnableExtendedCommunity (str): optional regex of enableExtendedCommunity
EnableL3TargetOnlyForRouteType5 (str): optional regex of enableL3TargetOnlyForRouteType5
EnableL3vniTargetList (str): optional regex of enableL3vniTargetList
EnableLocalPreference (str): optional regex of enableLocalPreference
EnableMultiExitDiscriminator (str): optional regex of enableMultiExitDiscriminator
EnableNextHop (str): optional regex of enableNextHop
EnableOrigin (str): optional regex of enableOrigin
EnableOriginatorId (str): optional regex of enableOriginatorId
EsiType (str): optional regex of esiType
IncludePmsiTunnelAttribute (str): optional regex of includePmsiTunnelAttribute
Ipv4NextHop (str): optional regex of ipv4NextHop
Ipv6NextHop (str): optional regex of ipv6NextHop
LocalPreference (str): optional regex of localPreference
MultiExitDiscriminator (str): optional regex of multiExitDiscriminator
MulticastTunnelType (str): optional regex of multicastTunnelType
Origin (str): optional regex of origin
OriginatingRouterIpv4 (str): optional regex of originatingRouterIpv4
OriginatingRouterIpv6 (str): optional regex of originatingRouterIpv6
OriginatorId (str): optional regex of originatorId
OverridePeerAsSetMode (str): optional regex of overridePeerAsSetMode
PmsiTunnelIDv4 (str): optional regex of pmsiTunnelIDv4
PmsiTunnelIDv6 (str): optional regex of pmsiTunnelIDv6
RdEvi (str): optional regex of rdEvi
RdIpAddress (str): optional regex of rdIpAddress
SetNextHop (str): optional regex of setNextHop
SetNextHopIpType (str): optional regex of setNextHopIpType
UpstreamDownstreamAssignedMplsLabel (str): optional regex of upstreamDownstreamAssignedMplsLabel
UseIpv4MappedIpv6Address (str): optional regex of useIpv4MappedIpv6Address
UseUpstreamDownstreamAssignedMplsLabel (str): optional regex of useUpstreamDownstreamAssignedMplsLabel
Returns:
list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def RestartDown(self, *args, **kwargs):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
restartDown()
restartDown(SessionIndices:list)
Args:
args[0] is SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
restartDown(SessionIndices:string)
Args:
args[0] is SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartDown', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Start selected protocols.
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
start()
start(SessionIndices:list)
Args:
args[0] is SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
start(SessionIndices:string)
Args:
args[0] is SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
"""Executes the stop operation on the server.
Stop selected protocols.
The IxNetwork modeling infrastructure allows for multiple method Signatures with the same name while python does not.
The following correlates the modeling Signatures to the python *args variable length list:
stop()
stop(SessionIndices:list)
Args:
args[0] is SessionIndices (list(number)): This parameter requires an array of session numbers 0 1 2 3
stop(SessionIndices:string)
Args:
args[0] is SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
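# --- Usage sketch (illustrative only) -----------------------------------------
# A hedged example of exercising the methods defined above (add, find, Start,
# Stop). The session/topology plumbing and the exact parent attribute chain are
# assumptions about the wider ixnetwork_restpy object model, not definitions
# from this file.
#
#     # assumption: `bgp_ipv4_peer` is an already-built NGPF BGP IPv4 peer node
#     evpn_vpws = bgp_ipv4_peer.BgpIPv4EvpnVpws.add(Multiplier=1, Name='evpn-vpws-1',
#                                                   NumBroadcastDomainV4=1)
#     evpn_vpws.Start()                      # start the protocol sessions
#     found = bgp_ipv4_peer.BgpIPv4EvpnVpws.find(Name='^evpn-vpws-1$')
#     print(found.SessionStatus)             # e.g. ['up'] once negotiation completes
#     found.Stop()
# -------------------------------------------------------------------------------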
| [
"[email protected]"
] | |
f4c54df91cedb4ab534312995cd85ac41bb8b565 | d4bcb9cc3b6aa9f690be59f630778d512882d34d | /ht/conv_jacket.pyi | 237e03450cf918d66fad5744e2f420949830a545 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | CalebBell/ht | ba31cd9a7a2b3cb83599770c81b343ea5c60fc23 | d9385d98311debcc47def7f5fc093f2e0152d059 | refs/heads/master | 2023-08-03T10:45:09.062053 | 2023-07-25T05:36:50 | 2023-07-25T05:36:50 | 48,963,057 | 154 | 38 | MIT | 2023-04-20T01:39:50 | 2016-01-03T22:33:12 | Python | UTF-8 | Python | false | false | 810 | pyi | # DO NOT EDIT - AUTOMATICALLY GENERATED BY tests/make_test_stubs.py!
from typing import List
from typing import Optional
def Lehrer(
m: float,
Dtank: float,
Djacket: float,
H: float,
Dinlet: float,
rho: float,
Cp: float,
k: float,
mu: float,
muw: Optional[float] = ...,
isobaric_expansion: Optional[float] = ...,
dT: Optional[float] = ...,
inlettype: str = ...,
inletlocation: str = ...
) -> float: ...
def Stein_Schmidt(
m: float,
Dtank: float,
Djacket: float,
H: float,
Dinlet: float,
rho: float,
Cp: float,
k: float,
mu: float,
muw: Optional[float] = ...,
rhow: Optional[float] = ...,
inlettype: str = ...,
inletlocation: str = ...,
roughness: float = ...
) -> float: ...
__all__: List[str] | [
"[email protected]"
] | |
551b5d163e6711d7d9c88891ba544fe08eab673e | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2864/60793/262899.py | 1bb97523a9632ac2e7d6fe558c7be342f39d36d9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | input()
ls = list(map(int, input().split()))
if ls == []:
print()
else:
print(ls) | [
"[email protected]"
] | |
7744da668bfa14a29636e0a5e6d816ccf32ea932 | ffc479dadf059388dad3a66c5f4662b113dc6285 | /basics/psdemospliut.py | aae2237e7becdf9c66bfabf64d9db3040c555452 | [] | no_license | ravijaya/oct15-2020 | fd87ee4f6aa7f0a63c77c8c470405eff479289b3 | 4fe4d4f2aac1f40349cec831c175652834b17b5d | refs/heads/main | 2022-12-29T09:45:08.837682 | 2020-10-15T12:31:09 | 2020-10-15T12:31:09 | 304,318,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | s = 'root:x:0:0:root:/root:/bin/bash'
items = s.split(':')
print(items)
print()
print(s.split(':')[0]) # indexing
print()
print(s.split(':')[1:]) # slicing
print()
# iterator
for item in s.split(':')[1:]: # iteration
print(item) | [
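# Expected output when run (a quick check of the split/indexing/slicing above):
#   ['root', 'x', '0', '0', 'root', '/root', '/bin/bash']
#   <blank line>
#   root
#   <blank line>
#   ['x', '0', '0', 'root', '/root', '/bin/bash']
#   <blank line>
#   x
#   0
#   0
#   root
#   /root
#   /bin/bash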
"[email protected]"
] | |
c015332c0c82523069c560e8844c293953c87e5d | c28a9cbbbb12c43342ca49263b1309acc0016879 | /rlzoo/algorithms/a3c/a3c.py | b7449ea32db9dc3fd1e2cbc726ab959c0223638f | [
"Apache-2.0"
] | permissive | mirkomorati/RLzoo | 894593051fda77f76828e9023dc3abe66ac09cbe | 260aba32e68c31e7d637a2a21e2906e45ac66b46 | refs/heads/master | 2022-04-15T20:53:21.454210 | 2020-04-15T08:36:58 | 2020-04-15T08:36:58 | 255,854,689 | 0 | 0 | Apache-2.0 | 2020-04-15T08:35:05 | 2020-04-15T08:35:05 | null | UTF-8 | Python | false | false | 11,523 | py | """
Asynchronous Advantage Actor Critic (A3C) with Continuous Action Space.
Actor Critic History
----------------------
A3C > DDPG (for continuous action space) > AC
Advantage
----------
Trains faster and is more stable than AC.
Disadvantage
-------------
Has bias.
Reference
----------
Original Paper: https://arxiv.org/pdf/1602.01783.pdf
MorvanZhou's tutorial: https://morvanzhou.github.io/tutorials/
MorvanZhou's code: https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/blob/master/experiments/Solve_BipedalWalker/A3C.py
Environment
-----------
BipedalWalker-v2 : https://gym.openai.com/envs/BipedalWalker-v2
Reward is given for moving forward, total 300+ points up to the far end.
If the robot falls, it gets -100. Applying motor torque costs a small amount of
points, more optimal agent will get better score. State consists of hull angle
speed, angular velocity, horizontal speed, vertical speed, position of joints
and joints angular speed, legs contact with ground, and 10 lidar rangefinder
measurements. There's no coordinates in the state vector.
Prerequisites
--------------
tensorflow 2.0.0a0
tensorflow-probability 0.6.0
tensorlayer 2.0.0
&&
pip install box2d box2d-kengz --user
"""
import argparse
import multiprocessing
import threading
import time
import numpy as np
import gym
import tensorflow as tf
import tensorflow_probability as tfp
import tensorlayer as tl
import copy
from tensorlayer.layers import DenseLayer, InputLayer
from tensorlayer.models import Model
from rlzoo.common.utils import *
from rlzoo.common.buffer import *
tfd = tfp.distributions
# tl.logging.set_verbosity(tl.logging.DEBUG)
################### Asynchronous Advantage Actor Critic (A3C) ####################################
class ACNet(object):
def __init__(self, net_list, scope, entropy_beta):
self.ENTROPY_BETA = entropy_beta
self.actor, self.critic = net_list
# @tf.function # shouldn't use here!
def update_global(
self, buffer_s, buffer_a, buffer_v_target, globalAC
    ):  # refer to the global Actor-Critic network for updating it with samples
''' update the global critic '''
with tf.GradientTape() as tape:
self.v = self.critic(buffer_s)
self.v_target = buffer_v_target
td = tf.subtract(self.v_target, self.v, name='TD_error')
self.c_loss = tf.reduce_mean(tf.square(td))
self.c_grads = tape.gradient(self.c_loss, self.critic.trainable_weights)
OPT_C.apply_gradients(zip(self.c_grads, globalAC.critic.trainable_weights)) # local grads applies to global net
del tape # Drop the reference to the tape
''' update the global actor '''
with tf.GradientTape() as tape:
self.actor(buffer_s)
self.a_his = buffer_a # float32
log_prob = self.actor.policy_dist.logp(self.a_his)
exp_v = log_prob * td # td is from the critic part, no gradients for it
entropy = self.actor.policy_dist.entropy() # encourage exploration
self.exp_v = self.ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
self.a_grads = tape.gradient(self.a_loss, self.actor.trainable_weights)
OPT_A.apply_gradients(zip(self.a_grads, globalAC.actor.trainable_weights)) # local grads applies to global net
del tape # Drop the reference to the tape
# @tf.function
def pull_global(self, globalAC): # run by a local, pull weights from the global nets
for l_p, g_p in zip(self.actor.trainable_weights, globalAC.actor.trainable_weights):
l_p.assign(g_p)
for l_p, g_p in zip(self.critic.trainable_weights, globalAC.critic.trainable_weights):
l_p.assign(g_p)
def get_action(self, s): # run by a local
return self.actor(np.array([s])).numpy()[0]
def get_action_greedy(self, s):
return self.actor(np.array([s]), greedy=True)[0].numpy()
def save_ckpt(self, env_name): # save trained weights
save_model(self.actor, 'model_actor', 'A3C', env_name)
save_model(self.critic, 'model_critic', 'A3C', env_name)
def load_ckpt(self, env_name): # load trained weights
load_model(self.actor, 'model_actor', 'A3C', env_name)
load_model(self.critic, 'model_critic', 'A3C', env_name)
class Worker(object):
def __init__(self, env, net_list, name, train_episodes, max_steps, gamma, update_itr, entropy_beta,
render):
self.name = name
self.AC = ACNet(net_list, name, entropy_beta)
self.MAX_GLOBAL_EP = train_episodes
self.UPDATE_GLOBAL_ITER = update_itr
self.GAMMA = gamma
self.env = env
self.max_steps = max_steps
self.render = render
def work(self, globalAC):
global COORD, GLOBAL_RUNNING_R, GLOBAL_EP, OPT_A, OPT_C, t0, SAVE_INTERVAL
total_step = 1
save_cnt = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < self.MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
for epi_step in range(self.max_steps):
# visualize Worker_0 during training
if self.name == 'Worker_0' and total_step % 30 == 0 and self.render:
self.env.render()
s = s.astype('float32') # double to float
a = self.AC.get_action(s)
s_, r, done, _info = self.env.step(a)
s_ = s_.astype('float32') # double to float
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
if total_step % self.UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = self.AC.critic(s_[np.newaxis, :])[0, 0] # reduce dim from 2 to 0
buffer_v_target = []
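                    # walk the rewards backwards from the bootstrap value v_s_, discounting
                    # by GAMMA at each step, to build the n-step return targets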
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + self.GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s = buffer_s if len(buffer_s[0].shape) > 1 else np.vstack(
buffer_s) # no vstack for raw-pixel input
buffer_a, buffer_v_target = (
np.vstack(buffer_a), np.vstack(buffer_v_target)
)
# update gradients on global network
self.AC.update_global(buffer_s, buffer_a, buffer_v_target.astype('float32'), globalAC)
buffer_s, buffer_a, buffer_r = [], [], []
# update local network from global network
self.AC.pull_global(globalAC)
s = s_
total_step += 1
if self.name == 'Worker_0' and GLOBAL_EP >= save_cnt * SAVE_INTERVAL:
plot_save_log(GLOBAL_RUNNING_R, algorithm_name=self.name, env_name=self.env.spec.id)
globalAC.save_ckpt(env_name=self.env.spec.id)
save_cnt += 1
if done:
break
GLOBAL_RUNNING_R.append(ep_r)
print('{}, Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}' \
.format(self.name, GLOBAL_EP, self.MAX_GLOBAL_EP, ep_r, time.time() - t0))
GLOBAL_EP += 1
class A3C():
def __init__(self, net_list, optimizers_list, entropy_beta=0.005):
'''
parameters
----------
entropy_beta: factor for entropy boosted exploration
'''
self.net_list = net_list
self.optimizers_list = optimizers_list
self.GLOBAL_AC = ACNet(self.net_list[0], 'global', entropy_beta) # we only need its params
self.entropy_beta = entropy_beta
self.name = 'A3C'
def learn(self, env, train_episodes=1000, test_episodes=10, max_steps=150, render=False, n_workers=1, update_itr=10,
gamma=0.99, save_interval=500, mode='train'):
'''
parameters
-----------
        env: a list of identical learning environments, one per worker
train_episodes: total number of episodes for training
test_episodes: total number of episodes for testing
max_steps: maximum number of steps for one episode
render: render or not
n_workers: manually set number of workers
update_itr: update global policy after several episodes
gamma: reward discount factor
save_interval: timesteps for saving the weights and plotting the results
mode: train or test
'''
global COORD, GLOBAL_RUNNING_R, GLOBAL_EP, OPT_A, OPT_C, t0, SAVE_INTERVAL
SAVE_INTERVAL = save_interval
COORD = tf.train.Coordinator()
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0 # will increase during training, stop training when it >= MAX_GLOBAL_EP
N_WORKERS = n_workers if n_workers > 0 else multiprocessing.cpu_count()
if mode == 'train':
# ============================= TRAINING ===============================
print('Training... | Algorithm: {} | Environment: {}'.format(self.name, env[0].spec.id))
t0 = time.time()
with tf.device("/cpu:0"):
[OPT_A, OPT_C] = self.optimizers_list
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'Worker_%i' % i # worker name
workers.append(
Worker(env[i], self.net_list[i + 1], i_name, train_episodes, max_steps, gamma,
update_itr, self.entropy_beta, render))
# start TF threading
worker_threads = []
for worker in workers:
# t = threading.Thread(target=worker.work)
job = lambda: worker.work(self.GLOBAL_AC)
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
plot_save_log(GLOBAL_RUNNING_R, algorithm_name=self.name, env_name=env[0].spec.id)
self.GLOBAL_AC.save_ckpt(env_name=env[0].spec.id)
elif mode == 'test':
# ============================= EVALUATION =============================
env = env[0] # only need one env for test
self.GLOBAL_AC.load_ckpt(env_name=env.spec.id)
print('Testing... | Algorithm: {} | Environment: {}'.format(self.name, env.spec.id))
            t0 = time.time()  # timing reference for the progress printout below
            frame_idx = 0
for eps in range(test_episodes):
s = env.reset()
rall = 0
for step in range(max_steps):
env.render()
frame_idx += 1
s = s.astype('float32') # double to float
a = self.GLOBAL_AC.get_action_greedy(s)
s, r, d, _ = env.step(a)
if render:
env.render()
rall += r
if d:
break
print('Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
eps, test_episodes, rall, time.time() - t0))
        else:
            print('unknown mode type')
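# --- Usage sketch (illustrative only) ------------------------------------------
# Grounded in the code above: `net_list` needs n_workers + 1 entries, each an
# [actor, critic] pair (index 0 is the shared global pair, index i + 1 belongs to
# Worker_i); `optimizers_list` is [actor_optimizer, critic_optimizer]; and the
# `env` argument of learn() is a list with one environment per worker. The
# network-building helper below is a placeholder assumption, not part of this file.
#
#     import gym
#     n_workers = 2
#     envs = [gym.make('BipedalWalker-v2') for _ in range(n_workers)]
#     net_list = [build_actor_critic(envs[0]) for _ in range(n_workers + 1)]  # hypothetical helper
#     optimizers_list = [tf.optimizers.RMSprop(5e-5), tf.optimizers.RMSprop(1e-4)]
#     model = A3C(net_list, optimizers_list, entropy_beta=0.005)
#     model.learn(envs, train_episodes=1000, n_workers=n_workers, mode='train')
#     model.learn(envs, test_episodes=10, mode='test')
# --------------------------------------------------------------------------------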
| [
"[email protected]"
] | |
94c538986256e0c9b56cfb18d0def97857f7224a | 3006ba184fd85d9bfe64a2040683618d7aa24e54 | /paylogic/settings_base.py | fb3a17cdd1544a5df7dd2096072c1f69f5589c59 | [
"Apache-2.0"
] | permissive | esjee/codereview | 909ca5ecff6b5436b023c5e4e6872366db1a0c3f | a8fc0e0b51be18db387b8b915aeda3f63e37c04f | refs/heads/master | 2021-01-24T03:49:09.315286 | 2014-07-14T11:50:52 | 2014-07-14T11:50:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,616 | py | """Django settings for django_gae2django project."""
# NOTE: Keep the settings.py in examples directories in sync with this one!
# from settings import *
import re
import os
import statsd
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Admin', '[email protected]'),
)
EMAIL_HOST = 'localhost'
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Amsterdam'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(ROOT, 'static')
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(ROOT, 'static'),
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'some-secret'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'gae2django.middleware.FixRequestUserMiddleware',
# Keep in mind, that CSRF protection is DISABLED!
'rietveld_helper.middleware.DisableCSRFMiddleware',
'rietveld_helper.middleware.AddUserToRequestMiddleware',
'django.middleware.doc.XViewMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth', # required by admin panel
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.static',
)
ROOT_URLCONF = 'paylogic.urls'
TEMPLATE_DIRS = (
os.path.join(ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django_openid_auth',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.staticfiles',
'django.contrib.messages',
'jquery',
'django_select2',
'gae2django',
'rietveld_helper',
'paylogic',
'codereview',
)
OPENID_CREATE_USERS = True
OPENID_SSO_SERVER_URL = 'https://google.com/accounts/o8/site-xrds?hd=paylogic.eu'
OPENID_USE_AS_ADMIN_LOGIN = False
LOGIN_URL = '/openid/login/'
LOGIN_REDIRECT_URL = '/'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_openid_auth.auth.OpenIDBackend',
)
# Set your DSN value
RAVEN_CONFIG = {
    'dsn': 'https://your-dsn',
}
# Add raven to the list of installed apps
INSTALLED_APPS = INSTALLED_APPS + (
'raven.contrib.django.raven_compat',
)
INTERNAL_IPS = ('127.0.0.1',)
AUTH_PROFILE_MODULE = 'codereview.Account'
LOGIN_REDIRECT_URL = '/'
# This won't work with gae2django.
RIETVELD_INCOMING_MAIL_ADDRESS = None
RIETVELD_REVISION = open(os.path.join(ROOT, 'VERSION')).read().strip()
UPLOAD_PY_SOURCE = os.path.join(ROOT, 'upload.py')
VCS = {
'hg': {
'base_dir': '/var/codereview/hg/',
'regex': re.compile('^((ssh://code\.(?:example\.com)/)?/var/codereview/hg/|hg\+)(.+)$'),
'supports_direct_export': True,
'supports_simple_cloning': True,
'default_branch': 'default',
},
'bzr': {
'base_dir': '/var/codereview/bzr/',
'regex': re.compile('^((ssh://code\.(?:example\.com)/)?/var/codereview/bzr/|bzr\+)(.+)$'),
'supports_direct_export': True,
'supports_simple_cloning': False,
'default_branch': 'trunk',
},
'git': {
'base_dir': '/var/codereview/git/',
'regex': re.compile('^((ssh://code\.(?:example\.com)/)?/var/codereview/git/|git\+)(.+)$'),
'supports_direct_export': False,
'supports_simple_cloning': True,
'default_branch': 'master',
}
}
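# Illustrative note (an assumption about intent, based on the patterns above): each
# 'regex' accepts both a plain repository path and a scheme-prefixed form, e.g. for
# 'hg' both '/var/codereview/hg/users/joe/repo' and
# 'hg+/var/codereview/hg/users/joe/repo' match, with the final capture group holding
# the part after the recognised prefix.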
FEATURE_BRANCH_DEFAULT_PREFIX = 'hg+/var/codereview/hg/users/'
ORIGINAL_BRANCH_DEFAULT_PREFIX = 'hg+/var/hg/codereview/example.com#'
TEMP_FOLDER = '/var/tmp/codereview/'
FOGBUGZ_URL = 'https://fogbugz.example.com'
FOGBUGZ_TOKEN = 'fogbugz-token'
# Override this token in your settings_local.py file in order to
# use the API functions
API_TOKEN = 'some-token'
FOGBUGZ_MERGEKEEPER_USER_ID = 999
FOGBUGZ_APPROVED_REVISION_FIELD_ID = "plugin_customfields_at_fogcreek_com_approvedxrevision"
FOGBUGZ_TARGET_BRANCH_FIELD_ID = "plugin_customfields_at_fogcreek_com_targetxbranch"
FOGBUGZ_ORIGINAL_BRANCH_FIELD_ID = "plugin_customfields_at_fogcreek_com_originalxbranch"
FOGBUGZ_FEATURE_BRANCH_FIELD_ID = "plugin_customfields_at_fogcreek_com_featurexbranch"
FOGBUGZ_CI_PROJECT_FIELD_ID = "cixproject"
CODEREVIEW_IGNORED_FILES = ['.hg_archival.txt']
CODEREVIEW_MAX_FILE_SIZE = 1024 * 1024
CODEREVIEW_VALIDATORS = [
]
CODEREVIEW_TARGET_BRANCH_CHOICES_GETTER = lambda ci_project, original_branch, branches: []
AUTO_RENDER_SELECT2_STATICS = False
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
},
'loggers': {
}
}
DEFAULT_MAIL_CC = '[email protected]'
statsd.Connection.set_defaults(host='localhost', port=8125)
try:
from paylogic.settings_local import * # NOQA
except ImportError:
pass
| [
"[email protected]"
] | |
60d4b90698ef3312ded89560fe60b0bb05059590 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_luring.py | d57b90bae21a1db2e394effd996a53fd1662c59d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
# class header
class _LURING():
def __init__(self,):
self.name = "LURING"
		self.definitions = ['lure']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['lure']
| [
"[email protected]"
] | |
d9f79170b88b22d84a6fd118b71d63ef8095c4fb | 4a0e0608ae505df68a43604efd363beaec742159 | /src/muypicky/settings/base.py | 69f2cdabe2214781ab498528588edb21fd7f7808 | [] | no_license | ionescuig/trydjango1-11 | 728c65a7481f7d54cdffa35d5456474a9eb69889 | 99e205880af8d307e2ef496afc983d3deeb435e8 | refs/heads/master | 2021-04-29T19:24:15.710487 | 2018-04-13T00:18:06 | 2018-04-13T00:18:06 | 121,702,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,813 | py | """
Django settings for muypicky project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5h97=bi^k5_rdp-6urx1(3)z_ldcv^g^($@@9-847)*%3*-ju8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'yourpassword'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'YourName <[email protected]>'
ADMINS = (
'YourName <[email protected]>',
)
MANAGERS = ADMINS
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'menus',
'profiles',
'restaurants',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'muypicky.urls'
LOGIN_URL = '/login/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'muypicky.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
LOGOUT_REDIRECT_URL = '/'
LOGIN_REDIRECT_URL = '/'
CORS_REPLACE_HTTPS_REFERER = False
HOST_SCHEME = "http://"
SECURE_PROXY_SSL_HEADER = None
SECURE_SSL_REDIRECT = False
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
SECURE_HSTS_SECONDS = None
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_FRAME_DENY = False | [
"[email protected]"
] | |
bf64b0f79d6364e6e1c98299288c5bd337a03015 | f10db3b11131ddf2bf5026e42cdd72c275e49693 | /ToolsX/leetcode/0012/0012_1.py | 86533e91b9177214e14e1ab80ed1278373daa4c7 | [] | no_license | JunLei-MI/PythonX | 36def40e33c9ebb64ce28af2b5da010393b08356 | efea806d49f07d78e3db0390696778d4a7fc6c28 | refs/heads/master | 2023-04-07T10:58:45.647430 | 2021-01-25T16:54:37 | 2021-04-15T13:41:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | class Solution:
def intToRoman(self, num: int) -> str:
"""
1
https://leetcode.com/problems/integer-to-roman/discuss/6274/Simple-Solution
        This answer made me laugh
>>> Solution().intToRoman(3)
'III'
>>> Solution().intToRoman(4)
'IV'
>>> Solution().intToRoman(9)
'IX'
>>> Solution().intToRoman(58)
'LVIII'
>>> Solution().intToRoman(1994)
'MCMXCIV'
"""
M = ['', 'M', 'MM', 'MMM']
C = ['', 'C', 'CC', 'CCC', 'CD', 'D', 'DC', 'DCC', 'DCCC', 'CM']
X = ['', 'X', 'XX', 'XXX', 'XL', 'L', 'LX', 'LXX', 'LXXX', 'XC']
I = ['', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX']
return M[num // 1000] + C[(num % 1000) // 100] + X[(num % 100) // 10] + I[num % 10]
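# Worked example of the table lookup above (illustrative):
#   1994 -> M[1994 // 1000]          = M[1] = 'M'
#           C[(1994 % 1000) // 100]  = C[9] = 'CM'
#           X[(1994 % 100) // 10]    = X[9] = 'XC'
#           I[1994 % 10]             = I[4] = 'IV'
#   giving 'MCMXCIV', which matches the doctest above.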
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
| [
"[email protected]"
] | |
aba1e97c6943b979394ff3086c1a6feb58340f0e | eafddc14e1381db53b87d42e7aa12dfb4bcf2d6e | /pandemonium/implementations/sarsa.py | a66e3d0d8b81ef0035bbce22252096518a5927b5 | [] | no_license | konichuvak/pandemonium | b9d1d2f8c3529b6869f6bda1d6ca10c6c0f94052 | 57083b311ea209fe156f8575cc682e6c88211b74 | refs/heads/master | 2022-11-23T21:57:21.276033 | 2020-07-26T03:42:51 | 2020-07-26T03:42:51 | 240,851,837 | 1 | 0 | null | 2020-07-06T19:54:41 | 2020-02-16T07:45:09 | Python | UTF-8 | Python | false | false | 339 | py | from pandemonium.demons.control import SARSA, OfflineTDControl, OnlineTDControl
from pandemonium.demons.offline_td import TTD
from pandemonium.utilities.utilities import get_all_classes
class MultistepSARSA(SARSA, OfflineTDControl, TTD):
...
class OnlineSARSA(SARSA, OnlineTDControl):
...
__all__ = get_all_classes(__name__)
| [
"[email protected]"
] | |
79c71fa20f12d5ed5a0151bf6692b97ba1da8d44 | 4fcad69a9b2aec97fa29e0010d82f0f085cdc446 | /tsampi/pypy/lib_pypy/distributed/test/test_socklayer.py | 6870eda430d4ec75a8c7d5f2e3475edfb0534513 | [] | no_license | tsampi/tsampi-0 | b64d4457f58314343630b04232c6ecc74c7bfda1 | 5e0183e80718d5668b4b5b96631853942e344b64 | refs/heads/master | 2021-01-19T04:35:05.640785 | 2016-09-12T18:34:25 | 2016-09-12T18:34:25 | 49,612,767 | 1 | 3 | null | 2016-03-25T10:35:41 | 2016-01-14T01:02:18 | Python | UTF-8 | Python | false | false | 995 | py | import py
from pypy.conftest import gettestobjspace
def setup_module(mod):
py.test.importorskip("pygreen") # found e.g. in py/trunk/contrib
# XXX think how to close the socket
class AppTestSocklayer:
def setup_class(cls):
cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
"usemodules":("_stackless","_socket", "select")})
def test_socklayer(self):
class X(object):
z = 3
x = X()
try:
import py
except ImportError:
skip("pylib not importable")
from pygreen.pipe.gsocke import GreenSocket
from distributed.socklayer import socket_loop, connect
from pygreen.greensock2 import oneof, allof
def one():
socket_loop(('127.0.0.1', 21211), {'x':x}, socket=GreenSocket)
def two():
rp = connect(('127.0.0.1', 21211), GreenSocket)
assert rp.x.z == 3
oneof(one, two)
| [
"[email protected]"
] | |
00071a7edaece30967b862cb1b4b8a03105931c9 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnguess.py | 1ec9315b1a42073325191b67d7fa6a7c066171e1 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 1,634 | py | ii = [('BentJDO2.py', 2), ('EmerRN.py', 2), ('CookGHP3.py', 1), ('MarrFDI.py', 1), ('CoolWHM2.py', 1), ('KembFFF.py', 5), ('GodwWSL2.py', 9), ('SadlMLP.py', 6), ('FerrSDO3.py', 5), ('WilbRLW.py', 1), ('RennJIT.py', 1), ('AubePRP2.py', 4), ('CookGHP.py', 3), ('KembFJ1.py', 3), ('WilbRLW5.py', 2), ('PeckJNG.py', 2), ('KnowJMM.py', 3), ('BailJD2.py', 19), ('ChalTPW2.py', 2), ('FitzRNS3.py', 3), ('WilbRLW2.py', 2), ('WilkJMC2.py', 5), ('CarlTFR.py', 19), ('SeniNSP.py', 1), ('LyttELD.py', 7), ('TalfTAC.py', 7), ('AinsWRR3.py', 7), ('CookGHP2.py', 1), ('BailJD1.py', 23), ('RoscTTI2.py', 1), ('MarrFDI2.py', 3), ('CrokTPS.py', 1), ('ClarGE.py', 1), ('BuckWGM.py', 1), ('LyelCPG.py', 1), ('GilmCRS.py', 2), ('DibdTRL2.py', 2), ('AinsWRR.py', 3), ('CrocDNL.py', 10), ('MedwTAI.py', 2), ('LandWPA2.py', 3), ('WadeJEB.py', 1), ('FerrSDO2.py', 6), ('TalfTIT.py', 1), ('NewmJLP.py', 3), ('GodwWLN.py', 2), ('MedwTAI2.py', 4), ('SoutRD.py', 1), ('DickCSG.py', 1), ('MereHHB3.py', 1), ('BailJD3.py', 22), ('MereHHB.py', 2), ('WilkJMC.py', 1), ('HogaGMM.py', 2), ('MackCNH.py', 1), ('FitzRNS4.py', 10), ('CoolWHM3.py', 2), ('DequTKM.py', 2), ('BentJRP.py', 1), ('EdgeMHT.py', 6), ('LyttELD3.py', 4), ('FerrSDO.py', 2), ('KembFJ2.py', 2), ('LewiMJW.py', 8), ('WilbRLW3.py', 6), ('AinsWRR2.py', 4), ('MereHHB2.py', 1), ('ClarGE3.py', 6), ('RogeSIP.py', 4), ('DibdTRL.py', 2), ('FitzRNS2.py', 1), ('MartHSI.py', 1), ('NortSTC.py', 6), ('SadlMLP2.py', 2), ('LyelCPG3.py', 1), ('BowrJMM3.py', 1), ('BeckWRE.py', 1), ('WordWYR.py', 3), ('ChalTPW.py', 2), ('KeigTSS.py', 1), ('KirbWPW.py', 1), ('WaylFEP.py', 2), ('BentJDO.py', 2), ('ClarGE4.py', 6)] | [
"[email protected]"
] | |
05fa5d26d2b5020a17dd191dd3777319051f64af | d1626536c867604efdb1b9b1d8305729f28233df | /tests/types/test_urls.py | f5483ca750cd1176a1aea6c1ed6716e369de006a | [
"MIT"
] | permissive | Rafiot/followthemoney | 3fb056604ab672de1d9eea3330cd890af794c01e | 18dd0ec410e598f8766f300b1f820e484034920f | refs/heads/master | 2022-09-15T10:02:18.145766 | 2020-05-05T08:37:05 | 2020-05-05T08:37:05 | 259,663,871 | 1 | 0 | MIT | 2020-05-04T22:25:09 | 2020-04-28T14:41:07 | null | UTF-8 | Python | false | false | 1,065 | py | import unittest
from followthemoney.types import registry
urls = registry.url
class UrlsTest(unittest.TestCase):
def test_is_url(self):
self.assertTrue(urls.validate('http://foo.org'))
self.assertFalse(urls.validate(None))
self.assertFalse(urls.validate('hello'))
def test_unicode_url(self):
utext = 'http://ko.wikipedia.org/wiki/위키백과:대문'
self.assertTrue(urls.validate(utext))
self.assertFalse(urls.validate(utext.encode('euc-kr')))
def test_parse_url(self):
self.assertEqual(urls.clean('http://foo.com'), 'http://foo.com/')
self.assertEqual(urls.clean('http://foo.com/#lala'), 'http://foo.com/')
self.assertEqual(urls.clean('http://foo.com?b=1&a=2'),
'http://foo.com/?a=2&b=1')
self.assertEqual(urls.clean('http://FOO.com'), 'http://foo.com/')
self.assertEqual(urls.clean('http://FOO.com/A'), 'http://foo.com/A')
def test_specificity(self):
self.assertEqual(urls.specificity('http://foo.com/'), 1)
| [
"[email protected]"
] | |
feb14fa7af6317555eaeb4a19bf9065069c3bc12 | 1556527f9077813490366d1fef284bc0ae1e02ba | /sdk/lusid/models/corporate_action.py | 11aa86df66e848821bd79205cc47de54deb113d2 | [
"MIT"
] | permissive | timbarrass/lusid-sdk-python-preview | a04ce8887c7001bd7ddf099027ab94c97d8fa400 | 9a54e98bf748d87469aa7c797607550fe65ba6ba | refs/heads/master | 2020-08-03T20:02:37.282370 | 2019-09-30T10:51:19 | 2019-09-30T10:51:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,609 | py | # coding: utf-8
"""
LUSID API
# Introduction This page documents the [LUSID APIs](https://api.lusid.com/swagger), which allows authorised clients to query and update their data within the LUSID platform. SDKs to interact with the LUSID APIs are available in the following languages : * [C#](https://github.com/finbourne/lusid-sdk-csharp) * [Java](https://github.com/finbourne/lusid-sdk-java) * [JavaScript](https://github.com/finbourne/lusid-sdk-js) * [Python](https://github.com/finbourne/lusid-sdk-python) # Data Model The LUSID API has a relatively lightweight but extremely powerful data model. One of the goals of LUSID was not to enforce on clients a single rigid data model but rather to provide a flexible foundation onto which clients can streamline their data. One of the primary tools to extend the data model is through using properties. Properties can be associated with amongst others: - * Transactions * Instruments * Portfolios The LUSID data model is exposed through the LUSID APIs. The APIs provide access to both business objects and the meta data used to configure the systems behaviours. The key business entities are: - * **Portfolios** A portfolio is the primary container for transactions and holdings. * **Derived Portfolios** Derived portfolios allow portfolios to be created based on other portfolios, by overriding or overlaying specific items * **Holdings** A holding is a position account for a instrument within a portfolio. Holdings can only be adjusted via transactions. * **Transactions** A Transaction is a source of transactions used to manipulate holdings. * **Corporate Actions** A corporate action is a market event which occurs to a instrument, for example a stock split * **Instruments** A instrument represents a currency, tradable instrument or OTC contract that is attached to a transaction and a holding. * **Properties** Several entities allow additional user defined properties to be associated with them. For example, a Portfolio manager may be associated with a portfolio Meta data includes: - * **Transaction Types** Transactions are booked with a specific transaction type. The types are client defined and are used to map the Transaction to a series of movements which update the portfolio holdings. * **Properties Types** Types of user defined properties used within the system. This section describes the data model that LUSID exposes via the APIs. ## Scope All data in LUSID is segregated at the client level. Entities in LUSID are identifiable by a unique code. Every entity lives within a logical data partition known as a Scope. Scope is an identity namespace allowing two entities with the same unique code to co-exist within individual address spaces. For example, prices for equities from different vendors may be uploaded into different scopes such as `client/vendor1` and `client/vendor2`. A portfolio may then be valued using either of the price sources by referencing the appropriate scope. LUSID Clients cannot access scopes of other clients. ## Schema A detailed description of the entities used by the API and parameters for endpoints which take a JSON document can be retrieved via the `schema` endpoint. ## Instruments LUSID has its own built-in instrument master which you can use to master your own instrument universe. Every instrument must be created with one or more unique market identifiers, such as [FIGI](https://openfigi.com/). For any non-listed instruments (eg OTCs), you can upload an instrument against a custom ID of your choosing. 
In addition, LUSID will allocate each instrument a unique 'LUSID instrument identifier'. The LUSID instrument identifier is what is used when uploading transactions, holdings, prices, etc. The API exposes an `instrument/lookup` endpoint which can be used to lookup these LUSID identifiers using their market identifiers. Cash can be referenced using the ISO currency code prefixed with \"`CCY_`\" e.g. `CCY_GBP` ## Instrument Prices (Analytics) Instrument prices are stored in LUSID's Analytics Store | Field|Type|Description | | ---|---|--- | | InstrumentUid|string|Unique instrument identifier | | Value|decimal|Value of the analytic, eg price | | Denomination|string|Underlying unit of the analytic, eg currency, EPS etc. | ## Instrument Data Instrument data can be uploaded to the system using the [Instrument Properties](#tag/InstrumentProperties) endpoint. | Field|Type|Description | | ---|---|--- | | Key|propertykey|The key of the property. This takes the format {domain}/{scope}/{code} e.g. 'Instrument/system/Name' or 'Transaction/strategy/quantsignal'. | | Value|string|The value of the property. | | EffectiveFrom|datetimeoffset|The effective datetime from which the property is valid. | ## Portfolios Portfolios are the top-level entity containers within LUSID, containing transactions, corporate actions and holdings. The transactions build up the portfolio holdings on which valuations, analytics profit & loss and risk can be calculated. Properties can be associated with Portfolios to add in additional model data. Portfolio properties can be changed over time as well. For example, to allow a Portfolio Manager to be linked with a Portfolio. Additionally, portfolios can be securitised and held by other portfolios, allowing LUSID to perform \"drill-through\" into underlying fund holdings ### Reference Portfolios Reference portfolios are portfolios that contain only weights, as opposed to transactions, and are designed to represent entities such as indices. ### Derived Portfolios LUSID also allows for a portfolio to be composed of another portfolio via derived portfolios. A derived portfolio can contain its own transactions and also inherits any transactions from its parent portfolio. Any changes made to the parent portfolio are automatically reflected in derived portfolio. Derived portfolios in conjunction with scopes are a powerful construct. For example, to do pre-trade what-if analysis, a derived portfolio could be created a new namespace linked to the underlying live (parent) portfolio. Analysis can then be undertaken on the derived portfolio without affecting the live portfolio. ### Portfolio Groups Portfolio groups allow the construction of a hierarchy from portfolios and groups. Portfolio operations on the group are executed on an aggregated set of portfolios in the hierarchy. For example: * Global Portfolios _(group)_ * APAC _(group)_ * Hong Kong _(portfolio)_ * Japan _(portfolio)_ * Europe _(group)_ * France _(portfolio)_ * Germany _(portfolio)_ * UK _(portfolio)_ In this example **Global Portfolios** is a group that consists of an aggregate of **Hong Kong**, **Japan**, **France**, **Germany** and **UK** portfolios. ### Movements Engine The Movements engine sits on top of the immutable event store and is used to manage the relationship between input trading actions and their associated portfolio holdings. 
The movements engine reads in the following entity types:- * Posting Transactions * Applying Corporate Actions * Holding Adjustments These are converted to one or more movements and used by the movements engine to calculate holdings. At the same time it also calculates running balances, and realised P&L. The outputs from the movements engine are holdings and transactions. ## Transactions A transaction represents an economic activity against a Portfolio. Transactions are processed according to a configuration. This will tell the LUSID engine how to interpret the transaction and correctly update the holdings. LUSID comes with a set of transaction types you can use out of the box, or you can configure your own set(s) of transactions. For more details see the [LUSID Getting Started Guide for transaction configuration.](https://support.lusid.com/configuring-transaction-types) | Field|Type|Description | | ---|---|--- | | TransactionId|string|The unique identifier for the transaction. | | Type|string|The type of the transaction e.g. 'Buy', 'Sell'. The transaction type should have been pre-configured via the System Configuration API endpoint. If it hasn't been pre-configured the transaction will still be updated or inserted however you will be unable to generate the resultant holdings for the portfolio that contains this transaction as LUSID does not know how to process it. | | InstrumentIdentifiers|map|A set of instrument identifiers to use to resolve the transaction to a unique instrument. | | TransactionDate|dateorcutlabel|The date of the transaction. | | SettlementDate|dateorcutlabel|The settlement date of the transaction. | | Units|decimal|The number of units transacted in the associated instrument. | | TransactionPrice|transactionprice|The price for each unit of the transacted instrument in the transaction currency. | | TotalConsideration|currencyandamount|The total value of the transaction in the settlement currency. | | ExchangeRate|decimal|The exchange rate between the transaction and settlement currency. For example if the transaction currency is in USD and the settlement currency is in GBP this this the USD/GBP rate. | | TransactionCurrency|currency|The transaction currency. | | Properties|map|Set of unique transaction properties and associated values to store with the transaction. Each property must be from the 'Transaction' domain. | | CounterpartyId|string|The identifier for the counterparty of the transaction. | | Source|string|The source of the transaction. This is used to look up the appropriate transaction group set in the transaction type configuration. | From these fields, the following values can be calculated * **Transaction value in Transaction currency**: TotalConsideration / ExchangeRate * **Transaction value in Portfolio currency**: Transaction value in Transaction currency * TradeToPortfolioRate ### Example Transactions #### A Common Purchase Example Three example transactions are shown in the table below. They represent a purchase of USD denominated IBM shares within a Sterling denominated portfolio. * The first two transactions are for separate buy and fx trades * Buying 500 IBM shares for $71,480.00 * A foreign exchange conversion to fund the IBM purchase. (Buy $71,480.00 for £54,846.60) * The third transaction is an alternate version of the above trades. Buying 500 IBM shares and settling directly in Sterling. 
| Column | Buy Trade | Fx Trade | Buy Trade with foreign Settlement | | ----- | ----- | ----- | ----- | | TransactionId | FBN00001 | FBN00002 | FBN00003 | | Type | Buy | FxBuy | Buy | | InstrumentIdentifiers | { \"figi\", \"BBG000BLNNH6\" } | { \"CCY\", \"CCY_USD\" } | { \"figi\", \"BBG000BLNNH6\" } | | TransactionDate | 2018-08-02 | 2018-08-02 | 2018-08-02 | | SettlementDate | 2018-08-06 | 2018-08-06 | 2018-08-06 | | Units | 500 | 71480 | 500 | | TransactionPrice | 142.96 | 1 | 142.96 | | TradeCurrency | USD | USD | USD | | ExchangeRate | 1 | 0.7673 | 0.7673 | | TotalConsideration.Amount | 71480.00 | 54846.60 | 54846.60 | | TotalConsideration.Currency | USD | GBP | GBP | | Trade/default/TradeToPortfolioRate* | 0.7673 | 0.7673 | 0.7673 | [* This is a property field] #### A Forward FX Example LUSID has a flexible transaction modelling system, and there are a number of different ways of modelling forward fx trades. The default LUSID transaction types are FwdFxBuy and FwdFxSell. Other types and behaviours can be configured as required. Using these transaction types, the holdings query will report two forward positions. One in each currency. Since an FX trade is an exchange of one currency for another, the following two 6 month forward transactions are equivalent: | Column | Forward 'Sell' Trade | Forward 'Buy' Trade | | ----- | ----- | ----- | | TransactionId | FBN00004 | FBN00005 | | Type | FwdFxSell | FwdFxBuy | | InstrumentIdentifiers | { \"CCY\", \"CCY_GBP\" } | { \"CCY\", \"CCY_USD\" } | | TransactionDate | 2018-08-02 | 2018-08-02 | | SettlementDate | 2019-02-06 | 2019-02-06 | | Units | 10000.00 | 13142.00 | | TransactionPrice |1 | 1 | | TradeCurrency | GBP | USD | | ExchangeRate | 1.3142 | 0.760919 | | TotalConsideration.Amount | 13142.00 | 10000.00 | | TotalConsideration.Currency | USD | GBP | | Trade/default/TradeToPortfolioRate | 1.0 | 0.760919 | ## Holdings A holding represents a position in a instrument or cash on a given date. | Field|Type|Description | | ---|---|--- | | InstrumentUid|string|The unqiue Lusid Instrument Id (LUID) of the instrument that the holding is in. | | SubHoldingKeys|map|The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. | | Properties|map|The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' or 'Holding' domain. | | HoldingType|string|The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. | | Units|decimal|The total number of units of the holding. | | SettledUnits|decimal|The total number of settled units of the holding. | | Cost|currencyandamount|The total cost of the holding in the transaction currency. | | CostPortfolioCcy|currencyandamount|The total cost of the holding in the portfolio currency. | | Transaction|transaction|The transaction associated with an unsettled holding. | ## Corporate Actions Corporate actions are represented within LUSID in terms of a set of instrument-specific 'transitions'. These transitions are used to specify the participants of the corporate action, and the effect that the corporate action will have on holdings in those participants. 
### Corporate Action | Field|Type|Description | | ---|---|--- | | CorporateActionCode|code|The unique identifier of this corporate action | | Description|string| | | AnnouncementDate|datetimeoffset|The announcement date of the corporate action | | ExDate|datetimeoffset|The ex date of the corporate action | | RecordDate|datetimeoffset|The record date of the corporate action | | PaymentDate|datetimeoffset|The payment date of the corporate action | | Transitions|corporateactiontransition[]|The transitions that result from this corporate action | ### Transition | Field|Type|Description | | ---|---|--- | | InputTransition|corporateactiontransitioncomponent|Indicating the basis of the corporate action - which security and how many units | | OutputTransitions|corporateactiontransitioncomponent[]|What will be generated relative to the input transition | ### Example Corporate Action Transitions #### A Dividend Action Transition In this example, for each share of IBM, 0.20 units (or 20 pence) of GBP are generated. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"ccy\" : \"CCY_GBP\" } | | Units Factor | 1 | 0.20 | | Cost Factor | 1 | 0 | #### A Split Action Transition In this example, for each share of IBM, we end up with 2 units (2 shares) of IBM, with total value unchanged. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | | Units Factor | 1 | 2 | | Cost Factor | 1 | 1 | #### A Spinoff Action Transition In this example, for each share of IBM, we end up with 1 unit (1 share) of IBM and 3 units (3 shares) of Celestica, with 85% of the value remaining on the IBM share, and 5% in each Celestica share (15% total). | Column | Input Transition | Output Transition 1 | Output Transition 2 | | ----- | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000HBGRF3\" } | | Units Factor | 1 | 1 | 3 | | Cost Factor | 1 | 0.85 | 0.15 | ## Property Properties are key-value pairs that can be applied to any entity within a domain (where a domain is `trade`, `portfolio`, `security` etc). Properties must be defined before use with a `PropertyDefinition` and can then subsequently be added to entities. 
# Schemas The following headers are returned on all responses from LUSID | Name | Purpose | | --- | --- | | lusid-meta-duration | Duration of the request | | lusid-meta-success | Whether or not LUSID considered the request to be successful | | lusid-meta-requestId | The unique identifier for the request | | lusid-schema-url | Url of the schema for the data being returned | | lusid-property-schema-url | Url of the schema for any properties | # Error Codes | Code|Name|Description | | ---|---|--- | | <a name=\"102\">102</a>|VersionNotFound| | | <a name=\"104\">104</a>|InstrumentNotFound| | | <a name=\"105\">105</a>|PropertyNotFound| | | <a name=\"106\">106</a>|PortfolioRecursionDepth| | | <a name=\"108\">108</a>|GroupNotFound| | | <a name=\"109\">109</a>|PortfolioNotFound| | | <a name=\"110\">110</a>|PropertySchemaNotFound| | | <a name=\"111\">111</a>|PortfolioAncestryNotFound| | | <a name=\"112\">112</a>|PortfolioWithIdAlreadyExists| | | <a name=\"113\">113</a>|OrphanedPortfolio| | | <a name=\"119\">119</a>|MissingBaseClaims| | | <a name=\"121\">121</a>|PropertyNotDefined| | | <a name=\"122\">122</a>|CannotDeleteSystemProperty| | | <a name=\"123\">123</a>|CannotModifyImmutablePropertyField| | | <a name=\"124\">124</a>|PropertyAlreadyExists| | | <a name=\"125\">125</a>|InvalidPropertyLifeTime| | | <a name=\"127\">127</a>|CannotModifyDefaultDataType| | | <a name=\"128\">128</a>|GroupAlreadyExists| | | <a name=\"129\">129</a>|NoSuchDataType| | | <a name=\"132\">132</a>|ValidationError| | | <a name=\"133\">133</a>|LoopDetectedInGroupHierarchy| | | <a name=\"135\">135</a>|SubGroupAlreadyExists| | | <a name=\"138\">138</a>|PriceSourceNotFound| | | <a name=\"139\">139</a>|AnalyticStoreNotFound| | | <a name=\"141\">141</a>|AnalyticStoreAlreadyExists| | | <a name=\"143\">143</a>|ClientInstrumentAlreadyExists| | | <a name=\"144\">144</a>|DuplicateInParameterSet| | | <a name=\"147\">147</a>|ResultsNotFound| | | <a name=\"148\">148</a>|OrderFieldNotInResultSet| | | <a name=\"149\">149</a>|OperationFailed| | | <a name=\"150\">150</a>|ElasticSearchError| | | <a name=\"151\">151</a>|InvalidParameterValue| | | <a name=\"153\">153</a>|CommandProcessingFailure| | | <a name=\"154\">154</a>|EntityStateConstructionFailure| | | <a name=\"155\">155</a>|EntityTimelineDoesNotExist| | | <a name=\"156\">156</a>|EventPublishFailure| | | <a name=\"157\">157</a>|InvalidRequestFailure| | | <a name=\"158\">158</a>|EventPublishUnknown| | | <a name=\"159\">159</a>|EventQueryFailure| | | <a name=\"160\">160</a>|BlobDidNotExistFailure| | | <a name=\"162\">162</a>|SubSystemRequestFailure| | | <a name=\"163\">163</a>|SubSystemConfigurationFailure| | | <a name=\"165\">165</a>|FailedToDelete| | | <a name=\"166\">166</a>|UpsertClientInstrumentFailure| | | <a name=\"167\">167</a>|IllegalAsAtInterval| | | <a name=\"168\">168</a>|IllegalBitemporalQuery| | | <a name=\"169\">169</a>|InvalidAlternateId| | | <a name=\"170\">170</a>|CannotAddSourcePortfolioPropertyExplicitly| | | <a name=\"171\">171</a>|EntityAlreadyExistsInGroup| | | <a name=\"173\">173</a>|EntityWithIdAlreadyExists| | | <a name=\"174\">174</a>|DerivedPortfolioDetailsDoNotExist| | | <a name=\"176\">176</a>|PortfolioWithNameAlreadyExists| | | <a name=\"177\">177</a>|InvalidTransactions| | | <a name=\"178\">178</a>|ReferencePortfolioNotFound| | | <a name=\"179\">179</a>|DuplicateIdFailure| | | <a name=\"180\">180</a>|CommandRetrievalFailure| | | <a name=\"181\">181</a>|DataFilterApplicationFailure| | | <a name=\"182\">182</a>|SearchFailed| | | <a 
name=\"183\">183</a>|MovementsEngineConfigurationKeyFailure| | | <a name=\"184\">184</a>|FxRateSourceNotFound| | | <a name=\"185\">185</a>|AccrualSourceNotFound| | | <a name=\"186\">186</a>|AccessDenied| | | <a name=\"187\">187</a>|InvalidIdentityToken| | | <a name=\"188\">188</a>|InvalidRequestHeaders| | | <a name=\"189\">189</a>|PriceNotFound| | | <a name=\"190\">190</a>|InvalidSubHoldingKeysProvided| | | <a name=\"191\">191</a>|DuplicateSubHoldingKeysProvided| | | <a name=\"192\">192</a>|CutDefinitionNotFound| | | <a name=\"193\">193</a>|CutDefinitionInvalid| | | <a name=\"194\">194</a>|TimeVariantPropertyDeletionDateUnspecified| | | <a name=\"195\">195</a>|PerpetualPropertyDeletionDateSpecified| | | <a name=\"196\">196</a>|TimeVariantPropertyUpsertDateUnspecified| | | <a name=\"197\">197</a>|PerpetualPropertyUpsertDateSpecified| | | <a name=\"200\">200</a>|InvalidUnitForDataType| | | <a name=\"201\">201</a>|InvalidTypeForDataType| | | <a name=\"202\">202</a>|InvalidValueForDataType| | | <a name=\"203\">203</a>|UnitNotDefinedForDataType| | | <a name=\"204\">204</a>|UnitsNotSupportedOnDataType| | | <a name=\"205\">205</a>|CannotSpecifyUnitsOnDataType| | | <a name=\"206\">206</a>|UnitSchemaInconsistentWithDataType| | | <a name=\"207\">207</a>|UnitDefinitionNotSpecified| | | <a name=\"208\">208</a>|DuplicateUnitDefinitionsSpecified| | | <a name=\"209\">209</a>|InvalidUnitsDefinition| | | <a name=\"210\">210</a>|InvalidInstrumentIdentifierUnit| | | <a name=\"211\">211</a>|HoldingsAdjustmentDoesNotExist| | | <a name=\"212\">212</a>|CouldNotBuildExcelUrl| | | <a name=\"213\">213</a>|CouldNotGetExcelVersion| | | <a name=\"214\">214</a>|InstrumentByCodeNotFound| | | <a name=\"215\">215</a>|EntitySchemaDoesNotExist| | | <a name=\"216\">216</a>|FeatureNotSupportedOnPortfolioType| | | <a name=\"217\">217</a>|QuoteNotFoundFailure| | | <a name=\"218\">218</a>|InvalidQuoteIdentifierFailure| | | <a name=\"219\">219</a>|InvalidInstrumentDefinition| | | <a name=\"221\">221</a>|InstrumentUpsertFailure| | | <a name=\"222\">222</a>|ReferencePortfolioRequestNotSupported| | | <a name=\"223\">223</a>|TransactionPortfolioRequestNotSupported| | | <a name=\"224\">224</a>|InvalidPropertyValueAssignment| | | <a name=\"230\">230</a>|TransactionTypeNotFound| | | <a name=\"231\">231</a>|TransactionTypeDuplication| | | <a name=\"232\">232</a>|PortfolioDoesNotExistAtGivenDate| | | <a name=\"233\">233</a>|QueryParserFailure| | | <a name=\"234\">234</a>|DuplicateConstituentFailure| | | <a name=\"235\">235</a>|UnresolvedInstrumentConstituentFailure| | | <a name=\"236\">236</a>|UnresolvedInstrumentInTransitionFailure| | | <a name=\"300\">300</a>|MissingRecipeFailure| | | <a name=\"301\">301</a>|DependenciesFailure| | | <a name=\"304\">304</a>|PortfolioPreprocessFailure| | | <a name=\"310\">310</a>|ValuationEngineFailure| | | <a name=\"311\">311</a>|TaskFactoryFailure| | | <a name=\"312\">312</a>|TaskEvaluationFailure| | | <a name=\"350\">350</a>|InstrumentFailure| | | <a name=\"351\">351</a>|CashFlowsFailure| | | <a name=\"360\">360</a>|AggregationFailure| | | <a name=\"370\">370</a>|ResultRetrievalFailure| | | <a name=\"371\">371</a>|ResultProcessingFailure| | | <a name=\"371\">371</a>|ResultProcessingFailure| | | <a name=\"372\">372</a>|VendorResultProcessingFailure| | | <a name=\"373\">373</a>|VendorResultMappingFailure| | | <a name=\"374\">374</a>|VendorLibraryUnauthorisedFailure| | | <a name=\"390\">390</a>|AttemptToUpsertDuplicateQuotes| | | <a name=\"391\">391</a>|CorporateActionSourceDoesNotExist| | | <a 
name=\"392\">392</a>|CorporateActionSourceAlreadyExists| | | <a name=\"393\">393</a>|InstrumentIdentifierAlreadyInUse| | | <a name=\"394\">394</a>|PropertiesNotFound| | | <a name=\"395\">395</a>|BatchOperationAborted| | | <a name=\"400\">400</a>|InvalidIso4217CurrencyCodeFailure| | | <a name=\"410\">410</a>|IndexDoesNotExist| | | <a name=\"411\">411</a>|SortFieldDoesNotExist| | | <a name=\"413\">413</a>|NegativePaginationParameters| | | <a name=\"414\">414</a>|InvalidSearchSyntax| | | <a name=\"-10\">-10</a>|ServerConfigurationError| | | <a name=\"-1\">-1</a>|Unknown error| | # noqa: E501
The version of the OpenAPI document: 0.10.739
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CorporateAction(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'corporate_action_code': 'str',
'description': 'str',
'announcement_date': 'datetime',
'ex_date': 'datetime',
'record_date': 'datetime',
'payment_date': 'datetime',
'transitions': 'list[CorporateActionTransition]'
}
attribute_map = {
'corporate_action_code': 'corporateActionCode',
'description': 'description',
'announcement_date': 'announcementDate',
'ex_date': 'exDate',
'record_date': 'recordDate',
'payment_date': 'paymentDate',
'transitions': 'transitions'
}
def __init__(self, corporate_action_code=None, description=None, announcement_date=None, ex_date=None, record_date=None, payment_date=None, transitions=None): # noqa: E501
"""CorporateAction - a model defined in OpenAPI""" # noqa: E501
self._corporate_action_code = None
self._description = None
self._announcement_date = None
self._ex_date = None
self._record_date = None
self._payment_date = None
self._transitions = None
self.discriminator = None
self.corporate_action_code = corporate_action_code
if description is not None:
self.description = description
if announcement_date is not None:
self.announcement_date = announcement_date
if ex_date is not None:
self.ex_date = ex_date
if record_date is not None:
self.record_date = record_date
if payment_date is not None:
self.payment_date = payment_date
if transitions is not None:
self.transitions = transitions
@property
def corporate_action_code(self):
"""Gets the corporate_action_code of this CorporateAction. # noqa: E501
The unique identifier of this corporate action # noqa: E501
:return: The corporate_action_code of this CorporateAction. # noqa: E501
:rtype: str
"""
return self._corporate_action_code
@corporate_action_code.setter
def corporate_action_code(self, corporate_action_code):
"""Sets the corporate_action_code of this CorporateAction.
The unique identifier of this corporate action # noqa: E501
:param corporate_action_code: The corporate_action_code of this CorporateAction. # noqa: E501
:type: str
"""
if corporate_action_code is None:
raise ValueError("Invalid value for `corporate_action_code`, must not be `None`") # noqa: E501
self._corporate_action_code = corporate_action_code
@property
def description(self):
"""Gets the description of this CorporateAction. # noqa: E501
# noqa: E501
:return: The description of this CorporateAction. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this CorporateAction.
# noqa: E501
:param description: The description of this CorporateAction. # noqa: E501
:type: str
"""
self._description = description
@property
def announcement_date(self):
"""Gets the announcement_date of this CorporateAction. # noqa: E501
The announcement date of the corporate action # noqa: E501
:return: The announcement_date of this CorporateAction. # noqa: E501
:rtype: datetime
"""
return self._announcement_date
@announcement_date.setter
def announcement_date(self, announcement_date):
"""Sets the announcement_date of this CorporateAction.
The announcement date of the corporate action # noqa: E501
:param announcement_date: The announcement_date of this CorporateAction. # noqa: E501
:type: datetime
"""
self._announcement_date = announcement_date
@property
def ex_date(self):
"""Gets the ex_date of this CorporateAction. # noqa: E501
The ex date of the corporate action # noqa: E501
:return: The ex_date of this CorporateAction. # noqa: E501
:rtype: datetime
"""
return self._ex_date
@ex_date.setter
def ex_date(self, ex_date):
"""Sets the ex_date of this CorporateAction.
The ex date of the corporate action # noqa: E501
:param ex_date: The ex_date of this CorporateAction. # noqa: E501
:type: datetime
"""
self._ex_date = ex_date
@property
def record_date(self):
"""Gets the record_date of this CorporateAction. # noqa: E501
The record date of the corporate action # noqa: E501
:return: The record_date of this CorporateAction. # noqa: E501
:rtype: datetime
"""
return self._record_date
@record_date.setter
def record_date(self, record_date):
"""Sets the record_date of this CorporateAction.
The record date of the corporate action # noqa: E501
:param record_date: The record_date of this CorporateAction. # noqa: E501
:type: datetime
"""
self._record_date = record_date
@property
def payment_date(self):
"""Gets the payment_date of this CorporateAction. # noqa: E501
The payment date of the corporate action # noqa: E501
:return: The payment_date of this CorporateAction. # noqa: E501
:rtype: datetime
"""
return self._payment_date
@payment_date.setter
def payment_date(self, payment_date):
"""Sets the payment_date of this CorporateAction.
The payment date of the corporate action # noqa: E501
:param payment_date: The payment_date of this CorporateAction. # noqa: E501
:type: datetime
"""
self._payment_date = payment_date
@property
def transitions(self):
"""Gets the transitions of this CorporateAction. # noqa: E501
The transitions that result from this corporate action # noqa: E501
:return: The transitions of this CorporateAction. # noqa: E501
:rtype: list[CorporateActionTransition]
"""
return self._transitions
@transitions.setter
def transitions(self, transitions):
"""Sets the transitions of this CorporateAction.
The transitions that result from this corporate action # noqa: E501
:param transitions: The transitions of this CorporateAction. # noqa: E501
:type: list[CorporateActionTransition]
"""
self._transitions = transitions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CorporateAction):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
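# Hedged usage sketch, appended for illustration only (not produced by the OpenAPI
# generator): builds a CorporateAction from made-up sample values and prints its dict
# form. The transitions list is left empty here only because CorporateActionTransition
# is defined in a separate generated module that is not part of this file.
if __name__ == "__main__":
    import datetime

    _example = CorporateAction(
        corporate_action_code="CA-0001",
        description="2-for-1 stock split",
        announcement_date=datetime.datetime(2018, 8, 2),
        ex_date=datetime.datetime(2018, 8, 6),
        record_date=datetime.datetime(2018, 8, 7),
        payment_date=datetime.datetime(2018, 8, 10),
        transitions=[],
    )
    print(_example.to_dict())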
| [
"[email protected]"
] | |
059f528589706da920ac5bba05a259793242aab9 | c60c199410289c1d7ec4aea00833b461e1f08f88 | /mar09-11-2015/day1/demoonmap.py | bcd2b35b605214be7613c34b87933de7252e5dc4 | [] | no_license | ver007/pythonjumpstart | 66fb111e6af197fad3e853b2c2d712a1b57a7d59 | 5b1f52479abd07456e2da494149e491d398f3b7d | refs/heads/master | 2021-01-21T01:34:35.501870 | 2015-05-13T14:10:13 | 2015-05-13T14:10:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | __author__ = 'ravi'
a = [2,3,4]
b = [1,2,3]
# Wrapping map() in list() makes the result print as [2, 9, 64] on both Python 2 and 3.
print(list(map(lambda x, n: x ** n, a, b)))
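# Equivalent spelling with zip() and a list comprehension, for comparison:
print([x ** n for x, n in zip(a, b)])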
| [
"[email protected]"
] | |
db89ca96687b30bfe5e1575b3c951f4d67d15153 | e8574a1eb466c37ab15a3722f762702023998ce8 | /tests/test_parse/test_tinyseq.py | 6ba1597dd6aa6e106a3e600f21714cbd4ff86c7f | [
"BSD-3-Clause"
] | permissive | cogent3/c3test | 0415210e5aee197ea7256fd3747698073a5b29c3 | e200ed18a7fbc317abf7ebe76871fb2a7004375c | refs/heads/master | 2021-04-16T12:35:36.652805 | 2020-05-19T00:03:10 | 2020-05-19T00:03:10 | 249,356,408 | 0 | 1 | BSD-3-Clause | 2020-05-04T02:46:01 | 2020-03-23T06:50:50 | Python | UTF-8 | Python | false | false | 1,762 | py | #!/usr/bin/env python
import xml.dom.minidom
from io import StringIO
from unittest import TestCase, main
from cogent3.parse.tinyseq import TinyseqParser
__author__ = "Matthew Wakefield"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Matthew Wakefield"]
__license__ = "BSD-3"
__version__ = "2020.2.7a"
__maintainer__ = "Matthew Wakefield"
__email__ = "[email protected]"
__status__ = "Production"
data = """<?xml version="1.0"?>
<!DOCTYPE TSeqSet PUBLIC "-//NCBI//NCBI TSeq/EN" "http://www.ncbi.nlm.nih.gov/dtd/NCBI_TSeq.dtd">
<TSeqSet>
<TSeq>
<TSeq_seqtype value="nucleotide"/>
<TSeq_gi>31322957</TSeq_gi>
<TSeq_accver>AY286018.1</TSeq_accver>
<TSeq_taxid>9315</TSeq_taxid>
<TSeq_orgname>Macropus eugenii</TSeq_orgname>
<TSeq_defline>Macropus eugenii medium wave-sensitive opsin 1 (OPN1MW) mRNA, complete cds</TSeq_defline>
<TSeq_length>99</TSeq_length>
<TSeq_sequence>GGCAGGGAAAGGGAAGAAAGTAAAGGGGCCATGACACAGGCATGGGACCCTGCAGGGTTCTTGGCTTGGCGGCGGGACGAGAACGAGGAGACGACTCGG</TSeq_sequence>
</TSeq>
</TSeqSet>
"""
sample_seq = ">AY286018.1\nGGCAGGGAAAGGGAAGAAAGTAAAGGGGCCATGACACAGGCATGGGACCCTGCAGGGTTCTTGGCTTGGCGGCGGGACGAGAACGAGGAGACGACTCGG\n"
sample_annotations = (
'[genbank_id "AY286018.1" at [0:99]/99, organism "Macropus eugenii" at [0:99]/99]'
)
class ParseTinyseq(TestCase):
def test_parse(self):
for name, seq in [
next(TinyseqParser(data)),
next(TinyseqParser(xml.dom.minidom.parseString(data))),
]:
self.assertEqual(name, "AY286018.1")
self.assertEqual(sample_seq, seq.to_fasta(block_size=len(sample_seq)))
self.assertEqual(str(seq.annotations), sample_annotations)
pass
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
2ac2aedd914fe42678d31bbf51cad93ef6ff6d78 | 35dbd536a17d7127a1dd1c70a2903ea0a94a84c2 | /tests/snuba/api/endpoints/test_discover_homepage_query.py | 71f3bc1e624c3e770c60f1a832a392d146a6f94c | [
"Apache-2.0",
"BUSL-1.1"
] | permissive | nagyist/sentry | efb3ef642bd0431990ca08c8296217dabf86a3bf | d9dd4f382f96b5c4576b64cbf015db651556c18b | refs/heads/master | 2023-09-04T02:55:37.223029 | 2023-01-09T15:09:44 | 2023-01-09T15:09:44 | 48,165,782 | 0 | 0 | BSD-3-Clause | 2022-12-16T19:13:54 | 2015-12-17T09:42:42 | Python | UTF-8 | Python | false | false | 5,947 | py | from django.urls import reverse
from sentry.api.serializers import serialize
from sentry.discover.models import DiscoverSavedQuery
from tests.snuba.api.endpoints.test_discover_saved_queries import DiscoverSavedQueryBase
FEATURES = ("organizations:discover-query",)
class DiscoverHomepageQueryTest(DiscoverSavedQueryBase):
def setUp(self):
super().setUp()
self.url = reverse("sentry-api-0-discover-homepage-query", args=[self.org.slug])
self.query = {"fields": ["test"], "conditions": [], "limit": 10}
self.project_ids = [
self.create_project(organization=self.org).id,
self.create_project(organization=self.org).id,
]
def test_returns_no_response_if_no_homepage_query_for_user(self):
with self.feature(FEATURES):
response = self.client.get(self.url)
assert response.status_code == 204, response.content
assert response.data is None
def test_returns_serialized_saved_query_if_homepage_is_set(self):
saved_query = DiscoverSavedQuery.objects.create(
organization=self.org,
created_by=self.user,
name="Test query",
query=self.query,
is_homepage=True,
)
with self.feature(FEATURES):
response = self.client.get(self.url)
assert response.status_code == 200, response.content
assert response.data == serialize(saved_query)
def test_put_updates_existing_homepage_query_to_reflect_new_data(self):
saved_query = DiscoverSavedQuery.objects.create(
organization=self.org,
created_by=self.user,
name="Test query",
query=self.query,
is_homepage=True,
)
with self.feature(FEATURES):
response = self.client.put(
self.url,
{
"name": "A new homepage query update",
"projects": self.project_ids,
"fields": ["field1", "field2"],
},
)
assert response.status_code == 200, response.content
saved_query.refresh_from_db()
assert response.data == serialize(saved_query)
assert saved_query.query["fields"] == ["field1", "field2"]
assert set(saved_query.projects.values_list("id", flat=True)) == set(self.project_ids)
def test_put_creates_new_discover_saved_query_if_none_exists(self):
homepage_query_payload = {
"version": 2,
"name": "New Homepage Query",
"projects": self.project_ids,
"environment": ["alpha"],
"fields": ["environment", "platform.name"],
"orderby": "-timestamp",
"range": None,
}
with self.feature(FEATURES):
response = self.client.put(self.url, data=homepage_query_payload)
assert response.status_code == 201, response.content
new_query = DiscoverSavedQuery.objects.get(
created_by=self.user, organization=self.org, is_homepage=True
)
assert response.data == serialize(new_query)
assert new_query.query["fields"] == homepage_query_payload["fields"]
assert new_query.query["environment"] == homepage_query_payload["environment"]
assert set(new_query.projects.values_list("id", flat=True)) == set(self.project_ids)
def test_put_responds_with_saved_empty_name_field(self):
homepage_query_payload = {
"version": 2,
"name": "New Homepage Query",
"projects": self.project_ids,
"environment": ["alpha"],
"fields": ["environment", "platform.name"],
"orderby": "-timestamp",
"range": None,
}
with self.feature(FEATURES):
response = self.client.put(self.url, data=homepage_query_payload)
assert response.status_code == 201, response.content
new_query = DiscoverSavedQuery.objects.get(
created_by=self.user, organization=self.org, is_homepage=True
)
assert new_query.name == ""
assert response.data["name"] == ""
def test_put_with_no_name(self):
homepage_query_payload = {
"version": 2,
"name": "",
"projects": self.project_ids,
"environment": ["alpha"],
"fields": ["environment", "platform.name"],
"orderby": "-timestamp",
"range": None,
}
with self.feature(FEATURES):
response = self.client.put(self.url, data=homepage_query_payload)
assert response.status_code == 201, response.content
new_query = DiscoverSavedQuery.objects.get(
created_by=self.user, organization=self.org, is_homepage=True
)
assert new_query.name == ""
assert response.data["name"] == ""
def test_post_not_allowed(self):
homepage_query_payload = {
"version": 2,
"name": "New Homepage Query",
"projects": ["-1"],
"environment": ["alpha"],
"fields": ["environment", "platform.name"],
"orderby": "-timestamp",
"range": None,
}
with self.feature(FEATURES):
response = self.client.post(self.url, data=homepage_query_payload)
assert response.status_code == 405, response.content
def test_delete_resets_saved_query(self):
DiscoverSavedQuery.objects.create(
organization=self.org,
created_by=self.user,
name="Test query",
query=self.query,
is_homepage=True,
)
with self.feature(FEATURES):
response = self.client.delete(self.url)
assert response.status_code == 204
assert not DiscoverSavedQuery.objects.filter(
created_by=self.user, organization=self.org, is_homepage=True
).exists()
| [
"[email protected]"
] | |
565b2f83dc13e0eb8d5e64aed59cdbec399264f0 | 78a20d2ceac95d3afb55b215305a8c40670e873f | /QsimMaster/clients/kiethley_control/kiethley_controller.py | 2b777b9dee35479f43c628755f50cc07e298445e | [] | no_license | johnpalsberg/John-Palsberg | 9957dd2fb2a6da62e5118c4e2a9a471d5e2f4c9b | 91aa6476bc319524c6f6a4bfc5561ca6aa95e6c4 | refs/heads/master | 2023-01-13T07:26:04.250541 | 2023-01-05T08:48:17 | 2023-01-05T08:48:17 | 206,402,363 | 0 | 0 | null | 2022-06-20T16:29:42 | 2019-09-04T19:57:25 | Python | UTF-8 | Python | false | false | 2,269 | py | from common.lib.clients.qtui.QCustomSpinBox import QCustomSpinBox
from twisted.internet.defer import inlineCallbacks
from PyQt4 import QtGui
from common.lib.clients.qtui.q_custom_text_changing_button import \
TextChangingButton
class kiethleyclient(QtGui.QWidget):
    def __init__(self, reactor, parent = None):
        """Initializes the GUI and stores the reactor."""
super(kiethleyclient, self).__init__()
self.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
self.connect()
self.reactor = reactor
@inlineCallbacks
def connect(self):
"""Creates an Asynchronous connection
"""
from labrad.wrappers import connectAsync
from labrad.units import WithUnit as U
self.U = U
self.cxn = yield connectAsync(name = "kiethley client")
self.server = self.cxn.keithley_2230g_server
yield self.server.select_device(0)
self.initializeGUI()
def initializeGUI(self):
layout = QtGui.QGridLayout()
self.setWindowTitle('kiethley Control')
qBox = QtGui.QGroupBox('Kiethley 2230G')
subLayout = QtGui.QGridLayout()
qBox.setLayout(subLayout)
layout.addWidget(qBox, 0, 0)
        self.volt1widget = QCustomSpinBox('Voltage (V)', (0, 30))
        self.volt2widget = QCustomSpinBox('Voltage (V)', (0, 30))
self.volt1widget.spinLevel.valueChanged.connect(lambda value = self.volt1widget.spinLevel.value(), chan = 1 : self.voltchanged(chan, value))
self.volt2widget.spinLevel.valueChanged.connect(lambda value = self.volt2widget.spinLevel.value(), chan = 2 : self.voltchanged(chan, value))
subLayout.addWidget(self.volt1widget, 1,1)
subLayout.addWidget(self.volt2widget, 1,3)
self.setLayout(layout)
@inlineCallbacks
def voltchanged(self, chan, value):
value = self.U(value, 'V')
yield self.server.voltage(chan, value)
def closeEvent(self, x):
self.reactor.stop()
if __name__ == "__main__":
a = QtGui.QApplication([])
import qt4reactor
qt4reactor.install()
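    # qt4reactor.install() must run before twisted.internet.reactor is imported below,
    # so that Twisted uses the Qt event loop as its reactor.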
from twisted.internet import reactor
kiethleyWidget = kiethleyclient(reactor)
kiethleyWidget.show()
run = reactor.run()
| [
"[email protected]"
] | |
2f90b60ed7ed37e8e46b21c3764967ab7950903c | 57ea759b2f400f1dc155b7637533732c78a3f1b7 | /marketplace/settings.py | 23ff23f48b98d6562bca294310e93b56e9313612 | [] | no_license | hanifmisbah/Marketplace | 219dce7b8cfe2a9509a6549729fea3febbe9cd3b | ee40a9a624021d10ff57befac88f306b1730bac7 | refs/heads/master | 2023-01-01T06:56:07.605659 | 2020-10-29T09:49:26 | 2020-10-29T09:49:26 | 308,255,051 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,505 | py | """
Django settings for marketplace project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f*^(1n3zwav8smf-rb4n&&08@q6u&or2d@5g^fy1nxpt^#7_gu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# ALLOWED_HOSTS = ['192.168.43.164']
ALLOWED_HOSTS = ['*']
CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'crispy_forms',
'sales',
'toko',
'customers',
'products',
'suppliers',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
    # corsheaders is in INSTALLED_APPS and CORS_ORIGIN_ALLOW_ALL is set below, but the
    # CORS headers are only emitted when CorsMiddleware is listed here, above CommonMiddleware.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'marketplace.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'marketplace.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'market',
'USER': 'postgres',
'PASSWORD':'hanifmisbah97',
'HOST' : 'localhost',
'PORT' : '',
}
}
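# Hedged alternative (not in the original file): database credentials are commonly read
# from the environment instead of being hard-coded, e.g.
#   'PASSWORD': os.environ.get('MARKET_DB_PASSWORD', ''),
# which would work here since this module already imports os. The variable name is
# only an example.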
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS =[
BASE_DIR + "/static",
'var/www/static',
]
USE_THOUSAND_SEPARATOR = True | [
"[email protected]"
] | |
25e3e1ad2d878f59ad263ae884e96026e3554a26 | 0cb42f98050eef8689f3d87067367b688871bd47 | /petshop/api/urls.py | 81f5d9a65074aae151c6af209f32f083baf07932 | [] | no_license | infsolution/AtividadeFinalAngular | b42554add907a5b502940997460451a7410ecb54 | 1071c38968d2e2d74c81030b7bd380442045c327 | refs/heads/master | 2023-01-12T09:07:10.807024 | 2019-07-14T03:46:07 | 2019-07-14T03:46:07 | 196,790,171 | 0 | 0 | null | 2023-01-07T07:45:41 | 2019-07-14T03:39:09 | TypeScript | UTF-8 | Python | false | false | 809 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('user/', views.UserList.as_view(), name=views.UserList.name),
path('user/<int:pk>/', views.UserDetail.as_view(), name=views.UserDetail.name),
path('pet/', views.PetList.as_view(), name=views.PetList.name),
path('pet/<int:pk>/', views.PetDetail.as_view(), name=views.PetDetail.name),
path('atendimento/', views.AtendimentoList.as_view(), name=views.AtendimentoList.name),
path('atendimento/<int:pk>/', views.AtendimentoDetail.as_view(), name=views.AtendimentoDetail.name),
path('medicamento/', views.MedicamentoList.as_view(), name=views.MedicamentoList.name),
path('medicamento/<int:pk>/', views.MedicamentoDetail.as_view(), name=views.MedicamentoDetail.name),
] | [
"[email protected]"
] | |
d3ad1ddb9912f5b9b310d13e2e040d02b687faba | 9cc76b1b1dd0064ab6613cbca6ce93bc179db355 | /ros_ws/devel/lib/python3/dist-packages/object_finder/msg/_objectFinderGoal.py | 16fd1b88d6e38231d2f5710965045d1b42331d05 | [] | no_license | ABCaps35/learning_ros_ready_ws | 1131c32b2ecadffa8dd186c9ebcfdba7284f30ad | 1aa9c512d5006584e8bc84101a715e16a222a47d | refs/heads/main | 2023-04-03T20:32:58.671255 | 2021-04-13T23:41:13 | 2021-04-13T23:41:13 | 357,715,306 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,668 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from object_finder/objectFinderGoal.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class objectFinderGoal(genpy.Message):
_md5sum = "8657c16ee27b175765f0d86cc8f66fbd"
_type = "object_finder/objectFinderGoal"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
#objectFinder.action
#goal:
#get object ID codes from <object_manipulation_properties/object_ID_codes.h>
#goal fields to fill in:
int32 object_id
#boolean to declare if object is on a horizontal surface of known height:
bool known_surface_ht
#if surface ht is known, fill it in
float32 surface_ht
"""
__slots__ = ['object_id','known_surface_ht','surface_ht']
_slot_types = ['int32','bool','float32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
object_id,known_surface_ht,surface_ht
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(objectFinderGoal, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.object_id is None:
self.object_id = 0
if self.known_surface_ht is None:
self.known_surface_ht = False
if self.surface_ht is None:
self.surface_ht = 0.
else:
self.object_id = 0
self.known_surface_ht = False
self.surface_ht = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_iBf().pack(_x.object_id, _x.known_surface_ht, _x.surface_ht))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 9
(_x.object_id, _x.known_surface_ht, _x.surface_ht,) = _get_struct_iBf().unpack(str[start:end])
self.known_surface_ht = bool(self.known_surface_ht)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_iBf().pack(_x.object_id, _x.known_surface_ht, _x.surface_ht))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
_x = self
start = end
end += 9
(_x.object_id, _x.known_surface_ht, _x.surface_ht,) = _get_struct_iBf().unpack(str[start:end])
self.known_surface_ht = bool(self.known_surface_ht)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_iBf = None
def _get_struct_iBf():
global _struct_iBf
if _struct_iBf is None:
_struct_iBf = struct.Struct("<iBf")
return _struct_iBf
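# Hedged usage sketch, appended for illustration (not produced by genmsg): fills in the
# goal fields described in the action text above and round-trips them through
# serialize()/deserialize() with an in-memory buffer. The field values are made up, and
# running this assumes the ROS Python dependency genpy is importable, as the module
# itself already requires.
if __name__ == "__main__":
    from io import BytesIO

    _goal = objectFinderGoal(object_id=3, known_surface_ht=True, surface_ht=0.79)
    _buff = BytesIO()
    _goal.serialize(_buff)
    _echo = objectFinderGoal().deserialize(_buff.getvalue())
    print(_echo.object_id, _echo.known_surface_ht, _echo.surface_ht)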
| [
"[email protected]"
] | |
580c7380ba8954b267f1ada589a644997b08598f | 8ee12ccce396e0d43bd8473ec9f0a13c9c7844c7 | /Mani_Vijay/python buit-in functions/002_raw+add.py | c440b472c2479eeb4a1cccf56783f1d859f043e9 | [] | no_license | Purushotamprasai/Python | 4ed44e26ca5cec7bb39c5561f545bfc68499bcfd | ed6fbd0f73cc7be91661f544f464222030197097 | refs/heads/master | 2023-06-05T13:39:04.602783 | 2020-01-23T14:30:25 | 2020-01-23T14:30:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | # wap addition of two numbers using raw_input functions
def main():
    # raw_input() returns a string, so each entry is converted with complex(),
    # which accepts int, float and complex literals alike, before adding.
    var1 = complex(raw_input("enter first number : "))
    var2 = complex(raw_input("enter second number : "))
    total = var1 + var2
    print "sum = ", total
    print " type of sum ", type(total)
    print " id of sum ", id(total)
if(__name__ == "__main__"):
main()
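# Python 3 note (not part of the original file): raw_input() was renamed to input(),
# so the same sketch there would use complex(input("enter first number : ")) instead.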
| [
"[email protected]"
] | |
05197295c0708c220fb104fc0d6cdc664e16e627 | 1b15b42087d58002432daff45fafb7eb4d0ca2d8 | /100_same_tree.py | 7a4573991e2ee2aa8d8e23c58b17e4b3cb9268a4 | [] | no_license | georgebzhang/Python_LeetCode | 2b92be66880eaf4642a603897386622dc81fbaf3 | c1703358ceeed67e3e85de05eda74447f31176a2 | refs/heads/master | 2020-04-26T01:38:33.750580 | 2019-06-21T21:51:13 | 2019-06-21T21:51:13 | 173,209,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def isSameTree(self, p, q):
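        # Serialize both trees in preorder with explicit 'null' markers for missing
        # children, so the comparison checks structure as well as node values.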
def list_traversal(n):
def traversal(n):
if n is None:
l.append('null')
else:
l.append(n.val)
traversal(n.left)
traversal(n.right)
l = []
traversal(n)
return l
l_p = list_traversal(p)
l_q = list_traversal(q)
return l_p == l_q
def print_answer(self, ans):
print(ans)
def test(self):
p = TreeNode(1)
p.left = TreeNode(2)
p.right = TreeNode(3)
q = TreeNode(1)
q.left = TreeNode(2)
q.right = TreeNode(3)
ans = self.isSameTree(p, q)
self.print_answer(ans)
if __name__ == '__main__':
s = Solution()
s.test()
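# Alternative sketch (not part of the original solution): compare the two trees with a
# direct recursive walk instead of building full serializations first. Same O(n) time,
# but it can stop at the first mismatch and needs only O(height) extra space.
def is_same_tree_recursive(p, q):
    if p is None and q is None:
        return True
    if p is None or q is None or p.val != q.val:
        return False
    return (is_same_tree_recursive(p.left, q.left)
            and is_same_tree_recursive(p.right, q.right))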
| [
"[email protected]"
] | |
730e52ddc3019fbcdf08b983b8ff9fa0c8190726 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/apimanagement/v20210101preview/identity_provider.py | 9988da868a822edb51d45e7470069e7c7ae09b00 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,324 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['IdentityProviderArgs', 'IdentityProvider']
@pulumi.input_type
class IdentityProviderArgs:
def __init__(__self__, *,
client_id: pulumi.Input[str],
client_secret: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
service_name: pulumi.Input[str],
allowed_tenants: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
authority: Optional[pulumi.Input[str]] = None,
identity_provider_name: Optional[pulumi.Input[str]] = None,
password_reset_policy_name: Optional[pulumi.Input[str]] = None,
profile_editing_policy_name: Optional[pulumi.Input[str]] = None,
signin_policy_name: Optional[pulumi.Input[str]] = None,
signin_tenant: Optional[pulumi.Input[str]] = None,
signup_policy_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'IdentityProviderType']]] = None):
"""
The set of arguments for constructing a IdentityProvider resource.
:param pulumi.Input[str] client_id: Client Id of the Application in the external Identity Provider. It is App ID for Facebook login, Client ID for Google login, App ID for Microsoft.
:param pulumi.Input[str] client_secret: Client secret of the Application in external Identity Provider, used to authenticate login request. For example, it is App Secret for Facebook login, API Key for Google login, Public Key for Microsoft. This property will not be filled on 'GET' operations! Use '/listSecrets' POST request to get the value.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_tenants: List of Allowed Tenants when configuring Azure Active Directory login.
:param pulumi.Input[str] authority: OpenID Connect discovery endpoint hostname for AAD or AAD B2C.
:param pulumi.Input[str] identity_provider_name: Identity Provider Type identifier.
:param pulumi.Input[str] password_reset_policy_name: Password Reset Policy Name. Only applies to AAD B2C Identity Provider.
:param pulumi.Input[str] profile_editing_policy_name: Profile Editing Policy Name. Only applies to AAD B2C Identity Provider.
:param pulumi.Input[str] signin_policy_name: Signin Policy Name. Only applies to AAD B2C Identity Provider.
:param pulumi.Input[str] signin_tenant: The TenantId to use instead of Common when logging into Active Directory
:param pulumi.Input[str] signup_policy_name: Signup Policy Name. Only applies to AAD B2C Identity Provider.
:param pulumi.Input[Union[str, 'IdentityProviderType']] type: Identity Provider Type identifier.
"""
pulumi.set(__self__, "client_id", client_id)
pulumi.set(__self__, "client_secret", client_secret)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "service_name", service_name)
if allowed_tenants is not None:
pulumi.set(__self__, "allowed_tenants", allowed_tenants)
if authority is not None:
pulumi.set(__self__, "authority", authority)
if identity_provider_name is not None:
pulumi.set(__self__, "identity_provider_name", identity_provider_name)
if password_reset_policy_name is not None:
pulumi.set(__self__, "password_reset_policy_name", password_reset_policy_name)
if profile_editing_policy_name is not None:
pulumi.set(__self__, "profile_editing_policy_name", profile_editing_policy_name)
if signin_policy_name is not None:
pulumi.set(__self__, "signin_policy_name", signin_policy_name)
if signin_tenant is not None:
pulumi.set(__self__, "signin_tenant", signin_tenant)
if signup_policy_name is not None:
pulumi.set(__self__, "signup_policy_name", signup_policy_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Input[str]:
"""
Client Id of the Application in the external Identity Provider. It is App ID for Facebook login, Client ID for Google login, App ID for Microsoft.
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: pulumi.Input[str]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> pulumi.Input[str]:
"""
Client secret of the Application in external Identity Provider, used to authenticate login request. For example, it is App Secret for Facebook login, API Key for Google login, Public Key for Microsoft. This property will not be filled on 'GET' operations! Use '/listSecrets' POST request to get the value.
"""
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: pulumi.Input[str]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> pulumi.Input[str]:
"""
The name of the API Management service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="allowedTenants")
def allowed_tenants(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of Allowed Tenants when configuring Azure Active Directory login.
"""
return pulumi.get(self, "allowed_tenants")
@allowed_tenants.setter
def allowed_tenants(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_tenants", value)
@property
@pulumi.getter
def authority(self) -> Optional[pulumi.Input[str]]:
"""
OpenID Connect discovery endpoint hostname for AAD or AAD B2C.
"""
return pulumi.get(self, "authority")
@authority.setter
def authority(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authority", value)
@property
@pulumi.getter(name="identityProviderName")
def identity_provider_name(self) -> Optional[pulumi.Input[str]]:
"""
Identity Provider Type identifier.
"""
return pulumi.get(self, "identity_provider_name")
@identity_provider_name.setter
def identity_provider_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identity_provider_name", value)
@property
@pulumi.getter(name="passwordResetPolicyName")
def password_reset_policy_name(self) -> Optional[pulumi.Input[str]]:
"""
Password Reset Policy Name. Only applies to AAD B2C Identity Provider.
"""
return pulumi.get(self, "password_reset_policy_name")
@password_reset_policy_name.setter
def password_reset_policy_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password_reset_policy_name", value)
@property
@pulumi.getter(name="profileEditingPolicyName")
def profile_editing_policy_name(self) -> Optional[pulumi.Input[str]]:
"""
Profile Editing Policy Name. Only applies to AAD B2C Identity Provider.
"""
return pulumi.get(self, "profile_editing_policy_name")
@profile_editing_policy_name.setter
def profile_editing_policy_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "profile_editing_policy_name", value)
@property
@pulumi.getter(name="signinPolicyName")
def signin_policy_name(self) -> Optional[pulumi.Input[str]]:
"""
Signin Policy Name. Only applies to AAD B2C Identity Provider.
"""
return pulumi.get(self, "signin_policy_name")
@signin_policy_name.setter
def signin_policy_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "signin_policy_name", value)
@property
@pulumi.getter(name="signinTenant")
def signin_tenant(self) -> Optional[pulumi.Input[str]]:
"""
The TenantId to use instead of Common when logging into Active Directory
"""
return pulumi.get(self, "signin_tenant")
@signin_tenant.setter
def signin_tenant(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "signin_tenant", value)
@property
@pulumi.getter(name="signupPolicyName")
def signup_policy_name(self) -> Optional[pulumi.Input[str]]:
"""
Signup Policy Name. Only applies to AAD B2C Identity Provider.
"""
return pulumi.get(self, "signup_policy_name")
@signup_policy_name.setter
def signup_policy_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "signup_policy_name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'IdentityProviderType']]]:
"""
Identity Provider Type identifier.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'IdentityProviderType']]]):
pulumi.set(self, "type", value)
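# Hedged illustration, appended for this write-up rather than emitted by the generator:
# a minimal IdentityProviderArgs built with only its required fields plus a provider
# type. Every literal below is a placeholder, not a real credential or resource name.
def _example_identity_provider_args() -> 'IdentityProviderArgs':
    return IdentityProviderArgs(
        client_id="00000000-0000-0000-0000-000000000000",
        client_secret="example-secret",
        resource_group_name="rg1",
        service_name="apimService1",
        type="aad",
    )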
class IdentityProvider(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_tenants: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
authority: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
identity_provider_name: Optional[pulumi.Input[str]] = None,
password_reset_policy_name: Optional[pulumi.Input[str]] = None,
profile_editing_policy_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
signin_policy_name: Optional[pulumi.Input[str]] = None,
signin_tenant: Optional[pulumi.Input[str]] = None,
signup_policy_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'IdentityProviderType']]] = None,
__props__=None):
"""
Identity Provider details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_tenants: List of Allowed Tenants when configuring Azure Active Directory login.
:param pulumi.Input[str] authority: OpenID Connect discovery endpoint hostname for AAD or AAD B2C.
:param pulumi.Input[str] client_id: Client Id of the Application in the external Identity Provider. It is App ID for Facebook login, Client ID for Google login, App ID for Microsoft.
:param pulumi.Input[str] client_secret: Client secret of the Application in external Identity Provider, used to authenticate login request. For example, it is App Secret for Facebook login, API Key for Google login, Public Key for Microsoft. This property will not be filled on 'GET' operations! Use '/listSecrets' POST request to get the value.
:param pulumi.Input[str] identity_provider_name: Identity Provider Type identifier.
:param pulumi.Input[str] password_reset_policy_name: Password Reset Policy Name. Only applies to AAD B2C Identity Provider.
:param pulumi.Input[str] profile_editing_policy_name: Profile Editing Policy Name. Only applies to AAD B2C Identity Provider.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[str] signin_policy_name: Signin Policy Name. Only applies to AAD B2C Identity Provider.
:param pulumi.Input[str] signin_tenant: The TenantId to use instead of Common when logging into Active Directory
:param pulumi.Input[str] signup_policy_name: Signup Policy Name. Only applies to AAD B2C Identity Provider.
:param pulumi.Input[Union[str, 'IdentityProviderType']] type: Identity Provider Type identifier.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IdentityProviderArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Identity Provider details.
:param str resource_name: The name of the resource.
:param IdentityProviderArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IdentityProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_tenants: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
authority: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
identity_provider_name: Optional[pulumi.Input[str]] = None,
password_reset_policy_name: Optional[pulumi.Input[str]] = None,
profile_editing_policy_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
signin_policy_name: Optional[pulumi.Input[str]] = None,
signin_tenant: Optional[pulumi.Input[str]] = None,
signup_policy_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'IdentityProviderType']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IdentityProviderArgs.__new__(IdentityProviderArgs)
__props__.__dict__["allowed_tenants"] = allowed_tenants
__props__.__dict__["authority"] = authority
if client_id is None and not opts.urn:
raise TypeError("Missing required property 'client_id'")
__props__.__dict__["client_id"] = client_id
if client_secret is None and not opts.urn:
raise TypeError("Missing required property 'client_secret'")
__props__.__dict__["client_secret"] = client_secret
__props__.__dict__["identity_provider_name"] = identity_provider_name
__props__.__dict__["password_reset_policy_name"] = password_reset_policy_name
__props__.__dict__["profile_editing_policy_name"] = profile_editing_policy_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__.__dict__["service_name"] = service_name
__props__.__dict__["signin_policy_name"] = signin_policy_name
__props__.__dict__["signin_tenant"] = signin_tenant
__props__.__dict__["signup_policy_name"] = signup_policy_name
__props__.__dict__["type"] = type
__props__.__dict__["name"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:apimanagement:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20160707:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20161010:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20170301:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20191201:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20210401preview:IdentityProvider"), pulumi.Alias(type_="azure-native:apimanagement/v20210801:IdentityProvider")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(IdentityProvider, __self__).__init__(
'azure-native:apimanagement/v20210101preview:IdentityProvider',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'IdentityProvider':
"""
Get an existing IdentityProvider resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = IdentityProviderArgs.__new__(IdentityProviderArgs)
__props__.__dict__["allowed_tenants"] = None
__props__.__dict__["authority"] = None
__props__.__dict__["client_id"] = None
__props__.__dict__["client_secret"] = None
__props__.__dict__["name"] = None
__props__.__dict__["password_reset_policy_name"] = None
__props__.__dict__["profile_editing_policy_name"] = None
__props__.__dict__["signin_policy_name"] = None
__props__.__dict__["signin_tenant"] = None
__props__.__dict__["signup_policy_name"] = None
__props__.__dict__["type"] = None
return IdentityProvider(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowedTenants")
def allowed_tenants(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of Allowed Tenants when configuring Azure Active Directory login.
"""
return pulumi.get(self, "allowed_tenants")
@property
@pulumi.getter
def authority(self) -> pulumi.Output[Optional[str]]:
"""
OpenID Connect discovery endpoint hostname for AAD or AAD B2C.
"""
return pulumi.get(self, "authority")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Output[str]:
"""
Client Id of the Application in the external Identity Provider. It is App ID for Facebook login, Client ID for Google login, App ID for Microsoft.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> pulumi.Output[Optional[str]]:
"""
Client secret of the Application in external Identity Provider, used to authenticate login request. For example, it is App Secret for Facebook login, API Key for Google login, Public Key for Microsoft. This property will not be filled on 'GET' operations! Use '/listSecrets' POST request to get the value.
"""
return pulumi.get(self, "client_secret")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="passwordResetPolicyName")
def password_reset_policy_name(self) -> pulumi.Output[Optional[str]]:
"""
Password Reset Policy Name. Only applies to AAD B2C Identity Provider.
"""
return pulumi.get(self, "password_reset_policy_name")
@property
@pulumi.getter(name="profileEditingPolicyName")
def profile_editing_policy_name(self) -> pulumi.Output[Optional[str]]:
"""
Profile Editing Policy Name. Only applies to AAD B2C Identity Provider.
"""
return pulumi.get(self, "profile_editing_policy_name")
@property
@pulumi.getter(name="signinPolicyName")
def signin_policy_name(self) -> pulumi.Output[Optional[str]]:
"""
Signin Policy Name. Only applies to AAD B2C Identity Provider.
"""
return pulumi.get(self, "signin_policy_name")
@property
@pulumi.getter(name="signinTenant")
def signin_tenant(self) -> pulumi.Output[Optional[str]]:
"""
The TenantId to use instead of Common when logging into Active Directory
"""
return pulumi.get(self, "signin_tenant")
@property
@pulumi.getter(name="signupPolicyName")
def signup_policy_name(self) -> pulumi.Output[Optional[str]]:
"""
Signup Policy Name. Only applies to AAD B2C Identity Provider.
"""
return pulumi.get(self, "signup_policy_name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
358b4ca183c5bb806e89aba3e3424841ea807616 | 8f4c691f190a1d4ffd4261ea6dca6a2d3a96284c | /csa/csa/doctype/union/union.py | 86d5969dfee1ff7a1083ecbd7571ff80e1308109 | [
"MIT"
] | permissive | Jishnu70055/usermanagement | 57abb738160fb213acdc2c71b40244eae4b06cee | f7b526335c2b99899afac188696071fa35df09ca | refs/heads/master | 2023-09-03T17:30:50.147750 | 2021-10-21T13:27:38 | 2021-10-21T13:27:38 | 399,362,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | # Copyright (c) 2021, s and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class Union(Document):
pass
| [
"[email protected]"
] | |
7fc72e54f6e06bb8165c54aaaa72791618d8551f | 51086c09f2c920d057db12e373a01b08571c4cbf | /pebble-sdk/SDKs/3.9/.env/bin/easy_install-2.7 | be6acc61a8b4923989902d3b802c79df09f25787 | [] | no_license | JohnHoder/pebble-dev | 66dc69258dfd009313c23ba5c2eb518aec257652 | e9d95bd564ba6f58b539a1a68f21fe82b6d0992b | refs/heads/master | 2022-11-23T17:32:26.573394 | 2018-12-26T03:17:37 | 2018-12-26T03:17:37 | 163,131,045 | 0 | 1 | null | 2022-10-31T10:03:38 | 2018-12-26T03:15:57 | Python | UTF-8 | Python | false | false | 264 | 7 | #!/home/john/.pebble-sdk/SDKs/3.9/.env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
c72fd3b58d7b69c030a63601b5e20b4f72228f2a | 7966fa31437cc8a539621a5a0642ce24c1c9de50 | /PycharmProjects/leetcode/knapsack/139单词拆分.py | 202f72100f96201909c7949f236c9d1a17e238ce | [] | no_license | crystal30/DataStructure | 4f938508f4c60af9c5f8ec5520d5acedbe2dc90e | c55b0cfd2967a2221c27ed738e8de15034775945 | refs/heads/master | 2021-06-25T17:49:03.048853 | 2021-01-22T00:37:04 | 2021-01-22T00:37:04 | 192,374,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | class Solution:
def __init__(self):
self.memo = dict()
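        # memo caches the wordBreak() result for each remaining substring of s,
        # so repeated sub-problems are not recomputed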
def wordBreak(self, s, wordDict) -> bool:
        # s and wordDict are both non-empty
if len(s) == 0:
return True
if s in self.memo.keys():
return self.memo[s]
re = []
for word in wordDict:
re_s = self.word_isvalid(s, word)
if len(s) != len(re_s):
re.append(self.wordBreak(re_s, wordDict))
for sub_re in re:
if sub_re is True:
self.memo[s] = True
return True
self.memo[s] = False
return False
def word_isvalid(self, s, word):
len_word = len(word)
if s[:len_word] == word:
return s[len_word:]
elif s[-len_word:] == word:
return s[:-len_word]
else:
return s
if __name__ == "__main__":
so = Solution()
# s = "catsandog"
# wordDict = ["cats", "dog", "san", "and", "cat"]
s = "catskicatcats"
wordDict = ["cats", "cat", "dog", "ski"]
# s = "leetcode"
# wordDict = ["leet", "code"]
# s = "applepenapple"
# wordDict = ["apple", "pen"]
re = so.wordBreak(s, wordDict)
print(re)
# re = so.word_isvalid(s, "cats")
# print(re) | [
"[email protected]"
] | |
3b1180cd98b60c6bbd029fbed6efad1462671dfd | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/e1g.py | 4e27f609a8666e529f200bd39ffac772ad607a43 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'e1G':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
73813473d4d0efb587cdd6610c0b33ac88cb6778 | 4820d5748f1374809c97221f11f3f96cfd2a054a | /runway/embedded/stacker/actions/build.py | ed10d6cb6489626532ba80c4b836945648671735 | [
"BSD-2-Clause",
"Apache-2.0",
"ISC"
] | permissive | ShaunEdiger/runway | 7efe0ec4e8fb7b307bf6f244707c0c3d318251a5 | 2472b7a6854655e20605d2d64476d38812c13dd6 | refs/heads/master | 2020-03-28T00:17:26.426905 | 2018-09-04T18:54:56 | 2018-09-04T19:49:26 | 147,392,034 | 0 | 0 | Apache-2.0 | 2018-09-04T18:37:47 | 2018-09-04T18:37:46 | null | UTF-8 | Python | false | false | 15,408 | py | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
from .base import BaseAction, plan, build_walker
from .base import STACK_POLL_TIME
from ..providers.base import Template
from .. import util
from ..exceptions import (
MissingParameterException,
StackDidNotChange,
StackDoesNotExist,
CancelExecution,
)
from ..status import (
NotSubmittedStatus,
NotUpdatedStatus,
DidNotChangeStatus,
SubmittedStatus,
CompleteStatus,
FailedStatus,
SkippedStatus,
SUBMITTED,
INTERRUPTED
)
logger = logging.getLogger(__name__)
def build_stack_tags(stack):
"""Builds a common set of tags to attach to a stack"""
return [{'Key': t[0], 'Value': t[1]} for t in stack.tags.items()]
def should_update(stack):
"""Tests whether a stack should be submitted for updates to CF.
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be updated, return True.
"""
if stack.locked:
if not stack.force:
logger.debug("Stack %s locked and not in --force list. "
"Refusing to update.", stack.name)
return False
else:
logger.debug("Stack %s locked, but is in --force "
"list.", stack.name)
return True
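# Behaviour summary (comment added for readability): a locked stack that is not
# in the --force list is skipped; an unlocked stack, or a locked one that was
# explicitly forced, is submitted for update.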
def should_submit(stack):
"""Tests whether a stack should be submitted to CF for update/create
Args:
stack (:class:`stacker.stack.Stack`): The stack object to check.
Returns:
bool: If the stack should be submitted, return True.
"""
if stack.enabled:
return True
logger.debug("Stack %s is not enabled. Skipping.", stack.name)
return False
def should_ensure_cfn_bucket(outline, dump):
"""Test whether access to the cloudformation template bucket is required
Args:
outline (bool): The outline action.
dump (bool): The dump action.
Returns:
bool: If access to CF bucket is needed, return True.
"""
return not outline and not dump
def _resolve_parameters(parameters, blueprint):
"""Resolves CloudFormation Parameters for a given blueprint.
Given a list of parameters, handles:
- discard any parameters that the blueprint does not use
- discard any empty values
- convert booleans to strings suitable for CloudFormation
Args:
parameters (dict): A dictionary of parameters provided by the
stack definition
blueprint (:class:`stacker.blueprint.base.Blueprint`): A Blueprint
object that is having the parameters applied to it.
Returns:
dict: The resolved parameters.
"""
params = {}
param_defs = blueprint.get_parameter_definitions()
for key, value in parameters.items():
if key not in param_defs:
logger.debug("Blueprint %s does not use parameter %s.",
blueprint.name, key)
continue
if value is None:
logger.debug("Got None value for parameter %s, not submitting it "
"to cloudformation, default value should be used.",
key)
continue
if isinstance(value, bool):
logger.debug("Converting parameter %s boolean \"%s\" to string.",
key, value)
value = str(value).lower()
params[key] = value
return params
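# Illustrative example (not from the original source): if the blueprint defines
# only a "UseDNS" parameter, then
#   _resolve_parameters({"UseDNS": True, "Unused": "x", "Empty": None}, blueprint)
# drops "Unused" (not defined by the blueprint) and "Empty" (None value) and
# returns {"UseDNS": "true"}, the boolean converted to the lower-case string
# CloudFormation expects. The parameter names here are placeholders.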
class UsePreviousParameterValue(object):
""" A simple class used to indicate a Parameter should use it's existng
value.
"""
pass
def _handle_missing_parameters(parameter_values, all_params, required_params,
existing_stack=None):
"""Handles any missing parameters.
If an existing_stack is provided, look up missing parameters there.
Args:
parameter_values (dict): key/value dictionary of stack definition
parameters
all_params (list): A list of all the parameters used by the
template/blueprint.
required_params (list): A list of all the parameters required by the
template/blueprint.
existing_stack (dict): A dict representation of the stack. If
provided, will be searched for any missing parameters.
Returns:
list of tuples: The final list of key/value pairs returned as a
list of tuples.
Raises:
MissingParameterException: Raised if a required parameter is
still missing.
"""
missing_params = list(set(all_params) - set(parameter_values.keys()))
if existing_stack and 'Parameters' in existing_stack:
stack_parameters = [
p["ParameterKey"] for p in existing_stack["Parameters"]
]
for p in missing_params:
if p in stack_parameters:
logger.debug(
"Using previous value for parameter %s from existing "
"stack",
p
)
parameter_values[p] = UsePreviousParameterValue
final_missing = list(set(required_params) - set(parameter_values.keys()))
if final_missing:
raise MissingParameterException(final_missing)
return list(parameter_values.items())
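# Illustrative example (assumption, not original code): with
# required_params=["VpcId"], no "VpcId" in the stack definition, and an
# existing stack whose Parameters list includes "VpcId", the returned pairs
# include ("VpcId", UsePreviousParameterValue); if the parameter were found
# nowhere, MissingParameterException would be raised. "VpcId" is a placeholder
# name.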
def handle_hooks(stage, hooks, provider, context, dump, outline):
"""Handle pre/post hooks.
Args:
stage (str): The name of the hook stage - pre_build/post_build.
hooks (list): A list of dictionaries containing the hooks to execute.
provider (:class:`stacker.provider.base.BaseProvider`): The provider
the current stack is using.
context (:class:`stacker.context.Context`): The current stacker
context.
dump (bool): Whether running with dump set or not.
outline (bool): Whether running with outline set or not.
"""
if not outline and not dump and hooks:
util.handle_hooks(
stage=stage,
hooks=hooks,
provider=provider,
context=context
)
class Action(BaseAction):
"""Responsible for building & coordinating CloudFormation stacks.
Generates the build plan based on stack dependencies (these dependencies
are determined automatically based on output lookups from other stacks).
The plan can then either be printed out as an outline or executed. If
executed, each stack will get launched in order which entails:
- Pushing the generated CloudFormation template to S3 if it has changed
- Submitting either a build or update of the given stack to the
:class:`stacker.provider.base.Provider`.
"""
def build_parameters(self, stack, provider_stack=None):
"""Builds the CloudFormation Parameters for our stack.
Args:
stack (:class:`stacker.stack.Stack`): A stacker stack
provider_stack (dict): An optional Stacker provider object
Returns:
dict: The parameters for the given stack
"""
resolved = _resolve_parameters(stack.parameter_values, stack.blueprint)
required_parameters = list(stack.required_parameter_definitions)
all_parameters = list(stack.all_parameter_definitions)
parameters = _handle_missing_parameters(resolved, all_parameters,
required_parameters,
provider_stack)
param_list = []
for key, value in parameters:
param_dict = {"ParameterKey": key}
if value is UsePreviousParameterValue:
param_dict["UsePreviousValue"] = True
else:
param_dict["ParameterValue"] = str(value)
param_list.append(param_dict)
return param_list
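    # Shape of the returned list (illustrative only): a resolved value becomes
    # {"ParameterKey": "VpcId", "ParameterValue": "vpc-123"}, while a value
    # carried over from the existing stack becomes
    # {"ParameterKey": "VpcId", "UsePreviousValue": True}. "VpcId"/"vpc-123"
    # are placeholder names, not values taken from this codebase.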
def _launch_stack(self, stack, **kwargs):
"""Handles the creating or updating of a stack in CloudFormation.
Also makes sure that we don't try to create or update a stack while
it is already updating or creating.
"""
old_status = kwargs.get("status")
wait_time = STACK_POLL_TIME if old_status == SUBMITTED else 0
if self.cancel.wait(wait_time):
return INTERRUPTED
if not should_submit(stack):
return NotSubmittedStatus()
provider = self.build_provider(stack)
try:
provider_stack = provider.get_stack(stack.fqn)
except StackDoesNotExist:
provider_stack = None
if provider_stack and not should_update(stack):
stack.set_outputs(
self.provider.get_output_dict(provider_stack))
return NotUpdatedStatus()
recreate = False
if provider_stack and old_status == SUBMITTED:
logger.debug(
"Stack %s provider status: %s",
stack.fqn,
provider.get_stack_status(provider_stack),
)
if provider.is_stack_rolling_back(provider_stack):
if 'rolling back' in old_status.reason:
return old_status
logger.debug("Stack %s entered a roll back", stack.fqn)
if 'updating' in old_status.reason:
reason = 'rolling back update'
else:
reason = 'rolling back new stack'
return SubmittedStatus(reason)
elif provider.is_stack_in_progress(provider_stack):
logger.debug("Stack %s in progress.", stack.fqn)
return old_status
elif provider.is_stack_destroyed(provider_stack):
logger.debug("Stack %s finished deleting", stack.fqn)
recreate = True
# Continue with creation afterwards
# Failure must be checked *before* completion, as both will be true
# when completing a rollback, and we don't want to consider it as
# a successful update.
elif provider.is_stack_failed(provider_stack):
reason = old_status.reason
if 'rolling' in reason:
reason = reason.replace('rolling', 'rolled')
return FailedStatus(reason)
elif provider.is_stack_completed(provider_stack):
stack.set_outputs(
provider.get_output_dict(provider_stack))
return CompleteStatus(old_status.reason)
else:
return old_status
logger.debug("Resolving stack %s", stack.fqn)
stack.resolve(self.context, self.provider)
logger.debug("Launching stack %s now.", stack.fqn)
template = self._template(stack.blueprint)
stack_policy = self._stack_policy(stack)
tags = build_stack_tags(stack)
parameters = self.build_parameters(stack, provider_stack)
force_change_set = stack.blueprint.requires_change_set
if recreate:
logger.debug("Re-creating stack: %s", stack.fqn)
provider.create_stack(stack.fqn, template, parameters,
tags, stack_policy=stack_policy)
return SubmittedStatus("re-creating stack")
elif not provider_stack:
logger.debug("Creating new stack: %s", stack.fqn)
provider.create_stack(stack.fqn, template, parameters, tags,
force_change_set,
stack_policy=stack_policy)
return SubmittedStatus("creating new stack")
try:
if provider.prepare_stack_for_update(provider_stack, tags):
existing_params = provider_stack.get('Parameters', [])
provider.update_stack(
stack.fqn,
template,
existing_params,
parameters,
tags,
force_interactive=stack.protected,
force_change_set=force_change_set,
stack_policy=stack_policy,
)
logger.debug("Updating existing stack: %s", stack.fqn)
return SubmittedStatus("updating existing stack")
else:
return SubmittedStatus("destroying stack for re-creation")
except CancelExecution:
stack.set_outputs(provider.get_output_dict(provider_stack))
return SkippedStatus(reason="canceled execution")
except StackDidNotChange:
stack.set_outputs(provider.get_output_dict(provider_stack))
return DidNotChangeStatus()
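    # Status flow (summary comment added for readability): this method is
    # polled repeatedly by the plan; it keeps returning SubmittedStatus while
    # CloudFormation is still working, and settles on CompleteStatus,
    # FailedStatus, SkippedStatus, DidNotChangeStatus or one of the
    # "not submitted / not updated" statuses once a terminal state is reached.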
def _template(self, blueprint):
"""Generates a suitable template based on whether or not an S3 bucket
is set.
If an S3 bucket is set, then the template will be uploaded to S3 first,
and CreateStack/UpdateStack operations will use the uploaded template.
If not bucket is set, then the template will be inlined.
"""
if self.bucket_name:
return Template(url=self.s3_stack_push(blueprint))
else:
return Template(body=blueprint.rendered)
def _stack_policy(self, stack):
"""Returns a Template object for the stacks stack policy, or None if
the stack doesn't have a stack policy."""
if stack.stack_policy:
return Template(body=stack.stack_policy)
def _generate_plan(self, tail=False):
return plan(
description="Create/Update stacks",
action=self._launch_stack,
tail=self._tail_stack if tail else None,
stacks=self.context.get_stacks(),
targets=self.context.stack_names)
def pre_run(self, outline=False, dump=False, *args, **kwargs):
"""Any steps that need to be taken prior to running the action."""
if should_ensure_cfn_bucket(outline, dump):
self.ensure_cfn_bucket()
hooks = self.context.config.pre_build
handle_hooks(
"pre_build",
hooks,
self.provider,
self.context,
dump,
outline
)
def run(self, concurrency=0, outline=False,
tail=False, dump=False, *args, **kwargs):
"""Kicks off the build/update of the stacks in the stack_definitions.
This is the main entry point for the Builder.
"""
plan = self._generate_plan(tail=tail)
if not plan.keys():
logger.warn('WARNING: No stacks detected (error in config?)')
if not outline and not dump:
plan.outline(logging.DEBUG)
logger.debug("Launching stacks: %s", ", ".join(plan.keys()))
walker = build_walker(concurrency)
plan.execute(walker)
else:
if outline:
plan.outline()
if dump:
plan.dump(directory=dump, context=self.context,
provider=self.provider)
def post_run(self, outline=False, dump=False, *args, **kwargs):
"""Any steps that need to be taken after running the action."""
hooks = self.context.config.post_build
handle_hooks(
"post_build",
hooks,
self.provider,
self.context,
dump,
outline
)
| [
"[email protected]"
] | |
c4744c241dfcca1e7a26ee3f2f6e104452cc97f7 | cb61ba31b27b232ebc8c802d7ca40c72bcdfe152 | /Misc/subarray_sum_negative.py | 0847ef7b4914676b35fe5134c25da1ab1f5c7345 | [
"Apache-2.0"
] | permissive | saisankargochhayat/algo_quest | c7c48187c76b5cd7c2ec3f0557432606e9096241 | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | refs/heads/master | 2021-07-04T15:21:33.606174 | 2021-02-07T23:42:43 | 2021-02-07T23:42:43 | 67,831,927 | 5 | 1 | Apache-2.0 | 2019-10-28T03:51:03 | 2016-09-09T20:51:29 | Python | UTF-8 | Python | false | false | 183 | py |
t=int(input())
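# NOTE: the original file uses subarray() without defining it anywhere in this
# snippet. The definition below is an assumed implementation added so the
# script can run: it counts subarrays summing to X via a prefix-sum hash map,
# which also handles negative numbers (matching the file name).
def subarray(arr, n, x):
    prefix_counts = {0: 1}  # prefix sum -> number of times it has been seen
    running = 0
    count = 0
    for value in arr:
        running += value
        count += prefix_counts.get(running - x, 0)
        prefix_counts[running] = prefix_counts.get(running, 0) + 1
    print(count)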
for i in range(t):
arr=list()
N,X=map(int,input().split())
for j in range(N):
num=int(input())
arr.append(num)
subarray(arr,N,X)
| [
"[email protected]"
] | |
acdc0bb6b71acfc1b51ebfe3911a130e22a71455 | 6a74ae0a776dfa50e946651362ff97326fc9f6e1 | /200/pt3/089.py | 49dee26b7d7005bec0e64fda65c4c349d6b6596a | [] | no_license | teotiwg/studyPython | 799c1307d50ad77a27b8a8ca59c79b79f07c29cd | fd0c7f8af9b0ba9d832818d42aec320386bf857b | refs/heads/master | 2023-02-17T17:04:23.809231 | 2021-01-11T09:27:20 | 2021-01-11T09:27:20 | 327,474,697 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | numstr = input('숫자를 입력하세요')
try:
num = int(numstr)
print("입력한 수는 정수 %d 입니다." %num)
except:
try:
num = float(numstr)
print('입력한 수는 실수 %f 입니다.' %num)
except:
print('+++숫자를 입력하세요+++') | [
"[email protected]"
] | |
5f656e42754c32c4967d3e7d1cb279ab1be83196 | 6d0ca19b8c0f986954135bca68fd3abc558e8ab8 | /PKUTreeMaker/test/processDump5511280880.py | fbef1194f9b962905db3049f0d621cb2b5eb863a | [] | no_license | AnYpku/Ntuple | 8e018a2980d0440bf48c918a328d75e406df9595 | 7e3a41a7da5ef0005be67e32d615752ca6f130e1 | refs/heads/master | 2020-03-22T14:24:27.658961 | 2018-04-06T12:26:08 | 2018-04-06T12:26:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55,666 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('/store/mc/RunIISummer16MiniAODv2/LLAJJ_EWK_MLL-50_MJJ-120_13TeV-madgraph-pythia8/MINIAODSIM/PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/50000/08DCD9BB-2C25-E711-90C9-C454449229AF.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.ChargeSignificanceTrajectoryFilter_block = cms.PSet(
ComponentType = cms.string('ChargeSignificanceTrajectoryFilter'),
chargeSignificance = cms.double(-1.0)
)
process.CkfBaseTrajectoryFilter_block = cms.PSet(
ComponentType = cms.string('CkfBaseTrajectoryFilter'),
chargeSignificance = cms.double(-1.0),
constantValueForLostHitsFractionFilter = cms.double(2.0),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32(4),
maxCCCLostHits = cms.int32(9999),
maxConsecLostHits = cms.int32(1),
maxLostHits = cms.int32(999),
maxLostHitsFraction = cms.double(0.1),
maxNumberOfHits = cms.int32(100),
minGoodStripCharge = cms.PSet(
refToPSet_ = cms.string('SiStripClusterChargeCutNone')
),
minHitsMinPt = cms.int32(3),
minNumberOfHitsForLoopers = cms.int32(13),
minNumberOfHitsPerLoop = cms.int32(4),
minPt = cms.double(0.9),
minimumNumberOfHits = cms.int32(5),
nSigmaMinPt = cms.double(5.0),
seedExtension = cms.int32(0),
seedPairPenalty = cms.int32(0),
strictSeedExtension = cms.bool(False)
)
process.CkfTrajectoryBuilder = cms.PSet(
ComponentType = cms.string('CkfTrajectoryBuilder'),
MeasurementTrackerName = cms.string(''),
TTRHBuilder = cms.string('WithTrackAngle'),
alwaysUseInvalidHits = cms.bool(True),
estimator = cms.string('Chi2'),
intermediateCleaning = cms.bool(True),
lostHitPenalty = cms.double(30.0),
maxCand = cms.int32(5),
propagatorAlong = cms.string('PropagatorWithMaterial'),
propagatorOpposite = cms.string('PropagatorWithMaterialOpposite'),
trajectoryFilter = cms.PSet(
refToPSet_ = cms.string('CkfBaseTrajectoryFilter_block')
),
updator = cms.string('KFUpdator')
)
process.CompositeTrajectoryFilter_block = cms.PSet(
ComponentType = cms.string('CompositeTrajectoryFilter'),
filters = cms.VPSet()
)
process.GroupedCkfTrajectoryBuilder = cms.PSet(
ComponentType = cms.string('GroupedCkfTrajectoryBuilder'),
MeasurementTrackerName = cms.string(''),
TTRHBuilder = cms.string('WithTrackAngle'),
alwaysUseInvalidHits = cms.bool(True),
bestHitOnly = cms.bool(True),
estimator = cms.string('Chi2'),
foundHitBonus = cms.double(5.0),
inOutTrajectoryFilter = cms.PSet(
refToPSet_ = cms.string('CkfBaseTrajectoryFilter_block')
),
intermediateCleaning = cms.bool(True),
keepOriginalIfRebuildFails = cms.bool(False),
lockHits = cms.bool(True),
lostHitPenalty = cms.double(30.0),
maxCand = cms.int32(5),
minNrOfHitsForRebuild = cms.int32(5),
propagatorAlong = cms.string('PropagatorWithMaterial'),
propagatorOpposite = cms.string('PropagatorWithMaterialOpposite'),
requireSeedHitsInRebuild = cms.bool(True),
trajectoryFilter = cms.PSet(
refToPSet_ = cms.string('CkfBaseTrajectoryFilter_block')
),
updator = cms.string('KFUpdator'),
useSameTrajFilter = cms.bool(True)
)
process.MaxCCCLostHitsTrajectoryFilter_block = cms.PSet(
ComponentType = cms.string('MaxCCCLostHitsTrajectoryFilter'),
maxCCCLostHits = cms.int32(3),
minGoodStripCharge = cms.PSet(
refToPSet_ = cms.string('SiStripClusterChargeCutLoose')
)
)
process.MaxConsecLostHitsTrajectoryFilter_block = cms.PSet(
ComponentType = cms.string('MaxConsecLostHitsTrajectoryFilter'),
maxConsecLostHits = cms.int32(1)
)
process.MaxHitsTrajectoryFilter_block = cms.PSet(
ComponentType = cms.string('MaxHitsTrajectoryFilter'),
maxNumberOfHits = cms.int32(100)
)
process.MaxLostHitsTrajectoryFilter_block = cms.PSet(
ComponentType = cms.string('MaxLostHitsTrajectoryFilter'),
maxLostHits = cms.int32(2)
)
process.MinHitsTrajectoryFilter_block = cms.PSet(
ComponentType = cms.string('MinHitsTrajectoryFilter'),
minimumNumberOfHits = cms.int32(5)
)
process.MinPtTrajectoryFilter_block = cms.PSet(
ComponentType = cms.string('MinPtTrajectoryFilter'),
minHitsMinPt = cms.int32(3),
minPt = cms.double(1.0),
nSigmaMinPt = cms.double(5.0)
)
process.SiStripClusterChargeCutLoose = cms.PSet(
value = cms.double(1620.0)
)
process.SiStripClusterChargeCutNone = cms.PSet(
value = cms.double(-1.0)
)
process.SiStripClusterChargeCutTight = cms.PSet(
value = cms.double(1945.0)
)
process.SiStripClusterChargeCutTiny = cms.PSet(
value = cms.double(800.0)
)
process.ThresholdPtTrajectoryFilter_block = cms.PSet(
ComponentType = cms.string('ThresholdPtTrajectoryFilter'),
minHitsThresholdPt = cms.int32(3),
nSigmaThresholdPt = cms.double(5.0),
thresholdPt = cms.double(10.0)
)
process.ckfBaseInOutTrajectoryFilter = cms.PSet(
ComponentType = cms.string('CkfBaseTrajectoryFilter'),
chargeSignificance = cms.double(-1.0),
constantValueForLostHitsFractionFilter = cms.double(2.0),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32(4),
maxCCCLostHits = cms.int32(9999),
maxConsecLostHits = cms.int32(1),
maxLostHits = cms.int32(999),
maxLostHitsFraction = cms.double(0.1),
maxNumberOfHits = cms.int32(100),
minGoodStripCharge = cms.PSet(
refToPSet_ = cms.string('SiStripClusterChargeCutNone')
),
minHitsMinPt = cms.int32(3),
minNumberOfHitsForLoopers = cms.int32(13),
minNumberOfHitsPerLoop = cms.int32(4),
minPt = cms.double(0.9),
minimumNumberOfHits = cms.int32(5),
nSigmaMinPt = cms.double(5.0),
seedExtension = cms.int32(0),
seedPairPenalty = cms.int32(0),
strictSeedExtension = cms.bool(False)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(300)
)
process.options = cms.untracked.PSet(
SkipEvent = cms.untracked.vstring('ProductNotFound'),
wantSummary = cms.untracked.bool(True)
)
process.pfJetIDSelector = cms.PSet(
quality = cms.string('LOOSE'),
version = cms.string('RUNIISTARTUP')
)
process.regressionModifier = cms.PSet(
eOverP_ECALTRKThr = cms.double(0.025),
ecalrechitsEB = cms.InputTag("reducedEgamma","reducedEBRecHits"),
ecalrechitsEE = cms.InputTag("reducedEgamma","reducedEERecHits"),
electron_config = cms.PSet(
regressionKey_ecalonly = cms.vstring('electron_eb_ECALonly_lowpt',
'electron_eb_ECALonly',
'electron_ee_ECALonly_lowpt',
'electron_ee_ECALonly'),
regressionKey_ecaltrk = cms.vstring('electron_eb_ECALTRK_lowpt',
'electron_eb_ECALTRK',
'electron_ee_ECALTRK_lowpt',
'electron_ee_ECALTRK'),
uncertaintyKey_ecalonly = cms.vstring('electron_eb_ECALonly_lowpt_var',
'electron_eb_ECALonly_var',
'electron_ee_ECALonly_lowpt_var',
'electron_ee_ECALonly_var'),
uncertaintyKey_ecaltrk = cms.vstring('electron_eb_ECALTRK_lowpt_var',
'electron_eb_ECALTRK_var',
'electron_ee_ECALTRK_lowpt_var',
'electron_ee_ECALTRK_var')
),
epDiffSig_ECALTRKThr = cms.double(15.0),
epSig_ECALTRKThr = cms.double(10.0),
highEnergy_ECALTRKThr = cms.double(200.0),
lowEnergy_ECALTRKThr = cms.double(50.0),
lowEnergy_ECALonlyThr = cms.double(300.0),
modifierName = cms.string('EGExtraInfoModifierFromDBUser'),
photon_config = cms.PSet(
regressionKey_ecalonly = cms.vstring('photon_eb_ECALonly_lowpt',
'photon_eb_ECALonly',
'photon_ee_ECALonly_lowpt',
'photon_ee_ECALonly'),
uncertaintyKey_ecalonly = cms.vstring('photon_eb_ECALonly_lowpt_var',
'photon_eb_ECALonly_var',
'photon_ee_ECALonly_lowpt_var',
'photon_ee_ECALonly_var')
),
rhoCollection = cms.InputTag("fixedGridRhoFastjetAll"),
useLocalFile = cms.bool(False)
)
process.egamma_modifications = cms.VPSet(cms.PSet(
eOverP_ECALTRKThr = cms.double(0.025),
ecalrechitsEB = cms.InputTag("reducedEgamma","reducedEBRecHits"),
ecalrechitsEE = cms.InputTag("reducedEgamma","reducedEERecHits"),
electron_config = cms.PSet(
regressionKey_ecalonly = cms.vstring('electron_eb_ECALonly_lowpt',
'electron_eb_ECALonly',
'electron_ee_ECALonly_lowpt',
'electron_ee_ECALonly'),
regressionKey_ecaltrk = cms.vstring('electron_eb_ECALTRK_lowpt',
'electron_eb_ECALTRK',
'electron_ee_ECALTRK_lowpt',
'electron_ee_ECALTRK'),
uncertaintyKey_ecalonly = cms.vstring('electron_eb_ECALonly_lowpt_var',
'electron_eb_ECALonly_var',
'electron_ee_ECALonly_lowpt_var',
'electron_ee_ECALonly_var'),
uncertaintyKey_ecaltrk = cms.vstring('electron_eb_ECALTRK_lowpt_var',
'electron_eb_ECALTRK_var',
'electron_ee_ECALTRK_lowpt_var',
'electron_ee_ECALTRK_var')
),
epDiffSig_ECALTRKThr = cms.double(15.0),
epSig_ECALTRKThr = cms.double(10.0),
highEnergy_ECALTRKThr = cms.double(200.0),
lowEnergy_ECALTRKThr = cms.double(50.0),
lowEnergy_ECALonlyThr = cms.double(300.0),
modifierName = cms.string('EGExtraInfoModifierFromDBUser'),
photon_config = cms.PSet(
regressionKey_ecalonly = cms.vstring('photon_eb_ECALonly_lowpt',
'photon_eb_ECALonly',
'photon_ee_ECALonly_lowpt',
'photon_ee_ECALonly'),
uncertaintyKey_ecalonly = cms.vstring('photon_eb_ECALonly_lowpt_var',
'photon_eb_ECALonly_var',
'photon_ee_ECALonly_lowpt_var',
'photon_ee_ECALonly_var')
),
rhoCollection = cms.InputTag("fixedGridRhoFastjetAll"),
useLocalFile = cms.bool(False)
))
process.Ztoee = cms.EDProducer("CandViewCombiner",
cut = cms.string(''),
decay = cms.string('goodElectrons@+ goodElectrons@-')
)
process.Ztomumu = cms.EDProducer("CandViewCombiner",
cut = cms.string(''),
decay = cms.string('goodMuons@+ goodMuons@-')
)
process.calibratedPatElectrons = cms.EDProducer("CalibratedPatElectronProducerRun2",
autoDataType = cms.bool(True),
correctionFile = cms.string('EgammaAnalysis/ElectronTools/data/ScalesSmearings/Moriond17_23Jan_ele'),
electrons = cms.InputTag("slimmedElectrons"),
gbrForestName = cms.vstring('electron_eb_ECALTRK_lowpt',
'electron_eb_ECALTRK',
'electron_ee_ECALTRK_lowpt',
'electron_ee_ECALTRK',
'electron_eb_ECALTRK_lowpt_var',
'electron_eb_ECALTRK_var',
'electron_ee_ECALTRK_lowpt_var',
'electron_ee_ECALTRK_var'),
isMC = cms.bool(True),
isSynchronization = cms.bool(False),
recHitCollectionEB = cms.InputTag("reducedEgamma","reducedEBRecHits"),
recHitCollectionEE = cms.InputTag("reducedEgamma","reducedEERecHits")
)
process.calibratedPatPhotons = cms.EDProducer("CalibratedPatPhotonProducerRun2",
autoDataType = cms.bool(True),
correctionFile = cms.string('EgammaAnalysis/ElectronTools/data/ScalesSmearings/Moriond17_23Jan_ele'),
isMC = cms.bool(True),
isSynchronization = cms.bool(False),
photons = cms.InputTag("slimmedPhotons"),
recHitCollectionEB = cms.InputTag("reducedEgamma","reducedEBRecHits"),
recHitCollectionEE = cms.InputTag("reducedEgamma","reducedEERecHits")
)
process.ckfTrackCandidates = cms.EDProducer("CkfTrackCandidateMaker",
MeasurementTrackerEvent = cms.InputTag("MeasurementTrackerEvent"),
NavigationSchool = cms.string('SimpleNavigationSchool'),
RedundantSeedCleaner = cms.string('CachingSeedCleanerBySharedInput'),
SimpleMagneticField = cms.string(''),
TrajectoryBuilder = cms.string('GroupedCkfTrajectoryBuilder'),
TrajectoryBuilderPSet = cms.PSet(
refToPSet_ = cms.string('GroupedCkfTrajectoryBuilder')
),
TrajectoryCleaner = cms.string('TrajectoryCleanerBySharedHits'),
TransientInitialStateEstimatorParameters = cms.PSet(
numberMeasurementsForFit = cms.int32(4),
propagatorAlongTISE = cms.string('PropagatorWithMaterial'),
propagatorOppositeTISE = cms.string('PropagatorWithMaterialOpposite')
),
cleanTrajectoryAfterInOut = cms.bool(True),
doSeedingRegionRebuilding = cms.bool(True),
maxNSeeds = cms.uint32(500000),
maxSeedsBeforeCleaning = cms.uint32(5000),
src = cms.InputTag("globalMixedSeeds"),
useHitsSplitting = cms.bool(True)
)
process.cleanAK4Jets = cms.EDProducer("PATJetCleaner",
checkOverlaps = cms.PSet(
electrons = cms.PSet(
algorithm = cms.string('byDeltaR'),
checkRecoComponents = cms.bool(False),
deltaR = cms.double(0.4),
pairCut = cms.string(''),
preselection = cms.string(''),
requireNoOverlaps = cms.bool(True),
src = cms.InputTag("goodElectrons")
),
muons = cms.PSet(
algorithm = cms.string('byDeltaR'),
checkRecoComponents = cms.bool(False),
deltaR = cms.double(0.4),
pairCut = cms.string(''),
preselection = cms.string(''),
requireNoOverlaps = cms.bool(True),
src = cms.InputTag("goodMuons")
),
photons = cms.PSet(
),
taus = cms.PSet(
),
tkIsoElectrons = cms.PSet(
)
),
finalCut = cms.string('pt > 20 & abs(eta) < 4.7'),
preselection = cms.string(''),
src = cms.InputTag("goodAK4Jets")
)
process.goodElectrons = cms.EDProducer("PATElectronIdSelector",
effAreasConfigFile = cms.FileInPath('RecoEgamma/ElectronIdentification/data/Summer16/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_80X.txt'),
idLabel = cms.string('medium'),
rho = cms.InputTag("fixedGridRhoFastjetAll"),
src = cms.InputTag("calibratedPatElectrons"),
vertex = cms.InputTag("offlineSlimmedPrimaryVertices")
)
process.goodMuons = cms.EDProducer("PATMuonIdSelector",
idLabel = cms.string('tight'),
src = cms.InputTag("slimmedMuons"),
vertex = cms.InputTag("offlineSlimmedPrimaryVertices")
)
process.leptonicV = cms.EDProducer("CandViewMerger",
cut = cms.string(''),
src = cms.VInputTag("Ztoee", "Ztomumu")
)
process.looseMuons = cms.EDProducer("PATMuonIdSelector",
idLabel = cms.string('loose'),
src = cms.InputTag("slimmedMuons"),
vertex = cms.InputTag("offlineSlimmedPrimaryVertices")
)
process.modifiedElectrons = cms.EDProducer("ModifiedElectronProducer",
modifierConfig = cms.PSet(
modifications = cms.VPSet()
),
src = cms.InputTag("slimmedElectrons","","@skipCurrentProcess")
)
process.modifiedPhotons = cms.EDProducer("ModifiedPhotonProducer",
modifierConfig = cms.PSet(
modifications = cms.VPSet()
),
src = cms.InputTag("slimmedPhotons","","@skipCurrentProcess")
)
process.photonIDValueMapProducer = cms.EDProducer("PhotonIDValueMapProducer",
ebReducedRecHitCollection = cms.InputTag("reducedEcalRecHitsEB"),
ebReducedRecHitCollectionMiniAOD = cms.InputTag("reducedEgamma","reducedEBRecHits"),
eeReducedRecHitCollection = cms.InputTag("reducedEcalRecHitsEE"),
eeReducedRecHitCollectionMiniAOD = cms.InputTag("reducedEgamma","reducedEERecHits"),
esReducedRecHitCollection = cms.InputTag("reducedEcalRecHitsES"),
esReducedRecHitCollectionMiniAOD = cms.InputTag("reducedEgamma","reducedESRecHits"),
particleBasedIsolation = cms.InputTag("particleBasedIsolation","gedPhotons"),
pfCandidates = cms.InputTag("particleFlow"),
pfCandidatesMiniAOD = cms.InputTag("packedPFCandidates"),
src = cms.InputTag("gedPhotons"),
srcMiniAOD = cms.InputTag("slimmedPhotons","","@skipCurrentProcess"),
vertices = cms.InputTag("offlinePrimaryVertices"),
verticesMiniAOD = cms.InputTag("offlineSlimmedPrimaryVertices")
)
process.randomEngineStateProducer = cms.EDProducer("RandomEngineStateProducer")
process.slimmedElectrons = cms.EDProducer("ModifiedElectronProducer",
modifierConfig = cms.PSet(
modifications = cms.VPSet(cms.PSet(
eOverP_ECALTRKThr = cms.double(0.025),
ecalrechitsEB = cms.InputTag("reducedEgamma","reducedEBRecHits"),
ecalrechitsEE = cms.InputTag("reducedEgamma","reducedEERecHits"),
electron_config = cms.PSet(
regressionKey_ecalonly = cms.vstring('electron_eb_ECALonly_lowpt',
'electron_eb_ECALonly',
'electron_ee_ECALonly_lowpt',
'electron_ee_ECALonly'),
regressionKey_ecaltrk = cms.vstring('electron_eb_ECALTRK_lowpt',
'electron_eb_ECALTRK',
'electron_ee_ECALTRK_lowpt',
'electron_ee_ECALTRK'),
uncertaintyKey_ecalonly = cms.vstring('electron_eb_ECALonly_lowpt_var',
'electron_eb_ECALonly_var',
'electron_ee_ECALonly_lowpt_var',
'electron_ee_ECALonly_var'),
uncertaintyKey_ecaltrk = cms.vstring('electron_eb_ECALTRK_lowpt_var',
'electron_eb_ECALTRK_var',
'electron_ee_ECALTRK_lowpt_var',
'electron_ee_ECALTRK_var')
),
epDiffSig_ECALTRKThr = cms.double(15.0),
epSig_ECALTRKThr = cms.double(10.0),
highEnergy_ECALTRKThr = cms.double(200.0),
lowEnergy_ECALTRKThr = cms.double(50.0),
lowEnergy_ECALonlyThr = cms.double(300.0),
modifierName = cms.string('EGExtraInfoModifierFromDBUser'),
photon_config = cms.PSet(
regressionKey_ecalonly = cms.vstring('photon_eb_ECALonly_lowpt',
'photon_eb_ECALonly',
'photon_ee_ECALonly_lowpt',
'photon_ee_ECALonly'),
uncertaintyKey_ecalonly = cms.vstring('photon_eb_ECALonly_lowpt_var',
'photon_eb_ECALonly_var',
'photon_ee_ECALonly_lowpt_var',
'photon_ee_ECALonly_var')
),
rhoCollection = cms.InputTag("fixedGridRhoFastjetAll"),
useLocalFile = cms.bool(False)
))
),
src = cms.InputTag("slimmedElectrons","","@skipCurrentProcess")
)
process.slimmedPhotons = cms.EDProducer("ModifiedPhotonProducer",
modifierConfig = cms.PSet(
modifications = cms.VPSet(cms.PSet(
eOverP_ECALTRKThr = cms.double(0.025),
ecalrechitsEB = cms.InputTag("reducedEgamma","reducedEBRecHits"),
ecalrechitsEE = cms.InputTag("reducedEgamma","reducedEERecHits"),
electron_config = cms.PSet(
regressionKey_ecalonly = cms.vstring('electron_eb_ECALonly_lowpt',
'electron_eb_ECALonly',
'electron_ee_ECALonly_lowpt',
'electron_ee_ECALonly'),
regressionKey_ecaltrk = cms.vstring('electron_eb_ECALTRK_lowpt',
'electron_eb_ECALTRK',
'electron_ee_ECALTRK_lowpt',
'electron_ee_ECALTRK'),
uncertaintyKey_ecalonly = cms.vstring('electron_eb_ECALonly_lowpt_var',
'electron_eb_ECALonly_var',
'electron_ee_ECALonly_lowpt_var',
'electron_ee_ECALonly_var'),
uncertaintyKey_ecaltrk = cms.vstring('electron_eb_ECALTRK_lowpt_var',
'electron_eb_ECALTRK_var',
'electron_ee_ECALTRK_lowpt_var',
'electron_ee_ECALTRK_var')
),
epDiffSig_ECALTRKThr = cms.double(15.0),
epSig_ECALTRKThr = cms.double(10.0),
highEnergy_ECALTRKThr = cms.double(200.0),
lowEnergy_ECALTRKThr = cms.double(50.0),
lowEnergy_ECALonlyThr = cms.double(300.0),
modifierName = cms.string('EGExtraInfoModifierFromDBUser'),
photon_config = cms.PSet(
regressionKey_ecalonly = cms.vstring('photon_eb_ECALonly_lowpt',
'photon_eb_ECALonly',
'photon_ee_ECALonly_lowpt',
'photon_ee_ECALonly'),
uncertaintyKey_ecalonly = cms.vstring('photon_eb_ECALonly_lowpt_var',
'photon_eb_ECALonly_var',
'photon_ee_ECALonly_lowpt_var',
'photon_ee_ECALonly_var')
),
rhoCollection = cms.InputTag("fixedGridRhoFastjetAll"),
useLocalFile = cms.bool(False)
))
),
src = cms.InputTag("slimmedPhotons","","@skipCurrentProcess")
)
process.vetoElectrons = cms.EDProducer("PATElectronIdSelector",
effAreasConfigFile = cms.FileInPath('RecoEgamma/ElectronIdentification/data/Summer16/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_80X.txt'),
idLabel = cms.string('veto'),
rho = cms.InputTag("fixedGridRhoFastjetAll"),
src = cms.InputTag("slimmedElectrons"),
vertex = cms.InputTag("offlineSlimmedPrimaryVertices")
)
process.BadChargedCandidateFilter = cms.EDFilter("BadChargedCandidateFilter",
PFCandidates = cms.InputTag("packedPFCandidates"),
debug = cms.bool(False),
innerTrackRelErr = cms.double(1.0),
maxDR = cms.double(1e-05),
minMuonPt = cms.double(100.0),
minMuonTrackRelErr = cms.double(2.0),
minPtDiffRel = cms.double(1e-05),
muons = cms.InputTag("slimmedMuons"),
segmentCompatibility = cms.double(0.3),
taggingMode = cms.bool(False)
)
process.BadPFMuonFilter = cms.EDFilter("BadPFMuonFilter",
PFCandidates = cms.InputTag("packedPFCandidates"),
algo = cms.int32(14),
debug = cms.bool(False),
innerTrackRelErr = cms.double(1.0),
minDZ = cms.double(0.1),
minMuPt = cms.double(100),
minPtError = cms.double(2.0),
muons = cms.InputTag("slimmedMuons"),
segmentCompatibility = cms.double(0.3),
taggingMode = cms.bool(False)
)
process.goodAK4Jets = cms.EDFilter("PFJetIDSelectionFunctorFilter",
filterParams = cms.PSet(
quality = cms.string('LOOSE'),
version = cms.string('RUNIISTARTUP')
),
src = cms.InputTag("slimmedJets")
)
process.goodPhotons = cms.EDFilter("PATPhotonSelector",
cut = cms.string('pt > 15 && abs(eta) < 2.5'),
src = cms.InputTag("calibratedPatPhotons")
)
process.leptonicVFilter = cms.EDFilter("CandViewCountFilter",
filter = cms.bool(False),
minNumber = cms.uint32(0),
src = cms.InputTag("leptonicV")
)
process.leptonicVSelector = cms.EDFilter("CandViewSelector",
cut = cms.string('pt > 0.0'),
filter = cms.bool(False),
src = cms.InputTag("leptonicV")
)
process.treeDumper = cms.EDAnalyzer("ZPKUTreeMaker",
PKUChannel = cms.string('VW_CHANNEL'),
RunOnMC = cms.bool(True),
ak4jetsSrc = cms.InputTag("cleanAK4Jets"),
badMuonFilterSelection = cms.string('Flag_badMuons'),
beamSpot = cms.InputTag("offlineBeamSpot","","RECO"),
conversions = cms.InputTag("reducedEgamma","reducedConversions","PAT"),
crossSectionPb = cms.double(1),
duplicateMuonFilterSelection = cms.string('Flag_duplicateMuons'),
effAreaChHadFile = cms.FileInPath('RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfChargedHadrons_25ns_NULLcorrection.txt'),
effAreaNeuHadFile = cms.FileInPath('RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfNeutralHadrons_25ns_90percentBased.txt'),
effAreaPhoFile = cms.FileInPath('RecoEgamma/PhotonIdentification/data/Spring15/effAreaPhotons_cone03_pfPhotons_25ns_90percentBased.txt'),
elPaths1 = cms.vstring('HLT_DoubleEle24_22_eta2p1_WPLoose_Gsf_v*'),
elPaths2 = cms.vstring('HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v*'),
electrons = cms.InputTag("calibratedPatElectrons"),
full5x5SigmaIEtaIEtaMap = cms.InputTag("photonIDValueMapProducer","phoFull5x5SigmaIEtaIEta"),
genJet = cms.InputTag("slimmedGenJets"),
genSrc = cms.InputTag("prunedGenParticles"),
generator = cms.InputTag("generator"),
goodmuonSrc = cms.InputTag("goodMuons"),
hltToken = cms.InputTag("TriggerResults","","HLT"),
isGen = cms.bool(False),
jecAK4PayloadNames = cms.vstring('Summer16_23Sep2016V3_MC_L1FastJet_AK4PFchs.txt',
'Summer16_23Sep2016V3_MC_L2Relative_AK4PFchs.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFchs.txt'),
jecAK4chsPayloadNames = cms.vstring('Summer16_23Sep2016V3_MC_L1FastJet_AK4PFchs.txt',
'Summer16_23Sep2016V3_MC_L2Relative_AK4PFchs.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFchs.txt'),
leptonicVSrc = cms.InputTag("leptonicV"),
looseelectronSrc = cms.InputTag("vetoElectrons"),
loosemuonSrc = cms.InputTag("looseMuons"),
metSrc = cms.InputTag("slimmedMETs"),
muPaths1 = cms.vstring('HLT_Mu17_TrkIsoVVL_v*'),
muPaths2 = cms.vstring('HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_DZ_v*',
'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v*'),
muPaths3 = cms.vstring('HLT_IsoMu24_v*'),
muPaths4 = cms.vstring('HLT_Mu17_v*'),
muPaths5 = cms.vstring('HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_v*',
'HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_v*'),
muPaths6 = cms.vstring('HLT_IsoMu22_v*',
'HLT_IsoTkMu22_v*'),
muPaths7 = cms.vstring('HLT_IsoMu24_v*',
'HLT_IsoTkMu24_v*'),
muPaths8 = cms.vstring('HLT_IsoMu27_v*',
'HLT_IsoTkMu27_v*'),
noiseFilter = cms.InputTag("TriggerResults","","PAT"),
noiseFilterSelection_EcalDeadCellTriggerPrimitiveFilter = cms.string('Flag_EcalDeadCellTriggerPrimitiveFilter'),
noiseFilterSelection_HBHENoiseFilter = cms.string('Flag_HBHENoiseFilter'),
noiseFilterSelection_HBHENoiseIsoFilter = cms.string('Flag_HBHENoiseIsoFilter'),
noiseFilterSelection_badChargedHadron = cms.InputTag("BadChargedCandidateFilter"),
noiseFilterSelection_badMuon = cms.InputTag("BadPFMuonFilter"),
noiseFilterSelection_eeBadScFilter = cms.string('Flag_eeBadScFilter'),
noiseFilterSelection_globalTightHaloFilter = cms.string('Flag_globalTightHalo2016Filter'),
noiseFilterSelection_goodVertices = cms.string('Flag_goodVertices'),
originalNEvents = cms.int32(1),
phoChargedIsolation = cms.InputTag("photonIDValueMapProducer","phoChargedIsolation"),
phoNeutralHadronIsolation = cms.InputTag("photonIDValueMapProducer","phoNeutralHadronIsolation"),
phoPhotonIsolation = cms.InputTag("photonIDValueMapProducer","phoPhotonIsolation"),
photonSrc = cms.InputTag("calibratedPatPhotons"),
pileup = cms.InputTag("slimmedAddPileupInfo"),
rho = cms.InputTag("fixedGridRhoFastjetAll"),
t1jetSrc = cms.InputTag("slimmedJets"),
t1muSrc = cms.InputTag("slimmedMuons"),
targetLumiInvPb = cms.double(1.0),
vertex = cms.InputTag("offlineSlimmedPrimaryVertices")
)
process.eleSequence = cms.Sequence(process.goodElectrons+process.vetoElectrons)
process.muSequence = cms.Sequence(process.goodMuons+process.looseMuons)
process.leptonicVSequence = cms.Sequence(process.Ztomumu+process.Ztoee+process.leptonicV)
process.NJetsSequence = cms.Sequence(process.goodAK4Jets+process.cleanAK4Jets)
process.regressionApplication = cms.Sequence(process.slimmedElectrons+process.slimmedPhotons)
process.photonSequence = cms.Sequence(process.goodPhotons)
process.metfilterSequence = cms.Sequence(process.BadPFMuonFilter+process.BadChargedCandidateFilter)
process.jetSequence = cms.Sequence(process.NJetsSequence)
process.leptonSequence = cms.Sequence(process.muSequence+process.regressionApplication+process.calibratedPatElectrons+process.calibratedPatPhotons+process.eleSequence+process.leptonicVSequence+process.leptonicVSelector+process.leptonicVFilter)
process.analysis = cms.Path(process.leptonSequence+process.jetSequence+process.metfilterSequence+process.photonIDValueMapProducer+process.treeDumper)
process.DQMStore = cms.Service("DQMStore",
LSbasedMode = cms.untracked.bool(False),
collateHistograms = cms.untracked.bool(False),
enableMultiThread = cms.untracked.bool(False),
forceResetOnBeginLumi = cms.untracked.bool(False),
referenceFileName = cms.untracked.string(''),
verbose = cms.untracked.int32(0),
verboseQT = cms.untracked.int32(0)
)
process.MessageLogger = cms.Service("MessageLogger",
FrameworkJobReport = cms.untracked.PSet(
FwkJob = cms.untracked.PSet(
limit = cms.untracked.int32(10000000),
optionalPSet = cms.untracked.bool(True)
),
default = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
optionalPSet = cms.untracked.bool(True)
),
categories = cms.untracked.vstring('FwkJob',
'FwkReport',
'FwkSummary',
'Root_NoDictionary'),
cerr = cms.untracked.PSet(
FwkJob = cms.untracked.PSet(
limit = cms.untracked.int32(0),
optionalPSet = cms.untracked.bool(True)
),
FwkReport = cms.untracked.PSet(
limit = cms.untracked.int32(99999999),
optionalPSet = cms.untracked.bool(True),
reportEvery = cms.untracked.int32(200)
),
FwkSummary = cms.untracked.PSet(
limit = cms.untracked.int32(10000000),
optionalPSet = cms.untracked.bool(True),
reportEvery = cms.untracked.int32(1)
),
INFO = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
Root_NoDictionary = cms.untracked.PSet(
limit = cms.untracked.int32(0),
optionalPSet = cms.untracked.bool(True)
),
default = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
),
noTimeStamps = cms.untracked.bool(False),
optionalPSet = cms.untracked.bool(True),
threshold = cms.untracked.string('INFO')
),
cerr_stats = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
output = cms.untracked.string('cerr'),
threshold = cms.untracked.string('WARNING')
),
cout = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
),
debugModules = cms.untracked.vstring(),
debugs = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
),
default = cms.untracked.PSet(
),
destinations = cms.untracked.vstring('warnings',
'errors',
'infos',
'debugs',
'cout',
'cerr'),
errors = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
),
fwkJobReports = cms.untracked.vstring('FrameworkJobReport'),
infos = cms.untracked.PSet(
Root_NoDictionary = cms.untracked.PSet(
limit = cms.untracked.int32(0),
optionalPSet = cms.untracked.bool(True)
),
optionalPSet = cms.untracked.bool(True),
placeholder = cms.untracked.bool(True)
),
statistics = cms.untracked.vstring('cerr_stats'),
suppressDebug = cms.untracked.vstring(),
suppressInfo = cms.untracked.vstring(),
suppressWarning = cms.untracked.vstring(),
warnings = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
)
)
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
calibratedPatElectrons = cms.PSet(
engineName = cms.untracked.string('TRandom3'),
initialSeed = cms.untracked.uint32(81)
),
calibratedPatPhotons = cms.PSet(
engineName = cms.untracked.string('TRandom3'),
initialSeed = cms.untracked.uint32(81)
)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('ZtreePKU.root')
)
process.CSCGeometryESModule = cms.ESProducer("CSCGeometryESModule",
alignmentsLabel = cms.string(''),
appendToDataLabel = cms.string(''),
applyAlignment = cms.bool(True),
debugV = cms.untracked.bool(False),
useCentreTIOffsets = cms.bool(False),
useDDD = cms.bool(False),
useGangedStripsInME1a = cms.bool(True),
useOnlyWiresInME1a = cms.bool(False),
useRealWireGeometry = cms.bool(True)
)
process.CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",
SelectedCalos = cms.vstring('HCAL',
'ZDC',
'CASTOR',
'EcalBarrel',
'EcalEndcap',
'EcalPreshower',
'TOWER')
)
process.CaloTopologyBuilder = cms.ESProducer("CaloTopologyBuilder")
process.CaloTowerGeometryFromDBEP = cms.ESProducer("CaloTowerGeometryFromDBEP",
applyAlignment = cms.bool(False),
hcalTopologyConstants = cms.PSet(
maxDepthHB = cms.int32(2),
maxDepthHE = cms.int32(3),
mode = cms.string('HcalTopologyMode::LHC')
)
)
process.CaloTowerTopologyEP = cms.ESProducer("CaloTowerTopologyEP")
process.CastorDbProducer = cms.ESProducer("CastorDbProducer")
process.CastorGeometryFromDBEP = cms.ESProducer("CastorGeometryFromDBEP",
applyAlignment = cms.bool(False)
)
process.Chi2MeasurementEstimator = cms.ESProducer("Chi2MeasurementEstimatorESProducer",
ComponentName = cms.string('Chi2'),
MaxChi2 = cms.double(30),
MaxDisplacement = cms.double(0.5),
MaxSagitta = cms.double(2),
MinPtForHitRecoveryInGluedDet = cms.double(1000000),
MinimalTolerance = cms.double(0.5),
appendToDataLabel = cms.string(''),
nSigma = cms.double(3)
)
process.DTGeometryESModule = cms.ESProducer("DTGeometryESModule",
alignmentsLabel = cms.string(''),
appendToDataLabel = cms.string(''),
applyAlignment = cms.bool(True),
fromDDD = cms.bool(False)
)
process.EcalBarrelGeometryFromDBEP = cms.ESProducer("EcalBarrelGeometryFromDBEP",
applyAlignment = cms.bool(True)
)
process.EcalElectronicsMappingBuilder = cms.ESProducer("EcalElectronicsMappingBuilder")
process.EcalEndcapGeometryFromDBEP = cms.ESProducer("EcalEndcapGeometryFromDBEP",
applyAlignment = cms.bool(True)
)
process.EcalLaserCorrectionService = cms.ESProducer("EcalLaserCorrectionService")
process.EcalPreshowerGeometryFromDBEP = cms.ESProducer("EcalPreshowerGeometryFromDBEP",
applyAlignment = cms.bool(True)
)
process.EcalTrigTowerConstituentsMapBuilder = cms.ESProducer("EcalTrigTowerConstituentsMapBuilder",
MapFile = cms.untracked.string('Geometry/EcalMapping/data/EndCap_TTMap.txt')
)
process.GlobalTrackingGeometryESProducer = cms.ESProducer("GlobalTrackingGeometryESProducer")
process.HcalAlignmentEP = cms.ESProducer("HcalAlignmentEP")
process.HcalGeometryFromDBEP = cms.ESProducer("HcalGeometryFromDBEP",
applyAlignment = cms.bool(True),
hcalTopologyConstants = cms.PSet(
maxDepthHB = cms.int32(2),
maxDepthHE = cms.int32(3),
mode = cms.string('HcalTopologyMode::LHC')
)
)
process.KFUpdatorESProducer = cms.ESProducer("KFUpdatorESProducer",
ComponentName = cms.string('KFUpdator')
)
process.MaterialPropagator = cms.ESProducer("PropagatorWithMaterialESProducer",
ComponentName = cms.string('PropagatorWithMaterial'),
Mass = cms.double(0.105),
MaxDPhi = cms.double(1.6),
PropagationDirection = cms.string('alongMomentum'),
SimpleMagneticField = cms.string(''),
ptMin = cms.double(-1.0),
useRungeKutta = cms.bool(False)
)
process.MeasurementTracker = cms.ESProducer("MeasurementTrackerESProducer",
ComponentName = cms.string(''),
DebugPixelModuleQualityDB = cms.untracked.bool(False),
DebugPixelROCQualityDB = cms.untracked.bool(False),
DebugStripAPVFiberQualityDB = cms.untracked.bool(False),
DebugStripModuleQualityDB = cms.untracked.bool(False),
DebugStripStripQualityDB = cms.untracked.bool(False),
HitMatcher = cms.string('StandardMatcher'),
MaskBadAPVFibers = cms.bool(True),
PixelCPE = cms.string('PixelCPEGeneric'),
SiStripQualityLabel = cms.string(''),
StripCPE = cms.string('StripCPEfromTrackAngle'),
UsePixelModuleQualityDB = cms.bool(True),
UsePixelROCQualityDB = cms.bool(True),
UseStripAPVFiberQualityDB = cms.bool(True),
UseStripModuleQualityDB = cms.bool(True),
UseStripStripQualityDB = cms.bool(True),
badStripCuts = cms.PSet(
TEC = cms.PSet(
maxBad = cms.uint32(4),
maxConsecutiveBad = cms.uint32(2)
),
TIB = cms.PSet(
maxBad = cms.uint32(4),
maxConsecutiveBad = cms.uint32(2)
),
TID = cms.PSet(
maxBad = cms.uint32(4),
maxConsecutiveBad = cms.uint32(2)
),
TOB = cms.PSet(
maxBad = cms.uint32(4),
maxConsecutiveBad = cms.uint32(2)
)
)
)
process.MuonDetLayerGeometryESProducer = cms.ESProducer("MuonDetLayerGeometryESProducer")
process.OppositeMaterialPropagator = cms.ESProducer("PropagatorWithMaterialESProducer",
ComponentName = cms.string('PropagatorWithMaterialOpposite'),
Mass = cms.double(0.105),
MaxDPhi = cms.double(1.6),
PropagationDirection = cms.string('oppositeToMomentum'),
SimpleMagneticField = cms.string(''),
ptMin = cms.double(-1.0),
useRungeKutta = cms.bool(False)
)
process.ParabolicParametrizedMagneticFieldProducer = cms.ESProducer("AutoParametrizedMagneticFieldProducer",
label = cms.untracked.string('ParabolicMf'),
valueOverride = cms.int32(18268),
version = cms.string('Parabolic')
)
process.RPCGeometryESModule = cms.ESProducer("RPCGeometryESModule",
compatibiltyWith11 = cms.untracked.bool(True),
useDDD = cms.untracked.bool(False)
)
process.SiStripRecHitMatcherESProducer = cms.ESProducer("SiStripRecHitMatcherESProducer",
ComponentName = cms.string('StandardMatcher'),
NSigmaInside = cms.double(3.0),
PreFilter = cms.bool(False)
)
process.SteppingHelixPropagatorAlong = cms.ESProducer("SteppingHelixPropagatorESProducer",
ApplyRadX0Correction = cms.bool(True),
AssumeNoMaterial = cms.bool(False),
ComponentName = cms.string('SteppingHelixPropagatorAlong'),
NoErrorPropagation = cms.bool(False),
PropagationDirection = cms.string('alongMomentum'),
SetVBFPointer = cms.bool(False),
VBFName = cms.string('VolumeBasedMagneticField'),
debug = cms.bool(False),
endcapShiftInZNeg = cms.double(0.0),
endcapShiftInZPos = cms.double(0.0),
returnTangentPlane = cms.bool(True),
sendLogWarning = cms.bool(False),
useEndcapShiftsInZ = cms.bool(False),
useInTeslaFromMagField = cms.bool(False),
useIsYokeFlag = cms.bool(True),
useMagVolumes = cms.bool(True),
useMatVolumes = cms.bool(True),
useTuningForL2Speed = cms.bool(False)
)
process.StripCPEESProducer = cms.ESProducer("StripCPEESProducer",
ComponentName = cms.string('SimpleStripCPE'),
ComponentType = cms.string('SimpleStripCPE'),
parameters = cms.PSet(
)
)
process.StripCPEfromTrackAngleESProducer = cms.ESProducer("StripCPEESProducer",
ComponentName = cms.string('StripCPEfromTrackAngle'),
ComponentType = cms.string('StripCPEfromTrackAngle'),
parameters = cms.PSet(
mLC_P0 = cms.double(-0.326),
mLC_P1 = cms.double(0.618),
mLC_P2 = cms.double(0.3),
mTEC_P0 = cms.double(-1.885),
mTEC_P1 = cms.double(0.471),
mTIB_P0 = cms.double(-0.742),
mTIB_P1 = cms.double(0.202),
mTID_P0 = cms.double(-1.427),
mTID_P1 = cms.double(0.433),
mTOB_P0 = cms.double(-1.026),
mTOB_P1 = cms.double(0.253),
maxChgOneMIP = cms.double(6000.0),
useLegacyError = cms.bool(False)
)
)
process.TrackerRecoGeometryESProducer = cms.ESProducer("TrackerRecoGeometryESProducer")
process.VolumeBasedMagneticFieldESProducer = cms.ESProducer("VolumeBasedMagneticFieldESProducerFromDB",
debugBuilder = cms.untracked.bool(False),
label = cms.untracked.string(''),
valueOverride = cms.int32(18268)
)
process.ZdcGeometryFromDBEP = cms.ESProducer("ZdcGeometryFromDBEP",
applyAlignment = cms.bool(False)
)
process.beamHaloNavigationSchoolESProducer = cms.ESProducer("NavigationSchoolESProducer",
ComponentName = cms.string('BeamHaloNavigationSchool'),
SimpleMagneticField = cms.string('')
)
process.cosmicsNavigationSchoolESProducer = cms.ESProducer("SkippingLayerCosmicNavigationSchoolESProducer",
ComponentName = cms.string('CosmicNavigationSchool'),
allSelf = cms.bool(True),
noPXB = cms.bool(False),
noPXF = cms.bool(False),
noTEC = cms.bool(False),
noTIB = cms.bool(False),
noTID = cms.bool(False),
noTOB = cms.bool(False),
selfSearch = cms.bool(True)
)
process.fakeForIdealAlignment = cms.ESProducer("FakeAlignmentProducer",
appendToDataLabel = cms.string('fakeForIdeal')
)
process.hcalDDDRecConstants = cms.ESProducer("HcalDDDRecConstantsESModule",
appendToDataLabel = cms.string('')
)
process.hcalDDDSimConstants = cms.ESProducer("HcalDDDSimConstantsESModule",
appendToDataLabel = cms.string('')
)
process.hcalTopologyIdeal = cms.ESProducer("HcalTopologyIdealEP",
Exclude = cms.untracked.string(''),
appendToDataLabel = cms.string('')
)
process.hcal_db_producer = cms.ESProducer("HcalDbProducer",
dump = cms.untracked.vstring(''),
file = cms.untracked.string('')
)
process.idealForDigiCSCGeometry = cms.ESProducer("CSCGeometryESModule",
alignmentsLabel = cms.string('fakeForIdeal'),
appendToDataLabel = cms.string('idealForDigi'),
applyAlignment = cms.bool(False),
debugV = cms.untracked.bool(False),
useCentreTIOffsets = cms.bool(False),
useDDD = cms.bool(False),
useGangedStripsInME1a = cms.bool(True),
useOnlyWiresInME1a = cms.bool(False),
useRealWireGeometry = cms.bool(True)
)
process.idealForDigiDTGeometry = cms.ESProducer("DTGeometryESModule",
alignmentsLabel = cms.string('fakeForIdeal'),
appendToDataLabel = cms.string('idealForDigi'),
applyAlignment = cms.bool(False),
fromDDD = cms.bool(False)
)
process.idealForDigiTrackerGeometry = cms.ESProducer("TrackerDigiGeometryESModule",
alignmentsLabel = cms.string('fakeForIdeal'),
appendToDataLabel = cms.string('idealForDigi'),
applyAlignment = cms.bool(False),
fromDDD = cms.bool(False)
)
process.navigationSchoolESProducer = cms.ESProducer("NavigationSchoolESProducer",
ComponentName = cms.string('SimpleNavigationSchool'),
SimpleMagneticField = cms.string('')
)
process.siPixelQualityESProducer = cms.ESProducer("SiPixelQualityESProducer",
ListOfRecordToMerge = cms.VPSet(cms.PSet(
record = cms.string('SiPixelQualityFromDbRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiPixelDetVOffRcd'),
tag = cms.string('')
))
)
process.siStripBackPlaneCorrectionDepESProducer = cms.ESProducer("SiStripBackPlaneCorrectionDepESProducer",
BackPlaneCorrectionDeconvMode = cms.PSet(
label = cms.untracked.string('deconvolution'),
record = cms.string('SiStripBackPlaneCorrectionRcd')
),
BackPlaneCorrectionPeakMode = cms.PSet(
label = cms.untracked.string('peak'),
record = cms.string('SiStripBackPlaneCorrectionRcd')
),
LatencyRecord = cms.PSet(
label = cms.untracked.string(''),
record = cms.string('SiStripLatencyRcd')
)
)
process.siStripGainESProducer = cms.ESProducer("SiStripGainESProducer",
APVGain = cms.VPSet(cms.PSet(
Label = cms.untracked.string(''),
NormalizationFactor = cms.untracked.double(1.0),
Record = cms.string('SiStripApvGainRcd')
),
cms.PSet(
Label = cms.untracked.string(''),
NormalizationFactor = cms.untracked.double(1.0),
Record = cms.string('SiStripApvGain2Rcd')
)),
AutomaticNormalization = cms.bool(False),
appendToDataLabel = cms.string(''),
printDebug = cms.untracked.bool(False)
)
process.siStripLorentzAngleDepESProducer = cms.ESProducer("SiStripLorentzAngleDepESProducer",
LatencyRecord = cms.PSet(
label = cms.untracked.string(''),
record = cms.string('SiStripLatencyRcd')
),
LorentzAngleDeconvMode = cms.PSet(
label = cms.untracked.string('deconvolution'),
record = cms.string('SiStripLorentzAngleRcd')
),
LorentzAnglePeakMode = cms.PSet(
label = cms.untracked.string('peak'),
record = cms.string('SiStripLorentzAngleRcd')
)
)
process.siStripQualityESProducer = cms.ESProducer("SiStripQualityESProducer",
ListOfRecordToMerge = cms.VPSet(cms.PSet(
record = cms.string('SiStripDetVOffRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiStripDetCablingRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('RunInfoRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiStripBadChannelRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiStripBadFiberRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiStripBadModuleRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiStripBadStripRcd'),
tag = cms.string('')
)),
PrintDebugOutput = cms.bool(False),
ReduceGranularity = cms.bool(False),
ThresholdForReducedGranularity = cms.double(0.3),
UseEmptyRunInfo = cms.bool(False),
appendToDataLabel = cms.string('')
)
process.sistripconn = cms.ESProducer("SiStripConnectivity")
process.stripCPEESProducer = cms.ESProducer("StripCPEESProducer",
ComponentName = cms.string('stripCPE'),
ComponentType = cms.string('SimpleStripCPE'),
parameters = cms.PSet(
)
)
process.trackerGeometryDB = cms.ESProducer("TrackerDigiGeometryESModule",
alignmentsLabel = cms.string(''),
appendToDataLabel = cms.string(''),
applyAlignment = cms.bool(True),
fromDDD = cms.bool(False)
)
process.trackerNumberingGeometryDB = cms.ESProducer("TrackerGeometricDetESModule",
appendToDataLabel = cms.string(''),
fromDDD = cms.bool(False)
)
process.trackerTopology = cms.ESProducer("TrackerTopologyEP",
appendToDataLabel = cms.string('')
)
process.trajectoryCleanerBySharedHits = cms.ESProducer("TrajectoryCleanerESProducer",
ComponentName = cms.string('TrajectoryCleanerBySharedHits'),
ComponentType = cms.string('TrajectoryCleanerBySharedHits'),
MissingHitPenalty = cms.double(20.0),
ValidHitBonus = cms.double(5.0),
allowSharedFirstHit = cms.bool(True),
fractionShared = cms.double(0.19)
)
process.ttrhbwr = cms.ESProducer("TkTransientTrackingRecHitBuilderESProducer",
ComponentName = cms.string('WithTrackAngle'),
ComputeCoarseLocalPositionFromDisk = cms.bool(False),
Matcher = cms.string('StandardMatcher'),
PixelCPE = cms.string('PixelCPEGeneric'),
StripCPE = cms.string('StripCPEfromTrackAngle')
)
process.GlobalTag = cms.ESSource("PoolDBESSource",
DBParameters = cms.PSet(
authenticationPath = cms.untracked.string(''),
authenticationSystem = cms.untracked.int32(0),
messageLevel = cms.untracked.int32(0),
security = cms.untracked.string('')
),
DumpStat = cms.untracked.bool(False),
ReconnectEachRun = cms.untracked.bool(False),
RefreshAlways = cms.untracked.bool(False),
RefreshEachRun = cms.untracked.bool(False),
RefreshOpenIOVs = cms.untracked.bool(False),
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
globaltag = cms.string('80X_mcRun2_asymptotic_2016_TrancheIV_v7'),
pfnPostfix = cms.untracked.string(''),
pfnPrefix = cms.untracked.string(''),
snapshotTime = cms.string(''),
toGet = cms.VPSet(cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_eb_ECALonly'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_EBCorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_eb_ECALonly_lowpt'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_lowpt_EBCorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_eb_ECALonly_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_EBUncertainty_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_eb_ECALonly_lowpt_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_lowpt_EBUncertainty_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_ee_ECALonly'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_EECorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_ee_ECALonly_lowpt'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_lowpt_EECorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_ee_ECALonly_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_EEUncertainty_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_ee_ECALonly_lowpt_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_lowpt_EEUncertainty_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_eb_ECALTRK'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_track_EBCorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_eb_ECALTRK_lowpt'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_track_lowpt_EBCorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_eb_ECALTRK_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_track_EBUncertainty_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_eb_ECALTRK_lowpt_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_track_lowpt_EBUncertainty_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_ee_ECALTRK'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_track_EECorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_ee_ECALTRK_lowpt'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_track_lowpt_EECorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_ee_ECALTRK_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_track_EEUncertainty_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('electron_ee_ECALTRK_lowpt_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDelectron_track_lowpt_EEUncertainty_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('photon_eb_ECALonly'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDphoton_EBCorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('photon_eb_ECALonly_lowpt'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDphoton_lowpt_EBCorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('photon_eb_ECALonly_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDphoton_EBUncertainty_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('photon_eb_ECALonly_lowpt_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDphoton_lowpt_EBUncertainty_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('photon_ee_ECALonly'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDphoton_EECorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('photon_ee_ECALonly_lowpt'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDphoton_lowpt_EECorrection_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('photon_ee_ECALonly_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDphoton_EEUncertainty_80X_EGM_v4')
),
cms.PSet(
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
label = cms.untracked.string('photon_ee_ECALonly_lowpt_var'),
record = cms.string('GBRDWrapperRcd'),
tag = cms.string('GEDphoton_lowpt_EEUncertainty_80X_EGM_v4')
))
)
process.HepPDTESSource = cms.ESSource("HepPDTESSource",
pdtFileName = cms.FileInPath('SimGeneral/HepPDTESSource/data/pythiaparticle.tbl')
)
process.eegeom = cms.ESSource("EmptyESSource",
firstValid = cms.vuint32(1),
iovIsRunNotTime = cms.bool(True),
recordName = cms.string('EcalMappingRcd')
)
process.es_hardcode = cms.ESSource("HcalHardcodeCalibrations",
GainWidthsForTrigPrims = cms.bool(False),
HERecalibration = cms.bool(False),
HEreCalibCutoff = cms.double(20.0),
HFRecalibration = cms.bool(False),
iLumi = cms.double(-1.0),
testHFQIE10 = cms.bool(False),
toGet = cms.untracked.vstring('GainWidths')
)
process.prefer("es_hardcode")
| [
"[email protected]"
] | |
8e4e9a84ac90cc3c82cd6cdaf5e67eb084cac8c2 | d5ba475a6a782b0eed5d134b66eb8c601c41421c | /terrascript/resource/kubernetes.py | b36d593401d9adecbf53bb7af121910a44601d7d | [
"BSD-2-Clause",
"Python-2.0"
] | permissive | amlodzianowski/python-terrascript | ab42a06a5167e53ad8093b656a9bf14a03cb031d | 142b1a4d1164d1012ac8865d12fdcc72f1e7ae75 | refs/heads/master | 2021-05-19T11:59:47.584554 | 2020-03-26T07:13:47 | 2020-03-26T07:13:47 | 251,688,045 | 0 | 0 | BSD-2-Clause | 2020-03-31T18:00:22 | 2020-03-31T18:00:22 | null | UTF-8 | Python | false | false | 2,743 | py | # terrascript/resource/kubernetes.py
import terrascript
class kubernetes_api_service(terrascript.Resource):
pass
class kubernetes_cluster_role(terrascript.Resource):
pass
class kubernetes_cluster_role_binding(terrascript.Resource):
pass
class kubernetes_config_map(terrascript.Resource):
pass
class kubernetes_cron_job(terrascript.Resource):
pass
class kubernetes_daemonset(terrascript.Resource):
pass
class kubernetes_deployment(terrascript.Resource):
pass
class kubernetes_endpoints(terrascript.Resource):
pass
class kubernetes_horizontal_pod_autoscaler(terrascript.Resource):
pass
class kubernetes_ingress(terrascript.Resource):
pass
class kubernetes_job(terrascript.Resource):
pass
class kubernetes_limit_range(terrascript.Resource):
pass
class kubernetes_namespace(terrascript.Resource):
pass
class kubernetes_network_policy(terrascript.Resource):
pass
class kubernetes_persistent_volume(terrascript.Resource):
pass
class kubernetes_persistent_volume_claim(terrascript.Resource):
pass
class kubernetes_pod(terrascript.Resource):
pass
class kubernetes_pod_disruption_budget(terrascript.Resource):
pass
class kubernetes_priority_class(terrascript.Resource):
pass
class kubernetes_replication_controller(terrascript.Resource):
pass
class kubernetes_role_binding(terrascript.Resource):
pass
class kubernetes_resource_quota(terrascript.Resource):
pass
class kubernetes_role(terrascript.Resource):
pass
class kubernetes_secret(terrascript.Resource):
pass
class kubernetes_service(terrascript.Resource):
pass
class kubernetes_service_account(terrascript.Resource):
pass
class kubernetes_stateful_set(terrascript.Resource):
pass
class kubernetes_storage_class(terrascript.Resource):
pass
__all__ = [
"kubernetes_api_service",
"kubernetes_cluster_role",
"kubernetes_cluster_role_binding",
"kubernetes_config_map",
"kubernetes_cron_job",
"kubernetes_daemonset",
"kubernetes_deployment",
"kubernetes_endpoints",
"kubernetes_horizontal_pod_autoscaler",
"kubernetes_ingress",
"kubernetes_job",
"kubernetes_limit_range",
"kubernetes_namespace",
"kubernetes_network_policy",
"kubernetes_persistent_volume",
"kubernetes_persistent_volume_claim",
"kubernetes_pod",
"kubernetes_pod_disruption_budget",
"kubernetes_priority_class",
"kubernetes_replication_controller",
"kubernetes_role_binding",
"kubernetes_resource_quota",
"kubernetes_role",
"kubernetes_secret",
"kubernetes_service",
"kubernetes_service_account",
"kubernetes_stateful_set",
"kubernetes_storage_class",
]
| [
"[email protected]"
] | |
12c4009dcc6246635dd6e61c1e166dddbab555ce | 87f676da996db82348282d7bdd49d05fe5372356 | /anchore_engine/version.py | 1c5682895bdff5c60ce4f3d9454ffb7652cc6283 | [
"Apache-2.0"
] | permissive | shrikant-rajappan/anchore | abad5baca5258c665d5f3fdc471ab75b86b71163 | e9838894f666161e84ecce408922e48ca045eaf6 | refs/heads/master | 2023-01-06T04:47:14.516052 | 2019-08-26T20:35:39 | 2019-08-26T20:35:39 | 204,663,580 | 0 | 1 | Apache-2.0 | 2022-12-21T15:42:54 | 2019-08-27T09:05:05 | Python | UTF-8 | Python | false | false | 40 | py | version="0.5.0-dev"
db_version="0.0.11"
| [
"[email protected]"
] | |
b6ae8c8519f939cc6a51e7ee6f2fefd15c8b2259 | 3de2a746243ad1cb000994a06a0f9699db9a901f | /abc184b.py | 64694289e5c4aec649f12bd9438abd27db4e5b01 | [] | no_license | takumi152/atcoder | 71d726ffdf2542d8abac0d9817afaff911db7c6c | ebac94f1227974aa2e6bf372e18605518de46441 | refs/heads/master | 2022-10-30T12:14:41.742596 | 2022-09-29T19:49:32 | 2022-09-29T19:49:32 | 181,502,518 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | def main():
n, x = map(int, input().split())
s = input()
score = x
for i in range(n):
if s[i] == 'o':
score += 1
elif s[i] == 'x' and score > 0:
score -= 1
print(score)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
931bfaa4cc8c3856ad42bb1d98c301e63b8c1bd6 | c1fcfa74629b0ab3cf806c2a565aa869f7fea3d9 | /Contents/Libraries/Shared/resources/lib/proxies/anonymster.py | b424e52f3034d1352d01ebb78a5228d0b7a1245b | [] | no_license | gus4520/FMoviesPlus.bundle | e884e37f0aca68ac1d4c1e8d7dc7ff741ea323eb | 102baa1a5c7cef3ef3f728db226e01fbdf34da7f | refs/heads/master | 2020-03-10T02:43:18.087100 | 2018-01-15T20:07:11 | 2018-01-15T20:07:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,657 | py | import re,urllib,urlparse,base64,time,json
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib.libraries import control
from resources.lib import resolvers
# Web Proxy
name = 'Anonymster'
loggertxt = []
PROXY_URL = "https://proxy.anonymster.com/browse.php?b=2&u="
class proxy:
def __init__(self):
del loggertxt[:]
self.ver = '0.0.1'
self.update_date = 'Dec. 19, 2017'
log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
self.base_link = 'https://proxy.anonymster.com'
self.name = name
self.loggertxt = []
self.disabled = False
self.captcha = False
self.ssl = True
self.speedtest = 0
self.headers = {'Connection' : 'keep-alive', 'User-Agent' : client.randomagent()}
self.working = self.testSite()
log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))
def getLog(self):
self.loggertxt = loggertxt
return self.loggertxt
def testSite(self):
try:
if self.disabled == True:
log('INFO','testSite', 'Plugin Disabled')
return False
x1 = time.time()
http_res = client.request(url=self.base_link, output='responsecode')
self.speedtest = time.time() - x1
if http_res in client.HTTP_GOOD_RESP_CODES:
log('SUCCESS', 'testSite', 'HTTP Resp : %s for %s' % (http_res,self.base_link))
return True
log('ERROR', 'testSite', 'HTTP Resp : %s via proxy for %s' % (http_res,self.base_link))
return False
except Exception as e:
log('ERROR','testSite', '%s' % e)
return False
def request(self, url, close=True, redirect=True, followredirect=False, error=False, proxy=None, post=None, headers=None, mobile=False, limit=None, referer=None, cookie=None, output='', timeout='30', httpsskip=False, use_web_proxy=False, XHR=False, IPv4=False):
if self.working == False:
log("Proxy working status is %s" % self.working)
return None
if headers == None:
headers = self.headers
return requestdirect(url=url, close=close, redirect=redirect, followredirect=followredirect, error=error, proxy=proxy, post=post, headers=headers, mobile=mobile, limit=limit, referer=referer, cookie=cookie, output=output, timeout=timeout, httpsskip=httpsskip, use_web_proxy=use_web_proxy, XHR=XHR, IPv4=IPv4)
def requestdirect(url, close=True, redirect=True, followredirect=False, error=False, proxy=None, post=None, headers=None, mobile=False, limit=None, referer=None, cookie=None, output='', timeout='30', httpsskip=False, use_web_proxy=False, XHR=False, IPv4=False):
try:
urlhost = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if headers == None:
headers = {'Connection' : 'keep-alive'}
headers['User-Agent'] = client.randomagent()
res = client.request(url = PROXY_URL + url, close=close, redirect=redirect, followredirect=followredirect, error=error, proxy=proxy, post=post, headers=headers, mobile=mobile, limit=limit, referer=referer, cookie=cookie, output=output, timeout=timeout, httpsskip=httpsskip, use_web_proxy=use_web_proxy, XHR=XHR, IPv4=IPv4)
page_data_string = client.getPageDataBasedOnOutput(res, output)
#print page_data_string
pattern = re.compile('<script[\s\S]+?/script>')
page_data_string = re.sub(pattern, '', page_data_string)
try:
page_data_string = page_data_string.replace('\n','')
#page_data_string = page_data_string.replace('\r','r').replace('\n','<br/>').replace('\w','').replace('\.','').replace('\t','').replace('\ ','')
except Exception as e:
log('FAIL','requestdirect-1', '%s' % e, dolog=False)
#print page_data_string
try:
page_data_stringx = json.dumps(page_data_string)
page_data_stringx = page_data_stringx.replace('\\','')
page_data_stringx = page_data_stringx[1:-1]
page_data_string = page_data_stringx
except Exception as e:
log('FAIL','requestdirect-2', '%s' % e, dolog=False)
#print page_data_string
#page_data_string = str(page_data_string)
try:
r = unicode(page_data_string, "utf-8")
page_data_string = r
except Exception as e:
log('FAIL','requestdirect-3', '%s' % e, dolog=False)
try:
r = str(page_data_string)
page_data_string = r
except Exception as e:
log('FAIL','requestdirect-4', '%s' % e, dolog=False)
page_data_string = page_data_string.replace('https://proxy.anonymster.com/browse.php?', '')
page_data_string = page_data_string.replace('/browse.php?u=', '')
page_data_string = page_data_string.replace('&b=2', '')
page_data_string = page_data_string.replace('b=2', '')
page_data_string = page_data_string.replace('u=', '')
page_data_string = page_data_string.replace('&http', 'http')
page_data_string = page_data_string.replace('/http', 'http')
try:
page_data_string = page_data_string.decode('utf-8')
except:
pass
try:
page_data_string = urllib.unquote_plus(page_data_string)
except:
pass
try:
page_data_string = page_data_string.encode('utf-8')
except:
pass
return client.getResponseDataBasedOnOutput(page_data_string, res, output)
except Exception as e:
log('ERROR','requestdirect', '%s' % e)
return None
def log(type='INFO', method='undefined', err='', dolog=True, logToControl=False, doPrint=True):
try:
msg = '%s: %s > %s > %s : %s' % (time.ctime(time.time()), type, name, method, err)
if dolog == True:
loggertxt.append(msg)
if logToControl == True:
control.log(msg)
if control.doPrint == True and doPrint == True:
print msg
except Exception as e:
control.log('Error in Logging: %s >>> %s' % (msg,e)) | [
"[email protected]"
] | |
c69f07208e4745e5ea251ca85528efeaedd27a54 | e21330ac23917670799616e7fc44d3a73171042d | /algorithm_project/users/urls.py | 4af926f1a6913423f31c430f03386b4e9e85ebd4 | [
"MIT"
] | permissive | godwon2095/algorithm_project | 813b25e16a26723d0d1748ee24b23b4d16e70974 | c8140f75a14535592cac06a62c480be13c45d7c1 | refs/heads/master | 2020-05-28T06:28:22.094441 | 2019-05-27T21:37:35 | 2019-05-27T21:37:35 | 188,909,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | from django.urls import path
from algorithm_project.users.views import (
user_redirect_view,
user_update_view,
user_detail_view,
)
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("~update/", view=user_update_view, name="update"),
path("<str:username>/", view=user_detail_view, name="detail"),
]
| [
"[email protected]"
] | |
cfd4a4929001cfecb69580906667d2c26c280741 | 664646ccbeb6575582299e7d1c6ccc696f07ccba | /tools/oneforall/modules/datasets/passivedns_api.py | 13da6b1fa0bfbddd994246c0aad0e6be01b09d64 | [] | no_license | 0xss/bayonet | 3f1ce5832a06eef7e60b198c6c56cf59e4543199 | d723dbf0299ac86d9a4419741a197985558e283c | refs/heads/master | 2021-02-25T20:21:11.342592 | 2020-03-06T04:40:14 | 2020-03-06T04:40:14 | 245,462,098 | 0 | 1 | null | 2020-03-06T16:02:33 | 2020-03-06T16:02:32 | null | UTF-8 | Python | false | false | 1,501 | py | from config import Oneforall
from tools.oneforall.common.query import Query
class PassiveDnsAPI(Query):
def __init__(self, domain):
Query.__init__(self)
self.domain = self.register(domain)
self.module = 'Dataset'
self.source = 'PassiveDnsQuery'
self.addr = Oneforall.passivedns_api_addr or 'http://api.passivedns.cn'
self.token = Oneforall.passivedns_api_token
def query(self):
"""
向接口查询子域并做子域匹配
"""
self.header = self.get_header()
self.header.update({'X-AuthToken': self.token})
self.proxy = self.get_proxy(self.source)
url = self.addr + '/flint/rrset/*.' + self.domain
resp = self.get(url)
if not resp:
return
subdomains = self.match(self.domain, str(resp.json()))
# 合并搜索子域名搜索结果
self.subdomains = self.subdomains.union(subdomains)
def run(self):
"""
类执行入口
"""
if not self.check(self.addr):
return
self.begin()
self.query()
self.finish()
self.save_json()
self.gen_result()
self.save_db()
def do(domain): # 统一入口名字 方便多线程调用
"""
类统一调用入口
:param str domain: 域名
"""
query = PassiveDnsAPI(domain)
query.run()
if __name__ == '__main__':
do('example.com')
| [
"[email protected]"
] | |
209bbad24dacc64dfcafafba3760d51026cf9ce4 | 7ec35bd037077e9b65d3fa26a91978e8652c7409 | /Stream-3/Full-Stack-Development/21.Django REST Framework/2.Serializers-And-Class-Based-Views/django_todo/todo/views.py | f79744a2bc2b32f9331530f531e9a4543b000663 | [
"MIT"
] | permissive | GunnerJnr/_CodeInstitute | 8f743abef66c33a77ce13ca719963e93ffe22607 | efba0984a3dc71558eef97724c85e274a712798c | refs/heads/master | 2023-01-05T10:53:57.536047 | 2020-01-27T13:04:12 | 2020-01-27T13:04:12 | 99,014,961 | 8 | 6 | MIT | 2022-12-26T20:24:35 | 2017-08-01T15:15:55 | CSS | UTF-8 | Python | false | false | 947 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.views import APIView
from todo.serializers import TodoSerializer
from todo.models import Todo
# Create your views here.
class TodoView(APIView):
"""
TodoView: used to handle the incoming requests relating to `todo` items
"""
def get(self, request):
"""
Retrieve a complete list of `todo` items from the Todo
model, serialize them to JSON and return the serialized
todo items
"""
todo_items = Todo.objects.all()
        # Serialize the data retrieved from the DB using the `TodoSerializer`
        serializer = TodoSerializer(todo_items, many=True)
        # Store the serialized data in `serialized_data`
serialized_data = serializer.data
return Response(serialized_data)
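# Note: the `TodoSerializer` imported above lives in todo/serializers.py.
# A minimal sketch of what such a serializer could look like is shown here for
# context only; the field setup is an assumption, not taken from this project:
#
#     from rest_framework import serializers
#     from todo.models import Todo
#
#     class TodoSerializer(serializers.ModelSerializer):
#         class Meta:
#             model = Todo
#             fields = '__all__'  # expose every field on the Todo model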
| [
"[email protected]"
] | |
cbe47d4a7fded25f5b1a067869bebac87802ba37 | 4f325c55882487af0aab8853179a7ed5867953b9 | /pssshclient.py | 15bd33ee5692b7fcac7d53d0c6cd2adec6d166e1 | [] | no_license | ravijaya/oct26 | 6ce7112f85db86fa41fbb2ff6f9f01650055ad6a | 64f7dbe759ddc652297365c8a635b239e1ef0cba | refs/heads/master | 2020-08-28T12:29:54.644234 | 2019-10-26T11:41:41 | 2019-10-26T11:41:41 | 217,700,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | """single threaded ssh client"""
import paramiko
def ssh_client(host, port, user, pwd, job):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, port, user, pwd)
stdin, stdout, stderr = ssh.exec_command(job)
output = stdout.read()
response = output if output else stderr.read() # if else conditional operator
ssh.close()
return response.decode()
if __name__ == '__main__':
r = ssh_client('52.66.251.190', '22', 'training', 'training', 'lscpu')
print(r)
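    # Because this client is single threaded, running the same job on several
    # hosts is just a sequential loop; a sketch (host list is illustrative only):
    #
    #     for host in ['10.0.0.1', '10.0.0.2']:
    #         print(ssh_client(host, '22', 'training', 'training', 'uptime'))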
| [
"[email protected]"
] | |
c12bee6b4ad787e6305c5304f580559473b1b30b | 4c45bd5cb5d71e8563c8aca3e706e7275965e5fd | /users/tests.py | 5ea92a18481ef8300ddf38bc8967401bb7f7ce4b | [
"BSD-2-Clause"
] | permissive | diogobaeder/n2n | 4794faa6de46e1dadb9ac6c3611cb9ac738411c3 | aca2488d3d54067d4aea2f69ec37643a897735eb | refs/heads/master | 2021-01-23T10:43:14.405346 | 2017-06-01T18:19:52 | 2017-06-01T18:19:52 | 93,084,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | from uuid import UUID
from django.test import TestCase
from .models import *
class UserTest(TestCase):
def test_creates_basic_user(self):
user = User.objects.create(name='John')
self.assertEqual(user.name, 'John')
self.assertIsInstance(user.uuid, UUID)
def test_adds_a_project(self):
user = User.objects.create(name='John')
project = Project.objects.create(name='Lunch')
user.projects.add(project)
self.assertEqual(user.projects.first().name, 'Lunch')
def test_can_belong_to_a_company(self):
company = Company.objects.create(name='Acme')
user = User.objects.create(name='John', company=company)
self.assertEqual(user.company.name, 'Acme')
def test_gets_by_uuid(self):
user = User.objects.create(name='John')
retrieved = User.objects.get_by_uuid(user.uuid)
self.assertEqual(retrieved.id, user.id)
class CompanyTest(TestCase):
def test_creates_basic_company(self):
company = Company.objects.create(name='Acme')
self.assertEqual(company.name, 'Acme')
self.assertIsInstance(company.uuid, UUID)
def test_gets_by_uuid(self):
company = Company.objects.create(name='Acme')
retrieved = Company.objects.get_by_uuid(company.uuid)
self.assertEqual(retrieved.id, company.id)
class ProjectTest(TestCase):
def test_creates_basic_project(self):
project = Project.objects.create(name='Lunch')
self.assertEqual(project.name, 'Lunch')
self.assertIsInstance(project.uuid, UUID)
| [
"[email protected]"
] | |
4a2d45213d44ffeeaded394b450ea7daf5b65bb7 | bb198232df12a1adb9e8a6164ff2a403bf3107cf | /wifi-dump-parser-3/bar_graph_plot.py | 4c8876a1e9badd818c8ba52c278afd5a7bcc5621 | [] | no_license | vanello/wifi-arsenal | 9eb79a43dfdd73d3ead1ccd5d2caf9bad9e327ee | 1ca4c5a472687f8f017222893f09a970652e9a51 | refs/heads/master | 2021-01-16T22:00:37.657041 | 2015-09-03T03:40:43 | 2015-09-03T03:40:43 | 42,060,303 | 1 | 0 | null | 2015-09-07T15:24:11 | 2015-09-07T15:24:11 | null | UTF-8 | Python | false | false | 5,829 | py | #Author : Abhinav Narain
#Date : 9-sept-2013
#Purpose : To plot the #devices,AP inside homes
from magicplott import *
def pickle_reader(input_folder):
print "the pickle reader called "
data_fs=os.listdir(input_folder)
home_device_table=defaultdict(list)
home_ap_table=defaultdict(list)
for f_name in data_fs :
#router_id,ap_macs,device_macs,ap_map,device_map,rate_map ; maps are of times
_f_content= pickle.load(open(input_folder+f_name,'rb'))
router_id= _f_content[0]
ap_mac=_f_content[1]
device_mac=_f_content[2]
home_device_table[router_id]=device_mac
home_ap_table[router_id]=ap_mac
return [home_ap_table,home_device_table]
def pickle_reader_time_map(input_folder):
print "the pickle reader called "
data_fs=os.listdir(input_folder)
home_device_table=defaultdict(list)
home_ap_table=defaultdict(list)
for f_name in data_fs :
#router_id,ap_macs,device_macs,ap_map,device_map,rate_map ; maps are of times
_f_content= pickle.load(open(input_folder+f_name,'rb'))
router_id= _f_content[0]
ap_map=_f_content[3]
device_map=_f_content[4]
home_device_table[router_id]=device_map
home_ap_table[router_id]=ap_map
return [home_ap_table,home_device_table]
if __name__=='__main__':
'''
This main function is for plotting the number
of distinct devices and Access Points seen by
the BISmark Access Points inside homes
'''
if len(sys.argv) !=4:
print "usage : python unpickeler.py <data_folder_2GHz> <data_folder_5GHz> <filename(without png extention)> "
sys.exit(0)
input_folder = sys.argv[1]
input_folder5 = sys.argv[2]
outfile_name = sys.argv[3]
home_ap_2_table=defaultdict(list)
home_ap_5_table=defaultdict(list)
home_device_2_table=defaultdict(list)
home_device_5_table=defaultdict(list)
[home_ap_2_table,home_device_2_table]=pickle_reader(input_folder)
[home_ap_5_table,home_device_5_table]=pickle_reader(input_folder5)
new_list_2=[]
for k,v in home_ap_2_table.iteritems():
list_devices=home_device_2_table[k]
new_list_devices= [x for x in list_devices if x not in v]
new_list_2.append([k,len(new_list_devices),len(v)])
new_list_2.sort(key=lambda x: x[1])
labels_2,home_device_count_2,home_ap_count_2=[],[],[]
for i in new_list_2 :
labels_2.append(i[0])
home_device_count_2.append(i[1])
home_ap_count_2.append(i[2])
new_list_5=[]
for k,v in home_ap_5_table.iteritems():
list_devices=home_device_5_table[k]
new_list_devices= [x for x in list_devices if x not in v]
new_list_5.append([k,len(new_list_devices),len(v)])
new_list_5.sort(key=lambda x: x[1])
labels_5,home_device_count_5,home_ap_count_5=[],[],[]
for i in new_list_5 :
labels_5.append(i[0])
home_device_count_5.append(i[1])
home_ap_count_5.append(i[2])
bar_graph_plotter(labels_5,
home_device_count_5,
'RouterID',
'Device Count',
'Number of Devices observed in homes(5 GHz)',
outfile_name+'5_devices.png'
)
bar_graph_plotter(labels_2,
home_device_count_2,
'RouterID',
'Device Count',
'Number of Devices observed in homes(2.4 GHz)',
outfile_name+'2_4_devices.png'
)
new_list_2.sort(key=lambda x: x[2])
labels_2,home_device_count_2,home_ap_count_2=[],[],[]
for i in new_list_2 :
labels_2.append(i[0])
home_device_count_2.append(i[1])
home_ap_count_2.append(i[2])
new_list_5.sort(key=lambda x: x[2])
labels_5,home_device_count_5,home_ap_count_5=[],[],[]
for i in new_list_5 :
labels_5.append(i[0])
home_device_count_5.append(i[1])
home_ap_count_5.append(i[2])
bar_graph_plotter(labels_5,
home_ap_count_5,
'RouterID',
'Access Points Count',
'Number of Access Points observed in homes(5 GHz)',
outfile_name+'5_ap.png'
)
bar_graph_plotter(labels_2,
home_ap_count_2,
'RouterID',
'Device Count',
'Number of Devices and Access Points observed in homes(2.4 GHz)',
outfile_name+'2_4_ap.png'
)
#Date : 15 Sept, 2012
#Partially written; needs to be completed
if 0:# __name__=='__main__':
'''
This function is for plotting the number of Devices
and Access Points witnessed by BISmark Access Point
*persistently*
'''
if len(sys.argv) !=3:
print "usage : python unpickeler.py data_folder_2GHz filename.png "
sys.exit(0)
input_folder = sys.argv[1]
outfile_name = sys.argv[2]
home_ap_2_table=defaultdict(list)
home_device_2_table=defaultdict(list)
[home_ap_2_table,home_device_2_table]=pickle_reader_time_map(input_folder)
new_list_2=[]
for k,ap_time_map in home_ap_2_table.iteritems():
for time,list_of_aps in ap_time_map.iteritems():
print time, len(list_of_aps)
print "++++"
sys.exit(1)
new_list_devices= [x for x in list_devices if x not in v]
new_list_2.append([k,len(new_list_devices),len(v)])
new_list_2.sort(key=lambda x: x[1])
labels_2,home_device_count_2,home_ap_count_2=[],[],[]
for i in new_list_2 :
labels_2.append(i[0])
home_device_count_2.append(i[1])
home_ap_count_2.append(i[2])
| [
"[email protected]"
] | |
eecd8f9f717bf3993d28bdb0a215b4bf7bcc4cf9 | 8ffccb986def3f1f669b475a8575e05dccd77163 | /py02프로그래밍기초/py02_09ex3_average.py | 05c1e29c77f959eaded24fb96d3d2e583ce1fbf5 | [] | no_license | pythonJhu/testProject | 09f5d412b0271e17b31f78fd7133d6e54464c41a | 74f27b0f5c2b798122c3edbd66f7b485a8add6d5 | refs/heads/master | 2021-01-01T10:55:56.370576 | 2020-03-08T07:09:33 | 2020-03-08T07:09:33 | 239,248,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | value1 = input("첫번째 과목 점수를 입력하세요 : ")
value2 = input("두번째 과목 점수를 입력하세요 : ")
value1 = int(value1) # convert the string value1 to an integer (type conversion)
value2 = int(value2) # convert the string value2 to an integer (type conversion)
sum = value1 + value2
average = sum / 2
print(' ------------------ value1 = ', value1, ' value2 = ', value2, '-------------------- ')
if average >= 95:
    print('very good')
else:
print('just good')
print(' ------------------- average = ', average, '------------------- ') | [
"[email protected]"
] | |
fe2b091cab4cd2ea964b6dc9f009c5ffa2a2e6d5 | c22fa68c3ebc8f112300c02d9f66cdb651bee648 | /manage.py | 513689d5061edf0b9fc2e2fb89db77fe6b716ea3 | [
"Apache-2.0"
] | permissive | ans2human/DjNg-CRUD | b053108244a9a07f3ed3f56a8d4322100ddf9c5a | eb5b911fbd643cc182ba272ec50191cbfbb7cf03 | refs/heads/master | 2020-03-21T04:57:07.845134 | 2018-07-05T09:25:33 | 2018-07-05T09:25:33 | 138,136,069 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "movierater.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
7e174e8fb9a2e4c5b0c0a5341a661db12f49e731 | b9cee0411d39d25a534c44d7e43e18924af02214 | /highway_env/road/lane.py | 46acc9e73f7079c07e9687b19374e078548c74ba | [
"MIT"
] | permissive | galleon/highway-env | 1a848b0c742d6bb5c888da680a98c9f21d665b31 | 2fba2bda9c6b29218db3a6d2a7c2d7de2f1a4bf1 | refs/heads/master | 2022-11-06T17:46:48.533001 | 2020-06-22T05:31:48 | 2020-06-22T05:31:48 | 272,162,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | import numpy as np
class LineType:
"""
A lane side line type.
"""
NONE = 0
STRIPED = 1
CONTINUOUS = 2
CONTINUOUS_LINE = 3
class StraightLane:
"""
A straight line lane
"""
    def __init__(self, beg, end, width=4, line_types=None, forbidden=False, speed_limit=20, priority=0):
self.beg = beg
self.end = end
self.width = width
self.heading = 0
self.length = np.linalg.norm(self.end - self.beg)
self.line_types = line_types or [LineType.STRIPED, LineType.STRIPED]
self.direction = (self.end - self.beg) / self.length
self.direction_lateral = np.array([-self.direction[1], self.direction[0]])
self.forbidden = forbidden
self.priority = priority
self.speed_limit = speed_limit
def position(self, longitudinal, lateral):
return self.beg + longitudinal*self.direction + lateral*self.direction_lateral
def heading_at(self, longitudinal):
return self.heading
def width_at(self, longitudinal):
        return self.width
def local_coordinates(self, position):
        delta = position - self.beg
        longitudinal = np.dot(delta, self.direction)
        lateral = np.dot(delta, self.direction_lateral)
return longitudinal, lateral | [
"[email protected]"
] | |
39f5ffac6c1fa9cc6b5aca33866d41cba6910f02 | b7125b27e564d2cc80a2ce8d0a6f934aa22c8445 | /.history/sudoku_20201103142305.py | 4850f9b602a9fe01ba0698b7ba97c70a49c9bd0d | [] | no_license | JensVL96/Puzzle-solver-for-fun | 4c15dcd570c3705b7ac555efb56b52913e81083c | 6d8a4378a480372213a596a336a4deca727a00fc | refs/heads/master | 2021-07-15T05:19:42.185495 | 2020-11-08T13:59:49 | 2020-11-08T13:59:49 | 224,855,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,312 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
from config import *
from create_board import *
from solve_bloard import *
from display_board import *
from string import *
from math import floor
import pygame as pg
import numpy as np
# For error highlighting
def set_highlight(row, col, blk, lock):
global input_lock
input_lock = lock
global row_index
row_index = row
    global col_index
    col_index = col
    global blk_index
    blk_index = blk
def get_cord(pos):
global box_index_x
box_index_x = int((pos[0] - TOP_LX)//BLOCK_SIZE)
global box_index_y
box_index_y = int((pos[1] - TOP_LY)//BLOCK_SIZE)
def valid(grid, x, y, val):
input_lock = 0
row = col = blk = (0, 0)
for index in range(9):
# Check if value in column
if grid[x][index] == val:
col = (x, index)
input_lock = 1
# Check if value in row
if grid[index][y] == val:
row = (index, y)
input_lock = 1
# Finds the block
index_x = x // 3 # integer division
index_y = y // 3
# Check if value in block
for i in range(index_x * 3, index_x * 3 + 3):
for j in range (index_y * 3, index_y * 3 + 3):
if grid[i][j] == val:
blk = (i, j)
input_lock = 1
if input_lock == 1:
set_highlight(row, col, blk, input_lock)
return False
return True
def valid_cdt(cdt_list, val):
if type(cdt_list) == int: # ignore
return True
if len(cdt_list) > 9:
return False
else:
for iter in cdt_list:
if iter == val:
return False
return True
class Main():
def __init__(self):
self.board = []
self.run()
def run(self):
pg.init()
self.screen = pg.display.set_mode(SCREEN_RES)
pg.display.set_caption('Sudoku solver')
display = Display_board(self.screen)
val = 0
blink = False
alpha = 1
a_change = True
blink_color = GREEN
candidates = []
get_cord(INITIAL_CORDS)
set_highlight(INITIAL_CORDS, INITIAL_CORDS, INITIAL_CORDS, INITIAL_LOCK)
board = create_board().board
while 1:
for event in pg.event.get():
if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
exit()
if event.type == pg.MOUSEBUTTONDOWN and input_lock != 1:
pos = pg.mouse.get_pos()
get_cord(pos)
# Checks if selection is on the board
if pos[0] < TOP_LX or pos[1] < TOP_LY or pos[0] > int(BOT_RX) or pos[1] > int(BOT_RY):
blink = False
else:
blink = True
if event.type == pg.KEYDOWN and input_lock != 1:
if event.key == pg.K_1:
val = 1
if event.key == pg.K_2:
val = 2
if event.key == pg.K_3:
val = 3
if event.key == pg.K_4:
val = 4
if event.key == pg.K_5:
val = 5
if event.key == pg.K_6:
val = 6
if event.key == pg.K_7:
val = 7
if event.key == pg.K_8:
val = 8
if event.key == pg.K_9:
val = 9
if event.key == pg.K_BACKSPACE:
board[int(box_index_x)][int(box_index_y)] = 0
elif event.type == pg.KEYDOWN and input_lock == 1:
if event.key == pg.K_BACKSPACE:
val = 0
set_highlight(INITIAL_CORDS, INITIAL_CORDS, INITIAL_CORDS, INITIAL_LOCK)
blink_color = GREEN
board[int(box_index_x)][int(box_index_y)] = 0
if val != 0:
# display.draw_val(val, box_index_x, box_index_y)
candidates = []
print("value: ", val,"occupying position: ", board[box_index_x][box_index_y])
if valid(board, box_index_x, box_index_y, val):
                    if board[box_index_x][box_index_y] == 0: # No candidates yet: just place the value
                        board[box_index_x][box_index_y] = val
                    elif type(board[box_index_x][box_index_y]) == int and valid_cdt(board[box_index_x][box_index_y], val): # Switching from number to list
                        candidates.append(val)
                        board[box_index_x][box_index_y] = candidates
                    elif valid_cdt(board[box_index_x][box_index_y], val): # Adding candidate to list
                        candidates = board[box_index_x][box_index_y]
                        candidates.append(val)
                        board[box_index_x][box_index_y] = candidates
else:
board[box_index_x][box_index_y] = val
# Draws the screen
pg.draw.rect(self.screen, BLACK, (0, 0, self.screen.get_width(), self.screen.get_height()))
self.screen.fill(BEIGE)
# Draws the board
display.draw(board)
# Check if cell is selected
if blink:
cell = display.find_cell(box_index_x, box_index_y)
blink = display.blink(alpha, a_change)
alpha = blink[0]
a_change = blink[1]
myRect = pg.Rect(cell)
rectSurf = pg.Surface(myRect.size, pg.SRCALPHA)
rectSurf.fill(blink_color)
rectSurf.set_alpha(alpha)
self.screen.blit(rectSurf, (myRect.x, myRect.y))
# Check if incorrect input
if input_lock == 1 and val != 0:
display.update(board, row_index, col_index, blk_index)
blink_color = RED
val = 0
# display.draw_box()
pg.display.update()
self.solution = solve_board(board)
self.solution.assign_flags(board)
if __name__ == '__main__':
Main()
| [
"[email protected]"
] | |
72f861f016dcd6fdff6b6467986d2be6d78a4439 | 927fc31a0144c308a5c8d6dbe46ba8f2728276c9 | /tasks/final_tasks/iterator/1.simple_string_iterator.py | 2f13e87860aabfa853eaa9c7ff6fea24321ff01b | [] | no_license | ChandraSiva11/sony-presamplecode | b3ee1ba599ec90e357a4b3a656f7a00ced1e8ad3 | 393826039e5db8a448fa4e7736b2199c30f5ed24 | refs/heads/master | 2023-01-14T00:09:19.185822 | 2020-11-23T02:07:00 | 2020-11-23T02:07:00 | 299,527,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | # First Simple iterator progrma in python
def main():
string = "Hi Hellow world"
itr_obj = iter(string)
# For loop will automatically handle the stop iteration
# for i in itr_obj:
# print(i)
    # Without a for loop
try :
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
print(next(itr_obj))
# print(next(itr_obj))
except Exception as error:
print('next iteration Error : ', error)
# print(itr_obj)
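
# Added sketch (not part of the original exercise): the same manual iteration
# can be written as a loop that stops when StopIteration is raised, instead of
# spelling out every next() call by hand.
def iterate_manually(iterable):
    itr = iter(iterable)
    while True:
        try:
            print(next(itr))
        except StopIteration:
            break
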
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
714df2c4872aa8297b8ca60a4c4ec251566f37f7 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /wPMgjmQMoCwm3G6mt_5.py | 1c4eae077e1c33c1d203e3a596da095b005adddc | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py |
def upload_count(dates, month):
l = len(month)
ctr = 0
for i in dates:
if i[:l] == month:
ctr += 1
return ctr
| [
"[email protected]"
] | |
f4cbe2ace7b56a43abc0197ceb831adbd5082b8d | 5b7c2feb27a71837edf526315d413706a6bf82ff | /tests/utils/test_env.py | 5a4a4d3a3b4e68fd3b764190b3d69f00f25ea3ec | [
"BSD-3-Clause"
] | permissive | facebookresearch/mmf | df675223566dc8fb2359aa3e1a2d49db5e3c2b9a | 63f76fbcfe2d056b88734fc41a983251d20e6c61 | refs/heads/main | 2023-08-23T23:40:46.827046 | 2023-07-11T06:18:50 | 2023-07-11T06:18:50 | 138,831,170 | 2,432 | 592 | NOASSERTION | 2023-08-11T20:26:11 | 2018-06-27T04:52:40 | Python | UTF-8 | Python | false | false | 4,585 | py | # Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import io
import os
import sys
import unittest
from mmf.common.registry import registry
from mmf.utils.configuration import get_mmf_env
from mmf.utils.env import import_user_module, setup_imports
from mmf.utils.general import get_mmf_root
from mmf_cli.run import run
from tests.test_utils import make_temp_dir, search_log
class TestUtilsEnvE2E(unittest.TestCase):
def _delete_dirty_modules(self):
for key in list(sys.modules.keys()):
if key not in self._initial_modules:
del sys.modules[key]
def _sanitize_registry(self):
registry.mapping["builder_name_mapping"].pop("always_one", None)
registry.mapping["model_name_mapping"].pop("simple", None)
registry.mapping["state"] = {}
def _get_user_dir(self, abs_path=True):
if abs_path:
return os.path.join(get_mmf_root(), "..", "tests", "data", "user_dir")
else:
return os.path.join("tests", "data", "user_dir")
def setUp(self):
setup_imports()
self._initial_modules = set(sys.modules)
self._sanitize_registry()
def tearDown(self):
self._delete_dirty_modules()
self._sanitize_registry()
def _test_user_import_e2e(self, extra_opts=None):
if extra_opts is None:
extra_opts = []
MAX_UPDATES = 50
user_dir = self._get_user_dir()
with make_temp_dir() as temp_dir:
opts = [
"model=simple",
"run_type=train_val_test",
"dataset=always_one",
"config=configs/experiment.yaml",
f"env.user_dir={user_dir}",
"training.seed=1",
"training.num_workers=3",
f"training.max_updates={MAX_UPDATES}",
f"env.save_dir={temp_dir}",
]
opts = opts + extra_opts
out = io.StringIO()
with contextlib.redirect_stdout(out):
run(opts)
train_log = os.path.join(temp_dir, "train.log")
log_line = search_log(
train_log,
search_condition=[
lambda x: x["progress"] == f"{MAX_UPDATES}/{MAX_UPDATES}",
lambda x: "best_val/always_one/accuracy" in x,
],
)
self.assertEqual(float(log_line["val/always_one/accuracy"]), 1)
log_line = search_log(
train_log,
search_condition=[
lambda x: x["progress"] == f"{MAX_UPDATES}/{MAX_UPDATES}",
lambda x: "test/always_one/accuracy" in x,
],
)
self.assertEqual(float(log_line["test/always_one/accuracy"]), 1)
def test_user_import_e2e(self):
self._test_user_import_e2e()
def test_cpu_evaluation_e2e(self):
self._test_user_import_e2e(extra_opts=["evaluation.use_cpu=True"])
def test_import_user_module_from_directory_absolute(self, abs_path=True):
# Make sure the modules are not available first
self.assertIsNone(registry.get_builder_class("always_one"))
self.assertIsNone(registry.get_model_class("simple"))
self.assertFalse("mmf_user_dir" in sys.modules)
# Now, import and test
user_dir = self._get_user_dir(abs_path)
import_user_module(user_dir)
self.assertIsNotNone(registry.get_builder_class("always_one"))
self.assertIsNotNone(registry.get_model_class("simple"))
self.assertTrue("mmf_user_dir" in sys.modules)
self.assertTrue(user_dir in get_mmf_env("user_dir"))
def test_import_user_module_from_directory_relative(self):
self.test_import_user_module_from_directory_absolute(abs_path=False)
user_dir = self._get_user_dir(abs_path=False)
self.assertEqual(user_dir, get_mmf_env("user_dir"))
def test_import_user_module_from_file(self):
self.assertIsNone(registry.get_builder_class("always_one"))
self.assertIsNone(registry.get_model_class("simple"))
user_dir = self._get_user_dir()
user_file = os.path.join(user_dir, "models", "simple.py")
import_user_module(user_file)
# Only model should be found and build should be none
self.assertIsNone(registry.get_builder_class("always_one"))
self.assertIsNotNone(registry.get_model_class("simple"))
self.assertTrue("mmf_user_dir" in sys.modules)
self.assertTrue(user_dir in get_mmf_env("user_dir"))
| [
"[email protected]"
] | |
d82b06ed955306e40a89c3f9dae61aae64c70312 | c51eef37bb983a9c35635c7ccc96a0cf689a7438 | /lecture/lecture_gn3/week3/appendix_pandas.py | 0344d0f2f1232e55f1999224d4ea9967a9fa725b | [] | no_license | Kyeongrok/python_crawler | 0a717b43be36584af1b0f7c1ad0c79108a5d11e0 | 5a5da8af7bb080f752a9a066741ac8adab136a3a | refs/heads/master | 2022-09-13T03:15:08.053639 | 2022-08-02T15:45:03 | 2022-08-02T15:45:03 | 124,719,435 | 40 | 34 | null | 2019-02-27T08:29:52 | 2018-03-11T03:20:32 | HTML | UTF-8 | Python | false | false | 134 | py | import pandas as pd
df = pd.read_excel("./jongmok.xlsx")
print(df['code'])
for item in df['code']:
print(item.replace("'", ""))
| [
"[email protected]"
] | |
118d9f8fd993b2d062494e29f3c3420c020ff27b | 894e2bc2b02226a23fcaff30d5d75c53b111fbe9 | /www/models.py | 29d709a9450cf26880b06894b2678e47a52e5236 | [] | no_license | frank-xman/python_web | 26b57614e70a7f58fe5ccfffef77340e4dff28be | 76bb0a590ebd113d391f9fd75bc6f3756ac920da | refs/heads/master | 2020-03-20T08:14:55.974308 | 2018-06-17T11:27:23 | 2018-06-17T11:27:23 | 137,302,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | import time, uuid
from orm import Model, StringField, BooleanField, FloatField, TextField
def next_id():
return '%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex)
class User(Model):
__table__ = 'users'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
email = StringField(ddl='varchar(50)')
password = StringField(ddl='varchar(50)')
admin = BooleanField()
name = StringField(ddl='varchar(50)')
image = StringField(ddl='varchar(500)')
created_at = FloatField(default=time.time)
class Blog(Model):
__table__ = 'blogs'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
user_id = StringField(ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
name = StringField(ddl='varchar(50)')
summary = StringField(ddl='varchar(200)')
content = TextField()
created_at = FloatField(default=time.time)
class Comment(Model):
__table__ = 'comments'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
blog_id = StringField(ddl='varchar(50)')
user_id = StringField(ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(500)')
content = TextField()
created_at = FloatField(default=time.time) | [
"[email protected]"
] | |
3fb64a3e8d1896b2dbb68da8318452a55210d6b7 | 4521bb234771215d678890ed084f6336e3653542 | /_examples/seqs/test.py | 9ba8745db744cd39dd83292b9f5b362bf39e73e9 | [
"BSD-3-Clause"
] | permissive | mingrammer/gopy | 16b8cfcec31ff993e62a383032192600b33b7681 | 3b8a754b6c689175fac23de448e31b96c231e001 | refs/heads/master | 2021-01-01T06:28:35.777032 | 2017-07-13T23:04:38 | 2017-07-16T13:19:44 | 97,432,545 | 1 | 0 | null | 2017-07-17T03:42:30 | 2017-07-17T03:42:30 | null | UTF-8 | Python | false | false | 880 | py | # Copyright 2015 The go-python Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
## py2/py3 compat
from __future__ import print_function
import seqs

# xrange only exists on Python 2; fall back to range so the example can also
# run under Python 3 (assumption: py3 support is intended, per the compat note).
try:
    xrange
except NameError:
    xrange = range
### test docs
print("doc(seqs): %r" % (seqs.__doc__,))
print("arr = seqs.Array(xrange(2))")
arr = seqs.Array(xrange(2))
print("arr = %s" % (arr,))
print("s = seqs.Slice()")
s = seqs.Slice()
print("s = %s" % (s,))
print("s = seqs.Slice([1,2])")
s = seqs.Slice([1,2])
print("s = %s" % (s,))
print("s = seqs.Slice(range(10))")
s = seqs.Slice(range(10))
print("s = %s" % (s,))
print("s = seqs.Slice(xrange(10))")
s = seqs.Slice(xrange(10))
print("s = %s" % (s,))
print("s = seqs.Slice()")
s = seqs.Slice()
print("s = %s" % (s,))
print("s += [1,2]")
s += [1,2]
print("s = %s" % (s,))
print("s += [10,20]")
s += [10,20]
print("s = %s" % (s,))
| [
"[email protected]"
] | |
8b4ed7b0a5ab69cd77a18bc6d948271caf27517a | 15514b8cdb9ef2bb25a33e44a2abe79e5eb86439 | /analyze_in_vivo/analyze_domnisoru/check_basic/velocity_threshold.py | 326534e21640372aed2da1a278da69d0f76db9a4 | [] | no_license | cafischer/analyze_in_vivo | 389ce0d51c6cbeb3e39648aaff13263f0c99060a | e38e1057420b5329504f7095f1ee89e2a293df23 | refs/heads/master | 2021-06-10T00:18:47.741793 | 2019-09-14T08:47:53 | 2019-09-14T08:47:53 | 100,512,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,288 | py | from __future__ import division
import numpy as np
import matplotlib.pyplot as pl
import os
from analyze_in_vivo.load.load_domnisoru import load_cell_ids, load_data, get_celltype, get_track_len
from analyze_in_vivo.analyze_domnisoru.check_basic.in_out_field import threshold_by_velocity
from scipy.ndimage.filters import convolve
from analyze_in_vivo.analyze_domnisoru.position_vs_firing_rate import get_spike_train
pl.style.use('paper')
if __name__ == '__main__':
save_dir_img = '/home/cf/Phd/programming/projects/analyze_in_vivo/analyze_in_vivo/results/domnisoru/check/velocity'
save_dir = '/home/cf/Phd/programming/projects/analyze_in_vivo/analyze_in_vivo/data/domnisoru'
cell_type = 'grid_cells'
cell_ids = load_cell_ids(save_dir, cell_type)
param_list = ['Vm_ljpc', 'Y_cm', 'vel_100ms', 'spiketimes'] # TODO ['Vm_ljpc', 'Y_cm', 'fY_cm', 'vel_100ms', 'spiketimes']
threshold = 1 # cm/s
save_dir_img = os.path.join(save_dir_img, cell_type)
if not os.path.exists(save_dir_img):
os.makedirs(save_dir_img)
time_lost = np.zeros(len(cell_ids))
spikes_lost = np.zeros(len(cell_ids))
for cell_idx, cell_id in enumerate(cell_ids):
print cell_id
# load
data = load_data(cell_id, param_list, save_dir)
v = data['Vm_ljpc']
t = np.arange(0, len(v)) * data['dt']
dt = data['dt']
position = data['Y_cm']
velocity_domnisoru = data['vel_100ms']
# spike train
AP_max_idxs = data['spiketimes']
spike_train = get_spike_train(AP_max_idxs, len(v))
# # velocity from position
# velocity = np.concatenate((np.array([0]), np.diff(position) / (np.diff(t)/1000.)))
#
# # put velocity at switch from end of track to the beginning to 0
# run_start_idxs = np.where(np.diff(position) < -get_track_len(cell_id)/2.)[0] + 1 # +1 because diff shifts one to front
# velocity[run_start_idxs] = 0
#
# # smoothed by a 100 ms uniform sliding window
# window = np.ones(int(round(100. / data['dt'])))
# window /= np.sum(window)
# velocity_smoothed = convolve(velocity, window, mode='nearest')
#
# # threshold by velocity
# [position_thresholded], _ = threshold_by_velocity([position], velocity)
#
# # check same length
# print 'Length Domnisoru - me (s): ', (len(data['fY_cm']) - len(position_thresholded)) * data['dt'] / 1000
#
# pl.figure()
# pl.plot(np.arange(len(position_thresholded)) * data['dt'], position_thresholded)
# pl.plot(np.arange(len(data['fY_cm'])) * data['dt'], data['fY_cm'])
# pl.show()
# threshold by velocity
[t_thresholded, spike_train_thresholded], vel = threshold_by_velocity([t, spike_train], velocity_domnisoru, threshold)
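        # Percentage of recording time and of spikes removed by the velocity threshold.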
time_lost[cell_idx] = (len(t) - len(t_thresholded)) / float(len(t)) * 100 # %
spikes_lost[cell_idx] = (np.sum(spike_train) - np.sum(spike_train_thresholded)) / float(np.sum(spike_train)) * 100 # %
# print time_lost[cell_idx]
# print spikes_lost[cell_idx]
# pl.figure()
# pl.plot(t, velocity_domnisoru, 'k')
# pl.plot(t[velocity_domnisoru < threshold], velocity_domnisoru[velocity_domnisoru < threshold], 'ro', markersize=2)
# pl.figure()
# pl.plot(t, spike_train, 'k')
# pl.plot(t[velocity_domnisoru < threshold], spike_train[velocity_domnisoru < threshold], 'ro',
# markersize=2)
# pl.figure()
# pl.plot(np.arange(0, len(vel))*dt, vel, 'k')
# pl.show()
if cell_type == 'grid_cells':
n_rows = 3
n_columns = 9
fig, axes = pl.subplots(n_rows, n_columns, sharex='all', sharey='all', figsize=(14, 8.5))
cell_idx = 0
for i1 in range(n_rows):
for i2 in range(n_columns):
if cell_idx < len(cell_ids):
if get_celltype(cell_ids[cell_idx], save_dir) == 'stellate':
axes[i1, i2].set_title(cell_ids[cell_idx] + ' ' + u'\u2605', fontsize=12)
elif get_celltype(cell_ids[cell_idx], save_dir) == 'pyramidal':
axes[i1, i2].set_title(cell_ids[cell_idx] + ' ' + u'\u25B4', fontsize=12)
else:
axes[i1, i2].set_title(cell_ids[cell_idx], fontsize=12)
axes[i1, i2].bar(0, time_lost[cell_idx], color='0.5')
axes[i1, i2].bar(1, spikes_lost[cell_idx], color='0.5')
axes[i1, i2].set_xlim(-1, 2)
axes[i1, i2].set_ylim(0, 100)
axes[i1, i2].set_xticks([0, 1])
axes[i1, i2].set_xticklabels(['Time \nlost', '#APs \nlost'], fontsize=12)
if i2 == 0:
axes[i1, i2].set_ylabel('Percentage')
else:
axes[i1, i2].spines['left'].set_visible(False)
axes[i1, i2].spines['bottom'].set_visible(False)
axes[i1, i2].set_xticks([])
axes[i1, i2].set_yticks([])
cell_idx += 1
pl.tight_layout()
pl.savefig(os.path.join(save_dir_img, 'loss.png'))
pl.show() | [
"[email protected]"
] | |
f2b3dd17ce0965225f01742b65f234f3c042c6fd | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/scheduler/v1/scheduler-v1-py/google/cloud/scheduler_v1/services/cloud_scheduler/async_client.py | 5a574a9d7619dfdff90a26656248611ca1a466a9 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,528 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.scheduler_v1.services.cloud_scheduler import pagers
from google.cloud.scheduler_v1.types import cloudscheduler
from google.cloud.scheduler_v1.types import job
from google.cloud.scheduler_v1.types import job as gcs_job
from google.cloud.scheduler_v1.types import target
from google.protobuf import duration_pb2 as duration # type: ignore
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
from google.rpc import status_pb2 as status # type: ignore
from .transports.base import CloudSchedulerTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import CloudSchedulerGrpcAsyncIOTransport
from .client import CloudSchedulerClient
class CloudSchedulerAsyncClient:
"""The Cloud Scheduler API allows external entities to reliably
schedule asynchronous jobs.
"""
_client: CloudSchedulerClient
DEFAULT_ENDPOINT = CloudSchedulerClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = CloudSchedulerClient.DEFAULT_MTLS_ENDPOINT
job_path = staticmethod(CloudSchedulerClient.job_path)
parse_job_path = staticmethod(CloudSchedulerClient.parse_job_path)
topic_path = staticmethod(CloudSchedulerClient.topic_path)
parse_topic_path = staticmethod(CloudSchedulerClient.parse_topic_path)
common_billing_account_path = staticmethod(CloudSchedulerClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(CloudSchedulerClient.parse_common_billing_account_path)
common_folder_path = staticmethod(CloudSchedulerClient.common_folder_path)
parse_common_folder_path = staticmethod(CloudSchedulerClient.parse_common_folder_path)
common_organization_path = staticmethod(CloudSchedulerClient.common_organization_path)
parse_common_organization_path = staticmethod(CloudSchedulerClient.parse_common_organization_path)
common_project_path = staticmethod(CloudSchedulerClient.common_project_path)
parse_common_project_path = staticmethod(CloudSchedulerClient.parse_common_project_path)
common_location_path = staticmethod(CloudSchedulerClient.common_location_path)
parse_common_location_path = staticmethod(CloudSchedulerClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CloudSchedulerAsyncClient: The constructed client.
"""
return CloudSchedulerClient.from_service_account_info.__func__(CloudSchedulerAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CloudSchedulerAsyncClient: The constructed client.
"""
return CloudSchedulerClient.from_service_account_file.__func__(CloudSchedulerAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> CloudSchedulerTransport:
"""Return the transport used by the client instance.
Returns:
CloudSchedulerTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(CloudSchedulerClient).get_transport_class, type(CloudSchedulerClient))
def __init__(self, *,
credentials: credentials.Credentials = None,
transport: Union[str, CloudSchedulerTransport] = 'grpc_asyncio',
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the cloud scheduler client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.CloudSchedulerTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = CloudSchedulerClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list_jobs(self,
request: cloudscheduler.ListJobsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobsAsyncPager:
r"""Lists jobs.
Args:
request (:class:`google.cloud.scheduler_v1.types.ListJobsRequest`):
The request object. Request message for listing jobs
using
[ListJobs][google.cloud.scheduler.v1.CloudScheduler.ListJobs].
parent (:class:`str`):
Required. The location name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.scheduler_v1.services.cloud_scheduler.pagers.ListJobsAsyncPager:
Response message for listing jobs using
[ListJobs][google.cloud.scheduler.v1.CloudScheduler.ListJobs].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = cloudscheduler.ListJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_jobs,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListJobsAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def get_job(self,
request: cloudscheduler.GetJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> job.Job:
r"""Gets a job.
Args:
request (:class:`google.cloud.scheduler_v1.types.GetJobRequest`):
The request object. Request message for
[GetJob][google.cloud.scheduler.v1.CloudScheduler.GetJob].
name (:class:`str`):
Required. The job name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.scheduler_v1.types.Job:
Configuration for a job.
The maximum allowed size for a job is
100KB.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = cloudscheduler.GetJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_job,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('name', request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def create_job(self,
request: cloudscheduler.CreateJobRequest = None,
*,
parent: str = None,
job: gcs_job.Job = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcs_job.Job:
r"""Creates a job.
Args:
request (:class:`google.cloud.scheduler_v1.types.CreateJobRequest`):
The request object. Request message for
[CreateJob][google.cloud.scheduler.v1.CloudScheduler.CreateJob].
parent (:class:`str`):
Required. The location name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job (:class:`google.cloud.scheduler_v1.types.Job`):
Required. The job to add. The user can optionally
specify a name for the job in
[name][google.cloud.scheduler.v1.Job.name].
[name][google.cloud.scheduler.v1.Job.name] cannot be the
same as an existing job. If a name is not specified then
the system will generate a random unique name that will
be returned ([name][google.cloud.scheduler.v1.Job.name])
in the response.
This corresponds to the ``job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.scheduler_v1.types.Job:
Configuration for a job.
The maximum allowed size for a job is
100KB.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, job])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = cloudscheduler.CreateJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if job is not None:
request.job = job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_job,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def update_job(self,
request: cloudscheduler.UpdateJobRequest = None,
*,
job: gcs_job.Job = None,
update_mask: field_mask.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcs_job.Job:
r"""Updates a job.
If successful, the updated [Job][google.cloud.scheduler.v1.Job]
is returned. If the job does not exist, ``NOT_FOUND`` is
returned.
If UpdateJob does not successfully return, it is possible for
the job to be in an
[Job.State.UPDATE_FAILED][google.cloud.scheduler.v1.Job.State.UPDATE_FAILED]
state. A job in this state may not be executed. If this happens,
retry the UpdateJob request until a successful response is
received.
Args:
request (:class:`google.cloud.scheduler_v1.types.UpdateJobRequest`):
The request object. Request message for
[UpdateJob][google.cloud.scheduler.v1.CloudScheduler.UpdateJob].
job (:class:`google.cloud.scheduler_v1.types.Job`):
Required. The new job properties.
[name][google.cloud.scheduler.v1.Job.name] must be
specified.
Output only fields cannot be modified using UpdateJob.
Any value specified for an output only field will be
ignored.
This corresponds to the ``job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
A mask used to specify which fields
of the job are being updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.scheduler_v1.types.Job:
Configuration for a job.
The maximum allowed size for a job is
100KB.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([job, update_mask])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = cloudscheduler.UpdateJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if job is not None:
request.job = job
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_job,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('job.name', request.job.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def delete_job(self,
request: cloudscheduler.DeleteJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a job.
Args:
request (:class:`google.cloud.scheduler_v1.types.DeleteJobRequest`):
The request object. Request message for deleting a job
using
[DeleteJob][google.cloud.scheduler.v1.CloudScheduler.DeleteJob].
name (:class:`str`):
Required. The job name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = cloudscheduler.DeleteJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_job,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('name', request.name),
)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def pause_job(self,
request: cloudscheduler.PauseJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> job.Job:
r"""Pauses a job.
If a job is paused then the system will stop executing the job
until it is re-enabled via
[ResumeJob][google.cloud.scheduler.v1.CloudScheduler.ResumeJob].
The state of the job is stored in
[state][google.cloud.scheduler.v1.Job.state]; if paused it will
be set to
[Job.State.PAUSED][google.cloud.scheduler.v1.Job.State.PAUSED].
A job must be in
[Job.State.ENABLED][google.cloud.scheduler.v1.Job.State.ENABLED]
to be paused.
Args:
request (:class:`google.cloud.scheduler_v1.types.PauseJobRequest`):
The request object. Request message for
[PauseJob][google.cloud.scheduler.v1.CloudScheduler.PauseJob].
name (:class:`str`):
Required. The job name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.scheduler_v1.types.Job:
Configuration for a job.
The maximum allowed size for a job is
100KB.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = cloudscheduler.PauseJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.pause_job,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('name', request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def resume_job(self,
request: cloudscheduler.ResumeJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> job.Job:
r"""Resume a job.
This method reenables a job after it has been
[Job.State.PAUSED][google.cloud.scheduler.v1.Job.State.PAUSED].
The state of a job is stored in
[Job.state][google.cloud.scheduler.v1.Job.state]; after calling
this method it will be set to
[Job.State.ENABLED][google.cloud.scheduler.v1.Job.State.ENABLED].
A job must be in
[Job.State.PAUSED][google.cloud.scheduler.v1.Job.State.PAUSED]
to be resumed.
Args:
request (:class:`google.cloud.scheduler_v1.types.ResumeJobRequest`):
The request object. Request message for
[ResumeJob][google.cloud.scheduler.v1.CloudScheduler.ResumeJob].
name (:class:`str`):
Required. The job name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.scheduler_v1.types.Job:
Configuration for a job.
The maximum allowed size for a job is
100KB.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = cloudscheduler.ResumeJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.resume_job,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('name', request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def run_job(self,
request: cloudscheduler.RunJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> job.Job:
r"""Forces a job to run now.
When this method is called, Cloud Scheduler will
dispatch the job, even if the job is already running.
Args:
request (:class:`google.cloud.scheduler_v1.types.RunJobRequest`):
The request object. Request message for forcing a job to
run now using
[RunJob][google.cloud.scheduler.v1.CloudScheduler.RunJob].
name (:class:`str`):
Required. The job name. For example:
``projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.scheduler_v1.types.Job:
Configuration for a job.
The maximum allowed size for a job is
100KB.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
request = cloudscheduler.RunJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.run_job,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('name', request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-scheduler',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
'CloudSchedulerAsyncClient',
)
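# Example usage (a minimal sketch, not part of the generated client; assumes
# application-default credentials and a hypothetical project/location that you
# would replace with real values):
#
#   import asyncio
#   from google.cloud import scheduler_v1
#
#   async def main():
#       client = scheduler_v1.CloudSchedulerAsyncClient()
#       parent = 'projects/my-project/locations/us-central1'
#       pager = await client.list_jobs(parent=parent)
#       async for job in pager:
#           print(job.name)
#
#   asyncio.run(main())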
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
aac080be86faa850be15104254c1f01a4348d237 | b28305dab0be0e03765c62b97bcd7f49a4f8073d | /build/android/resource_sizes.py | cc8681122bda0269f13b21cca01f5c13b2121638 | [
"BSD-3-Clause"
] | permissive | svarvel/browser-android-tabs | 9e5e27e0a6e302a12fe784ca06123e5ce090ced5 | bd198b4c7a1aca2f3e91f33005d881f42a8d0c3f | refs/heads/base-72.0.3626.105 | 2020-04-24T12:16:31.442851 | 2019-08-02T19:15:36 | 2019-08-02T19:15:36 | 171,950,555 | 1 | 2 | NOASSERTION | 2019-08-02T19:15:37 | 2019-02-21T21:47:44 | null | UTF-8 | Python | false | false | 32,820 | py | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reports binary size and static initializer metrics for an APK.
More information at //docs/speed/binary_size/metrics.md.
"""
from __future__ import print_function
import argparse
import collections
from contextlib import contextmanager
import json
import logging
import os
import posixpath
import re
import struct
import sys
import zipfile
import zlib
from binary_size import apk_downloader
import devil_chromium
from devil.android.sdk import build_tools
from devil.utils import cmd_helper
from devil.utils import lazy
import method_count
from pylib import constants
from pylib.constants import host_paths
_AAPT_PATH = lazy.WeakConstant(lambda: build_tools.GetPath('aapt'))
_BUILD_UTILS_PATH = os.path.join(
host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'gyp')
_APK_PATCH_SIZE_ESTIMATOR_PATH = os.path.join(
host_paths.DIR_SOURCE_ROOT, 'third_party', 'apk-patch-size-estimator')
with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
import perf_tests_results_helper # pylint: disable=import-error
with host_paths.SysPath(host_paths.TRACING_PATH):
from tracing.value import convert_chart_json # pylint: disable=import-error
with host_paths.SysPath(_BUILD_UTILS_PATH, 0):
from util import build_utils # pylint: disable=import-error
with host_paths.SysPath(_APK_PATCH_SIZE_ESTIMATOR_PATH):
import apk_patch_size_estimator # pylint: disable=import-error
# Python had a bug in zipinfo parsing that triggers on ChromeModern.apk
# https://bugs.python.org/issue14315
def _PatchedDecodeExtra(self):
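  # Near-verbatim copy of zipfile.ZipInfo._decodeExtra, relaxed so that zip64
  # extra records with lengths the stock parser rejects (see the bug referenced
  # above) no longer raise while reading the APK.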
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while len(extra) >= 4:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise RuntimeError, "Corrupt extra field %s"%(ln,)
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffffL, 0xffffffffL):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFFL:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffffL:
self.header_offset = counts[idx]
idx += 1
extra = extra[ln + 4:]
zipfile.ZipInfo._decodeExtra = ( # pylint: disable=protected-access
_PatchedDecodeExtra)
# Captures an entire config from aapt output.
_AAPT_CONFIG_PATTERN = r'config %s:(.*?)config [a-zA-Z-]+:'
# Matches string resource entries from aapt output.
_AAPT_ENTRY_RE = re.compile(
r'resource (?P<id>\w{10}) [\w\.]+:string/.*?"(?P<val>.+?)"', re.DOTALL)
_BASE_CHART = {
'format_version': '0.1',
'benchmark_name': 'resource_sizes',
'benchmark_description': 'APK resource size information.',
'trace_rerun_options': [],
'charts': {}
}
_DUMP_STATIC_INITIALIZERS_PATH = os.path.join(
host_paths.DIR_SOURCE_ROOT, 'tools', 'linux', 'dump-static-initializers.py')
# Macro definitions look like (something, 123) when
# enable_resource_whitelist_generation=true.
_RC_HEADER_RE = re.compile(r'^#define (?P<name>\w+).* (?P<id>\d+)\)?$')
_RE_NON_LANGUAGE_PAK = re.compile(r'^assets/.*(resources|percent)\.pak$')
_RE_COMPRESSED_LANGUAGE_PAK = re.compile(
r'\.lpak$|^assets/(?!stored-locales/).*(?!resources|percent)\.pak$')
_RE_STORED_LANGUAGE_PAK = re.compile(r'^assets/stored-locales/.*\.pak$')
_READELF_SIZES_METRICS = {
'text': ['.text'],
'data': ['.data', '.rodata', '.data.rel.ro', '.data.rel.ro.local'],
'relocations': ['.rel.dyn', '.rel.plt', '.rela.dyn', '.rela.plt'],
'unwind': ['.ARM.extab', '.ARM.exidx', '.eh_frame', '.eh_frame_hdr',
'.ARM.exidxsentinel_section_after_text'],
'symbols': ['.dynsym', '.dynstr', '.dynamic', '.shstrtab', '.got', '.plt',
'.got.plt', '.hash', '.gnu.hash'],
'bss': ['.bss'],
'other': ['.init_array', '.fini_array', '.comment', '.note.gnu.gold-version',
'.note.crashpad.info', '.note.android.ident',
'.ARM.attributes', '.note.gnu.build-id', '.gnu.version',
'.gnu.version_d', '.gnu.version_r', '.interp', '.gcc_except_table']
}
def _RunReadelf(so_path, options, tool_prefix=''):
return cmd_helper.GetCmdOutput(
[tool_prefix + 'readelf'] + options + [so_path])
def _ExtractMainLibSectionSizesFromApk(apk_path, main_lib_path, tool_prefix):
with Unzip(apk_path, filename=main_lib_path) as extracted_lib_path:
grouped_section_sizes = collections.defaultdict(int)
section_sizes = _CreateSectionNameSizeMap(extracted_lib_path, tool_prefix)
for group_name, section_names in _READELF_SIZES_METRICS.iteritems():
for section_name in section_names:
if section_name in section_sizes:
grouped_section_sizes[group_name] += section_sizes.pop(section_name)
# Group any unknown section headers into the "other" group.
for section_header, section_size in section_sizes.iteritems():
print('Unknown elf section header: %s' % section_header)
grouped_section_sizes['other'] += section_size
return grouped_section_sizes
def _CreateSectionNameSizeMap(so_path, tool_prefix):
stdout = _RunReadelf(so_path, ['-S', '--wide'], tool_prefix)
section_sizes = {}
# Matches [ 2] .hash HASH 00000000006681f0 0001f0 003154 04 A 3 0 8
for match in re.finditer(r'\[[\s\d]+\] (\..*)$', stdout, re.MULTILINE):
items = match.group(1).split()
section_sizes[items[0]] = int(items[4], 16)
return section_sizes
def _ParseLibBuildId(so_path, tool_prefix):
"""Returns the Build ID of the given native library."""
stdout = _RunReadelf(so_path, ['-n'], tool_prefix)
match = re.search(r'Build ID: (\w+)', stdout)
return match.group(1) if match else None
def _ParseManifestAttributes(apk_path):
# Check if the manifest specifies whether or not to extract native libs.
skip_extract_lib = False
output = cmd_helper.GetCmdOutput([
_AAPT_PATH.read(), 'd', 'xmltree', apk_path, 'AndroidManifest.xml'])
m = re.search(r'extractNativeLibs\(.*\)=\(.*\)(\w)', output)
if m:
skip_extract_lib = not bool(int(m.group(1)))
# Dex decompression overhead varies by Android version.
m = re.search(r'android:minSdkVersion\(\w+\)=\(type \w+\)(\w+)\n', output)
sdk_version = int(m.group(1), 16)
return sdk_version, skip_extract_lib
def CountStaticInitializers(so_path, tool_prefix):
# Mostly copied from //infra/scripts/legacy/scripts/slave/chromium/sizes.py.
def get_elf_section_size(readelf_stdout, section_name):
# Matches: .ctors PROGBITS 000000000516add0 5169dd0 000010 00 WA 0 0 8
match = re.search(r'\.%s.*$' % re.escape(section_name),
readelf_stdout, re.MULTILINE)
if not match:
return (False, -1)
size_str = re.split(r'\W+', match.group(0))[5]
return (True, int(size_str, 16))
# Find the number of files with at least one static initializer.
# First determine if we're 32 or 64 bit
stdout = _RunReadelf(so_path, ['-h'], tool_prefix)
elf_class_line = re.search('Class:.*$', stdout, re.MULTILINE).group(0)
elf_class = re.split(r'\W+', elf_class_line)[1]
if elf_class == 'ELF32':
word_size = 4
else:
word_size = 8
# Then find the number of files with global static initializers.
# NOTE: this is very implementation-specific and makes assumptions
# about how compiler and linker implement global static initializers.
si_count = 0
stdout = _RunReadelf(so_path, ['-SW'], tool_prefix)
has_init_array, init_array_size = get_elf_section_size(stdout, 'init_array')
if has_init_array:
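    # Each .init_array entry is a single function pointer, so the number of
    # static initializers is the section size divided by the pointer size.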
si_count = init_array_size / word_size
si_count = max(si_count, 0)
return si_count
def GetStaticInitializers(so_path, tool_prefix):
output = cmd_helper.GetCmdOutput([_DUMP_STATIC_INITIALIZERS_PATH, '-d',
so_path, '-t', tool_prefix])
summary = re.search(r'Found \d+ static initializers in (\d+) files.', output)
return output.splitlines()[:-1], int(summary.group(1))
def _NormalizeLanguagePaks(translations, normalized_apk_size, factor):
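  # Replaces the combined size of all translated .pak files with an estimate
  # derived from the en-US pak, so that string additions do not show up as size
  # regressions before translations have caught up.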
english_pak = translations.FindByPattern(r'.*/en[-_][Uu][Ss]\.l?pak')
num_translations = translations.GetNumEntries()
if english_pak:
normalized_apk_size -= translations.ComputeZippedSize()
normalized_apk_size += int(
english_pak.compress_size * num_translations * factor)
return normalized_apk_size
def _NormalizeResourcesArsc(apk_path, num_arsc_files, num_translations,
out_dir):
"""Estimates the expected overhead of untranslated strings in resources.arsc.
See http://crbug.com/677966 for why this is necessary.
"""
# If there are multiple .arsc files, use the resource packaged APK instead.
if num_arsc_files > 1:
if not out_dir:
print('Skipping resources.arsc normalization (output directory required)')
return 0
ap_name = os.path.basename(apk_path).replace('.apk', '.intermediate.ap_')
ap_path = os.path.join(out_dir, 'gen/arsc/apks', ap_name)
if not os.path.exists(ap_path):
raise Exception('Missing expected file: %s, try rebuilding.' % ap_path)
apk_path = ap_path
aapt_output = _RunAaptDumpResources(apk_path)
# en-rUS is in the default config and may be cluttered with non-translatable
# strings, so en-rGB is a better baseline for finding missing translations.
en_strings = _CreateResourceIdValueMap(aapt_output, 'en-rGB')
fr_strings = _CreateResourceIdValueMap(aapt_output, 'fr')
# en-US and en-GB will never be translated.
config_count = num_translations - 2
size = 0
for res_id, string_val in en_strings.iteritems():
if string_val == fr_strings[res_id]:
string_size = len(string_val)
# 7 bytes is the per-entry overhead (not specific to any string). See
# https://android.googlesource.com/platform/frameworks/base.git/+/android-4.2.2_r1/tools/aapt/StringPool.cpp#414.
# The 1.5 factor was determined experimentally and is meant to account for
# other languages generally having longer strings than english.
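      # Example: a 20-character untranslated string across 50 remaining configs
      # adds roughly 50 * (7 + 20 * 1.5) = 1850 bytes to the estimate.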
size += config_count * (7 + string_size * 1.5)
return size
def _CreateResourceIdValueMap(aapt_output, lang):
"""Return a map of resource ids to string values for the given |lang|."""
config_re = _AAPT_CONFIG_PATTERN % lang
return {entry.group('id'): entry.group('val')
for config_section in re.finditer(config_re, aapt_output, re.DOTALL)
for entry in re.finditer(_AAPT_ENTRY_RE, config_section.group(0))}
def _RunAaptDumpResources(apk_path):
cmd = [_AAPT_PATH.read(), 'dump', '--values', 'resources', apk_path]
status, output = cmd_helper.GetCmdStatusAndOutput(cmd)
if status != 0:
raise Exception('Failed running aapt command: "%s" with output "%s".' %
(' '.join(cmd), output))
return output
class _FileGroup(object):
"""Represents a category that apk files can fall into."""
def __init__(self, name):
self.name = name
self._zip_infos = []
self._extracted_multipliers = []
def AddZipInfo(self, zip_info, extracted_multiplier=0):
self._zip_infos.append(zip_info)
self._extracted_multipliers.append(extracted_multiplier)
def AllEntries(self):
return iter(self._zip_infos)
def GetNumEntries(self):
return len(self._zip_infos)
def FindByPattern(self, pattern):
return next((i for i in self._zip_infos if re.match(pattern, i.filename)),
None)
def FindLargest(self):
if not self._zip_infos:
return None
return max(self._zip_infos, key=lambda i: i.file_size)
def ComputeZippedSize(self):
return sum(i.compress_size for i in self._zip_infos)
def ComputeUncompressedSize(self):
return sum(i.file_size for i in self._zip_infos)
def ComputeExtractedSize(self):
ret = 0
for zi, multiplier in zip(self._zip_infos, self._extracted_multipliers):
ret += zi.file_size * multiplier
return ret
def ComputeInstallSize(self):
return self.ComputeExtractedSize() + self.ComputeZippedSize()
def GenerateApkAnalysis(apk_filename, tool_prefix, out_dir,
unknown_handler=None):
"""Analyse APK to determine size contributions of different file classes."""
file_groups = []
def make_group(name):
group = _FileGroup(name)
file_groups.append(group)
return group
native_code = make_group('Native code')
java_code = make_group('Java code')
native_resources_no_translations = make_group('Native resources (no l10n)')
translations = make_group('Native resources (l10n)')
stored_translations = make_group('Native resources stored (l10n)')
icu_data = make_group('ICU (i18n library) data')
v8_snapshots = make_group('V8 Snapshots')
png_drawables = make_group('PNG drawables')
res_directory = make_group('Non-compiled Android resources')
arsc = make_group('Compiled Android resources')
metadata = make_group('Package metadata')
unknown = make_group('Unknown files')
notices = make_group('licenses.notice file')
unwind_cfi = make_group('unwind_cfi (dev and canary only)')
apk = zipfile.ZipFile(apk_filename, 'r')
try:
apk_contents = apk.infolist()
finally:
apk.close()
sdk_version, skip_extract_lib = _ParseManifestAttributes(apk_filename)
# Pre-L: Dalvik - .odex file is simply decompressed/optimized dex file (~1x).
# L, M: ART - .odex file is compiled version of the dex file (~4x).
# N: ART - Uses Dalvik-like JIT for normal apps (~1x), full compilation for
# shared apps (~4x).
# Actual multipliers calculated using "apk_operations.py disk-usage".
# Will need to update multipliers once apk obfuscation is enabled.
# E.g. with obfuscation, the 4.04 changes to 4.46.
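  # Example: with these multipliers a 10 MiB classes.dex is estimated to occupy
  # ~40.4 MiB once fully compiled (L/M or shared APKs) but only ~11.7 MiB under
  # speed-profile compilation.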
speed_profile_dex_multiplier = 1.17
is_shared_apk = sdk_version >= 24 and (
'Monochrome' in apk_filename or 'WebView' in apk_filename)
if sdk_version < 21:
# JellyBean & KitKat
dex_multiplier = 1.16
elif sdk_version < 24:
# Lollipop & Marshmallow
dex_multiplier = 4.04
elif is_shared_apk:
# Oreo and above, compilation_filter=speed
dex_multiplier = 4.04
else:
# Oreo and above, compilation_filter=speed-profile
dex_multiplier = speed_profile_dex_multiplier
total_apk_size = os.path.getsize(apk_filename)
for member in apk_contents:
filename = member.filename
if filename.endswith('/'):
continue
if filename.endswith('.so'):
basename = posixpath.basename(filename)
should_extract_lib = not skip_extract_lib and basename.startswith('lib')
native_code.AddZipInfo(
member, extracted_multiplier=int(should_extract_lib))
elif filename.endswith('.dex'):
java_code.AddZipInfo(member, extracted_multiplier=dex_multiplier)
elif re.search(_RE_NON_LANGUAGE_PAK, filename):
native_resources_no_translations.AddZipInfo(member)
elif re.search(_RE_COMPRESSED_LANGUAGE_PAK, filename):
translations.AddZipInfo(
member,
extracted_multiplier=int('en_' in filename or 'en-' in filename))
elif re.search(_RE_STORED_LANGUAGE_PAK, filename):
stored_translations.AddZipInfo(member)
elif filename == 'assets/icudtl.dat':
icu_data.AddZipInfo(member)
elif filename.endswith('.bin'):
v8_snapshots.AddZipInfo(member)
elif filename.endswith('.png') or filename.endswith('.webp'):
png_drawables.AddZipInfo(member)
elif filename.startswith('res/'):
res_directory.AddZipInfo(member)
elif filename.endswith('.arsc'):
arsc.AddZipInfo(member)
elif filename.startswith('META-INF') or filename == 'AndroidManifest.xml':
metadata.AddZipInfo(member)
elif filename.endswith('.notice'):
notices.AddZipInfo(member)
elif filename.startswith('assets/unwind_cfi'):
unwind_cfi.AddZipInfo(member)
else:
unknown.AddZipInfo(member)
total_install_size = total_apk_size
total_install_size_android_go = total_apk_size
zip_overhead = total_apk_size
for group in file_groups:
actual_size = group.ComputeZippedSize()
install_size = group.ComputeInstallSize()
uncompressed_size = group.ComputeUncompressedSize()
extracted_size = group.ComputeExtractedSize()
total_install_size += extracted_size
zip_overhead -= actual_size
yield ('Breakdown', group.name + ' size', actual_size, 'bytes')
yield ('InstallBreakdown', group.name + ' size', int(install_size), 'bytes')
# Only a few metrics are compressed in the first place.
# To avoid over-reporting, track uncompressed size only for compressed
# entries.
if uncompressed_size != actual_size:
yield ('Uncompressed', group.name + ' size', uncompressed_size, 'bytes')
if group is java_code and is_shared_apk:
# Updates are compiled using quicken, but system image uses speed-profile.
extracted_size = uncompressed_size * speed_profile_dex_multiplier
total_install_size_android_go += extracted_size
yield ('InstallBreakdownGo', group.name + ' size',
actual_size + extracted_size, 'bytes')
else:
total_install_size_android_go += extracted_size
# Per-file zip overhead is caused by:
# * 30 byte entry header + len(file name)
# * 46 byte central directory entry + len(file name)
# * 0-3 bytes for zipalign.
yield ('Breakdown', 'Zip Overhead', zip_overhead, 'bytes')
yield ('InstallSize', 'APK size', total_apk_size, 'bytes')
yield ('InstallSize', 'Estimated installed size', int(total_install_size),
'bytes')
if is_shared_apk:
yield ('InstallSize', 'Estimated installed size (Android Go)',
int(total_install_size_android_go), 'bytes')
transfer_size = _CalculateCompressedSize(apk_filename)
yield ('TransferSize', 'Transfer size (deflate)', transfer_size, 'bytes')
# Size of main dex vs remaining.
main_dex_info = java_code.FindByPattern('classes.dex')
if main_dex_info:
main_dex_size = main_dex_info.file_size
yield ('Specifics', 'main dex size', main_dex_size, 'bytes')
secondary_size = java_code.ComputeUncompressedSize() - main_dex_size
yield ('Specifics', 'secondary dex size', secondary_size, 'bytes')
# Size of main .so vs remaining.
main_lib_info = native_code.FindLargest()
if main_lib_info:
main_lib_size = main_lib_info.file_size
yield ('Specifics', 'main lib size', main_lib_size, 'bytes')
secondary_size = native_code.ComputeUncompressedSize() - main_lib_size
yield ('Specifics', 'other lib size', secondary_size, 'bytes')
main_lib_section_sizes = _ExtractMainLibSectionSizesFromApk(
apk_filename, main_lib_info.filename, tool_prefix)
for metric_name, size in main_lib_section_sizes.iteritems():
yield ('MainLibInfo', metric_name, size, 'bytes')
# Main metric that we want to monitor for jumps.
normalized_apk_size = total_apk_size
# unwind_cfi exists only in dev, canary, and non-channel builds.
normalized_apk_size -= unwind_cfi.ComputeZippedSize()
# Always look at uncompressed .so.
normalized_apk_size -= native_code.ComputeZippedSize()
normalized_apk_size += native_code.ComputeUncompressedSize()
# Normalized dex size: size within the zip + size on disk for Android Go
# devices (which ~= uncompressed dex size).
normalized_apk_size += java_code.ComputeUncompressedSize()
# Avoid noise caused when strings change and translations haven't yet been
# updated.
num_translations = translations.GetNumEntries()
num_stored_translations = stored_translations.GetNumEntries()
if num_translations > 1:
# Multipliers found by looking at MonochromePublic.apk and seeing how much
# smaller en-US.pak is relative to the average locale.pak.
normalized_apk_size = _NormalizeLanguagePaks(
translations, normalized_apk_size, 1.17)
if num_stored_translations > 1:
normalized_apk_size = _NormalizeLanguagePaks(
stored_translations, normalized_apk_size, 1.43)
if num_translations + num_stored_translations > 1:
if num_translations == 0:
# WebView stores all locale paks uncompressed.
num_arsc_translations = num_stored_translations
else:
# Monochrome has more configurations than Chrome since it includes
# WebView (which supports more locales), but these should mostly be empty
# so ignore them here.
num_arsc_translations = num_translations
normalized_apk_size += int(_NormalizeResourcesArsc(
apk_filename, arsc.GetNumEntries(), num_arsc_translations, out_dir))
yield ('Specifics', 'normalized apk size', normalized_apk_size, 'bytes')
# The "file count" metric cannot be grouped with any other metrics when the
# end result is going to be uploaded to the perf dashboard in the HistogramSet
# format due to mixed units (bytes vs. zip entries) causing malformed
# summaries to be generated.
# TODO(https://crbug.com/903970): Remove this workaround if unit mixing is
# ever supported.
yield ('FileCount', 'file count', len(apk_contents), 'zip entries')
if unknown_handler is not None:
for info in unknown.AllEntries():
unknown_handler(info)
def PrintApkAnalysis(apk_filename, tool_prefix, out_dir, chartjson=None):
"""Calls GenerateApkAnalysis() and report the value."""
def PrintUnknown(info):
print('Unknown entry: %s %d' % (info.filename, info.compress_size))
title_prefix = os.path.basename(apk_filename) + '_'
for data in GenerateApkAnalysis(apk_filename, tool_prefix, out_dir,
PrintUnknown):
title = title_prefix + data[0]
perf_tests_results_helper.ReportPerfResult(chartjson, title, *data[1:])
def _AnnotatePakResources(out_dir):
"""Returns a pair of maps: id_name_map, id_header_map."""
print('Looking at resources in: %s' % out_dir)
grit_headers = []
for root, _, files in os.walk(out_dir):
if root.endswith('grit'):
grit_headers += [os.path.join(root, f) for f in files if f.endswith('.h')]
assert grit_headers, 'Failed to find grit headers in %s' % out_dir
id_name_map = {}
id_header_map = {}
for header in grit_headers:
with open(header, 'r') as f:
for line in f.readlines():
m = _RC_HEADER_RE.match(line.strip())
if m:
i = int(m.group('id'))
name = m.group('name')
if i in id_name_map and name != id_name_map[i]:
print('WARNING: Resource ID conflict %s (%s vs %s)' % (
i, id_name_map[i], name))
id_name_map[i] = name
id_header_map[i] = os.path.relpath(header, out_dir)
return id_name_map, id_header_map
# This method also used by //build/android/gyp/assert_static_initializers.py
def AnalyzeStaticInitializers(apk_filename, tool_prefix, dump_sis, out_dir,
ignored_libs):
# Static initializer counting mostly copies logic in
# infra/scripts/legacy/scripts/slave/chromium/sizes.py.
with zipfile.ZipFile(apk_filename) as z:
so_files = [f for f in z.infolist()
if f.filename.endswith('.so') and f.file_size > 0
and os.path.basename(f.filename) not in ignored_libs]
# Skip checking static initializers for 32 bit .so files when 64 bit .so files
# are present since the 32 bit versions will be checked by bots that only
# build the 32 bit version. This avoids the complexity of finding 32 bit .so
# files in the output directory in 64 bit builds.
has_64 = any('64' in f.filename for f in so_files)
files_to_check = [f for f in so_files if not has_64 or '64' in f.filename]
si_count = 0
for f in files_to_check:
with Unzip(apk_filename, filename=f.filename) as unzipped_so:
si_count += CountStaticInitializers(unzipped_so, tool_prefix)
if dump_sis:
# Print count and list of SIs reported by dump-static-initializers.py.
# Doesn't work well on all archs (particularly arm), which is why
# the readelf method is used for tracking SI counts.
_PrintDumpSIsCount(f.filename, unzipped_so, out_dir, tool_prefix)
return si_count
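# Hypothetical invocation sketch for AnalyzeStaticInitializers(); the APK and
# output-directory paths below are placeholders, not values used by this tool.
#   si_count = AnalyzeStaticInitializers(
#       'out/Release/apks/MonochromePublic.apk', tool_prefix,
#       dump_sis=False, out_dir='out/Release', ignored_libs=[])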
def _PrintDumpSIsCount(apk_so_name, unzipped_so, out_dir, tool_prefix):
lib_name = os.path.basename(apk_so_name).replace('crazy.', '')
so_with_symbols_path = os.path.join(out_dir, 'lib.unstripped', lib_name)
if os.path.exists(so_with_symbols_path):
_VerifyLibBuildIdsMatch(tool_prefix, unzipped_so, so_with_symbols_path)
sis, _ = GetStaticInitializers(
so_with_symbols_path, tool_prefix)
for si in sis:
print(si)
else:
    raise Exception('Unstripped .so not found. Looked here: %s' %
                    so_with_symbols_path)
def _CalculateCompressedSize(file_path):
CHUNK_SIZE = 256 * 1024
compressor = zlib.compressobj()
total_size = 0
with open(file_path, 'rb') as f:
    # Use a bytes sentinel since the file is opened in binary mode.
    for chunk in iter(lambda: f.read(CHUNK_SIZE), b''):
total_size += len(compressor.compress(chunk))
total_size += len(compressor.flush())
return total_size
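# Example (hypothetical) standalone use of _CalculateCompressedSize(): estimate
# the zlib-compressed transfer size of any build artifact on disk.
#   compressed = _CalculateCompressedSize('out/Release/apks/ChromePublic.apk')
#   print('compressed size: %d bytes' % compressed)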
def GenerateDexAnalysis(apk_filename):
sizes, total_size = method_count.ExtractSizesFromZip(apk_filename)
dex_metrics = method_count.CONTRIBUTORS_TO_DEX_CACHE
cumulative_sizes = collections.defaultdict(int)
for classes_dex_sizes in sizes.values():
for key in dex_metrics:
cumulative_sizes[key] += classes_dex_sizes[key]
  for key, label in dex_metrics.items():
yield ('Dex', label, cumulative_sizes[key], 'entries')
yield ('DexCache', 'DexCache', total_size, 'bytes')
def _PrintDexAnalysis(apk_filename, chartjson=None):
title_prefix = os.path.basename(apk_filename) + '_'
for data in GenerateDexAnalysis(apk_filename):
title = title_prefix + data[0]
perf_tests_results_helper.ReportPerfResult(chartjson, title, *data[1:])
def _PrintPatchSizeEstimate(new_apk, builder, bucket, chartjson=None):
apk_name = os.path.basename(new_apk)
title = apk_name + '_PatchSizeEstimate'
# Reference APK paths have spaces replaced by underscores.
builder = builder.replace(' ', '_')
old_apk = apk_downloader.MaybeDownloadApk(
builder, apk_downloader.CURRENT_MILESTONE, apk_name,
apk_downloader.DEFAULT_DOWNLOAD_PATH, bucket)
if old_apk:
# Use a temp dir in case patch size functions fail to clean up temp files.
with build_utils.TempDir() as tmp:
tmp_name = os.path.join(tmp, 'patch.tmp')
bsdiff = apk_patch_size_estimator.calculate_bsdiff(
old_apk, new_apk, None, tmp_name)
perf_tests_results_helper.ReportPerfResult(chartjson, title,
'BSDiff (gzipped)', bsdiff, 'bytes')
fbf = apk_patch_size_estimator.calculate_filebyfile(
old_apk, new_apk, None, tmp_name)
perf_tests_results_helper.ReportPerfResult(chartjson, title,
'FileByFile (gzipped)', fbf, 'bytes')
@contextmanager
def Unzip(zip_file, filename=None):
"""Utility for temporary use of a single file in a zip archive."""
with build_utils.TempDir() as unzipped_dir:
unzipped_files = build_utils.ExtractAll(
zip_file, unzipped_dir, True, pattern=filename)
if len(unzipped_files) == 0:
raise Exception(
'%s not found in %s' % (filename, zip_file))
yield unzipped_files[0]
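# Typical (illustrative) use of Unzip(): extract one library from the APK for
# the duration of a 'with' block; the temporary directory is deleted on exit.
# The .so path below is a hypothetical example.
#   with Unzip(apk_path, filename='lib/arm64-v8a/libmonochrome.so') as so_path:
#     si_count = CountStaticInitializers(so_path, tool_prefix)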
def _VerifyLibBuildIdsMatch(tool_prefix, *so_files):
if len(set(_ParseLibBuildId(f, tool_prefix) for f in so_files)) > 1:
raise Exception('Found differing build ids in output directory and apk. '
'Your output directory is likely stale.')
def _ConfigOutDirAndToolsPrefix(out_dir):
if out_dir:
constants.SetOutputDirectory(os.path.abspath(out_dir))
else:
try:
out_dir = constants.GetOutDirectory()
devil_chromium.Initialize()
except EnvironmentError:
pass
if out_dir:
build_vars = build_utils.ReadBuildVars(
os.path.join(out_dir, "build_vars.txt"))
tool_prefix = os.path.join(out_dir, build_vars['android_tool_prefix'])
else:
tool_prefix = ''
return out_dir, tool_prefix
def main():
argparser = argparse.ArgumentParser(description='Print APK size metrics.')
argparser.add_argument('--min-pak-resource-size',
type=int,
default=20*1024,
help='Minimum byte size of displayed pak resources.')
argparser.add_argument('--chromium-output-directory',
dest='out_dir',
help='Location of the build artifacts.')
argparser.add_argument('--chartjson',
action='store_true',
help='DEPRECATED. Use --output-format=chartjson '
'instead.')
argparser.add_argument('--output-format',
choices=['chartjson', 'histograms'],
help='Output the results to a file in the given '
'format instead of printing the results.')
argparser.add_argument('--output-dir',
default='.',
help='Directory to save chartjson to.')
argparser.add_argument('--dump-static-initializers',
action='store_true',
dest='dump_sis',
                         help='Run dump-static-initializers.py to get the list '
'of static initializers (slow).')
argparser.add_argument('--loadable_module',
action='append',
help='Use for libraries added via loadable_modules')
argparser.add_argument('--estimate-patch-size',
action='store_true',
help='Include patch size estimates. Useful for perf '
'builders where a reference APK is available but adds '
'~3 mins to run time.')
argparser.add_argument('--reference-apk-builder',
default=apk_downloader.DEFAULT_BUILDER,
help='Builder name to use for reference APK for patch '
'size estimates.')
argparser.add_argument('--reference-apk-bucket',
default=apk_downloader.DEFAULT_BUCKET,
help='Storage bucket holding reference APKs.')
argparser.add_argument('apk', help='APK file path.')
args = argparser.parse_args()
# TODO(bsheedy): Remove this once uses of --chartjson have been removed.
if args.chartjson:
args.output_format = 'chartjson'
chartjson = _BASE_CHART.copy() if args.output_format else None
out_dir, tool_prefix = _ConfigOutDirAndToolsPrefix(args.out_dir)
if args.dump_sis and not out_dir:
argparser.error(
'--dump-static-initializers requires --chromium-output-directory')
# Do not add any new metrics without also documenting them in:
# //docs/speed/binary_size/metrics.md.
PrintApkAnalysis(args.apk, tool_prefix, out_dir, chartjson=chartjson)
_PrintDexAnalysis(args.apk, chartjson=chartjson)
ignored_libs = args.loadable_module if args.loadable_module else []
si_count = AnalyzeStaticInitializers(
args.apk, tool_prefix, args.dump_sis, out_dir, ignored_libs)
perf_tests_results_helper.ReportPerfResult(
chartjson, 'StaticInitializersCount', 'count', si_count, 'count')
if args.estimate_patch_size:
_PrintPatchSizeEstimate(args.apk, args.reference_apk_builder,
args.reference_apk_bucket, chartjson=chartjson)
if chartjson:
results_path = os.path.join(args.output_dir, 'results-chart.json')
logging.critical('Dumping chartjson to %s', results_path)
with open(results_path, 'w') as json_file:
json.dump(chartjson, json_file)
# We would ideally generate a histogram set directly instead of generating
# chartjson then converting. However, perf_tests_results_helper is in
# //build, which doesn't seem to have any precedent for depending on
# anything in Catapult. This can probably be fixed, but since this doesn't
# need to be super fast or anything, converting is a good enough solution
# for the time being.
if args.output_format == 'histograms':
histogram_result = convert_chart_json.ConvertChartJson(results_path)
if histogram_result.returncode != 0:
logging.error('chartjson conversion failed with error: %s',
histogram_result.stdout)
return 1
histogram_path = os.path.join(args.output_dir, 'perf_results.json')
logging.critical('Dumping histograms to %s', histogram_path)
with open(histogram_path, 'w') as json_file:
json_file.write(histogram_result.stdout)
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
d8e0bc2788db178b1f857236383ca57f5278fc99 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5010/568005010.py | 8f5b66f935d9f1b463871ee861e0a2ab2b098725 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,751 | py | from bots.botsconfig import *
from records005010 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'D5',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'AMT', MIN: 1, MAX: 2},
{ID: 'N1', MIN: 0, MAX: 10, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 1},
]},
{ID: 'CS', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N9', MIN: 0, MAX: 3},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 100},
]},
{ID: 'REF', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'LX', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N9', MIN: 1, MAX: 1},
{ID: 'AMT', MIN: 0, MAX: 99999},
{ID: 'QTY', MIN: 0, MAX: 1},
{ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
{ID: 'LQ', MIN: 1, MAX: 100},
]},
{ID: 'N1', MIN: 0, MAX: 1, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'N9', MIN: 0, MAX: 2},
]},
]},
{ID: 'FA1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'FA2', MIN: 1, MAX: 99999},
]},
]},
]},
{ID: 'BAL', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N9', MIN: 0, MAX: 99999},
{ID: 'RTE', MIN: 0, MAX: 99999},
]},
{ID: 'CTT', MIN: 0, MAX: 1},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
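# Usage note (a sketch of how bots grammars are generally wired, not taken from
# this file): the bots translator imports this module as the grammar for X12
# transaction set 568 (version 005010); 'syntax' overrides envelope defaults
# such as the ISA version, 'structure' gives the segment nesting with MIN/MAX
# repeat counts, and 'recorddefs' (imported above) supplies the per-segment
# field layouts.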
| [
"[email protected]"
] | |
a767d8388d15206e4a7a88d87019fd89dfd13dfd | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-swr/huaweicloudsdkswr/v2/model/create_image_sync_repo_request_body.py | cb2762d95c71188bd7d4b164190eeca326be4fed | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,587 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateImageSyncRepoRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'remote_region_id': 'str',
'remote_namespace': 'str',
'sync_auto': 'bool',
'override': 'bool'
}
attribute_map = {
'remote_region_id': 'remoteRegionId',
'remote_namespace': 'remoteNamespace',
'sync_auto': 'syncAuto',
'override': 'override'
}
def __init__(self, remote_region_id=None, remote_namespace=None, sync_auto=None, override=None):
"""CreateImageSyncRepoRequestBody - a model defined in huaweicloud sdk"""
self._remote_region_id = None
self._remote_namespace = None
self._sync_auto = None
self._override = None
self.discriminator = None
self.remote_region_id = remote_region_id
self.remote_namespace = remote_namespace
if sync_auto is not None:
self.sync_auto = sync_auto
if override is not None:
self.override = override
@property
def remote_region_id(self):
"""Gets the remote_region_id of this CreateImageSyncRepoRequestBody.
        Target region ID.
:return: The remote_region_id of this CreateImageSyncRepoRequestBody.
:rtype: str
"""
return self._remote_region_id
@remote_region_id.setter
def remote_region_id(self, remote_region_id):
"""Sets the remote_region_id of this CreateImageSyncRepoRequestBody.
        Target region ID.
:param remote_region_id: The remote_region_id of this CreateImageSyncRepoRequestBody.
:type: str
"""
self._remote_region_id = remote_region_id
@property
def remote_namespace(self):
"""Gets the remote_namespace of this CreateImageSyncRepoRequestBody.
        Target organization (namespace).
:return: The remote_namespace of this CreateImageSyncRepoRequestBody.
:rtype: str
"""
return self._remote_namespace
@remote_namespace.setter
def remote_namespace(self, remote_namespace):
"""Sets the remote_namespace of this CreateImageSyncRepoRequestBody.
        Target organization (namespace).
:param remote_namespace: The remote_namespace of this CreateImageSyncRepoRequestBody.
:type: str
"""
self._remote_namespace = remote_namespace
@property
def sync_auto(self):
"""Gets the sync_auto of this CreateImageSyncRepoRequestBody.
        Automatic synchronization. Defaults to false.
:return: The sync_auto of this CreateImageSyncRepoRequestBody.
:rtype: bool
"""
return self._sync_auto
@sync_auto.setter
def sync_auto(self, sync_auto):
"""Sets the sync_auto of this CreateImageSyncRepoRequestBody.
        Automatic synchronization. Defaults to false.
:param sync_auto: The sync_auto of this CreateImageSyncRepoRequestBody.
:type: bool
"""
self._sync_auto = sync_auto
@property
def override(self):
"""Gets the override of this CreateImageSyncRepoRequestBody.
        Whether to overwrite. Defaults to false.
:return: The override of this CreateImageSyncRepoRequestBody.
:rtype: bool
"""
return self._override
@override.setter
def override(self, override):
"""Sets the override of this CreateImageSyncRepoRequestBody.
        Whether to overwrite. Defaults to false.
:param override: The override of this CreateImageSyncRepoRequestBody.
:type: bool
"""
self._override = override
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateImageSyncRepoRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
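# Illustrative construction of the request body (all field values below are
# placeholders, not SDK defaults):
#   body = CreateImageSyncRepoRequestBody(
#       remote_region_id='cn-north-4',
#       remote_namespace='my-namespace',
#       sync_auto=True,
#       override=False)
#   print(body.to_str())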
| [
"[email protected]"
] |