blob_id stringlengths 40–40 | directory_id stringlengths 40–40 | path stringlengths 3–616 | content_id stringlengths 40–40 | detected_licenses sequencelengths 0–112 | license_type stringclasses 2 values | repo_name stringlengths 5–115 | snapshot_id stringlengths 40–40 | revision_id stringlengths 40–40 | branch_name stringclasses 777 values | visit_date timestamp[us] date 2015-08-06 10:31:46 – 2023-09-06 10:44:38 | revision_date timestamp[us] date 1970-01-01 02:38:32 – 2037-05-03 13:00:00 | committer_date timestamp[us] date 1970-01-01 02:38:32 – 2023-09-06 01:08:06 | github_id int64 4.92k–681M ⌀ | star_events_count int64 0–209k | fork_events_count int64 0–110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] date 2012-06-04 01:52:49 – 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us] date 2008-05-22 07:58:19 – 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3–10.2M | extension stringclasses 188 values | content stringlengths 3–10.2M | authors sequencelengths 1–1 | author_id stringlengths 1–132 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1d0b6383c33973c35589ec4404e85d7a6c72e8e8 | 8130c34d546c323d6d5d2ca6b4a67330af08828f | /.history/menu_app/models_20210105152309.py | ede59bfeb80ad64987ea4b2b9d2f75c6e48ba8a7 | [] | no_license | lienusrob/final | ba2dad086fc97b21b537ef12df834dfadd222943 | f2726e31f1d51450e4aed8c74021c33679957b28 | refs/heads/master | 2023-02-15T01:36:54.463034 | 2021-01-07T12:47:05 | 2021-01-07T12:47:05 | 327,279,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,265 | py |
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
import random
import string
from datetime import date, datetime
class ToppingsCategory(models.Model):
name = models.CharField(max_length=100)
type = models.CharField(max_length=100)
description = models.TextField(max_length=100, blank=True, null=True, default='')
def __str__(self):
return self.name
class Topping(models.Model):
name = models.CharField(max_length=100)
price = models.DecimalField(max_digits = 4, decimal_places=2, default=0)
category = models.ForeignKey(ToppingsCategory, on_delete = models.PROTECT, default=None)
def __str__(self):
return self.name
class ItemsCategory(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class MenuItem(models.Model):
name = models.CharField(max_length=22)
price = models.DecimalField(max_digits = 4, decimal_places=2)
category = models.ForeignKey(ItemsCategory, on_delete = models.PROTECT)
detail = models.TextField(max_length=1000, default = ' ')
# toppings = models.ManyToManyField(Topping, blank=True)
#image = models.ImageField(default=None, upload_to='', null=True, blank=True)
def __str__(self):
return self.name
class Extras(models.Model):
requests = models.TextField(max_length=400,)
def __str__(self):
return self.requests
class Cart (models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
current = models.BooleanField(default=True)
date_ordered = models.DateTimeField(auto_now_add= True )
class CartItem (models.Model):
add_item = models.ForeignKey(MenuItem, on_delete= models.CASCADE)
quantity = models.IntegerField(default=0)
cart = models.ForeignKey(Cart, on_delete= models.CASCADE)
def __str__(self):
return self.add_item.name
#remove dont need
class OrderItem(models.Model):
item = models.ForeignKey(MenuItem, on_delete=models.SET_NULL, null=True)
price = models.DecimalField(max_digits = 4, decimal_places=2, default=0)
order_item_order = models.ForeignKey('menu_app.Order', on_delete=models.CASCADE, null=True)
#toppings = models.ManyToManyField(Topping, blank=True)
def __str__(self):
return self.item.name
def get_item_price(self):
self.price = sum(topping.price for topping in self.toppings.all()) + self.item.price
def get_all_topping_categories(self):
categories = []
for topping in self.toppings.all():
if not topping.category in categories:
categories.append(topping.category)
return categories
class Orders (models.Model):
cart = models.ForeignKey(Cart, on_delete=models.CASCADE)
placed = models.BooleanField(default=False)
def __str__ (self):
return self.cart.user.username
#old need to remove
class Order(models.Model):
customer = models.ForeignKey(User, on_delete = models.CASCADE)
date_ordered = models.DateTimeField(default=timezone.now)
items = models.ManyToManyField(MenuItem)
order_items = models.ManyToManyField(OrderItem)
total = models.DecimalField(max_digits = 6, decimal_places=2, null=True)
is_ordered = models.BooleanField(default=False)
pickup_time = models.DateTimeField(default=timezone.now)
special_instructions = models.TextField(max_length=256, blank=True)
def __str__(self):
return f'Order #{self.id} - {self.customer.username}'
# # url to redirect to when submitting order form
# def get_absolute_url(self):
# return reverse('orders:order_detail', kwargs={'pk':self.pk})
# returns the sum of each item price in order and assigns it to self.total
def get_order_total(self):
self.total = sum(order_item.price for order_item in self.order_items.all())
def get_cart_items(self):
return self.items.all()
def generate_order_id():
date_str = date.today().strftime('%Y%m%d')[2:] + str(datetime.now().second)
rand_str = "".join([random.choice(string.digits) for count in range(3)])
return date_str + rand_str
# class Meta():
# ordering = ['-date_ordered']
| [
"[email protected]"
] | |
31291fea928eb8e023f65781c71fa4432037efea | ba1eff6535027c16b9e1d399b96e7853bc1514dc | /tests/test_16_userinfo_endpoint.py | 03ec0337b9ec1fd4207b1850726eb13b7fc2b0da | [
"Apache-2.0"
] | permissive | sklemer1/oidcendpoint | 09d06e4cf21113f74a78734cdd06c964aaed3c7d | bc2cd9222bd05aec7b7ba5c7c7f593c2143357f3 | refs/heads/master | 2020-03-30T12:24:20.500373 | 2018-10-04T13:42:31 | 2018-10-04T13:42:31 | 151,222,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,738 | py | import json
import os
import pytest
import time
from oidcmsg.key_jar import build_keyjar
from oidcmsg.oidc import AccessTokenRequest
from oidcmsg.oidc import AuthorizationRequest
from oidcendpoint.client_authn import verify_client
from oidcendpoint.oidc import userinfo
from oidcendpoint.oidc.authorization import Authorization
from oidcendpoint.oidc.provider_config import ProviderConfiguration
from oidcendpoint.oidc.registration import Registration
from oidcendpoint.oidc.token import AccessToken
from oidcendpoint.authn_event import AuthnEvent
from oidcendpoint.endpoint_context import EndpointContext
from oidcendpoint.user_authn.authn_context import INTERNETPROTOCOLPASSWORD
from oidcendpoint.user_info import UserInfo
KEYDEFS = [
{"type": "RSA", "key": '', "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]}
]
KEYJAR = build_keyjar(KEYDEFS)[1]
RESPONSE_TYPES_SUPPORTED = [
["code"], ["token"], ["id_token"], ["code", "token"], ["code", "id_token"],
["id_token", "token"], ["code", "token", "id_token"], ['none']]
CAPABILITIES = {
"response_types_supported": [" ".join(x) for x in RESPONSE_TYPES_SUPPORTED],
"token_endpoint_auth_methods_supported": [
"client_secret_post", "client_secret_basic",
"client_secret_jwt", "private_key_jwt"],
"response_modes_supported": ['query', 'fragment', 'form_post'],
"subject_types_supported": ["public", "pairwise"],
"grant_types_supported": [
"authorization_code", "implicit",
"urn:ietf:params:oauth:grant-type:jwt-bearer", "refresh_token"],
"claim_types_supported": ["normal", "aggregated", "distributed"],
"claims_parameter_supported": True,
"request_parameter_supported": True,
"request_uri_parameter_supported": True,
}
AUTH_REQ = AuthorizationRequest(client_id='client_1',
redirect_uri='https://example.com/cb',
scope=['openid'],
state='STATE',
response_type='code')
TOKEN_REQ = AccessTokenRequest(client_id='client_1',
redirect_uri='https://example.com/cb',
state='STATE',
grant_type='authorization_code',
client_secret='hemligt')
TOKEN_REQ_DICT = TOKEN_REQ.to_dict()
BASEDIR = os.path.abspath(os.path.dirname(__file__))
def full_path(local_file):
return os.path.join(BASEDIR, local_file)
USERINFO = UserInfo(json.loads(open(full_path('users.json')).read()))
def setup_session(endpoint_context, areq):
authn_event = AuthnEvent(uid="uid", salt='salt',
authn_info=INTERNETPROTOCOLPASSWORD,
time_stamp=time.time())
sid = endpoint_context.sdb.create_authz_session(authn_event, areq,
client_id='client_id')
endpoint_context.sdb.do_sub(sid, '')
return sid
class TestEndpoint(object):
@pytest.fixture(autouse=True)
def create_endpoint(self):
self.endpoint = userinfo.UserInfo(KEYJAR)
conf = {
"issuer": "https://example.com/",
"password": "mycket hemligt",
"token_expires_in": 600,
"grant_expires_in": 300,
"refresh_token_expires_in": 86400,
"verify_ssl": False,
"capabilities": CAPABILITIES,
"jwks": {
'url_path': '{}/jwks.json',
'local_path': 'static/jwks.json',
'private_path': 'own/jwks.json'
},
'endpoint': {
'provider_config': {
'path': '{}/.well-known/openid-configuration',
'class': ProviderConfiguration,
'kwargs': {}
},
'registration': {
'path': '{}/registration',
'class': Registration,
'kwargs': {}
},
'authorization': {
'path': '{}/authorization',
'class': Authorization,
'kwargs': {}
},
'token': {
'path': '{}/token',
'class': AccessToken,
'kwargs': {}
},
'userinfo': {
'path': '{}/userinfo',
'class': userinfo.UserInfo,
'kwargs': {'db_file': 'users.json'}
}
},
'client_authn': verify_client,
"authentication": [{
'acr': INTERNETPROTOCOLPASSWORD,
'name': 'NoAuthn',
'kwargs': {'user': 'diana'}
}],
'template_dir': 'template'
}
endpoint_context = EndpointContext(conf, keyjar=KEYJAR)
endpoint_context.cdb['client_1'] = {
"client_secret": 'hemligt',
"redirect_uris": [("https://example.com/cb", None)],
"client_salt": "salted",
'token_endpoint_auth_method': 'client_secret_post',
'response_types': ['code', 'token', 'code id_token', 'id_token']
}
self.endpoint = userinfo.UserInfo(endpoint_context)
def test_init(self):
assert self.endpoint
def test_parse(self):
session_id = setup_session(self.endpoint.endpoint_context, AUTH_REQ)
_dic = self.endpoint.endpoint_context.sdb.upgrade_to_token(
key=session_id)
_req = self.endpoint.parse_request(
{}, auth="Bearer {}".format(_dic['access_token']))
assert set(_req.keys()) == {'client_id', 'access_token'}
| [
"[email protected]"
] | |
48f6bf7eed3e7ed029e76a1561da9c2b9fd6b645 | 4488e3c26de4291da447d8251c491b43cb810f7c | /account_banking_payment_export/model/payment_mode.py | 798c8ed20daab08128d6d0b68c1d1b223e11f9d5 | [] | no_license | smart-solution/odoo-crm-80 | b19592ce6e374c9c7b0a3198498930ffb1283018 | 85dfd0cc37f81bcba24d2a0091094708a262fe2c | refs/heads/master | 2016-09-06T06:04:35.191924 | 2015-07-14T12:48:28 | 2015-07-14T12:48:28 | 33,174,511 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,285 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# (C) 2011 - 2013 Therp BV (<http://therp.nl>).
#
# All other contributions are (C) by their respective contributors
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class payment_mode(orm.Model):
''' Restoring the payment type from version 5,
used to select the export wizard (if any) '''
_inherit = "payment.mode"
def suitable_bank_types(self, cr, uid, payment_mode_id=None, context=None):
""" Reinstates functional code for suitable bank type filtering.
Current code in account_payment is disfunctional.
"""
res = []
payment_mode = self.browse(
cr, uid, payment_mode_id, context)
if (payment_mode and payment_mode.type and
payment_mode.type.suitable_bank_types):
res = [t.code for t in payment_mode.type.suitable_bank_types]
return res
_columns = {
'type': fields.many2one(
'payment.mode.type', 'Payment type',
required=True,
help='Select the Payment Type for the Payment Mode.'
),
'payment_order_type': fields.related(
'type', 'payment_order_type', readonly=True, type='selection',
selection=[('payment', 'Payment'), ('debit', 'Direct debit')],
string="Payment Order Type"),
}
| [
"[email protected]"
] | |
7c7405d5b792cd6f20e89b0b56489b366c8baecf | ba730380c8406b234202a6a19a9e5f01f6b66d25 | /django/crud2/articles/views.py | 4a9f35242edeef84e1211c795529a801b810b62b | [] | no_license | ssabum/note | 3b0fd891ab7053997c7978298635e599b42a7659 | 47354aa55a87813dab66f2ff7a930f5313bffe7a | refs/heads/master | 2023-06-19T03:03:02.398976 | 2021-07-09T15:09:42 | 2021-07-09T15:09:42 | 331,743,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,151 | py | from django.shortcuts import render, redirect
from .models import Article
# Create your views here.
# READ
def index(request):
# fetch all the articles
# articles = Article.objects.all()[::-1] # done in Python code
articles = Article.objects.order_by('-updated_at') # done at the DB level, sorted by last update
context = {
'articles': articles,
}
return render(request, 'articles/index.html', context)
# CREATE
def new(request):
return render(request, 'articles/new.html')
# CREATE
def create(request):
# extract the user data submitted in the POST request
title = request.POST.get('title')
content = request.POST.get('content')
# create an instance based on the Article model class
article = Article(title=title, content=content)
# save it to the DB
article.save()
# return render(request, 'articles/index.html')
# return redirect('articles:index')
return redirect('articles:detail', article.pk)
# READ
def detail(request, pk):
article = Article.objects.get(pk=pk)
context = {
'article': article,
}
return render(request, 'articles/detail.html', context)
# DELETE
# trigger URL: /articles/index/<post number>/delete
# therefore only POST requests should be allowed to delete
def delete(request, pk):
# load the record to delete
article = Article.objects.get(pk=pk)
if request.method == 'POST':
# delete it
article.delete()
# go back to the main page
return redirect('articles:index')
else:
return redirect('articles:detail', article.pk)
# UPDATE
def edit(request, pk):
article = Article.objects.get(pk=pk)
context = {
'article':article,
}
return render(request, 'articles/edit.html', context)
def update(request, pk):
# load the article to update
article = Article.objects.get(pk=pk)
# extract the data submitted by the user
article.title = request.POST.get('title')
article.content = request.POST.get('content')
# save to the DB
article.save()
return redirect('articles:detail', article.pk)
| [
"[email protected]"
] | |
1d626c9dbdb41c344f8870b691bab05f897edafa | 5864e86954a221d52d4fa83a607c71bacf201c5a | /dogma/items/fittableDogmaItem.py | 8e70cbc858bb571171c14d42eeafc1040058e7eb | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,769 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\dogma\items\fittableDogmaItem.py
from dogma.dogmaLogging import *
from baseDogmaItem import BaseDogmaItem
from ccpProfile import TimedFunction
import weakref
class FittableDogmaItem(BaseDogmaItem):
def __init__(self, *args, **kwargs):
self._location = None
self.lastStopTime = None
BaseDogmaItem.__init__(self, *args, **kwargs)
@property
def location(self):
if self._location:
return self._location()
@location.setter
def location(self, location):
if location is None:
self._location = None
else:
self._location = weakref.ref(location)
@property
def ownerID(self):
if self.location:
return self.location.ownerID
@ownerID.setter
def ownerID(self, ownerID):
if self.location and self.location.ownerID != ownerID:
self.dogmaLocation.LogError('Setting ownerID on a FittableDogmaItem to something that disagrees with its location!', self.location.ownerID, ownerID)
@TimedFunction('FittableDogmaItem::Unload')
def Unload(self):
BaseDogmaItem.Unload(self)
if self.location:
try:
locationFittedItems = self.location.fittedItems
except AttributeError:
return
if self.itemID in locationFittedItems:
del locationFittedItems[self.itemID]
elif self.itemID in self.dogmaLocation.itemsMissingLocation:
del self.dogmaLocation.itemsMissingLocation[self.itemID]
def SetLastStopTime(self, lastStopTime):
self.lastStopTime = lastStopTime
def IsActive(self):
for effectID in self.activeEffects:
if effectID == const.effectOnline:
continue
effect = self.dogmaLocation.GetEffect(effectID)
if effect.effectCategory in (const.dgmEffActivation, const.dgmEffTarget):
return True
return False
@TimedFunction('FittableDogmaItem::SetLocation')
def SetLocation(self, locationID, location, flagID):
if location is None:
self.dogmaLocation.LogError('FittableDogmaItem.SetLocation :: Location dogma item is None')
return
if not self.IsValidFittingLocation(location):
self.dogmaLocation.LogError('FittableDogmaItem.SetLocation :: Invalid fitting location')
return
oldData = self.GetLocationInfo()
self.location = location
self.flagID = flagID
location.RegisterFittedItem(self, flagID)
return oldData
def IsValidFittingLocation(self, location):
return False
def UnsetLocation(self, locationDogmaItem):
locationDogmaItem.UnregisterFittedItem(self)
def GetShipID(self):
if self.location:
return self.location.itemID
def GetPilot(self):
if self.location:
return self.location.GetPilot()
def GetOtherID(self):
otherID = None
if self.location:
otherID = self.location.subLocations.get(self.flagID, None)
if otherID is None:
other = self.dogmaLocation.GetChargeNonDB(self.location.itemID, self.flagID)
if other is not None:
otherID = other.itemID
return otherID
def SerializeForPropagation(self):
retVal = BaseDogmaItem.SerializeForPropagation(self)
retVal.lastStopTime = self.lastStopTime
return retVal
def UnpackPropagationData(self, propData, charID, shipID):
BaseDogmaItem.UnpackPropagationData(self, propData, charID, shipID)
self.SetLastStopTime(propData.lastStopTime)
| [
"[email protected]"
] | |
8d8ddb865c6a12401cc24112051255881181248e | f4f5d98101db7baf9703be077615383b831c35d8 | /setup.py | f00a4b6116f81b93954694c531ecc2ff819e8e74 | [
"MIT"
] | permissive | TrendingTechnology/PyYouTube-1 | 23099fd1b825f226cabf2e0f50112e1b3f53346b | 774213412210ab03adf11eb8b38906b0f3de5ee6 | refs/heads/main | 2023-08-17T13:50:03.035784 | 2021-09-15T09:11:31 | 2021-09-15T09:11:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | import pathlib
import setuptools
def read(file: str) -> list:
with open(file, encoding="utf-8") as r:
return [i.strip() for i in r]
file = pathlib.Path(__file__).parent
README = (file / "README.md").read_text()
setuptools.setup(
name='PyYouTube',
version="1.0.7",
author="mrlokaman",
author_email="[email protected]",
long_description = README,
long_description_content_type = "text/markdown",
description="Python library Get YouTube Video Data",
license="MIT",
url="https://github.com/lntechnical2/PyYouTube",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
install_requires = read("requirements.txt"),
python_requires=">=3.6"
)
| [
"[email protected]"
] | |
faf4719b940c4e5811346205c59cd9ad7daa89ec | 2813f969fc9833023f543fa14c1c22a87325ca8f | /logging_tree/tests/test_format.py | 787b959fd92bbf2bdc6650b8ba7e639e870cd017 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | ralphbean/logging_tree | 5761afc380719f4069fd00b1f21f5765927ce593 | 1db0ad6f485b5333fee637813faf827990924421 | refs/heads/master | 2021-01-16T22:05:05.459626 | 2012-12-04T02:16:14 | 2012-12-04T02:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,425 | py | """Tests for the `logging_tree.format` module."""
import logging
import logging.handlers
import unittest
import sys
from logging_tree.format import build_description, printout
from logging_tree.tests.case import LoggingTestCase
if sys.version_info >= (3,):
from io import StringIO
else:
from StringIO import StringIO
class FakeFile(StringIO):
def __init__(self, filename, mode):
self.filename = filename
StringIO.__init__(self)
def __repr__(self):
return '<file %r>' % self.filename
class FormatTests(LoggingTestCase):
def setUp(self):
# Prevent logging file handlers from trying to open real files.
# (The keyword delay=1, which defers any actual attempt to open
# a file, did not appear until Python 2.6.)
logging.open = FakeFile
super(FormatTests, self).setUp()
def tearDown(self):
del logging.open
super(FormatTests, self).tearDown()
def test_printout(self):
stdout, sys.stdout = sys.stdout, StringIO()
printout()
self.assertEqual(sys.stdout.getvalue(), '<--""\n Level WARNING\n')
sys.stdout = stdout
def test_simple_tree(self):
logging.getLogger('a')
logging.getLogger('a.b').setLevel(logging.DEBUG)
logging.getLogger('x.c')
self.assertEqual(build_description(), '''\
<--""
Level WARNING
|
o<--"a"
| |
| o<--"a.b"
| Level DEBUG
|
o<--[x]
|
o<--"x.c"
''')
def test_fancy_tree(self):
logging.getLogger('').setLevel(logging.DEBUG)
log = logging.getLogger('db')
log.setLevel(logging.INFO)
log.propagate = False
log.addFilter(MyFilter())
handler = logging.StreamHandler()
log.addHandler(handler)
handler.addFilter(logging.Filter('db.errors'))
logging.getLogger('db.errors')
logging.getLogger('db.stats')
log = logging.getLogger('www.status')
log.setLevel(logging.DEBUG)
log.addHandler(logging.FileHandler('/foo/log.txt'))
log.addHandler(MyHandler())
self.assertEqual(build_description(), '''\
<--""
Level DEBUG
|
o "db"
| Level INFO
| Propagate OFF
| Filter <MyFilter>
| Handler Stream %r
| Filter name='db.errors'
| |
| o<--"db.errors"
| |
| o<--"db.stats"
|
o<--[www]
|
o<--"www.status"
Level DEBUG
Handler File '/foo/log.txt'
Handler <MyHandler>
''' % (sys.stderr,))
def test_most_handlers(self):
ah = logging.getLogger('').addHandler
ah(logging.handlers.RotatingFileHandler(
'/bar/one.txt', maxBytes=10000, backupCount=3))
ah(logging.handlers.SocketHandler('server.example.com', 514))
ah(logging.handlers.DatagramHandler('server.example.com', 1958))
ah(logging.handlers.SysLogHandler())
ah(logging.handlers.SMTPHandler(
'mail.example.com', 'Server', 'Sysadmin', 'Logs!'))
# ah(logging.handlers.NTEventLogHandler())
ah(logging.handlers.HTTPHandler('api.example.com', '/logs', 'POST'))
ah(logging.handlers.BufferingHandler(20000))
sh = logging.StreamHandler()
ah(logging.handlers.MemoryHandler(30000, target=sh))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler RotatingFile '/bar/one.txt' maxBytes=10000 backupCount=3
Handler Socket server.example.com 514
Handler Datagram server.example.com 1958
Handler SysLog ('localhost', 514) facility=1
Handler SMTP via mail.example.com to ['Sysadmin']
Handler HTTP POST to http://api.example.com//logs
Handler Buffering capacity=20000
Handler Memory capacity=30000 dumping to:
Handler Stream %r
''' % (sh.stream,))
logging.getLogger('').handlers[3].socket.close() # or Python 3 warning
def test_2_dot_5_handlers(self):
if sys.version_info < (2, 5):
return
ah = logging.getLogger('').addHandler
ah(logging.handlers.TimedRotatingFileHandler('/bar/two.txt'))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler TimedRotatingFile '/bar/two.txt' when='H' interval=3600 backupCount=0
''')
def test_2_dot_6_handlers(self):
if sys.version_info < (2, 6):
return
ah = logging.getLogger('').addHandler
ah(logging.handlers.WatchedFileHandler('/bar/three.txt'))
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler WatchedFile '/bar/three.txt'
''')
def test_nested_handlers(self):
h1 = logging.StreamHandler()
h2 = logging.handlers.MemoryHandler(30000, target=h1)
h2.addFilter(logging.Filter('worse'))
h3 = logging.handlers.MemoryHandler(30000, target=h2)
h3.addFilter(logging.Filter('bad'))
logging.getLogger('').addHandler(h3)
self.assertEqual(build_description(), '''\
<--""
Level WARNING
Handler Memory capacity=30000 dumping to:
Filter name='bad'
Handler Memory capacity=30000 dumping to:
Filter name='worse'
Handler Stream %r
''' % (h1.stream,))
class MyFilter(object):
def __repr__(self):
return '<MyFilter>'
class MyHandler(object):
def __repr__(self):
return '<MyHandler>'
if __name__ == '__main__': # for Python <= 2.4
unittest.main()
| [
"[email protected]"
] | |
83bee1c913ad98cd00f75327075dbef6727ae53a | 3784495ba55d26e22302a803861c4ba197fd82c7 | /venv/lib/python3.6/site-packages/torchx/legacy/nn/VolumetricReplicationPadding.py | 16cc7a1c097d7c351bcc12cb145425dff9ac1bf3 | [
"MIT"
] | permissive | databill86/HyperFoods | cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789 | 9267937c8c70fd84017c0f153c241d2686a356dd | refs/heads/master | 2021-01-06T17:08:48.736498 | 2020-02-11T05:02:18 | 2020-02-11T05:02:18 | 241,407,659 | 3 | 0 | MIT | 2020-02-18T16:15:48 | 2020-02-18T16:15:47 | null | UTF-8 | Python | false | false | 1,969 | py | import torch
from .Module import Module
class VolumetricReplicationPadding(Module):
def __init__(self, pleft, pright=None, ptop=None, pbottom=None, pfront=None, pback=None):
super(VolumetricReplicationPadding, self).__init__()
self.pleft = pleft
self.pright = pright or pleft
self.ptop = ptop or pleft
self.pbottom = pbottom or pleft
self.pfront = pfront or pleft
self.pback = pback or pleft
def updateOutput(self, input):
assert input.dim() == 5
self._backend.VolumetricReplicationPadding_updateOutput(
self._backend.library_state,
input,
self.output,
self.pleft, self.pright,
self.ptop, self.pbottom,
self.pfront, self.pback
)
return self.output
def updateGradInput(self, input, gradOutput):
assert input.dim() == 5 and gradOutput.dim() == 5
assert input.size(0) == gradOutput.size(0)
assert input.size(1) == gradOutput.size(1)
assert input.size(2) + self.pfront + self.pback == gradOutput.size(2)
assert input.size(3) + self.ptop + self.pbottom == gradOutput.size(3)
assert input.size(4) + self.pleft + self.pright == gradOutput.size(4)
self._backend.VolumetricReplicationPadding_updateGradInput(
self._backend.library_state,
input,
gradOutput,
self.gradInput,
self.pleft, self.pright,
self.ptop, self.pbottom,
self.pfront, self.pback
)
return self.gradInput
def __repr__(self):
s = super(VolumetricReplicationPadding, self).__repr__()
s += '({}, {}, {}, {}, {}, {})'.format(self.pleft, self.pright,
self.ptop, self.pbottom,
self.pfront, self.pback
)
return s
| [
"[email protected]"
] | |
cb0b2c679a02d35a32e443a9412c0292555d4f6b | cff588a68be44913be884ba5c4ebf36a0a96cb75 | /python/007study_namespace.py | 3377539352b3e241261c717dfa8c5240c876539d | [] | no_license | KOOKDONGHUN/trading | e6a8d023f4bdbb0f1cf32e3e5b6b26b6265fc3a6 | 2d4337978a5849098ed890e9e2c3f059e4706536 | refs/heads/master | 2022-11-15T00:38:32.705125 | 2020-07-12T10:25:46 | 2020-07-12T10:25:46 | 275,761,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | class Stock:
market = 'kospi'
print(dir())# // ['Stock', '__annotations__', '__builtins__', '__cached__', '__doc__',
# // '__file__', '__loader__', '__name__', '__package__', '__spec__']
# Stock has been added
print(Stock) # <class '__main__.Stock'>
# when a class is defined it gets its own independent namespace, and the variables and methods defined in the class are stored in that namespace as a Python dictionary
print(Stock.market)
# how to inspect the namespace
print(Stock.__dict__) # // {'__module__': '__main__', 'market': 'kospi', '__dict__': <attribute '__dict__' of 'Stock' objects>,
#// '__weakref__': <attribute '__weakref__' of 'Stock' objects>, '__doc__': None}
s1 = Stock()
s2 = Stock()
print(id(s1)) # 2120139199496
print(id(s2)) # 2120139199560
print(s1.__dict__) # empty
print(s2.__dict__) # empty
s1.market = 'kosdaq'
print(s1.__dict__) # {'market': 'kosdaq'}
print(s2.__dict__) # empty
print(s1.market) # kosdaq
# if the name is not found in the instance's namespace, the lookup moves on to the class's namespace
print(s2.market) # kospi
| [
"[email protected]"
] | |
e7b07e9da69275211369027ccc4b4e3df2428c9a | 98d328e4e00ac7cf8930d2ff9bd68af1d9d9cc3b | /utils/lib_clustering.py | 3e1b9079f84417c6585bb40e6d8bcf926bf03a2b | [] | no_license | jtpils/Lane-Detection-from-Point-Cloud | 4d7e98cafada569097e16e7bcb5fdabc048e0644 | 238cb8cedc823a84c32b60ce13e7de8c81f19232 | refs/heads/master | 2020-06-05T08:42:46.397450 | 2019-06-17T15:41:58 | 2019-06-17T15:41:58 | 192,380,398 | 14 | 4 | null | 2019-06-17T16:16:58 | 2019-06-17T16:16:58 | null | UTF-8 | Python | false | false | 3,810 | py | '''
Clustering by DBSCAN using sklearn library
This code is copied and modified from:
https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html
'''
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
class Clusterer(object):
def __init__(self):
self.fit_success = False
def fit(self, X, eps=0.3, min_samples=10):
# Compute DBSCAN
db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
# samples that is close to the center
core_samples_mask[db.core_sample_indices_] = True
self.X = X
self.db = db
self.core_samples_mask = core_samples_mask
self.fit_success = True
self.labels = db.labels_ # label of each sample
self.unique_labels = set(self.labels)
self.n_clusters = len(set(self.labels)) - \
(1 if -1 in self.labels else 0)
def plot_clusters(self):
if not self.fit_success:
return
assert self.X.shape[1] == 2, "To visualize result, X must be 2 dimenstions."
# member vars used in this function
labels, n_clusters, unique_labels = self.labels, self.n_clusters, self.unique_labels
core_samples_mask = self.core_samples_mask
X = self.X
# Black removed and is used for noise instead.
colors = [plt.cm.Spectral(each)
for each in np.linspace(0, 1, len(unique_labels))]
# print(colors)
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=6)
# break
plt.title('Clustering result: {} clusters'.format(n_clusters))
def print_clustering_result(self):
if not self.fit_success:
return
labels, n_clusters = self.labels, self.n_clusters
# Number of clusters in labels, ignoring noise if present.
n_noise_ = list(labels).count(-1)
print('Estimated number of clusters: %d' % n_clusters)
print('Estimated number of noise points: %d' % n_noise_)
print("Homogeneity: %0.3f" %
metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" %
metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" %
metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels,
average_method='arithmetic'))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
if __name__ == "__main__":
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
# Fit
cluster = Clusterer()
cluster.fit(X)
# Plot
cluster.plot_clusters()
plt.show()
| [
"[email protected]"
] | |
5e57e42cf81e3523dfaa874a315995fbc33cfcb9 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D11B/PAYDUCD11BUN.py | 3dccdf3361385387dedef9f876212a5ce94c56a8 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,580 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD11BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'PAI', MIN: 1, MAX: 1},
{ID: 'FII', MIN: 1, MAX: 2},
{ID: 'DTM', MIN: 1, MAX: 4},
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'PYT', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'NAD', MIN: 0, MAX: 6, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 1},
]},
]},
{ID: 'GEI', MIN: 1, MAX: 9, LEVEL: [
{ID: 'RFF', MIN: 1, MAX: 1},
{ID: 'MOA', MIN: 1, MAX: 9},
{ID: 'BUS', MIN: 0, MAX: 1},
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 99, LEVEL: [
{ID: 'UGH', MIN: 0, MAX: 1, LEVEL: [
{ID: 'NAD', MIN: 0, MAX: 999999, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'MOA', MIN: 1, MAX: 9},
{ID: 'AJT', MIN: 0, MAX: 9},
{ID: 'PYT', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 3},
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'UGT', MIN: 1, MAX: 1},
]},
]},
]},
{ID: 'UNS', MIN: 1, MAX: 1},
{ID: 'MOA', MIN: 1, MAX: 1},
{ID: 'CNT', MIN: 0, MAX: 9},
{ID: 'AUT', MIN: 0, MAX: 1},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"[email protected]"
] | |
33707edb80b081ec1ed745507088f9c26ebd20fd | b182ff74d1107c00d77d3bb241dfca589ccc9404 | /config.py | 2bba1aadff966f60605fa7fdf900d990f46442d1 | [] | no_license | aexleader/Tornado-OA-System | 7846a13a90c6da512a7f7620b003bd77b331a63d | 6ffc51d2f42fcbd5b0abe7082dae4505bf687894 | refs/heads/master | 2020-08-01T14:00:28.966198 | 2019-09-10T10:57:23 | 2019-09-10T10:57:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | #coding=utf-8
from libs.flash.flash_lib import get_flashed_messages # import a variable
from libs.permission.permission_auth.permission_interface_libs import menu_permission
settings = dict(
template_path = 'templates',
static_path = 'static',
debug = True,
cookie_secret = 'aaaa',
login_url = '/auth/user_login',
xsrf_cookies = True,
# ui_methods entries act as global template variables; this parameter is available in every html file
ui_methods= {
"menu_permission": menu_permission,
"get_flashed_messages": get_flashed_messages
},
# pycket configuration
pycket = {
'engine': 'redis', # set the storage engine type
'storage': {
'host': 'localhost',
'port': 6379,
'db_sessions': 5,
'db_notifications': 11,
'max_connections': 2 ** 31,
},
'cookies': {
'expires_days': 30, # set the expiration time
#'max_age': 5000,
},
},
) | [
"[email protected]"
] | |
5f96b2f9df61b2997848aed9767153a92a516338 | 762de1c66746267e05d53184d7854934616416ee | /tools/MolSurfGenService/MolSurfaceGen32/chimera/share/VolumeProcessing/apply.py | e3698c7a49fcc4c0b7f6619db155e7b141e47eb8 | [] | no_license | project-renard-survey/semanticscience | 6e74f5d475cf0ebcd9bb7be6bb9522cf15ed8677 | 024890dba56c3e82ea2cf8c773965117f8cda339 | refs/heads/master | 2021-07-07T21:47:17.767414 | 2017-10-04T12:13:50 | 2017-10-04T12:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Apply a function to a grid pointwise.
# The resulting volume is written in netcdf format.
#
# Syntax: apply.py sqrt|square|abs|exp|log <infile> <outfile>
#
# The file type must be one of the types handled by VolumeData.
#
import sys
from VolumeData import Grid_Data
# -----------------------------------------------------------------------------
#
def apply_function(array_func, inpath, outpath):
from VolumeData import fileformats
try:
grids = fileformats.open_file(inpath)
except fileformats.Unknown_File_Type, e:
sys.stderr.write(str(e))
sys.exit(1)
fvalues = [Mapped_Grid(g, array_func) for g in grids]
from VolumeData.netcdf import write_grid_as_netcdf
write_grid_as_netcdf(fvalues, outpath)
# -----------------------------------------------------------------------------
#
class Mapped_Grid(Grid_Data):
def __init__(self, grid_data, array_func):
self.array_func = array_func
Grid_Data.__init__(self, grid_data.size, grid_data.value_type,
grid_data.origin, grid_data.step,
name = grid_data.name, default_color = grid_data.rgba)
# ---------------------------------------------------------------------------
#
def read_matrix(self, ijk_origin, ijk_size, ijk_step, progress):
data = self.component.matrix(ijk_origin, ijk_size, ijk_step, progress)
fvalues = self.array_func(data)
return fvalues
# -----------------------------------------------------------------------------
#
def syntax():
msg = ('Apply a function to a grid pointwise.\n' +
'The resulting volume is written in netcdf format.\n'
'Syntax: apply.py sqrt|square|abs|exp|log <infile> <outfile>\n')
sys.stderr.write(msg)
sys.exit(1)
# -----------------------------------------------------------------------------
#
if len(sys.argv) != 4:
syntax()
fname = sys.argv[1]
from numpy import sqrt, power, absolute, exp, log
if fname == 'sqrt':
array_func = sqrt
elif fname == 'square':
array_func = lambda a: power(a, 2)
elif fname == 'abs':
array_func = absolute
elif fname == 'exp':
array_func = exp
elif fname == 'log':
array_func = log
else:
syntax()
inpath = sys.argv[2]
outpath = sys.argv[3]
apply_function(array_func, inpath, outpath)
| [
"alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5"
] | alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5 |
aa8aabf65ecb49d7092f518affba7b4f4200745b | 609582ee37a01ac6a67fb9c957825dcd3c9a5b3a | /LeetCode_Math/67_Add_Binaray.py | 77bf2de64eddd1dca19c9a8f56aeabd0235107f3 | [] | no_license | captainjack331089/captainjack33.LeetCode | a9ad7b3591675c76814eda22e683745068e0abed | 4c03f28371e003e8e6a7c30b7b0c46beb5e2a8e7 | refs/heads/master | 2022-03-07T19:53:40.454945 | 2019-11-06T19:32:00 | 2019-11-06T19:32:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | """
67. Add Binary
Category: Math
Difficulty: Easy
"""
"""
Given two binary strings, return their sum (also a binary string).
The input strings are both non-empty and contains only characters 1 or 0.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
"""
class Solution():
def addBinary(self,a,b):
return (bin( int(a,2) + int(b,2) )[2:])
a = "100"
b = "100"
if __name__ == "__main__":
print(Solution().addBinary(a,b)) | [
"[email protected]"
] | |
db5478f9a0cb0cf030d084d4aa9c480907c197a7 | 0dc3e9b70da8ccd056e0a0fab2b1d8f850c3d470 | /lantern/django/django_celery/src/apps/cars/serializers.py | 3b2841adafff0d4d82de945686eeba93f6718cd8 | [] | no_license | ArturYefriemov/green_lantern | 28e7150af7b9d2281a107ad80026828ad77af62a | 2841b647e1bfae4a7505e91e8a8695d03f35a3a2 | refs/heads/master | 2021-03-01T16:54:58.881835 | 2020-11-17T19:42:23 | 2020-11-17T19:42:23 | 245,799,969 | 0 | 0 | null | 2020-07-14T18:51:13 | 2020-03-08T11:13:32 | Python | UTF-8 | Python | false | false | 190 | py | from rest_framework import serializers
from apps.cars.models import Car
class CarSerializer(serializers.ModelSerializer):
class Meta:
model = Car
fields = '__all__'
| [
"[email protected]"
] | |
76b5e2452098e49235282783ad7eb1263db83e08 | ae7ba9c83692cfcb39e95483d84610715930fe9e | /yubinbai/pcuva-problems/UVa 10539 - Almost Prime Numbers/main.py | 30bb7c3cab4b9a2a5ac9a024702a2f2bdb6ddbf0 | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | from bisect import *
from bitstring import BitArray
import sys
MAXN = 1000005
def prime_sieve(top=MAXN):
b = BitArray(top) # bitstring of '0' bits
for i in range(2, top):
if not b[i]:
yield i
# i is prime, so set all its multiples to '1'.
b.set(True, range(i * i, top, i))
if __name__ == '__main__':
primes = list(prime_sieve())
almostPrimes = []
for p in primes:
p1 = p ** 2
while p1 < MAXN:
almostPrimes.append(p1)
p1 *= p
almostPrimes.sort()
sys.stdin = open('input.txt')
numTest = int(input())
for x in range(numTest):
left, right = map(int, raw_input().split())
i1 = bisect_right(almostPrimes, left)
i2 = bisect_right(almostPrimes, right)
print(i2 - i1)
| [
"[email protected]"
] | |
9d9a28c406e812fde853a9ab4577cc16b649995d | 9b77f1e31d5901924431a2a3164312cc346bde4f | /ADI4/manage.py | e9ec1a96a7b096f6b2698c979c0b121ed89eb43f | [] | no_license | Adi19471/Djnago_Code-Daily | c2184bf21db5c8d4b3c4098fbd593e4949375ae8 | 03b1b70d3e187fe85eb24e88b7ef3391b14aa98c | refs/heads/master | 2023-08-14T14:36:36.144243 | 2021-09-20T12:52:46 | 2021-09-20T12:52:46 | 375,690,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ADI4.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
8e1d635e43cf0d4a577b35facf856bf52864130c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04005/s700047032.py | 6b014ba065ec8998d3dab92a228e7bca1810778d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | a,b,c=map(int,input().split())
print(0 if (a*b*c)%2==0 else min(a*b,b*c,c*a)) | [
"[email protected]"
] | |
8a52bc396fcafcd7f2ed6b20d0b110a3e5a59648 | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Data Scientist with Python - Career Track /22. Machine Learning with the Experts: School Budgets/02. Creating a simple first model/01. Setting up a train-test split in scikit-learn.py | 09e603e05172de82530517858d1031747721ca01 | [] | no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,016 | py | '''
Setting up a train-test split in scikit-learn
Alright, you've been patient and awesome. It's finally time to start training models!
The first step is to split the data into a training set and a test set. Some labels don't occur very often, but we want to make sure that they appear in both the training and the test sets. We provide a function that will make sure at least min_count examples of each label appear in each split: multilabel_train_test_split.
Feel free to check out the full code for multilabel_train_test_split here.
You'll start with a simple model that uses just the numeric columns of your DataFrame when calling multilabel_train_test_split. The data has been read into a DataFrame df and a list consisting of just the numeric columns is available as NUMERIC_COLUMNS.
Instructions
100 XP
Create a new DataFrame named numeric_data_only by applying the .fillna(-1000) method to the numeric columns (available in the list NUMERIC_COLUMNS) of df.
Convert the labels (available in the list LABELS) to dummy variables. Save the result as label_dummies.
In the call to multilabel_train_test_split(), set the size of your test set to be 0.2. Use a seed of 123.
Fill in the .info() method calls for X_train, X_test, y_train, and y_test.
'''
# SOLUTION
# Create the new DataFrame: numeric_data_only
numeric_data_only = df[NUMERIC_COLUMNS].fillna(-1000)
# Get labels and convert to dummy variables: label_dummies
label_dummies = pd.get_dummies(df[LABELS])
# Create training and test sets
X_train, X_test, y_train, y_test = multilabel_train_test_split(numeric_data_only,
label_dummies,
size=0.2,
seed=123)
# Print the info
print("X_train info:")
print(X_train.info())
print("\nX_test info:")
print(X_test.info())
print("\ny_train info:")
print(y_train.info())
print("\ny_test info:")
print(y_test.info()) | [
"[email protected]"
] | |
9b930250c80b39f856585160a5b1f150a3d9355a | 6053cef7fc0b063a6105cd38659ba082ee706335 | /tweettools/blockmute.py | 945725ca153e6f977a12db922ae170e6fb90aabe | [
"MIT"
] | permissive | jdidion/blockmute | 18dd24535d75d6c8998a432a1a5b657a3e91b93f | 05984da637206d2bc5c69d2f68b10a1df4f9985f | refs/heads/main | 2021-01-19T19:52:16.657531 | 2018-04-29T01:20:39 | 2018-04-29T01:20:39 | 101,212,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py | #!/usr/bin/env python
# Block everyone you've muted, and vice-versa.
from argparse import ArgumentParser
import time
from tqdm import tqdm
from tweettools import get_client
def blockmute(api, sleep_secs=300):
mutes = set(api.GetMutesIDs())
blocks = set(api.GetBlocksIDs())
new_blocks = mutes - blocks
for user_id in tqdm(new_blocks):
while True:
try:
api.CreateBlock(user_id)
break
except:
print("Exceeded rate limit; sleeping for {} seconds".format(sleep_secs))
time.sleep(sleep_secs)
new_mutes = blocks - mutes
for user_id in tqdm(new_mutes):
while True:
try:
api.CreateMute(user_id)
break
except:
print("Exceeded rate limit; sleeping for {} seconds".format(sleep_secs))
time.sleep(sleep_secs)
def main():
parser = ArgumentParser()
parser.add_argument('-ck', '--consumer-key')
parser.add_argument('-cs', '--consumer-secret')
parser.add_argument('-tk', '--token-key', default=None)
parser.add_argument('-ts', '--token-secret', default=None)
parser.add_argument('-s', '--sleep-secs', type=int, default=15*60)
args = parser.parse_args()
api = get_client(args.token_key, args.token_secret, args.consumer_key, args.consumer_secret)
blockmute(api, sleep_secs=args.sleep_secs)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
b5722af8ed32f8e2da48f5c2d6fcd13c8de9701f | 52d324c6c0d0eb43ca4f3edc425a86cdc1e27d78 | /scripts/asos/archive_quantity.py | 9c22be17d7528b94acd44e3f1e30933859ee8315 | [
"MIT"
] | permissive | deenacse/iem | 992befd6d95accfdadc34fb7928d6b69d661d399 | 150512e857ca6dca1d47363a29cc67775b731760 | refs/heads/master | 2021-02-04T04:20:14.330527 | 2020-02-26T21:11:32 | 2020-02-26T21:11:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,342 | py | """ Create a simple prinout of observation quanity in the database """
from __future__ import print_function
import sys
import datetime
import numpy as np
from pyiem.util import get_dbconn
class bcolors:
"""Kind of hacky"""
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
def d(hits, total):
"""another hack"""
if total == 0:
return " N/A"
val = hits / float(total)
c1 = bcolors.ENDC
if val > 0.5:
c1 = bcolors.FAIL
return "%s%.2f%s" % (c1, val, bcolors.ENDC)
def main(argv):
"""Go Main Go"""
now = datetime.datetime.utcnow()
counts = np.zeros((120, 12))
mslp = np.zeros((120, 12))
metar = np.zeros((120, 12))
pgconn = get_dbconn("asos", user="nobody")
acursor = pgconn.cursor()
stid = argv[1]
acursor.execute(
"""
SELECT extract(year from valid) as yr,
extract(month from valid) as mo, count(*),
sum(case when mslp is null or mslp < 1 then 1 else 0 end),
sum(case when metar is null or metar = '' then 1 else 0 end)
from alldata WHERE
station = %s GROUP by yr, mo ORDER by yr ASC, mo ASC
""",
(stid,),
)
for row in acursor:
counts[int(row[0] - 1900), int(row[1] - 1)] = row[2]
mslp[int(row[0] - 1900), int(row[1] - 1)] = row[3]
metar[int(row[0] - 1900), int(row[1] - 1)] = row[4]
print("Observation Count For %s" % (stid,))
print("YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP OCT NOV DEC")
output = False
for i in range(120):
year = 1900 + i
if year > now.year:
continue
if not output and np.max(counts[i, :]) == 0:
continue
output = True
if len(argv) < 3:
print(
("%s %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i")
% (
year,
counts[i, 0],
counts[i, 1],
counts[i, 2],
counts[i, 3],
counts[i, 4],
counts[i, 5],
counts[i, 6],
counts[i, 7],
counts[i, 8],
counts[i, 9],
counts[i, 10],
counts[i, 11],
)
)
else:
if argv[2] == "metar":
data = metar
else:
data = mslp
print(
("%s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s")
% (
year,
d(data[i, 0], counts[i, 0]),
d(data[i, 1], counts[i, 1]),
d(data[i, 2], counts[i, 2]),
d(data[i, 3], counts[i, 3]),
d(data[i, 4], counts[i, 4]),
d(data[i, 5], counts[i, 5]),
d(data[i, 6], counts[i, 6]),
d(data[i, 7], counts[i, 7]),
d(data[i, 8], counts[i, 8]),
d(data[i, 9], counts[i, 9]),
d(data[i, 10], counts[i, 10]),
d(data[i, 11], counts[i, 11]),
)
)
if __name__ == "__main__":
main(sys.argv)
| [
"[email protected]"
] | |
2d83f6345f4629fb349ea3e2aa1ecd09b77cec8b | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/account_budget_proposal_service/transports/base.py | 86d3e463eb723e6cf5e1dcff665b4d0e784c1fce | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,334 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v8.resources.types import account_budget_proposal
from google.ads.googleads.v8.services.types import account_budget_proposal_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AccountBudgetProposalServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for AccountBudgetProposalService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_account_budget_proposal: gapic_v1.method.wrap_method(
self.get_account_budget_proposal,
default_timeout=None,
client_info=client_info,
),
self.mutate_account_budget_proposal: gapic_v1.method.wrap_method(
self.mutate_account_budget_proposal,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_account_budget_proposal(self) -> typing.Callable[
[account_budget_proposal_service.GetAccountBudgetProposalRequest],
account_budget_proposal.AccountBudgetProposal]:
raise NotImplementedError
@property
def mutate_account_budget_proposal(self) -> typing.Callable[
[account_budget_proposal_service.MutateAccountBudgetProposalRequest],
account_budget_proposal_service.MutateAccountBudgetProposalResponse]:
raise NotImplementedError
__all__ = (
'AccountBudgetProposalServiceTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
cc472b1754e73618c88e880b49f00b891157f7e0 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/DPN-68_ID1889_for_PyTorch/timm/data/dataset.py | 4b32a3a0617ad45b963c62d5fc03f7d56de6b2f8 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"CC-BY-NC-4.0"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,548 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
""" Quick n Simple Image Folder, Tarfile based DataSet
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch.utils.data as data
import os
import torch
import logging
from PIL import Image
from .parsers import create_parser
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
_logger = logging.getLogger(__name__)
_ERROR_RETRY = 50
class ImageDataset(data.Dataset):
def __init__(
self,
root,
parser=None,
class_map='',
load_bytes=False,
transform=None,
):
if parser is None or isinstance(parser, str):
parser = create_parser(parser or '', root=root, class_map=class_map)
self.parser = parser
self.load_bytes = load_bytes
self.transform = transform
self._consecutive_errors = 0
def __getitem__(self, index):
img, target = self.parser[index]
try:
img = img.read() if self.load_bytes else Image.open(img).convert('RGB')
except Exception as e:
_logger.warning(f'Skipped sample (index {index}, file {self.parser.filename(index)}). {str(e)}')
self._consecutive_errors += 1
if self._consecutive_errors < _ERROR_RETRY:
return self.__getitem__((index + 1) % len(self.parser))
else:
raise e
self._consecutive_errors = 0
if self.transform is not None:
img = self.transform(img)
if target is None:
target = torch.tensor(-1, dtype=torch.long)
return img, target
def __len__(self):
return len(self.parser)
def filename(self, index, basename=False, absolute=False):
return self.parser.filename(index, basename, absolute)
def filenames(self, basename=False, absolute=False):
return self.parser.filenames(basename, absolute)
class IterableImageDataset(data.IterableDataset):
def __init__(
self,
root,
parser=None,
split='train',
is_training=False,
batch_size=None,
class_map='',
load_bytes=False,
repeats=0,
transform=None,
):
assert parser is not None
if isinstance(parser, str):
self.parser = create_parser(
parser, root=root, split=split, is_training=is_training, batch_size=batch_size, repeats=repeats)
else:
self.parser = parser
self.transform = transform
self._consecutive_errors = 0
def __iter__(self):
for img, target in self.parser:
if self.transform is not None:
img = self.transform(img)
if target is None:
target = torch.tensor(-1, dtype=torch.long)
yield img, target
def __len__(self):
if hasattr(self.parser, '__len__'):
return len(self.parser)
else:
return 0
def filename(self, index, basename=False, absolute=False):
assert False, 'Filename lookup by index not supported, use filenames().'
def filenames(self, basename=False, absolute=False):
return self.parser.filenames(basename, absolute)
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix or other clean/augmentation mixes"""
def __init__(self, dataset, num_splits=2):
self.augmentation = None
self.normalize = None
self.dataset = dataset
if self.dataset.transform is not None:
self._set_transforms(self.dataset.transform)
self.num_splits = num_splits
def _set_transforms(self, x):
assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms'
self.dataset.transform = x[0]
self.augmentation = x[1]
self.normalize = x[2]
@property
def transform(self):
return self.dataset.transform
@transform.setter
def transform(self, x):
self._set_transforms(x)
def _normalize(self, x):
return x if self.normalize is None else self.normalize(x)
def __getitem__(self, i):
x, y = self.dataset[i] # all splits share the same dataset base transform
x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split)
# run the full augmentation on the remaining splits
for _ in range(self.num_splits - 1):
x_list.append(self._normalize(self.augmentation(x)))
return tuple(x_list), y
def __len__(self):
return len(self.dataset)
| [
"[email protected]"
] | |
50c5dd1046b86e17916c7169ac1be8c2aa36dc0b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/49/usersdata/107/19461/submittedfiles/pico.py | d085c047956c05bb79cd9376fc75eadbc27af13d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # -*- coding: utf-8 -*-
from __future__ import division
def pico(a):
posicao=0
for i in range (0,len(a)-1,1):
if a[i]> a[i+1]:
            posicao=i
            break
cont=0
for i in range (posicao,len(a)-1,1):
if a[i] <= a[i+1]:
cont=cont+1
if cont==0 and posicao !=0:
return True
else:
return False
n = input('digite a quantidade de elemento')
a=[]
for i in range (0,n,1):
a.append(input('a:'))
if pico (a):
print ('S')
else:
    print ('N')
n = input('Digite a quantidade de elementos da lista: ')
#CONTINUE...
| [
"[email protected]"
] | |
4101fd7aac1737d98b2dfafe6118696400bd4e4a | 844e0cd4ffbe1ead05b844508276f66cc20953d5 | /test/testconfigurationmanager.py | e9fae9d325da652711c99ddbfa3770ec19e87574 | [] | no_license | Archanciel/cryptopricer | a256fa793bb1f2d65b5c032dd81a266ee5be79cc | 00c0911fe1c25c1da635dbc9b26d45be608f0cc5 | refs/heads/master | 2022-06-29T13:13:22.435670 | 2022-05-11T20:37:43 | 2022-05-11T20:37:43 | 100,196,449 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,083 | py | import unittest
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from configurationmanager import ConfigurationManager
class TestConfigurationManager(unittest.TestCase):
def setUp(self):
if os.name == 'posix':
self.filePath = '/sdcard/cryptopricer_test.ini'
else:
self.filePath = 'c:\\temp\\cryptopricer_test.ini'
def testConfigurationManagerInstanciation(self):
self.configMgr = ConfigurationManager(self.filePath)
self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
if os.name == 'posix':
self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
self.assertEqual(self.configMgr.appSize, 'Half')
self.assertEqual(self.configMgr.histoListItemHeight, '90')
else:
self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
self.assertEqual(self.configMgr.appSize, 'Full')
self.assertEqual(self.configMgr.histoListItemHeight, '35')
self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
self.assertEqual(self.configMgr.histoListVisibleSize, '3')
self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
self.assertEqual(self.configMgr.referenceCurrency, 'USD')
def testConfigurationManagerInstanciationNoConfigFile(self):
os.remove(self.filePath)
self.configMgr = ConfigurationManager(self.filePath)
self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
if os.name == 'posix':
self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
self.assertEqual(self.configMgr.appSize, 'Half')
self.assertEqual(self.configMgr.histoListItemHeight, '90')
else:
self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
self.assertEqual(self.configMgr.appSize, 'Full')
self.assertEqual(self.configMgr.histoListItemHeight, '35')
self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
self.assertEqual(self.configMgr.histoListVisibleSize, '3')
self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
self.assertEqual(self.configMgr.referenceCurrency, 'USD')
def testConfigurationManagerInstanciationEmptyConfigFile(self):
open(self.filePath, 'w').close()
self.configMgr = ConfigurationManager(self.filePath)
self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
if os.name == 'posix':
self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
self.assertEqual(self.configMgr.appSize, 'Half')
self.assertEqual(self.configMgr.histoListItemHeight, '90')
else:
self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
self.assertEqual(self.configMgr.appSize, 'Full')
self.assertEqual(self.configMgr.histoListItemHeight, '35')
self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
self.assertEqual(self.configMgr.histoListVisibleSize, '3')
self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
self.assertEqual(self.configMgr.referenceCurrency, 'USD')
def testConfigurationManagerInstanciationOneMissingKey(self):
#removing second line in config file
with open(self.filePath, 'r') as configFile:
lines = configFile.readlines()
with open(self.filePath, 'w') as configFile:
# first line contains [General] section name !
configFile.write(''.join(lines[0:1] + lines[2:]))
self.configMgr = ConfigurationManager(self.filePath)
self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
if os.name == 'posix':
self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
else:
self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
self.assertEqual(self.configMgr.histoListVisibleSize, '3')
self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
self.assertEqual(self.configMgr.referenceCurrency, 'USD')
if __name__ == '__main__':
#unittest.main()
tst = TestConfigurationManager()
tst.setUp()
tst.testConfigurationManagerInstanciationEmptyConfigFile()
| [
"[email protected]"
] | |
066554d6b1f8b0a91a6ca227d27ae0ea8cfbd211 | 9a1b033774e371bd6442048f43e862dfb71abed7 | /Comprehensions/Lab/Flattening_Matrix.py | 57887545e4a87d7ca53a75baebc41865c380cf13 | [] | no_license | mialskywalker/PythonAdvanced | ea4fde32ba201f6999cd0d59d1a95f00fb5f674b | c74ad063154c94b247aaf73b7104df9c6033b1a5 | refs/heads/master | 2023-03-09T00:13:28.471328 | 2021-02-24T15:21:11 | 2021-02-24T15:21:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | n = int(input())
matrix = [[int(j) for j in input().split(', ')] for i in range(n)]
flat = [x for row in matrix for x in row]
print(flat)
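# Example run (inputs are illustrative): entering 2, then the rows "1, 2" and "3, 4",
# gives matrix == [[1, 2], [3, 4]] and prints [1, 2, 3, 4].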
| [
"[email protected]"
] | |
a25b6496f12166e06a56177364a4c1ecfbc4a31f | ffd5e689f88c49ab7af3554c22dc0c36301084fa | /thinking_and_testing_uniq_or_not_uniq.py | d7b067767d0bea11d3b61a30da4b020ac1ca2f17 | [] | no_license | ellismckenzielee/codewars-python | 1710e6f0499047139479de386927c7dbd5f1cdf6 | af3f4b4534798a58115d0565730aae28ce87437e | refs/heads/master | 2023-08-09T13:38:40.964141 | 2023-08-01T14:45:22 | 2023-08-01T14:45:22 | 168,981,376 | 45 | 18 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | #thinking and testing: uniq or not uniq kata
#https://www.codewars.com/kata/56d949281b5fdc7666000004
def testit(a, b):
a = list(set(a))
b = list(set(b))
a.extend(b)
return sorted(a) | [
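# Example (traced from the code above; inputs are illustrative):
# testit([1, 3, 2, 3], [3, 2]) returns [1, 2, 2, 3, 3].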
"[email protected]"
] | |
211c727e8d52656e27ff87503013df32b74cd429 | bc54edd6c2aec23ccfe36011bae16eacc1598467 | /simscale_sdk/models/flow_rate_mean_outlet_vbc.py | e896a0e17e908cfccdaca58f5a681e31f2fb9e87 | [
"MIT"
] | permissive | SimScaleGmbH/simscale-python-sdk | 4d9538d5efcadae718f12504fb2c7051bbe4b712 | 6fe410d676bf53df13c461cb0b3504278490a9bb | refs/heads/master | 2023-08-17T03:30:50.891887 | 2023-08-14T08:09:36 | 2023-08-14T08:09:36 | 331,949,105 | 17 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,305 | py | # coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class FlowRateMeanOutletVBC(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'flow_rate': 'OneOfFlowRateMeanOutletVBCFlowRate'
}
attribute_map = {
'type': 'type',
'flow_rate': 'flowRate'
}
def __init__(self, type='FLOW_RATE_MEAN_OUTLET_VELOCITY', flow_rate=None, local_vars_configuration=None): # noqa: E501
"""FlowRateMeanOutletVBC - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._flow_rate = None
self.discriminator = None
self.type = type
if flow_rate is not None:
self.flow_rate = flow_rate
@property
def type(self):
"""Gets the type of this FlowRateMeanOutletVBC. # noqa: E501
Schema name: FlowRateMeanOutletVBC # noqa: E501
:return: The type of this FlowRateMeanOutletVBC. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this FlowRateMeanOutletVBC.
Schema name: FlowRateMeanOutletVBC # noqa: E501
:param type: The type of this FlowRateMeanOutletVBC. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def flow_rate(self):
"""Gets the flow_rate of this FlowRateMeanOutletVBC. # noqa: E501
:return: The flow_rate of this FlowRateMeanOutletVBC. # noqa: E501
:rtype: OneOfFlowRateMeanOutletVBCFlowRate
"""
return self._flow_rate
@flow_rate.setter
def flow_rate(self, flow_rate):
"""Sets the flow_rate of this FlowRateMeanOutletVBC.
:param flow_rate: The flow_rate of this FlowRateMeanOutletVBC. # noqa: E501
:type: OneOfFlowRateMeanOutletVBCFlowRate
"""
self._flow_rate = flow_rate
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FlowRateMeanOutletVBC):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FlowRateMeanOutletVBC):
return True
return self.to_dict() != other.to_dict()
| [
"simscale"
] | simscale |
e5bc633da5a7a8bc72a5896a2acd1b80d49ca5f1 | 91fb65972d69ca25ddd892b9d5373919ee518ee7 | /python-training-courses/pfc-sample-programs/func_example_002_a_with_its_use.py | fa56a44bfd53a1e492b260df8427a8512dba5dd3 | [] | no_license | zeppertrek/my-python-sandpit | c36b78e7b3118133c215468e0a387a987d2e62a9 | c04177b276e6f784f94d4db0481fcd2ee0048265 | refs/heads/master | 2022-12-12T00:27:37.338001 | 2020-11-08T08:56:33 | 2020-11-08T08:56:33 | 141,911,099 | 0 | 0 | null | 2022-12-08T04:09:28 | 2018-07-22T16:12:55 | Python | UTF-8 | Python | false | false | 867 | py | # func_example_002_a_with_its_use.py
# refer to func_example_002_without_its_use.py
#
# Passing variable number of arguments to the function
def add_numbers (*myNumbers):
sum = 0
for i in myNumbers:
sum = sum + i
return sum
num01, num02, num03, num04, num05, num06, num07, num08, num09, num10 = 1,2,3,4,5,6,7,8,9,10
# Calculate and Print sum of the first 5 numbers
sum1 = add_numbers (num01, num02, num03, num04, num05)
print ("Sum of the first 5 numbers is - ", sum1 )
# Calculate and Print sum of the numbers from 6 to 10
sum2 = add_numbers (num06, num07, num08, num09, num10)
print ("Sum of the numbers from 6 to 10 - ", sum2 )
# Calculate and Print sum of the numbers in odd positions
sum3 = add_numbers (num01, num03, num05, num07, num09)
print ("Sum of the numbers in odd positions - ", sum3)
| [
"[email protected]"
] | |
453574190afbadf01fad742c24929e94bf313b5f | baed2c2da1f776c0968d3cacd2fa45bdbe5482d6 | /S4cam/groupedCameras/TMP/legacy_designs/TMP_baseline_rev_multicam_test3_elliptical_stop_leaders_8_39/elliptical_aperture/3_mk_merit_func_align_prism_and_set_margin.py | e83b329923c4042e2689350f76cda65679e0d9ef | [] | no_license | patogallardo/zemax_tools | 5ae2fe9a1e8b032684b8cf57457ee4f3239d9141 | 90d309c2f96c94469963eb905844d76fa2137bf9 | refs/heads/master | 2023-01-08T22:52:16.865852 | 2022-12-20T21:36:28 | 2022-12-20T21:36:28 | 234,634,525 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 8,451 | py | import zmx_api
import zmx # noqa
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from progressbar import progressbar
import os
XRADIUS = 2500
YRADIUS = 2747
TARGET_KEEPOUT_RADIUS_MM = 150.0
def eval_distance_to_rim(max_rs, MFE, surfnum):
qsum_rownums = []
radius_rownums = []
MFE.AddOperand()
for j_field in range(len(max_rs)):
op_x = MFE.AddOperand()
rownum_x = op_x.OperandNumber
op_x.ChangeType(REAX)
op_x.GetOperandCell(2).IntegerValue = surfnum
op_x.GetOperandCell(4).DoubleValue = max_rs.hx.values[j_field] # Hx
op_x.GetOperandCell(5).DoubleValue = max_rs.hy.values[j_field] # Hy
op_x.GetOperandCell(6).DoubleValue = max_rs.px.values[j_field] # Px
op_x.GetOperandCell(7).DoubleValue = max_rs.py.values[j_field] # Py
op_x.Weight = 0.0
op_y = MFE.AddOperand()
rownum_y = op_y.OperandNumber
op_y.ChangeType(REAY)
op_y.GetOperandCell(2).IntegerValue = surfnum
op_y.GetOperandCell(4).DoubleValue = max_rs.hx.values[j_field] # Hx
op_y.GetOperandCell(5).DoubleValue = max_rs.hy.values[j_field] # Hy
op_y.GetOperandCell(6).DoubleValue = max_rs.px.values[j_field] # Px
op_y.GetOperandCell(7).DoubleValue = max_rs.py.values[j_field] # Py
op_y.Weight = 0.0
op_qsum = MFE.AddOperand()
op_qsum.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.QSUM)
op_qsum.GetOperandCell(2).IntegerValue = rownum_x
op_qsum.GetOperandCell(3).IntegerValue = rownum_y
op_qsum.Weight = 0.0
MFE.CalculateMeritFunction()
y = op_y.Value
x = op_x.Value
angle = np.arctan2(y, x)
r = np.sqrt((XRADIUS*np.cos(angle))**2
+ (YRADIUS*np.sin(angle))**2)
op_rim = MFE.AddOperand()
op_rim.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.CONS)
op_rim.Target = r
radius_rownums.append(op_rim.OperandNumber)
qsum_rownums.append(op_qsum.OperandNumber)
for j in range(len(qsum_rownums)):
op_diff = MFE.AddOperand()
if j == 0:
first_diff_rownum = op_diff.OperandNumber
if j == len(qsum_rownums) - 1:
last_diff_rownum = op_diff.OperandNumber
op_diff.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.DIFF)
op_diff.GetOperandCell(2).IntegerValue = radius_rownums[j]
op_diff.GetOperandCell(3).IntegerValue = qsum_rownums[j]
op_diff.Weight = 0.0
op_equa = MFE.AddOperand()
op_equa.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.EQUA)
op_equa.GetOperandCell(2).IntegerValue = first_diff_rownum
op_equa.GetOperandCell(3).IntegerValue = last_diff_rownum
op_equa.Weight = 1.0e-4
op_min = MFE.AddOperand()
op_min.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.MINN)
op_min.GetOperandCell(2).IntegerValue = first_diff_rownum
op_min.GetOperandCell(3).IntegerValue = last_diff_rownum
op_min.Weight = 1.0
op_min.Target = TARGET_KEEPOUT_RADIUS_MM
op_opgt = MFE.AddOperand()
op_opgt.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.OPGT)
op_opgt.Target = 140
op_opgt.Weight = 1e12
op_opgt.GetOperandCell(2).IntegerValue = op_min.OperandNumber
op_oplt = MFE.AddOperand()
op_oplt.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.OPLT)
op_oplt.Target = 1200.
op_oplt.Weight = 1e12
op_oplt.GetOperandCell(2).IntegerValue = op_min.OperandNumber
def find_max_radius_fields(df, x_mean, y_mean):
max_rs = []
gs = df.groupby(['px', 'py'])
for g in gs:
r = np.sqrt((x_mean - g[1].x)**2 + (y_mean-g[1].y)**2)
ind = r.idxmax()
max_rs.append(g[1].loc[ind])
max_rs = pd.DataFrame(max_rs)
return max_rs
def plot_rim(active_conf, df, max_rs):
fname_plotout = os.path.join(MF_DIROUT,
"footprint_rim_conf%02i.png" % active_conf) # noqa
plt.gca().set_aspect('equal')
plt.scatter(df.x, df.y, marker='.')
plt.scatter(max_rs.x, max_rs.y, marker='.')
plt.title("configuration number: %i" % active_conf)
plt.xlim([-3000, 3000])
plt.savefig(fname_plotout)
plt.close()
def eval_rim_centroid(max_rs, MFE, surfnum, REAXORY):
for j_field in range(len(max_rs)):
op = MFE.AddOperand()
if j_field == 0:
row_start = op.OperandNumber
if j_field == len(max_rs) - 1:
row_end = op.OperandNumber
op.ChangeType(REAXORY)
op.GetOperandCell(2).IntegerValue = surfnum
op.GetOperandCell(4).DoubleValue = max_rs.hx.values[j_field] # Hx
op.GetOperandCell(5).DoubleValue = max_rs.hy.values[j_field] # Hy
op.GetOperandCell(6).DoubleValue = max_rs.px.values[j_field] # Px
op.GetOperandCell(7).DoubleValue = max_rs.py.values[j_field] # Py
op.Weight = 0.0
op = MFE.AddOperand()
op.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.OSUM)
op.GetOperandCell(2).IntegerValue = row_start
op.GetOperandCell(3).IntegerValue = row_end
op.Weight = 10.0
MKPLOT = True
RUNOPTIMIZER = False
MK_MERITFUNCTIONS = True
mce_rows_to_optimize = [19, 20]
TheSystem, ZOSAPI, ZOSAPI_NetHelper = zmx_api.connect_zmx_interactive()
MFE = TheSystem.MFE
MCE = TheSystem.MCE
REAX = ZOSAPI.Editors.MFE.MeritOperandType.REAX
REAY = ZOSAPI.Editors.MFE.MeritOperandType.REAY
surfnum = 44
wavenum = 1
t = np.linspace(0, 2*np.pi, 32)[:-1]
rs = np.linspace(0, 1, 4)[:-1]
Pxs = np.cos(t)
Pys = np.sin(t)
Hxs = np.concatenate([np.cos(t) * r for r in rs])
Hys = np.concatenate([np.sin(t) * r for r in rs])
MF_DIROUT = './center_pri_footprint/'
if not os.path.exists(MF_DIROUT):
os.mkdir(MF_DIROUT)
if MK_MERITFUNCTIONS:
for active_conf in progressbar(range(1, 86)):
# MFE.GetOperandAt(1).GetOperandCell(2).IntegerValue = 1
MCE.SetCurrentConfiguration(active_conf)
px_out, py_out, hx_out, hy_out, x, y = [], [], [], [], [], []
for (Hx, Hy) in zip(Hxs, Hys):
for (Px, Py) in zip(Pxs, Pys):
valx = MFE.GetOperandValue(REAX, surfnum, wavenum,
Hx, Hy, Px, Py, 0, 0)
valy = MFE.GetOperandValue(REAY, surfnum, wavenum,
Hx, Hy, Px, Py, 0, 0)
px_out.append(Px)
py_out.append(Py)
hx_out.append(Hx)
hy_out.append(Hy)
x.append(valx)
y.append(valy)
stopval = MFE.GetOperandValue(REAX, 6, 1,
0, 0, 1, 0, 0, 0)
df = pd.DataFrame({'hx': hx_out,
'hy': hy_out,
'px': px_out,
'py': py_out,
'x': x,
'y': y})
x_mean, y_mean = df.x.mean(), df.y.mean()
max_rs = find_max_radius_fields(df, x_mean, y_mean)
x_mean, y_mean = max_rs.x.mean(), max_rs.y.mean()
max_rs = find_max_radius_fields(df, x_mean, y_mean)
if MKPLOT:
plot_rim(active_conf, df, max_rs)
# now clear merit function and write up a new one
MFE.RemoveOperandsAt(1, MFE.NumberOfOperands)
MFE.AddOperand()
MFE.GetOperandAt(1).GetOperandCell(2).IntegerValue = active_conf
MFE.AddOperand()
op_cvig = MFE.AddOperand()
op_svig = MFE.AddOperand()
op_cvig.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.CVIG)
op_svig.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.SVIG)
op_svig.GetOperandCell(2).IntegerValue = 2
eval_rim_centroid(max_rs, MFE, surfnum, REAX)
eval_rim_centroid(max_rs, MFE, surfnum, REAY)
eval_distance_to_rim(max_rs, MFE, surfnum)
mf_fnameout = os.path.abspath(os.path.join(MF_DIROUT,
"MF_conf%02i.MF" % active_conf)) # noqa
MFE.SaveMeritFunction(mf_fnameout)
if RUNOPTIMIZER:
for active_conf in progressbar(range(1, 86)):
mf_fnameout = os.path.abspath(os.path.join(MF_DIROUT,
"MF_conf%02i.MF" % active_conf))
MFE.LoadMeritFunction(mf_fnameout)
TheSystem.Tools.RemoveAllVariables()
zmx.set_variables_or_const(mce_rows_to_optimize,
active_conf,
MCE, ZOSAPI, vars=True)
zmx.zemax_optimize(TheSystem, ZOSAPI)
| [
"[email protected]"
] | |
bf4ab1b554798c38423c6b25ffc2e3404c7b9980 | eea1be5dbac7fa10167eae167eb6712e3937f53a | /siteuser/utils/models.py | 607ac2c9399c5f052d881715a70bed9367b4b671 | [] | no_license | chidimo/Voidcoin | 40962e46661b2a7106bd8e60d0830c3b9629b8fa | 227c160dfa671818522781aab013f2d1fcb098a9 | refs/heads/develop | 2022-12-09T17:40:26.294425 | 2019-07-04T08:32:20 | 2019-07-04T08:32:20 | 135,197,447 | 5 | 2 | null | 2022-12-08T02:08:45 | 2018-05-28T18:45:19 | Python | UTF-8 | Python | false | false | 441 | py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from .fields import AutoCreatedField, AutoLastModifiedField
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-updating
``created`` and ``modified`` fields.
"""
created = AutoCreatedField(_('created'))
modified = AutoLastModifiedField(_('modified'))
class Meta:
abstract = True
| [
"[email protected]"
] | |
c96baa39b9776108de52e68614ff8a956ef413f8 | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/recommend/JEPOO/model/mel.py | 3581eddba520c6e2403c416cad136096a7b09a35 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 3,514 | py | # Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import mindspore as ms
from mindspore import ops
from librosa.filters import mel
from librosa.util import pad_center
from scipy.signal import get_window
class STFT(nn.Cell):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length, hop_length, win_length=None, window='hann'):
super(STFT, self).__init__()
if win_length is None:
win_length = filter_length
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
fourier_basis = np.fft.fft(np.eye(self.filter_length))
self.cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:self.cutoff, :]),
np.imag(fourier_basis[:self.cutoff, :])])
self.forward_basis = ms.Tensor(fourier_basis[:, None, :], ms.float32)
if window is not None:
assert filter_length >= win_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = ms.Tensor(fft_window, ms.float32)
self.forward_basis *= fft_window
def construct(self, input_data):
input_data = ops.expand_dims(input_data, 1)
input_data = ops.Pad(((0, 0), (0, 0), (int(self.filter_length / 2), int(self.filter_length / 2))))(input_data)
forward_transform = nn.Conv1d(1, self.cutoff * 2, self.win_length, stride=self.hop_length, pad_mode='valid',
weight_init=self.forward_basis)(input_data)
real_part = forward_transform[:, :self.cutoff, :]
imag_part = forward_transform[:, self.cutoff:, :]
magnitude = ops.sqrt(real_part**2 + imag_part**2)
phase = ops.atan2(imag_part, real_part)
return magnitude, phase
class MelSpectrogram(nn.Cell):
def __init__(self, n_mels, sample_rate, filter_length, hop_length,
win_length=None, mel_fmin=0.0, mel_fmax=None):
super(MelSpectrogram, self).__init__()
self.stft = STFT(filter_length, hop_length, win_length)
mel_basis = mel(sample_rate, filter_length, n_mels, mel_fmin, mel_fmax, htk=True)
self.mel_basis = ms.Tensor(mel_basis, ms.float32)
self.min_bound = ms.Tensor(1e-5, ms.float32)
def construct(self, y):
magnitudes, _ = self.stft(y)
mel_output = ops.matmul(self.mel_basis, magnitudes)
mel_output = ops.clip_by_value(mel_output, clip_value_min=self.min_bound)
mel_output = ops.log(mel_output)
return mel_output
| [
"[email protected]"
] | |
408eefcd98a92dd07cb9fa4f21371235a339bf84 | d032bc0c01a7cd598481644e22043de8df4c71c4 | /consultant_app/versatilimagefield.py | 90f5c5be364e762bcd094b0cd36c0169a6108c18 | [] | no_license | amrit-kumar/project-for-engineering | eb5f410cd2f0a271633fb6c24132a36e6215f0e0 | 7e975866e540ab4625e735009fdba971df74e393 | refs/heads/master | 2020-12-03T01:49:02.429186 | 2017-06-30T09:09:46 | 2017-06-30T09:09:46 | 95,863,800 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | import io
from io import StringIO
from PIL import Image
# from StringIO import StringIO
from .views import *
from versatileimagefield.datastructures import SizedImage
from django.utils.datastructures import *
from versatileimagefield.fields import VersatileImageField
from versatileimagefield.registry import versatileimagefield_registry
# Unregistering the 'crop' Sizer
# versatileimagefield_registry.unregister_sizer('crop')
# Registering a custom 'crop' Sizer
# versatileimagefield_registry.register_sizer('crop', SomeCustomSizedImageCls)
class ThumbnailImage(SizedImage):
"""
Sizes an image down to fit within a bounding box
See the `process_image()` method for more information
"""
filename_key = 'thumbnail'
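    # Access sketch (assumption, following django-versatileimagefield's sizer
    # convention): once registered at the bottom of this module, the sizer is
    # reachable on any VersatileImageField value, e.g.
    # `instance.image.thumbnail['400x400'].url`.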
def process_image(self, image, image_format, save_kwargs,
width=400, height=400):
"""
        Returns a BytesIO instance of `image` that will fit
within a bounding box as specified by `width`x`height`
"""
imagefile = io.BytesIO()
image.thumbnail(
(width, height),
Image.ANTIALIAS
)
image.save(
imagefile,
**save_kwargs
)
return imagefile
# Registering the ThumbnailSizer to be available on VersatileImageField
# via the `thumbnail` attribute
versatileimagefield_registry.unregister_sizer('thumbnail')
versatileimagefield_registry.register_sizer('thumbnail', ThumbnailImage)
| [
"[email protected]"
] | |
0c8153f27fb67a668ee75237e7cd43c5388cfa62 | 92773cbdd70812f45e1b9b97bbc024aee4b4b18d | /Chapter7. Iteration/loop.py | 8765dd5a07dd4c28348fbcf1c1cc68b803ce3fd9 | [] | no_license | Abu-Kaisar/Python3Programming--Coursera | e46edc86294ac76109a89b2cb02e8b6af178dcce | e1b93899c4f507b9b32091283951e761e182b97a | refs/heads/master | 2022-11-21T07:40:28.985698 | 2020-07-19T04:07:51 | 2020-07-19T04:07:51 | 280,787,750 | 0 | 0 | null | 2020-07-19T03:58:52 | 2020-07-19T03:58:52 | null | UTF-8 | Python | false | false | 476 | py | # mylist = ["yo","mujju","salman","thuss"]
# for i in mylist:
# print("Hi", i ,"Dawat hai kheenchny aao")
# mylist = "dgsadafdua"
# for char in mylist:
# print("Hi", char )
s = "python rocks"
for ch in s:
print("HELLO")
import turtle # set up alex
wn = turtle.Screen()
mujju = turtle.Turtle()
for aColor in ["yellow", "red", "purple", "blue"]:
    mujju.color(aColor)        # repeat four times
mujju.forward(50)
mujju.left(90)
wn.exitonclick()
| [
"[email protected]"
] | |
a0053fe45551ebe58fb97b17632014d320aff29c | 5ebbad9b3a6664a65d1ebb50056f83fe50435d3a | /Open Elective Python/Unit 3/7.py | 215ba42a02c60afd4c2107dbe91ea45345e874e2 | [] | no_license | jugal13/Python_Lab | 5892c6f2c77d0222a6b31bc40774d24b46f86475 | f8b96b3ecf1b913f4121e8e6d89b1a610f4ecba2 | refs/heads/master | 2023-03-01T03:53:30.889295 | 2021-02-10T14:42:09 | 2021-02-10T14:42:09 | 145,228,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | import time
millis = int(round(time.time() * 1000))
print(millis) | [
"[email protected]"
] | |
d533929137010a828e0c1fe70530eb874680c0e9 | ca5fc43049f94a794d90a561fd8126f02b603599 | /i3py/core/features/options.py | cdcfff349c6f5457d6e1affa7b82a7ef3f760806 | [
"BSD-3-Clause"
] | permissive | Exopy/i3py | 32d9ee343d21d275680a2d030b660a80960e99ac | 6f004d3e2ee2b788fb4693606cc4092147655ce1 | refs/heads/master | 2022-02-18T21:51:16.423188 | 2019-08-28T23:51:02 | 2019-08-28T23:51:02 | 63,874,745 | 1 | 0 | BSD-3-Clause | 2018-05-23T09:45:26 | 2016-07-21T14:07:58 | Python | UTF-8 | Python | false | false | 1,960 | py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016-2018 by I3py Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Feature for instrument options.
"""
from typing import Any, Union, Optional, Dict, Tuple
from .feature import Feature
from ..abstracts import AbstractOptions
class Options(Feature):
"""Feature used to access the options of an instrument.
    Options in I3py are considered static (i.e. related to the hardware or
    firmware) and are hence read-only. Because there is no generic pattern
    in the formatting of the options, the user is expected to implement the
    getter function manually.
Parameters
----------
names : dict
        Names of the different options, as returned by this feature. Hints about
        the possible values can be provided as a type or a tuple of values.
"""
def __init__(self, getter: Any=True,
setter: Any=None,
names: Dict[str, Optional[Union[type, tuple]]]={},
extract: str='',
retries: int=0,
checks: Optional[str]=None,
discard: Optional[Union[Tuple[str, ...],
Dict[str, Tuple[str, ...]]]]=None,
options: Optional[str]=None) -> None:
if setter is not None:
            raise ValueError('Options is read-only and cannot have a setter.')
if not names:
raise ValueError('No names were provided for Options')
Feature.__init__(self, getter, None, extract, retries,
checks, discard, options)
self.creation_kwargs['names'] = names
self.names = names
AbstractOptions.register(Options)
| [
"[email protected]"
] | |
674f90b04e7ff3a972ebfdec9636df1d6f7d64f7 | 77de0b8daf81dd83022015f1f2e52a2d2a81f3ee | /MeasureeMapQV.py | b027924c4517d5be8f15ab0efe1c74fbe9f3b320 | [
"MIT"
] | permissive | JinfengChen/chm1_scripts | 83a2f183f63d65e28fa402a01914aacb12894aa5 | 55d1783139f4ccc6e41c79812920785b1eaea65e | refs/heads/master | 2020-12-25T22:47:13.831530 | 2014-09-26T18:59:20 | 2014-09-26T18:59:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | #!/usr/bin/env python
import pysam
| [
"[email protected]"
] | |
afce243a4d7ee76553735dace72d9cef5b52557d | 28e8ab381a8c1b4321cd83acff6aa33468166d6b | /python3.4Smartforest/lib/python3.4/site-packages/django/contrib/contenttypes/fields.py | 0e221dbc92678bea486b69b37beddeebeb62fc33 | [
"MIT"
] | permissive | letouriste001/SmartForest_2.0 | 343e13bc085d753be2af43aecfb74a5fffaa5e3b | 109b78bf1e8c8404800f377ab969395ccbb617be | refs/heads/master | 2020-12-21T16:54:22.865824 | 2016-08-11T14:17:45 | 2016-08-11T14:17:45 | 59,734,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,528 | py | from __future__ import unicode_literals
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, models, router, transaction
from django.db.models import DO_NOTHING, signals
from django.db.models.base import ModelBase, make_foreign_order_accessors
from django.db.models.fields.related import (
ForeignObject, ForeignObjectRel, ReverseManyToOneDescriptor,
lazy_related_operation,
)
from django.db.models.query_utils import PathInfo
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
@python_2_unicode_compatible
class GenericForeignKey(object):
"""
Provide a generic many-to-one relation through the ``content_type`` and
``object_id`` fields.
This class also doubles as an accessor to the related object (similar to
ForwardManyToOneDescriptor) by adding itself as a model attribute.
"""
# Field flags
auto_created = False
concrete = False
editable = False
hidden = False
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
self.rel = None
self.column = None
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_field(self, virtual=True)
# Only run pre-initialization field assignment on non-abstract models
if not cls._meta.abstract:
signals.pre_init.connect(self.instance_pre_init, sender=cls)
setattr(cls, name, self)
def get_filter_kwargs_for_object(self, obj):
"""See corresponding method on Field"""
return {
self.fk_field: getattr(obj, self.fk_field),
self.ct_field: getattr(obj, self.ct_field),
}
def get_forward_related_filter(self, obj):
"""See corresponding method on RelatedField"""
return {
self.fk_field: obj.pk,
self.ct_field: ContentType.objects.get_for_model(obj).pk,
}
def __str__(self):
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_object_id_field())
errors.extend(self._check_content_type_field())
return errors
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
else:
return []
def _check_object_id_field(self):
try:
self.model._meta.get_field(self.fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
hint=None,
obj=self,
id='contenttypes.E001',
)
]
else:
return []
def _check_content_type_field(self):
"""
Check if field named `field_name` in model `model` exists and is a
valid content_type field (is a ForeignKey to ContentType).
"""
try:
field = self.model._meta.get_field(self.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey content type references the non-existent field '%s.%s'." % (
self.model._meta.object_name, self.ct_field
),
hint=None,
obj=self,
id='contenttypes.E002',
)
]
else:
if not isinstance(field, models.ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E003',
)
]
elif field.remote_field.model != ContentType:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
Handle initializing an object with the generic FK instead of
content_type and object_id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
if value is not None:
kwargs[self.ct_field] = self.get_content_type(obj=value)
kwargs[self.fk_field] = value._get_pk_val()
else:
kwargs[self.ct_field] = None
kwargs[self.fk_field] = None
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(id=ct_id,
using=obj._state.db).model_class()
return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model)
return (ret_val,
lambda obj: (obj._get_pk_val(), obj.__class__),
gfk_key,
True,
self.cache_attr)
def is_cached(self, instance):
return hasattr(instance, self.cache_attr)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
# Make sure to use ContentType.objects.get_for_id() to ensure that
# lookups are cached (see ticket #5570). This takes more code than
# the naive ``getattr(instance, self.ct_field)``, but has better
# performance when dealing with GFKs in loops and such.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
class GenericRel(ForeignObjectRel):
"""
Used by GenericRelation to store information about the relation.
"""
def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):
super(GenericRel, self).__init__(
field, to,
related_name=related_query_name or '+',
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
on_delete=DO_NOTHING,
)
class GenericRelation(ForeignObject):
"""
Provide a reverse to a relation created by a GenericForeignKey.
"""
# Field flags
auto_created = False
many_to_many = False
many_to_one = False
one_to_many = True
one_to_one = False
rel_class = GenericRel
def __init__(self, to, object_id_field='object_id', content_type_field='content_type',
for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs):
kwargs['rel'] = self.rel_class(
self, to,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
kwargs['blank'] = True
kwargs['on_delete'] = models.CASCADE
kwargs['editable'] = False
kwargs['serialize'] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to object_id field. But, this relation
# isn't direct, the join is generated reverse along foreign key. So,
# the from_field is object_id field, to_field is pk because of the
# reverse join.
super(GenericRelation, self).__init__(
to, from_fields=[object_id_field], to_fields=[], **kwargs)
self.object_id_field_name = object_id_field
self.content_type_field_name = content_type_field
self.for_concrete_model = for_concrete_model
def check(self, **kwargs):
errors = super(GenericRelation, self).check(**kwargs)
errors.extend(self._check_generic_foreign_key_existence())
return errors
def _check_generic_foreign_key_existence(self):
target = self.remote_field.model
if isinstance(target, ModelBase):
fields = target._meta.virtual_fields
if any(isinstance(field, GenericForeignKey) and
field.ct_field == self.content_type_field_name and
field.fk_field == self.object_id_field_name
for field in fields):
return []
else:
return [
checks.Error(
("The GenericRelation defines a relation with the model "
"'%s.%s', but that model does not have a GenericForeignKey.") % (
target._meta.app_label, target._meta.object_name
),
hint=None,
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [(self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk)]
def get_path_info(self):
opts = self.remote_field.model._meta
target = opts.pk
return [PathInfo(self.model._meta, opts, (target,), self.remote_field, True, False)]
def get_reverse_path_info(self):
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]
def get_choices_default(self):
return super(GenericRelation, self).get_choices(include_blank=False)
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return smart_text([instance._get_pk_val() for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs['virtual_only'] = True
super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)
self.model = cls
setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field))
# Add get_RELATED_order() and set_RELATED_order() methods if the model
# on the other end of this relation is ordered with respect to this.
def matching_gfk(field):
return (
isinstance(field, GenericForeignKey) and
self.content_type_field_name == field.ct_field and
self.object_id_field_name == field.fk_field
)
def make_generic_foreign_order_accessors(related_model, model):
if matching_gfk(model._meta.order_with_respect_to):
make_foreign_order_accessors(model, related_model)
lazy_related_operation(make_generic_foreign_order_accessors, self.model, self.remote_field.model)
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Return the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model)
def get_extra_restriction(self, where_class, alias, remote_alias):
field = self.remote_field.model._meta.get_field(self.content_type_field_name)
contenttype_pk = self.get_content_type().pk
cond = where_class()
lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
cond.add(lookup, 'AND')
return cond
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.remote_field.model._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
self.model, for_concrete_model=self.for_concrete_model).pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
})
class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the one-to-many relation created
by GenericRelation.
In the example::
class Post(Model):
comments = GenericRelation(Comment)
``post.comments`` is a ReverseGenericManyToOneDescriptor instance.
"""
@cached_property
def related_manager_cls(self):
return create_generic_related_manager(
self.rel.model._default_manager.__class__,
self.rel,
)
def create_generic_related_manager(superclass, rel):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to generic relations.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, instance=None):
super(GenericRelatedObjectManager, self).__init__()
self.instance = instance
self.model = rel.model
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
instance, for_concrete_model=rel.field.for_concrete_model)
self.content_type = content_type
self.content_type_field_name = rel.field.content_type_field_name
self.object_id_field_name = rel.field.object_id_field_name
self.prefetch_cache_name = rel.field.attname
self.pk_val = instance._get_pk_val()
self.core_filters = {
'%s__pk' % self.content_type_field_name: content_type.id,
self.object_id_field_name: self.pk_val,
}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_generic_related_manager(manager.__class__, rel)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(GenericRelatedObjectManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {
'%s__pk' % self.content_type_field_name: self.content_type.id,
'%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
}
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
return (queryset.filter(**query),
lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name)
def add(self, *objs, **kwargs):
bulk = kwargs.pop('bulk', True)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj
))
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
if bulk:
pks = []
for obj in objs:
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
check_and_update_obj(obj)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.content_type_field_name: self.content_type,
self.object_id_field_name: self.pk_val,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def set(self, objs, **kwargs):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
bulk = kwargs.pop('bulk', True)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs)
self.add(*new_objs, bulk=bulk)
set.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
return GenericRelatedObjectManager
| [
"[email protected]"
] | |
865a2ee42ce0b83535aff7031964ddbd3c0e5d36 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/d7b07d14c6c27a41803588e54091bf9fcf8a2c8736580b3083f089fcd6d4da3f/cython_runtime.py | ddb3e50bb33750fa1bc065bec7db3288f07423db | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | # encoding: utf-8
# module cython_runtime
# from C:\Users\Doly\Anaconda3\lib\site-packages\scipy\_lib\_ccallback_c.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# no functions
# no classes
| [
"[email protected]"
] | |
220d417e2a532c64b69fe77a6dbb261b6f5561fc | a360a22af5e0b385db438b1324564ef317ff2f38 | /bancor_module/migrations/0007_bancor_tsymbol.py | 3b1d42b27ed85223c4faa9f0e0b7bf186e2d5cc0 | [] | no_license | ogglin/exchange_comparison | 3eb2d849e731f94e67509e4ce9130e33bb37bbaf | f3feae64aff26b574f7ecd24e6f7aff7bb95ec65 | refs/heads/master | 2023-04-26T07:45:06.229584 | 2021-05-31T18:52:29 | 2021-05-31T18:52:29 | 287,036,194 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # Generated by Django 3.1 on 2021-02-20 09:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bancor_module', '0006_bancor_volume'),
]
operations = [
migrations.AddField(
model_name='bancor',
name='tsymbol',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
| [
"[email protected]"
] | |
a230e6aafa3e03bdd9995d307c925a4d98840639 | 81f8276be090ff9fa960d83db45bfe0b3c69ff39 | /test.py | 9d480a3922a04ae7c87c67955f1914d97189c9ae | [] | no_license | jon--lee/mlb-call-classifier | 1860c15b2f889a2b37daaaaefaed23cb19e808e5 | 28e15a908127a2ca78de92aee39c5dff516f6bf2 | refs/heads/master | 2020-06-07T03:04:54.034548 | 2015-07-23T00:06:54 | 2015-07-23T00:06:54 | 38,954,788 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | import classifier
import neuralpy
import grapher
grapher.graph(filepath='results/bucknor-93-R.txt') | [
"[email protected]"
] | |
eb2e7d1b25fa6419ac1847667a5fe019af42f82f | 19bc1dfbf8a8b4b1bfc9d6ead51479c72602b12e | /tests/test_resource_analysis.py | fea2cef5999b8d3a27c8c10067bde6f075b62ce6 | [
"MIT"
] | permissive | procha2/caper | a5297d6cfe7cf649ac5ac3544558f513b427713d | e9ea0baa3517178ce7b850df8a59eba6479fbcb6 | refs/heads/master | 2023-08-10T17:37:40.840958 | 2021-07-01T22:57:45 | 2021-07-01T22:57:45 | 300,260,107 | 0 | 0 | MIT | 2020-10-01T11:48:13 | 2020-10-01T11:48:12 | null | UTF-8 | Python | false | false | 3,119 | py | """Test is based on a metadata JSON file generated from
running atac-seq-pipeline v1.8.0 with the following input JSON.
gs://encode-pipeline-test-samples/encode-atac-seq-pipeline/ENCSR356KRQ_subsampled_caper.json
"""
import pytest
from caper.resource_analysis import LinearResourceAnalysis, ResourceAnalysis
def test_resource_analysis_abstract_class(gcp_res_analysis_metadata):
with pytest.raises(TypeError):
# abstract base-class
ResourceAnalysis()
def test_resource_analysis_analyze_task(gcp_res_analysis_metadata):
analysis = LinearResourceAnalysis()
analysis.collect_resource_data([gcp_res_analysis_metadata])
result_align1 = analysis.analyze_task(
'atac.align',
in_file_vars=['fastqs_R1'],
reduce_in_file_vars=None,
target_resources=['stats.max.mem', 'stats.mean.cpu_pct'],
)
assert result_align1['x'] == {'fastqs_R1': [15643136, 18963919]}
assert 'stats.mean.cpu_pct' in result_align1['y']
assert 'stats.max.mem' in result_align1['y']
assert 'stats.max.disk' not in result_align1['y']
assert list(result_align1['y'].keys()) == list(result_align1['coeffs'].keys())
assert result_align1['coeffs']['stats.mean.cpu_pct'][0][0] == pytest.approx(
1.6844513715565233e-06
)
assert result_align1['coeffs']['stats.mean.cpu_pct'][1] == pytest.approx(
42.28561239506905
)
assert result_align1['coeffs']['stats.max.mem'][0][0] == pytest.approx(
48.91222341236991
)
assert result_align1['coeffs']['stats.max.mem'][1] == pytest.approx(
124314029.09791338
)
result_align2 = analysis.analyze_task(
'atac.align', in_file_vars=['fastqs_R2'], reduce_in_file_vars=sum
)
assert result_align2['x'] == {'sum(fastqs_R2)': [16495088, 20184668]}
assert 'stats.mean.cpu_pct' not in result_align2['y']
assert 'stats.max.mem' in result_align2['y']
assert 'stats.max.disk' in result_align2['y']
assert list(result_align2['y'].keys()) == list(result_align2['coeffs'].keys())
result_align_star = analysis.analyze_task('atac.align*', reduce_in_file_vars=max)
assert result_align_star['x'] == {
'max(chrsz,fastqs_R1,fastqs_R2,idx_tar,tmp_fastqs)': [
32138224,
39148587,
3749246230,
3749246230,
]
}
def test_resource_analysis_analyze(gcp_res_analysis_metadata):
"""Test method analyze() which analyze all tasks defined in in_file_vars.
"""
analysis = LinearResourceAnalysis()
analysis.collect_resource_data([gcp_res_analysis_metadata])
result = analysis.analyze(
in_file_vars={
'atac.align*': ['fastqs_R1', 'fastqs_R2'],
'atac.filter*': ['bam'],
}
)
assert len(result) == 2
assert result['atac.align*']['x'] == {
'sum(fastqs_R1,fastqs_R2)': [32138224, 39148587, 32138224, 39148587]
}
assert result['atac.filter*']['x'] == {
'sum(bam)': [61315022, 76789196, 61315022, 76789196]
}
result_all = analysis.analyze()
# 38 tasks in total
assert len(result_all) == 38
| [
"[email protected]"
] | |
497277a27e50f16bd4dac4167ba204b4b27a60da | 80338a9379508bdf5d40e055e12f2621dee01daa | /usernameless/__init__.py | aadcfca92bdcc1431f589d7f88dbb0d88b56edf3 | [
"MIT"
] | permissive | johnnncodes/django-usernameless | 6ec75780edfec667ba653e50b0496e788832a36c | cf7b0904030e640ce51bf20c36521daf6abf447f | refs/heads/master | 2021-05-26T22:30:08.415853 | 2013-10-26T02:36:45 | 2013-10-26T02:54:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | # -*- coding: utf-8 -*-
__title__ = 'usernameless'
__version__ = '0.1.0'
__author__ = 'Marconi Moreto'
| [
"[email protected]"
] | |
b0e9d034f38256d73cecf9f4255f71cbf66f2f94 | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /classes/_eigensolver1.py | b6ce82292c594e6f6578a8c6eb7978f16397aebd | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | from xcp2k.inputsection import InputSection
class _eigensolver1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.N = None
self.N_loop = None
self.Diag_method = None
self.Eigenvalues = None
self.Init_method = None
self._name = "EIGENSOLVER"
self._keywords = {'Diag_method': 'DIAG_METHOD', 'N_loop': 'N_LOOP', 'Init_method': 'INIT_METHOD', 'Eigenvalues': 'EIGENVALUES', 'N': 'N'}
| [
"[email protected]"
] | |
97ce640d8f9e55d51546c4a93f3597a7132318cf | 33a747246dab38960c25520d5232d5a37dfe2a01 | /starbucks/address_to_gecoords.py | d842315ca462c234888776d81feaa308e92f2f34 | [] | no_license | Yxiaokuan/spider | 6a79a950d170ea20dae13001697b9c214872f345 | e51a398c7fdee1b1814c50c5a3121ce9a193e302 | refs/heads/master | 2022-04-02T16:01:18.104056 | 2020-02-11T03:49:44 | 2020-02-11T03:49:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,951 | py | '''
@author:KongWeiKun
@file: address_to_gecoords.py
@time: 18-1-2 5:55 PM
@contact: [email protected]
'''
import csv
import json
import random
import re
import requests
import time
'''
Convert an address to longitude/latitude (geocoding)
'''
from urllib.request import quote #URL encoding
def getLngLat(url,timeOutRetry=5):
try:
response = requests.get(url)
return response.text
except Exception as e:
        if timeOutRetry>0:
            return getLngLat(url,timeOutRetry=(timeOutRetry-1))
        print("Request failed after all retries")
def write_to_file(content):
with open('./resources/starbucks_result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')  # write to the file, keeping Chinese characters unescaped
f.close()
def pack_url(address):
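    # Build a Baidu Maps Geocoder v2 request URL for the given address,
    # asking for JSON output and authenticating with the 'ak' application key.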
ak='LVsGVvCzooeqcHGM1lnNzvTTSba7gtvU'
aks = 'fV9ODCmTALCdTtlbkRsheFUacvA9sL7A'
base_url = 'http://api.map.baidu.com/geocoder/v2/?address='
output = 'json'
callback = 'showLocation'
    url = base_url+quote(address)+"&output="+output+"&ak="+ak+"&callback="+callback
return url
def readCsv(filename):
reader = csv.reader(open(filename))
return reader
def main():
starbucks = './resources/starbucks.csv'
reader = readCsv(starbucks)
for row in reader:
address = row[0]
url=pack_url(address)
gecoord=getLngLat(url)
print(gecoord)
pattern = re.compile('"lng":(.*?),"lat":(.*?)}')
lngLat=re.findall(pattern, gecoord)
if lngLat:
for ll in lngLat:
print(ll[0])
                print('Writing to file %s,%s'%ll)
write_to_file(','.join(ll))
time.sleep(random.random()*5)
if __name__ == '__main__':
# main()
    #use the localtime() function to convert a timestamp into localtime format
    #use strftime() to re-format the time
start = time.time()
main()
end = time.time()
    print("Conversion finished, total time: %s"%(end-start)) | [
"[email protected]"
] | |
6e1b0e5aa34daaa437c9eee45fc76dbcb0dc1c5a | 2a5d93182aecc251462c9d3844e7c3e28013013e | /mysite/chat/tests.py | 114198533bdb4a28861f61e98807da39f4a8fde4 | [] | no_license | burbaljaka/websocket_chat | 20acc9908cd7b0e122a3b96252208defdc7460d9 | ca6883987befb6bfad5973156b01bfe876b1414f | refs/heads/master | 2021-05-26T22:58:31.151913 | 2020-04-08T19:37:16 | 2020-04-08T19:37:16 | 254,182,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,906 | py | from channels.testing import ChannelsLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
import dill
class ChatTests(ChannelsLiveServerTestCase):
serve_static = True #emulate StaticLiveServerTestCase
@classmethod
def setUpClass(cls):
super().setUpClass()
try:
# NOTE: Requires "chromedriver" binary to be installed in $PATH
cls.driver = webdriver.Chrome('C:\chromedriver.exe')
except:
super().tearDownClass()
raise
@classmethod
def tearDownClass(cls):
cls.driver.quit()
super().tearDownClass()
def test_chat_message_posted_then_seen_by_everyone_in_same_room(self):
try:
self._enter_chat_room('room_1')
self._open_new_window()
self._enter_chat_room('room_1')
self._switch_to_window(0)
self._post_message('hello')
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 1 from window 1')
self._switch_to_window(1)
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 2 from window 1')
finally:
self._close_all_new_windows()
def test_when_chat_message_posted_then_not_seen_by_anyone_in_different_room(self):
try:
self._enter_chat_room('room_1')
self._open_new_window()
self._enter_chat_room('room_2')
self._switch_to_window(0)
self._post_message('hello')
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 1 from window 1')
self._switch_to_window(1)
self._post_message('world')
WebDriverWait(self.driver, 2).until(lambda _:
'world' in self._chat_log_value,
'Message was not received by window 2 from window 2')
self.assertTrue('hello' not in self._chat_log_value,
'Message was improperly received by window 2 from window 1')
finally:
self._close_all_new_windows()
# === Utility ===
def _enter_chat_room(self, room_name):
self.driver.get(self.live_server_url + '/chat/')
ActionChains(self.driver).send_keys(room_name + '\n').perform()
WebDriverWait(self.driver, 2).until(lambda _:
room_name in self.driver.current_url)
def _open_new_window(self):
self.driver.execute_script('window.open("about:blank", "_blank");')
        self.driver.switch_to.window(self.driver.window_handles[-1])
def _close_all_new_windows(self):
while len(self.driver.window_handles) > 1:
self.driver.switch_to.window(self.driver.window_handles[-1])
self.driver.execute_script('window.close();')
if len(self.driver.window_handles) == 1:
self.driver.switch_to.window(self.driver.window_handles[0])
def _switch_to_window(self, window_index):
self.driver.switch_to.window(self.driver.window_handles[window_index])
def _post_message(self, message):
ActionChains(self.driver).send_keys(message + '\n').perform()
@property
def _chat_log_value(self):
return self.driver.find_element_by_css_selector('#chat-log').get_property('value') | [
"[email protected]"
] | |
c279e12030d6850291b50ede25ac75ba3db5c7fd | 24f664aa2344d4f5d5e7b048ac4e85231715c4c8 | /experimental/dsmith/scrapheap/clsmith_run_cl_launcher.py | a8c4780ae8872e6567505e8112cc3a515308e79e | [] | no_license | speycode/clfuzz | 79320655e879d1e0a06a481e8ec2e293c7c10db7 | f2a96cf84a7971f70cb982c07b84207db407b3eb | refs/heads/master | 2020-12-05T13:44:55.486419 | 2020-01-03T14:14:03 | 2020-01-03T14:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,786 | py | #!/usr/bin/env python3
import os
import re
from argparse import ArgumentParser
from collections import deque
from tempfile import NamedTemporaryFile
from time import strftime
from typing import Tuple
import progressbar
from dsmith import clsmith
from dsmith import db
from dsmith.db import *
from dsmith.lib import *
from labm8.py import crypto
from third_party.py.pyopencl import pyopencl as cl
def get_platform_name(platform_id):
platform = cl.get_platforms()[platform_id]
return platform.get_info(cl.platform_info.NAME)
def get_device_name(platform_id, device_id):
platform = cl.get_platforms()[platform_id]
device = platform.get_devices()[device_id]
return device.get_info(cl.device_info.NAME)
def get_driver_version(platform_id, device_id):
platform = cl.get_platforms()[platform_id]
device = platform.get_devices()[device_id]
return device.get_info(cl.device_info.DRIVER_VERSION)
def cl_launcher(
src: str, platform_id: int, device_id: int, *args
) -> Tuple[float, int, str, str]:
""" Invoke cl launcher on source """
with NamedTemporaryFile(prefix="cl_launcher-", suffix=".cl") as tmp:
tmp.write(src.encode("utf-8"))
tmp.flush()
return clsmith.cl_launcher(
tmp.name,
platform_id,
device_id,
*args,
timeout=os.environ.get("TIMEOUT", 60),
)
def verify_params(
platform: str,
device: str,
optimizations: bool,
global_size: tuple,
local_size: tuple,
stderr: str,
) -> None:
""" verify that expected params match actual as reported by CLsmith """
optimizations = "on" if optimizations else "off"
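    # cl_launcher echoes the platform, device, optimization setting and ND-range
    # sizes it actually used to stderr; the loop below parses those lines and
    # asserts that they match the parameters we asked for.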
actual_platform = None
actual_device = None
actual_optimizations = None
actual_global_size = None
actual_local_size = None
for line in stderr.split("\n"):
if line.startswith("Platform: "):
            actual_platform = re.sub(r"^Platform: ", "", line).rstrip()
elif line.startswith("Device: "):
            actual_device = re.sub(r"^Device: ", "", line).rstrip()
elif line.startswith("OpenCL optimizations: "):
actual_optimizations = re.sub(
r"^OpenCL optimizations: ", "", line
).rstrip()
# global size
match = re.match("^3-D global size \d+ = \[(\d+), (\d+), (\d+)\]", line)
if match:
actual_global_size = (
int(match.group(1)),
int(match.group(2)),
int(match.group(3)),
)
match = re.match("^2-D global size \d+ = \[(\d+), (\d+)\]", line)
if match:
actual_global_size = (int(match.group(1)), int(match.group(2)), 0)
match = re.match("^1-D global size \d+ = \[(\d+)\]", line)
if match:
actual_global_size = (int(match.group(1)), 0, 0)
# local size
match = re.match("^3-D local size \d+ = \[(\d+), (\d+), (\d+)\]", line)
if match:
actual_local_size = (
int(match.group(1)),
int(match.group(2)),
int(match.group(3)),
)
match = re.match("^2-D local size \d+ = \[(\d+), (\d+)\]", line)
if match:
actual_local_size = (int(match.group(1)), int(match.group(2)), 0)
match = re.match("^1-D local size \d+ = \[(\d+)\]", line)
if match:
actual_local_size = (int(match.group(1)), 0, 0)
# check if we've collected everything:
if (
actual_platform
and actual_device
and actual_optimizations
and actual_global_size
and actual_local_size
):
assert actual_platform == platform
assert actual_device == device
assert actual_optimizations == optimizations
assert actual_global_size == global_size
assert actual_local_size == local_size
return
def parse_ndrange(ndrange: str) -> Tuple[int, int, int]:
components = ndrange.split(",")
assert len(components) == 3
return (int(components[0]), int(components[1]), int(components[2]))
def get_num_to_run(
session: db.session_t, testbed: Testbed, optimizations: int = None
):
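    # Count how many CLSmith test cases have already been run on this testbed and
    # how many exist in total, optionally restricted to a given optimizations setting.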
num_ran = session.query(sql.sql.func.count(CLSmithResult.id)).filter(
CLSmithResult.testbed_id == testbed.id
)
total = session.query(sql.sql.func.count(CLSmithTestCase.id))
if optimizations is not None:
num_ran = (
num_ran.join(CLSmithTestCase)
.join(cl_launcherParams)
.filter(cl_launcherParams.optimizations == optimizations)
)
total = total.join(cl_launcherParams).filter(
cl_launcherParams.optimizations == optimizations
)
return num_ran.scalar(), total.scalar()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"-H", "--hostname", type=str, default="cc1", help="MySQL database hostname"
)
parser.add_argument(
"platform_id", metavar="<platform-id>", type=int, help="OpenCL platform ID"
)
parser.add_argument(
"device_id", metavar="<device-id>", type=int, help="OpenCL device ID"
)
parser.add_argument(
"--opt", action="store_true", help="Only test with optimizations on"
)
parser.add_argument(
"--no-opt",
action="store_true",
help="Only test with optimizations disabled",
)
args = parser.parse_args()
# Parse command line options
platform_id = args.platform_id
device_id = args.device_id
# get testbed information
platform_name = get_platform_name(platform_id)
device_name = get_device_name(platform_id, device_id)
driver_version = get_driver_version(platform_id, device_id)
optimizations = None
if args.opt and args.no_opt:
pass # both flags
elif args.opt:
optimizations = 1
elif args.no_opt:
optimizations = 0
db.init(args.hostname) # initialize db engine
with Session() as session:
testbed = get_testbed(session, platform_name, device_name)
devname = util.device_str(testbed.device)
# progress bar
num_ran, num_to_run = get_num_to_run(session, testbed, optimizations)
bar = progressbar.ProgressBar(init_value=num_ran, max_value=num_to_run)
# programs to run, and results to push to database
inbox = deque()
def next_batch():
"""
Fill the inbox with jobs to run.
"""
BATCH_SIZE = 100
print(f"\nnext CLSmith batch for {devname} at", strftime("%H:%M:%S"))
# update the counters
num_ran, num_to_run = get_num_to_run(session, testbed, optimizations)
bar.max_value = num_to_run
bar.update(min(num_ran, num_to_run))
# fill inbox
done = session.query(CLSmithResult.testcase_id).filter(
CLSmithResult.testbed == testbed
)
if optimizations is not None:
done = (
done.join(CLSmithTestCase)
.join(cl_launcherParams)
.filter(cl_launcherParams.optimizations == optimizations)
)
todo = (
session.query(CLSmithTestCase)
.filter(~CLSmithTestCase.id.in_(done))
.order_by(CLSmithTestCase.program_id, CLSmithTestCase.params_id)
)
if optimizations is not None:
todo = todo.join(cl_launcherParams).filter(
cl_launcherParams.optimizations == optimizations
)
todo = todo.limit(BATCH_SIZE)
for testcase in todo:
inbox.append(testcase)
try:
while True:
# get the next batch of programs to run
if not len(inbox):
next_batch()
# we have no programs to run
if not len(inbox):
break
# get next program to run
testcase = inbox.popleft()
program = testcase.program
params = testcase.params
flags = params.to_flags()
# drive the program
runtime, status, stdout, stderr = cl_launcher(
program.src, platform_id, device_id, *flags
)
# assert that executed params match expected
verify_params(
platform=platform_name,
device=device_name,
optimizations=params.optimizations,
global_size=params.gsize,
local_size=params.lsize,
stderr=stderr,
)
# create new result
stdout_ = util.escape_stdout(stdout)
stdout = get_or_create(
session, CLSmithStdout, hash=crypto.sha1_str(stdout_), stdout=stdout_
)
stderr_ = util.escape_stderr(stderr)
stderr = get_or_create(
session, CLSmithStderr, hash=crypto.sha1_str(stderr_), stderr=stderr_
)
session.flush()
result = CLSmithResult(
testbed_id=testbed.id,
testcase_id=testcase.id,
status=status,
runtime=runtime,
stdout_id=stdout.id,
stderr_id=stderr.id,
outcome=analyze.get_cl_launcher_outcome(status, runtime, stderr_),
)
session.add(result)
session.commit()
# update progress bar
num_ran += 1
bar.update(min(num_ran, num_to_run))
finally:
# flush any remaining results
next_batch()
print("done.")
| [
"[email protected]"
] | |
56d6d53b07810d51b36c4842c6af1666223e5ee3 | d82ac08e029a340da546e6cfaf795519aca37177 | /chapter_05_dimensionality_reduction/05_kernel_principal_component.py | 609ded2a21b83c00d4f66aca64610875be219164 | [] | no_license | CSwithJC/PythonMachineLearning | 4409303c3f4d4177dc509c83e240d7a589b144a0 | 0c4508861e182a8eeacd4645fb93b51b698ece0f | refs/heads/master | 2021-09-04T04:28:14.608662 | 2018-01-15T20:25:36 | 2018-01-15T20:25:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,810 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
from sklearn.datasets import make_moons
from sklearn.datasets import make_circles
from sklearn.decomposition import PCA
from matplotlib.ticker import FormatStrFormatter
""" Kernel PCA
Using Kernel PCA, we perform a nonlinear mapping that transforms
the data onto a higher-dimensional space and use standard PCA
in this higher-dimensional space to project the data back onto a
lower-dimensional space where the samples can be separated by a
linear classifier.
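The helper below follows the standard recipe: the RBF kernel is
K(x_i, x_j) = exp(-gamma * ||x_i - x_j||^2), the kernel matrix is centered in
feature space via K' = K - 1_N K - K 1_N + 1_N K 1_N (with 1_N an N x N matrix
whose entries are all 1/N), and the leading eigenvectors of K' are returned as
the projected samples.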
"""
def rbf_kernel_pca(X, gamma, n_components):
"""
RBF kernel PCA implementation
Parameters
----------
X: {NumPy ndarray}, shape = [n_samples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components:
Number of principal components to return
Returns
-------
X_pc: {NumPy ndarray}, shape = [n_samples, n_features]
Projected dataset
"""
# Calculate the pairwise squared Euclidean distances
# in the MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Convert pairwise distances into a square matrix.
mat_sq_dists = squareform(sq_dists)
# Compute the symmetric kernel matrix.
K = exp(-gamma * mat_sq_dists)
# Center the kernel matrix
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# numpy.eigh returns them in sorted order
eigvals, eigvecs = eigh(K)
# Collect the top k eigenvectors (projected samples)
X_pc = np.column_stack((eigvecs[:, -i] for i in range(1, n_components + 1)))
return X_pc
# Examples to apply kernel pca to some datasets:
#
# 1. Half-moon shapes:
#
X, y = make_moons(n_samples=100, random_state=123)
plt.scatter(X[y == 0, 0], X[y == 0, 1],
color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1],
color='blue', marker='o', alpha=0.5)
plt.show()
# Now, project the dataset via standard PCA:
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((50, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((50, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
# Now, try again using our rbf_kernel_pca function
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((50, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((50, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
plt.show()
# In this new plot, we see that the two classes (circles and triangles)
# are linearly well separated so that it becomes a suitable training
# dataset for linear classifiers.
#
# 2. Concentric circles:
#
X, y = make_circles(n_samples=1000, random_state=123,
noise=0.1, factor=0.2)
plt.scatter(X[y == 0, 0], X[y == 0, 1],
color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1],
color='blue', marker='o', alpha=0.5)
plt.show()
# PCA Approach:
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
# Again, standard PCA does not produce a good result.
# Now, again using our RBF Kernel PCA Implementation:
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
# Again, RBF Kernel PCA projected the data onto a new
# subspace where the two classes become linearly separable .
# This is seen in the new plot.
| [
"[email protected]"
] | |
20b0ea9579d0886baeaed4bfa6287cb7360d5595 | d7bc476f610d8b7d4abbeaf1545af4d2d827a7ef | /projeto/estoque/admin.py | 62fe30997551edf3ee208581329a1eb027dab2eb | [] | no_license | CleitonSilvaT/ControleEstoque | 0bcaa7168f93de124b10117aefeb636c492ac776 | dd2c573fb53edb0904393c0897917b55f3afac13 | refs/heads/master | 2023-01-19T09:34:49.213727 | 2020-11-26T09:27:07 | 2020-11-26T09:27:07 | 260,329,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | from django.contrib import admin
from .models import EstoqueEntrada
from .models import EstoqueSaida
from .models import EstoqueItens
# Register your models here.
# admin.site.register(Estoque)
# admin.site.register(EstoqueItens)
class EstoqueItensInLine(admin.TabularInline):
model = EstoqueItens
extra = 0
@admin.register(EstoqueEntrada)
class EstoqueEntradaAdmin(admin.ModelAdmin):
inlines = (EstoqueItensInLine,)
list_display = ('__str__', 'nf', 'funcionario',)
search_fields = ('nf',)
list_filter = ('funcionario',)
date_hierarchy = 'created'
@admin.register(EstoqueSaida)
class EstoqueSaidaAdmin(admin.ModelAdmin):
inlines = (EstoqueItensInLine,)
list_display = ('__str__', 'nf', 'funcionario',)
search_fields = ('nf',)
list_filter = ('funcionario',)
date_hierarchy = 'created' | [
"[email protected]"
] | |
b3391ed1ddf7e3ff1d1e526f45e1f80873ff81b5 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/python_all/39_11.py | 2ab73f925b4bb8f6b56c6ea625257db987241936 | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,960 | py | Python – Replace words from Dictionary
Given a string, replace its words using a lookup dictionary.
> **Input** : test_str = ‘geekforgeeks best for geeks’, repl_dict = {“geeks”
> : “all CS aspirants”}
> **Output** : geekforgeeks best for all CS aspirants
> **Explanation** : “geeks” word is replaced by lookup value.
>
> **Input** : test_str = ‘geekforgeeks best for geeks’, repl_dict = {“good” :
> “all CS aspirants”}
> **Output** : geekforgeeks best for geeks
> **Explanation** : No lookup value, unchanged result.
**Method #1 : Using split() + get() + join()**
In this approach, we first split the string using split(), then look up each word
with get(), replacing it when a mapping is found, and finally join the words back
into a string using join().
## Python3
# Python3 code to demonstrate working of
# Replace words from Dictionary
# Using split() + join() + get()
# initializing string
test_str = 'geekforgeeks best for geeks'
# printing original string
print("The original string is : " + str(test_str))
# lookup Dictionary
lookp_dict = {"best" : "good and better", "geeks" : "all CS aspirants"}
# performing split()
temp = test_str.split()
res = []
for wrd in temp:
    # searching from lookp_dict
    res.append(lookp_dict.get(wrd, wrd))
res = ' '.join(res)
# printing result
print("Replaced Strings : " + str(res))
**Output**
The original string is : geekforgeeks best for geeks
Replaced Strings : geekforgeeks good and better for all CS aspirants
**Method #2 : Using list comprehension + join()**
Similar to the above method, the only difference being that this is a one-liner
rather than 3-4 separate steps.
## Python3
# Python3 code to demonstrate working of
# Replace words from Dictionary
# Using list comprehension + join()
# initializing string
test_str = 'geekforgeeks best for geeks'
# printing original string
print("The original string is : " + str(test_str))
# lookup Dictionary
lookp_dict = {"best" : "good and better", "geeks" : "all CS aspirants"}
# one-liner to solve problem
res = " ".join(lookp_dict.get(ele, ele) for ele in test_str.split())
# printing result
print("Replaced Strings : " + str(res))
**Output**
The original string is : geekforgeeks best for geeks
Replaced Strings : geekforgeeks good and better for all CS aspirants
| [
"[email protected]"
] | |
6c6e12fa925c57be0fddd4074aa52393dca4eb69 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02787/s069996134.py | 5242a0c98d9fb0fe819f9bb08a60a75b923ddb4e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | import sys
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60
MOD = 1000000007
def main():
H, N, *AB = map(int, read().split())
magic = [0] * N
for i, (a, b) in enumerate(zip(*[iter(AB)] * 2)):
magic[i] = (a, b)
dp = [[INF] * (H + 1) for _ in range(N + 1)]
for i in range(N + 1):
dp[i][0] = 0
for i in range(N):
a, b = magic[i]
for j in range(H + 1):
dp[i + 1][j] = min(dp[i][j], dp[i + 1][max(j - a, 0)] + b)
print(dp[N][H])
return
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
64bacc9506ee8073571d4e6a9868841577b71d60 | 1f528631683f9d96c09dd4a4af243dd7213a4bd7 | /thumbnail.py | 6dffb24265b8649c5dce6c5d5183c7387998cc12 | [] | no_license | wd5/artcontactmeru | 4f8ede2a3f072c11ac0c8c8a74e0960382824537 | 2057f1b00db8eb808e0b4fb91a95fbe0a3e4f223 | refs/heads/master | 2020-12-30T14:55:54.067776 | 2009-11-12T07:30:00 | 2009-11-12T07:30:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,255 | py | # -*- coding: utf-8 -*-
import os, Image
THUMBNAILS = 'thumbnails'
SCALE_WIDTH = 'w'
SCALE_HEIGHT = 'h'
SCALE_BOTH = 'x'
def scale(max_x, pair):
x, y = pair
new_y = (float(max_x) / x) * y
return (int(max_x), int(new_y))
# Thumbnail filter based on code from
# http://batiste.dosimple.ch/blog/2007-05-13-1/
def thumbnail(media_url, original_image_path, arg):
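    # 'arg' is a size spec optionally followed by an upload path, e.g. "100w" or
    # "100w, photos"; the trailing letter of the size selects the scaling mode
    # (w = width, h = height, any other letter scales along the larger dimension).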
if not original_image_path:
return ''
    if arg.find(',') != -1:
size, upload_path = [a.strip() for a in arg.split(',')]
else:
size = arg
upload_path = ''
if (size.lower().endswith('h')):
mode = SCALE_HEIGHT
elif (size.lower().endswith('w')):
mode = SCALE_WIDTH
else:
mode = SCALE_BOTH
# defining the size
size = size[:-1]
max_size = int(size.strip())
# defining the filename and the miniature filename
basename, format = original_image_path.rsplit('.', 1)
basename, name = basename.rsplit(os.path.sep, 1)
upload_path = '/'.join(basename.rsplit(os.path.sep, 2)[1:])
miniature = name + '_' + str(max_size) + mode + '.' + format
thumbnail_path = os.path.join(basename, THUMBNAILS)
if not os.path.exists(thumbnail_path):
os.mkdir(thumbnail_path)
miniature_filename = os.path.join(thumbnail_path, miniature)
miniature_url = '/'.join((media_url, upload_path, THUMBNAILS, miniature))
# if the image wasn't already resized, resize it
if not os.path.exists(miniature_filename) \
or os.path.getmtime(original_image_path) > os.path.getmtime(miniature_filename):
image = Image.open(original_image_path)
image_x, image_y = image.size
if mode == SCALE_BOTH:
if image_x > image_y:
mode = SCALE_WIDTH
else:
mode = SCALE_HEIGHT
if mode == SCALE_HEIGHT:
image_y, image_x = scale(max_size, (image_y, image_x))
else:
image_x, image_y = scale(max_size, (image_x, image_y))
image = image.resize((image_x, image_y), Image.ANTIALIAS)
image.save(miniature_filename, image.format)
return miniature_url
| [
"[email protected]"
] | |
7e57694591ccea12ade2aaeb5ac1524ce38461db | 03dea3c0db7c8fafda71d23c3c2595f563ffb335 | /SignalMC/python/AMSB_chargino900GeV_ctau1000cm_NoFilter_13TeV.py | d9b642184847712cc96ebbd952b587fe419eaacd | [] | no_license | Andersen98/DisappTrks | 3952e9bf8ba270e2d88aa2e8d9ef805cf25dfc46 | 140a5efdc4c51a30e5fced6d34b7813876c2f2ee | refs/heads/master | 2020-06-27T03:41:59.136790 | 2017-07-12T15:19:18 | 2017-07-12T15:19:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,706 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(3),
filterEfficiency = cms.untracked.double(1.0),
# comEnergy = cms.double(8000.0),
comEnergy = cms.double(13000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'IMSS(1) = 11 ! Spectrum from external SLHA file',
'IMSS(21) = 33 ! LUN number for SLHA File (must be 33) ',
'IMSS(22) = 33 ! Read-in SLHA decay table ',
'MSEL = 0 ! General SUSY',
'MSUB(226) = 1 ! to double chargino',
'MSUB(229) = 1 ! to neutralino + chargino',
'MDCY(312,1) = 0 ! set the chargino stable.',
),
parameterSets = cms.vstring('pythiaUESettings', 'processParameters', 'SLHAParameters'),
SLHAParameters = cms.vstring('SLHAFILE = DisappTrks/SignalMC/data/AMSB_chargino_900GeV_Isajet780.slha'),
),
slhaFile = cms.untracked.string('DisappTrks/SignalMC/data/AMSB_chargino_900GeV_Isajet780.slha'),
# The following parameters are required by Exotica_HSCP_SIM_cfi:
processFile = cms.untracked.string('SimG4Core/CustomPhysics/data/RhadronProcessList.txt'),
useregge = cms.bool(False),
hscpFlavor = cms.untracked.string('stau'),
massPoint = cms.untracked.int32(900),
particleFile = cms.untracked.string('DisappTrks/SignalMC/data/geant4_AMSB_chargino_900GeV_ctau1000cm.slha')
)
| [
"[email protected]"
] | |
b818191531994619e2f64e216edd315786e81044 | 24927eac464cdb1bec665f1cb4bfee85728ec5e1 | /entry_parser/balenciaga.py | ca5ea1f4cb43c5aaeece995f78a8da7d00683e75 | [] | no_license | yingl/fashion-spider | d72ea8dfd4a49270fd3e64e7a507d6fcbaaf492c | 0698768cd21d509ec335d7202a753be4f6ad378b | refs/heads/master | 2021-01-01T18:14:17.848732 | 2017-09-27T08:44:47 | 2017-09-27T08:44:47 | 98,282,505 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | """ Balenciaga """
# coding:utf-8
import sys
sys.path.append('../')
import util
PREFIXES = ['www.balenciaga.cn']
def parse(driver, url):
products = []
driver.get(url)
elements = util.find_elements_by_css_selector(driver, 'a.item-display-image-container')
for element in elements:
products.append(element.get_attribute('href').strip())
return ';'.join(products)
def main():
driver = util.create_chrome_driver()
print(parse(driver, sys.argv[1]))
driver.quit()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
60b22d53fbf1e5893ab6925972c16edebae8fa71 | c6ae5d4c9b2a3acef09ee8254e1c1b67bc255b56 | /okex/v5/billtype.py | 1432ed9ee5152a20f37741012015561760391395 | [
"MIT"
] | permissive | PattonChen/okex-py | ce36a4ded7cb051a7167e3ba9aeb88bc4e0417f3 | cced7f1419da0940a2200ce66d62b4f9539949f2 | refs/heads/main | 2023-05-04T06:00:38.540463 | 2021-05-16T08:30:42 | 2021-05-16T08:30:42 | 367,833,565 | 1 | 0 | MIT | 2021-05-16T09:14:59 | 2021-05-16T09:14:59 | null | UTF-8 | Python | false | false | 302 | py |
from enum import Enum
class BillType(Enum):
    # Transfer
    TRANSFER = 1
    # Trade
    TRADE = 2
    # Delivery
    DELIVERY = 3
    # Forced token swap
    FORCE_SWAP = 4
    # Forced liquidation
    FORCED_LIQUIDATION = 5
# ...
class BillSubType(Enum):
LINEAR = "linear"
INVERSE = "inverse"
# ... | [
"[email protected]"
] | |
9ee6d085738441c5e04afe2534663c554139d8e8 | fed0e4edf4df4b7a4abd52cbd1a0115243bdfcd0 | /hello.py | 11eec872b636492245036a4b82f4fe6982e46ba3 | [] | no_license | TitanVA/hh_parser | 62c5c9a87ab38a454c36d6ceba3dec1cd52f0ee3 | 0b51dc84ba5714de15629c8ec1d9da0dd9b2ecb1 | refs/heads/master | 2022-02-08T00:55:23.492072 | 2022-01-19T10:43:54 | 2022-01-19T10:43:54 | 236,823,784 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | print("Hello World!")
print("MR please")
| [
"[email protected]"
] | |
1d80fe96db1c270109eea36af33536d1f681f4e0 | 0fa96aa3b1ee5cf752e20bad98ef02785c369225 | /quaducom/quaducom/assess/assess_shell/mn_resistance/ecb_law_mn_diagram.py | 971d0cc7a521f908aee65bce5fdb5716551192e2 | [] | no_license | simvisage/simvisage | 7a56ce0d13211513a86e3631db1b55dc62e85986 | acb2a8eb8b6caa57c1e9e15f724a2b43843c7b10 | refs/heads/master | 2020-04-04T07:32:47.598636 | 2018-12-10T15:10:43 | 2018-12-10T15:10:43 | 3,583,342 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 7,966 | py | '''
Created on Sep 4, 2012
@author: rch
'''
from etsproxy.traits.api import \
HasTraits, Int, Instance, Property, cached_property, DelegatesTo, \
Event, Button
from util.traits.editors.mpl_figure_editor import \
MPLFigureEditor
from matplotlib.figure import \
Figure
from etsproxy.traits.ui.api import \
View, Item, Group, HSplit, VGroup, HGroup, RangeEditor, InstanceEditor
from ecb_law_calib import \
ECBLCalib
import numpy as np
class ECBLMNDiagram(HasTraits):
# calibrator supplying the effective material law
calib = Instance(ECBLCalib)
def _calib_default(self):
return ECBLCalib(notify_change=self.set_modified)
def _calib_changed(self):
self.calib.notify_change = self.set_modified
modified = Event
def set_modified(self):
        print 'MN:set_modified'
self.modified = True
# cross section
cs = DelegatesTo('calib')
calibrated_ecb_law = Property(depends_on='modified')
@cached_property
def _get_calibrated_ecb_law(self):
print 'NEW CALIBRATION'
return self.calib.calibrated_ecb_law
eps_cu = Property()
def _get_eps_cu(self):
return -self.cs.cc_law.eps_c_u
eps_tu = Property()
def _get_eps_tu(self):
return self.calibrated_ecb_law.eps_tex_u
n_eps = Int(5, auto_set=False, enter_set=True)
eps_range = Property(depends_on='n_eps')
@cached_property
def _get_eps_range(self):
eps_c_space = np.linspace(self.eps_cu, 0, self.n_eps)
eps_t_space = np.linspace(0, self.eps_tu, self.n_eps)
eps_ccu = 0.8 * self.eps_cu
#eps_cc = self.eps_cu * np.ones_like(eps_c_space)
eps_cc = np.linspace(eps_ccu, self.eps_cu, self.n_eps)
eps_ct = self.eps_cu * np.ones_like(eps_t_space)
eps_tc = self.eps_tu * np.ones_like(eps_c_space)
eps_tt = self.eps_tu * np.ones_like(eps_t_space)
eps1 = np.vstack([eps_c_space, eps_cc])
eps2 = np.vstack([eps_t_space, eps_ct])
eps3 = np.vstack([eps_tc, eps_c_space])
eps4 = np.vstack([eps_tt, eps_t_space])
return np.hstack([eps1, eps2, eps3, eps4])
n_eps_range = Property(depends_on='n_eps')
@cached_property
def _get_n_eps_range(self):
return self.eps_range.shape[1]
#===========================================================================
# MN Diagram
#===========================================================================
def _get_MN_fn(self, eps_lo, eps_up):
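        # Evaluate the cross section for the given strain state (strains at the
        # lower and upper edge) and return the resulting moment M and normal force N.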
self.cs.set(eps_lo=eps_lo,
eps_up=eps_up)
return (self.cs.M, self.cs.N)
MN_vct = Property(depends_on='modified')
def _get_MN_vct(self):
return np.vectorize(self._get_MN_fn)
MN_arr = Property(depends_on='modified')
@cached_property
def _get_MN_arr(self):
return self.MN_vct(self.eps_range[0, :], self.eps_range[1, :])
#===========================================================================
# f_eps Diagram
#===========================================================================
current_eps_idx = Int(0) # , auto_set = False, enter_set = True)
def _current_eps_idx_changed(self):
self._clear_fired()
self._replot_fired()
current_eps = Property(depends_on='current_eps_idx')
@cached_property
def _get_current_eps(self):
return self.eps_range[(0, 1), self.current_eps_idx]
current_MN = Property(depends_on='current_eps_idx')
@cached_property
def _get_current_MN(self):
return self._get_MN_fn(*self.current_eps)
#===========================================================================
# Plotting
#===========================================================================
figure = Instance(Figure)
def _figure_default(self):
figure = Figure(facecolor='white')
figure.add_axes([0.08, 0.13, 0.85, 0.74])
return figure
data_changed = Event
clear = Button
def _clear_fired(self):
self.figure.clear()
self.data_changed = True
replot = Button
def _replot_fired(self):
ax = self.figure.add_subplot(2, 2, 1)
ax.plot(-self.eps_range, [0, 0.06], color='black')
ax.plot(-self.current_eps, [0, 0.06], lw=3, color='red')
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax = self.figure.add_subplot(2, 2, 2)
ax.plot(self.MN_arr[0], -self.MN_arr[1], lw=2, color='blue')
ax.plot(self.current_MN[0], -self.current_MN[1], 'g.', markersize=20.0, color='red')
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(b=None, which='major')
self.cs.set(eps_lo=self.current_eps[0],
eps_up=self.current_eps[1])
ax = self.figure.add_subplot(2, 2, 3)
self.cs.plot_eps(ax)
ax = self.figure.add_subplot(2, 2, 4)
self.cs.plot_sig(ax)
self.data_changed = True
view = View(HSplit(Group(
HGroup(
Group(Item('n_eps', springy=True),
label='Discretization',
springy=True
),
springy=True,
),
HGroup(
Group(VGroup(
Item('cs', label='Cross section', show_label=False, springy=True,
editor=InstanceEditor(kind='live'),
),
Item('calib', label='Calibration', show_label=False, springy=True,
editor=InstanceEditor(kind='live'),
),
springy=True,
),
                              label='Cross section',
springy=True
),
springy=True,
),
scrollable=True,
),
Group(HGroup(
Item('replot', show_label=False),
Item('clear', show_label=False),
),
Item('current_eps_idx', editor=RangeEditor(low=0,
high_name='n_eps_range',
format='(%s)',
mode='slider',
auto_set=False,
enter_set=False,
),
show_label=False,
),
Item('figure', editor=MPLFigureEditor(),
resizable=True, show_label=False),
id='simexdb.plot_sheet',
label='plot sheet',
dock='tab',
),
),
width=1.0,
height=0.8,
resizable=True,
buttons=['OK', 'Cancel'])
if __name__ == '__main__':
c = ECBLCalib(
Mu=3.49,
width=0.20,
n_rovings=23,
ecb_law_type='fbm',
cc_law_type='quadratic' #eps_tu 0.0137279096658
)
mn = ECBLMNDiagram(calib=c,
n_eps=30,
)
mn.configure_traits()
| [
"[email protected]"
] | |
34d8513c172a58e7d635c1cc20add3c5cfc710df | ffb4db36bf3959ed4a994f693c62d68092a91e63 | /image_space/utils.py | 405488ce25781aa69623a15abb722ef060c3b29d | [] | no_license | quasiben/image_solr | 8bc25db1e47f19d83d5b51b89e250f8da2cd285b | cc2baafa170bdbfecc1a0450ffd041de485f19fa | refs/heads/master | 2021-01-10T02:10:19.064770 | 2015-03-05T23:37:24 | 2015-03-05T23:37:24 | 31,621,600 | 2 | 1 | null | 2015-03-05T23:25:21 | 2015-03-03T21:12:28 | JavaScript | UTF-8 | Python | false | false | 1,319 | py | from image_space import app
from image_space.models import Image, IMAGE_TABLE_NAME
from image_space import db
# Upload Handling
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
# EXIF Processing
def process_exif(exif_data, img_path):
# Get the EXIF data from the image
LSVN = getattr(exif_data.get('EXIF LensSerialNumber'), 'values', None)
MSNF = getattr(exif_data.get('MakerNote SerialNumberFormat'), 'values', None)
BSN = getattr(exif_data.get('EXIF BodySerialNumber'), 'values', None)
MISN = getattr(exif_data.get('MakerNote InternalSerialNumber'), 'values', None)
MSN = getattr(exif_data.get('MakerNote SerialNumber'), 'values', None)
IBSN = getattr(exif_data.get('Image BodySerialNumber'), 'values', None)
image = Image(img_file = img_path,
EXIF_LensSerialNumber = LSVN,
MakerNote_SerialNumberFormat = MSNF,
EXIF_BodySerialNumber = BSN,
MakerNote_InternalSerialNumber = MISN,
MakerNote_SerialNumber = MSN,
Image_BodySerialNumber = IBSN,
Uploaded = 1,
)
# Add uploaded image to the database
db.session.add(image)
db.session.commit()
| [
"[email protected]"
] | |
0c3eb34ca123217876148bd6cbe34e978632e747 | 6657a43ee360177e578f67cf966e6aef5debda3c | /test/test_warning_api.py | 7b7edb07cebe3c9a13d48a3b983ac64425eaa37f | [
"MIT"
] | permissive | NVE/python-varsom-avalanche-client | 3cc8b9c366f566a99c6f309ccdfb477f73256659 | c7787bf070d8ea91efd3a2a9e7782eedd4961528 | refs/heads/master | 2022-04-20T09:32:24.499284 | 2020-04-16T20:12:01 | 2020-04-16T20:12:01 | 256,318,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | # coding: utf-8
"""
Snøskredvarsel API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v5.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import varsom_avalanche_client
from api.warning_api import WarningApi # noqa: E501
from varsom_avalanche_client.rest import ApiException
class TestWarningApi(unittest.TestCase):
"""WarningApi unit test stubs"""
def setUp(self):
        self.api = WarningApi()  # noqa: E501
def tearDown(self):
pass
def test_warning_all(self):
"""Test case for warning_all
"""
pass
def test_warning_id(self):
"""Test case for warning_id
"""
pass
def test_warning_region(self):
"""Test case for warning_region
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
65ee34428a001611cc81d153a87842166b3b521d | 9fbbfb3dd1990be27acfada1a91af980f474c8f1 | /Chapter 07/rangesumBST.py | 603891b6170f47a25ae103aebce7585ed63a2d86 | [
"MIT"
] | permissive | bpbpublications/Python-Quick-Interview-Guide | 61a48446f910144a050a5bb1515ad48567dc9481 | ab4ff3e670b116a4db6b9e1f0ccba8424640704d | refs/heads/main | 2023-04-08T12:18:14.605193 | 2021-04-13T09:18:30 | 2021-04-13T09:18:30 | 350,315,060 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class BinTree:
def printTree(self,root:TreeNode)->None:
LevelList = [root]
self.printLevel(LevelList)
def printLevel(self,LevelList:List[TreeNode])-> List[TreeNode]:
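        # Print the values of the current level and collect the children for the
        # next level; recursion stops once a level contains only None placeholders.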
LevelStr = ""
outList = []
ListEmpty = True
for node in LevelList:
if node is None:
LevelStr += "None "
outList.append(None)
outList.append(None)
else:
LevelStr += (str(node.val) + " ")
outList.append(node.left)
outList.append(node.right)
ListEmpty = False
if not ListEmpty:
print(LevelStr)
self.printLevel(outList)
class Solution:
def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
self.sum=0
def printInorder(root):
if root:
printInorder(root.left) #Recursively call left child
if root.val:
if L <= root.val <= R:self.sum += root.val
printInorder(root.right) #Recursively call right child
printInorder(root)
return self.sum
#Driver code
root = TreeNode(5)
root.left = TreeNode(3)
root.right = TreeNode(6)
root.left.left = TreeNode(2)
root.left.right = TreeNode(4)
root.right.left = TreeNode(None)
root.right.right = TreeNode(7)
bst = BinTree()
bst.printTree(root)
sol = Solution()
print("Range sum =",sol.rangeSumBST(root,2,6))
| [
"[email protected]"
] | |
795f936423965063c44b347705c53fd1c306692f | bf2aa4eab14a6a5347fe4af65cc4a37f512a465d | /people/migrations/0111_auto_20200530_0632.py | 7320c7e9f7c6db4746b6c85c51fb4fef42dfea53 | [] | no_license | drdavidknott/betterstart | 0cda889f5cd6bb779f6d1fa75cb4f2ef08eb626c | 59e2f8282b34b7c75e1e19e1cfa276b787118adf | refs/heads/master | 2023-05-04T07:32:24.796488 | 2023-04-16T15:26:30 | 2023-04-16T15:26:30 | 173,626,906 | 0 | 0 | null | 2023-02-18T07:27:55 | 2019-03-03T20:37:01 | Python | UTF-8 | Python | false | false | 623 | py | # Generated by Django 3.0.3 on 2020-05-30 05:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0110_auto_20200530_0631'),
]
operations = [
migrations.AlterField(
model_name='site',
name='password_reset_email_from',
field=models.CharField(blank=True, default='', max_length=100),
),
migrations.AlterField(
model_name='site',
name='password_reset_email_title',
field=models.CharField(blank=True, default='', max_length=100),
),
]
| [
"[email protected]"
] | |
c07f5f20db0ddcca9d3b07ecdb404f2a7e817bcb | 1caf4418f3549567637f5e9893a445f52a38c6a0 | /CmsAdmin/user_content/app/dtos/change_account_password_dto.py | 29b05af87dbeee572f150ac6b43bf6236ec0b7b5 | [] | no_license | Final-Game/social_network_backend | c601563e08c0fd7de72a614944f354ef8d2d31d8 | 8111787d1d20eb87733ae360d8baa745a65e2743 | refs/heads/master | 2023-03-04T21:12:43.147084 | 2021-02-23T03:45:22 | 2021-02-23T03:45:22 | 290,542,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from core.common import BaseApiException
class ChangeAccountPasswordDto(object):
old_password: str
new_password: str
def __init__(self, old_password: str, new_password: str) -> None:
self.old_password = old_password
self.new_password = new_password
self.validate()
def validate(self):
        if not (self.old_password and self.new_password):
raise BaseApiException("Password not found")
if len(self.new_password) < 6:
            raise BaseApiException("Password must be at least 6 characters.")
| [
"[email protected]"
] | |
ee71398911054d72c2440fe57f222ff41fe9d50c | ebf997ac5814bd20a44646b6690de6913669f2e1 | /plugins/btsync/resources/btsyncUI/freenas/urls.py | 2b3a85c40b8dfa13a5dc5e20d84814d6cc7c5f00 | [] | no_license | MadMarty/freenas-plugins-1 | 4add49728e07fb75191352902969a1ecea67b248 | 4940cd7cc39a26882ea7f4a61799bcea1cea6b34 | refs/heads/master | 2021-01-22T04:23:36.608602 | 2016-05-06T18:02:47 | 2016-05-06T18:02:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | from django.conf.urls import patterns, include, url
urlpatterns = patterns('btsyncUI.freenas.views',
url(r'^edit$', 'edit', name="btsync_edit"),
url(r'^treemenu-icon$', 'treemenu_icon', name="treemenu_icon"),
url(r'^_s/treemenu$', 'treemenu', name="treemenu"),
url(r'^_s/start$', 'start', name="start"),
url(r'^_s/stop$', 'stop', name="stop"),
url(r'^_s/status$', 'status', name="status"),
)
| [
"[email protected]"
] | |
94525c4e1278e1b638d45df4e32589b8ea6e5133 | 3a891a79be468621aae43defd9a5516f9763f36e | /desktop/core/ext-py/pyasn1-0.1.8/test/codec/der/test_decoder.py | 5f0bc0b8be193a1564d66e29a71c3e468855d6a1 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] | permissive | oyorooms/hue | b53eb87f805063a90f957fd2e1733f21406269aa | 4082346ef8d5e6a8365b05752be41186840dc868 | refs/heads/master | 2020-04-15T20:31:56.931218 | 2019-01-09T19:02:21 | 2019-01-09T19:05:36 | 164,998,117 | 4 | 2 | Apache-2.0 | 2019-01-10T05:47:36 | 2019-01-10T05:47:36 | null | UTF-8 | Python | false | false | 963 | py | from pyasn1.type import univ
from pyasn1.codec.der import decoder
from pyasn1.compat.octets import ints2octs
from pyasn1.error import PyAsn1Error
from sys import version_info
if version_info[0:2] < (2, 7) or \
version_info[0:2] in ( (3, 0), (3, 1) ):
try:
import unittest2 as unittest
except ImportError:
import unittest
else:
import unittest
class OctetStringDecoderTestCase(unittest.TestCase):
def testShortMode(self):
assert decoder.decode(
'\004\017Quick brown fox'.encode()
) == ('Quick brown fox'.encode(), ''.encode())
def testIndefMode(self):
try:
decoder.decode(
ints2octs((36, 128, 4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120, 0, 0))
)
except PyAsn1Error:
pass
else:
assert 0, 'indefinite length encoding tolerated'
if __name__ == '__main__': unittest.main()
| [
"[email protected]"
] | |
7042119118b6a349f705cc5540e5f3d55888e2fd | 74eeeaeabf73f2909794eb767b5126460ce69d92 | /object_detection2/modeling/onestage_heads/centernet_outputs.py | 39e6afac159d44ec42cf2a8e27e537f1ab279705 | [
"MIT"
] | permissive | seantangtao/wml | 1bb9f699467e8c03143d8b92becb121db3717747 | 8fbf060088816cd1a366d7cbd5dfe1a0e00f8d79 | refs/heads/master | 2023-07-15T10:35:11.713990 | 2021-09-03T07:50:47 | 2021-09-03T07:50:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,294 | py | # coding=utf-8
import tensorflow as tf
import wml_tfutils as wmlt
import wnn
from basic_tftools import channel
import functools
import tfop
import object_detection2.bboxes as odbox
from object_detection2.standard_names import *
import wmodule
from .onestage_tools import *
from object_detection2.datadef import *
from object_detection2.config.config import global_cfg
from object_detection2.modeling.build import HEAD_OUTPUTS
import object_detection2.wlayers as odl
import numpy as np
from object_detection2.data.dataloader import DataLoader
import wsummary
import wnn
@HEAD_OUTPUTS.register()
class CenterNetOutputs(wmodule.WChildModule):
def __init__(
self,
cfg,
parent,
box2box_transform,
head_outputs,
gt_boxes=None,
gt_labels=None,
gt_length=None,
max_detections_per_image=100,
**kwargs,
):
"""
Args:
cfg: Only the child part
box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for
anchor-proposal transformations.
gt_boxes: [B,N,4] (ymin,xmin,ymax,xmax)
gt_labels: [B,N]
gt_length: [B]
"""
super().__init__(cfg, parent=parent, **kwargs)
self.num_classes = cfg.NUM_CLASSES
self.topk_candidates = cfg.TOPK_CANDIDATES_TEST
self.score_threshold = cfg.SCORE_THRESH_TEST
self.nms_threshold = cfg.NMS_THRESH_TEST
self.max_detections_per_image = max_detections_per_image
self.box2box_transform = box2box_transform
self.head_outputs = head_outputs
self.k = self.cfg.K
self.size_threshold = self.cfg.SIZE_THRESHOLD
self.dis_threshold = self.cfg.DIS_THRESHOLD
self.gt_boxes = gt_boxes
self.gt_labels = gt_labels
self.gt_length = gt_length
self.mid_results = {}
def _get_ground_truth(self):
"""
Returns:
"""
res = []
for i,outputs in enumerate(self.head_outputs):
shape = wmlt.combined_static_and_dynamic_shape(outputs['heatmaps_tl'])[1:3]
t_res = self.box2box_transform.get_deltas(self.gt_boxes,
self.gt_labels,
self.gt_length,
output_size=shape)
res.append(t_res)
return res
@wmlt.add_name_scope
def losses(self):
"""
Args:
Returns:
"""
all_encoded_datas = self._get_ground_truth()
all_loss0 = []
all_loss1 = []
all_loss2 = []
all_offset_loss = []
all_embeading_loss = []
for i,outputs in enumerate(self.head_outputs):
encoded_datas = all_encoded_datas[i]
head_outputs = self.head_outputs[i]
loss0 = tf.reduce_mean(wnn.focal_loss_for_heat_map(labels=encoded_datas["g_heatmaps_tl"],
logits=head_outputs["heatmaps_tl"],scope="tl_loss"))
loss1 = tf.reduce_mean(wnn.focal_loss_for_heat_map(labels=encoded_datas["g_heatmaps_br"],
logits=head_outputs["heatmaps_br"],scope="br_loss"))
loss2 = tf.reduce_mean(wnn.focal_loss_for_heat_map(labels=encoded_datas["g_heatmaps_ct"],
logits=head_outputs["heatmaps_ct"],scope="ct_loss"))
offset0 = wmlt.batch_gather(head_outputs['offset_tl'],encoded_datas['g_index'][:,:,0])
offset1 = wmlt.batch_gather(head_outputs['offset_br'],encoded_datas['g_index'][:,:,1])
offset2 = wmlt.batch_gather(head_outputs['offset_ct'],encoded_datas['g_index'][:,:,2])
offset = tf.concat([offset0,offset1,offset2],axis=2)
offset_loss = tf.losses.huber_loss(labels=encoded_datas['g_offset'],
predictions=offset,
loss_collection=None,
weights=tf.cast(tf.expand_dims(encoded_datas['g_index_mask'],-1),tf.float32))
embeading_loss = self.ae_loss(head_outputs['tag_tl'],head_outputs['tag_br'],
encoded_datas['g_index'],
encoded_datas['g_index_mask'])
all_loss0.append(loss0)
all_loss1.append(loss1)
all_loss2.append(loss2)
all_offset_loss.append(offset_loss)
all_embeading_loss.append(embeading_loss)
loss0 = tf.add_n(all_loss0)
loss1 = tf.add_n(all_loss1)
loss2 = tf.add_n(all_loss2)
offset_loss = tf.add_n(all_offset_loss)
embeading_loss= tf.add_n(all_embeading_loss)
#loss0 = tf.Print(loss0,["loss",loss0,loss1,loss2,offset_loss,embeading_loss],summarize=100)
return {"heatmaps_tl_loss": loss0,
"heatmaps_br_loss": loss1,
"heatmaps_ct_loss":loss2,
"offset_loss":offset_loss,
'embeading_loss':embeading_loss}
@staticmethod
@wmlt.add_name_scope
def ae_loss(tag0,tag1,index,mask):
'''
:param tag0: [B,N,C],top left tag
:param tag1: [B,N,C], bottom right tag
:param index: [B,M]
        :param mask: [B,M]
:return:
'''
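        # Associative-embedding style loss: the "pull" term drags the top-left and
        # bottom-right corner tags of the same object towards their mean, while the
        # "push" term penalizes pairs of different objects whose mean tags are
        # closer than a margin of 1.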
with tf.name_scope("pull_loss"):
num = tf.reduce_sum(tf.cast(mask,tf.float32))+1e-4
#num = tf.Print(num,["X",num,tf.shape(tag0),tf.shape(tag1),tf.shape(index),tf.shape(mask)],summarize=100)
tag0 = wmlt.batch_gather(tag0,index[:,:,0])
tag1 = wmlt.batch_gather(tag1,index[:,:,1])
tag_mean = (tag0+tag1)/2
tag0 = tf.pow(tag0-tag_mean,2)/num
tag0 = tf.reduce_sum(tf.boolean_mask(tag0,mask))
tag1 = tf.pow(tag1-tag_mean,2)/num
tag1 = tf.reduce_sum(tf.boolean_mask(tag1,mask))
#tag0 = tf.Print(tag0,["tag01",tag0,tag1],summarize=100)
pull = tag0+tag1
with tf.name_scope("push_loss"):
neg_index = tfop.make_neg_pair_index(mask)
push_mask = tf.greater(neg_index,-1)
neg_index = tf.nn.relu(neg_index)
num = tf.reduce_sum(tf.cast(push_mask,tf.float32))+1e-4
tag0 = wmlt.batch_gather(tag_mean,neg_index[:,:,0])
tag1 = wmlt.batch_gather(tag_mean,neg_index[:,:,1])
#tag0 = tf.Print(tag0,["X2",num,tf.shape(tag0),tf.shape(tag1),tf.shape(neg_index),tf.shape(push_mask)],summarize=100)
tag0 = tf.boolean_mask(tag0,push_mask[...,0])
tag1 = tf.boolean_mask(tag1,push_mask[...,1])
#num = tf.Print(num,["X3",num,tf.shape(tag0),tf.shape(tag1),tf.shape(neg_index),tf.shape(push_mask)],summarize=100)
push = tf.reduce_sum(tf.nn.relu(1-tf.abs(tag0-tag1)))/num
#push = tf.Print(push,["push",push],summarize=100)
return pull+push
@wmlt.add_name_scope
def inference(self,inputs,head_outputs):
"""
Arguments:
inputs: same as CenterNet.forward's batched_inputs
Returns:
results:
RD_BOXES: [B,N,4]
RD_LABELS: [B,N]
RD_PROBABILITY:[ B,N]
RD_LENGTH:[B]
"""
self.inputs = inputs
all_bboxes = []
all_scores = []
all_clses = []
all_length = []
img_size = tf.shape(inputs[IMAGE])[1:3]
for i,datas in enumerate(head_outputs):
num_dets = max(self.topk_candidates//(4**i),4)
K = max(self.k//(4**i),4)
bboxes, scores, clses, length = self.get_box_in_a_single_layer(datas,num_dets,img_size,K)
all_bboxes.append(bboxes)
all_scores.append(scores)
all_clses.append(clses)
all_length.append(length)
with tf.name_scope(f"merge_all_boxes"):
bboxes,_ = wmlt.batch_concat_with_length(all_bboxes,all_length)
scores,_ = wmlt.batch_concat_with_length(all_scores,all_length)
clses,length = wmlt.batch_concat_with_length(all_clses,all_length)
nms = functools.partial(tfop.boxes_nms, threshold=self.nms_threshold,
classes_wise=True,
k=self.max_detections_per_image)
#there is no background class at prediction time; add 1 so that label 0 means background
clses = clses + 1
#bboxes = tf.Print(bboxes,["shape",tf.shape(bboxes),tf.shape(clses),length],summarize=100)
bboxes, labels, nms_indexs, lens = odl.batch_nms_wrapper(bboxes, clses, length, confidence=None,
nms=nms,
k=self.max_detections_per_image,
sort=True)
scores = wmlt.batch_gather(scores,nms_indexs)
#labels = clses+1
#lens = length
outdata = {RD_BOXES:bboxes,RD_LABELS:labels,RD_PROBABILITY:scores,RD_LENGTH:lens}
if global_cfg.GLOBAL.SUMMARY_LEVEL<=SummaryLevel.DEBUG:
wsummary.detection_image_summary(images=inputs[IMAGE],
boxes=outdata[RD_BOXES],
classes=outdata[RD_LABELS],
lengths=outdata[RD_LENGTH],
scores=outdata[RD_PROBABILITY],
name="CenterNetOutput",
category_index=DataLoader.category_index)
return outdata
@staticmethod
def pixel_nms(heat,kernel=[3,3],epsilon=1e-8):
hmax=tf.nn.max_pool(heat,ksize=[1]+kernel+[1],strides=[1,1,1,1],padding='SAME')
mask=tf.less_equal(tf.abs(hmax-heat),epsilon)
mask = tf.cast(mask,tf.float32)
return mask*heat
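# pixel_nms above keeps a heatmap value only where it equals its own 3x3 neighbourhood
# maximum (per-pixel non-maximum suppression).  A toy call, with shapes assumed purely
# for illustration:
#   heat = tf.random_uniform([1, 8, 8, 2])
#   peaks = self.pixel_nms(heat)   # every non-peak position is zeroed out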
@staticmethod
@wmlt.add_name_scope
def _topk(scores,K=100):
B,H,W,C = wmlt.combined_static_and_dynamic_shape(scores)
scores = tf.reshape(scores,[B,-1])
topk_scores,topk_inds = tf.nn.top_k(scores,k=K)
topk_classes = topk_inds%C
topk_inds = topk_inds//C
topk_ys = tf.cast(topk_inds//W,tf.float32)
topk_xs = tf.cast(topk_inds%W,tf.float32)
return topk_scores,topk_inds,topk_classes,topk_ys,topk_xs
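# _topk above flattens the [B,H,W,C] score map to [B, H*W*C]; a flat index i therefore
# decodes as class = i % C, cell = i // C, y = cell // W, x = cell % W, which is exactly
# the arithmetic applied to topk_inds.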
@wmlt.add_name_scope
def get_box_in_a_single_layer(self,datas,num_dets,img_size,K):
'''
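Decode the raw head outputs of one pyramid level into candidate boxes.
Parameter meaning, inferred from the call site in inference():
datas: dict holding this level's heatmaps, offsets and corner tags
num_dets: number of candidate detections kept after pairing corners
img_size: [height, width] of the network input image
K: how many peaks are taken from each corner/centre heatmap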
'''
#wsummary.variable_summaries_v2(datas['heatmaps_tl'],"hm_tl")
h_tl = tf.nn.sigmoid(datas['heatmaps_tl'])
h_br = tf.nn.sigmoid(datas['heatmaps_br'])
h_ct = tf.nn.sigmoid(datas['heatmaps_ct'])
#wsummary.variable_summaries_v2(h_tl,"hm_a_tl")
B,H,W,C = wmlt.combined_static_and_dynamic_shape(h_tl)
h_tl = self.pixel_nms(h_tl)
h_br = self.pixel_nms(h_br)
h_ct = self.pixel_nms(h_ct)
tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = self._topk(h_tl, K=K)
br_scores, br_inds, br_clses, br_ys, br_xs = self._topk(h_br, K=K)
ct_scores, ct_inds, ct_clses, ct_ys, ct_xs = self._topk(h_ct, K=K)
tl_ys = tf.tile(tf.reshape(tl_ys,[B,K,1]),[1,1,K])
tl_xs = tf.tile(tf.reshape(tl_xs,[B,K,1]),[1,1,K])
br_ys = tf.tile(tf.reshape(br_ys,[B,1,K]),[1,K,1])
br_xs = tf.tile(tf.reshape(br_xs,[B,1,K]),[1,K,1])
ct_ys = tf.reshape(ct_ys,[B,K])
ct_xs = tf.reshape(ct_xs,[B,K])
ct_scores = tf.reshape(ct_scores,[B,K])
if 'offset_tl' in datas:
tl_regr = wmlt.batch_gather(datas['offset_tl'],tl_inds)
br_regr = wmlt.batch_gather(datas['offset_br'],br_inds)
ct_regr = wmlt.batch_gather(datas['offset_ct'],ct_inds) #centre offsets belong to the centre peaks (the original gathered with br_inds, most likely a copy-paste slip)
tl_regr = tf.reshape(tl_regr,[B,K,1,2])
br_regr = tf.reshape(br_regr,[B,1,K,2])
ct_regr = tf.reshape(ct_regr,[B,K,2])
tl_xs = tl_xs + tl_regr[...,0]
tl_ys = tl_ys + tl_regr[...,1]
br_xs = br_xs + br_regr[...,0]
br_ys = br_ys + br_regr[...,1]
ct_xs = ct_xs + ct_regr[...,0]
ct_ys = ct_ys + ct_regr[...,1]
bboxes = tf.stack([tl_ys,tl_xs,br_ys,br_xs],axis=-1)
#bboxes = tf.Print(bboxes,["box0",tf.reduce_max(bboxes),tf.reduce_min(bboxes),W,H],summarize=100)
#wsummary.detection_image_summary(self.inputs[IMAGE],
#boxes=odbox.tfabsolutely_boxes_to_relative_boxes(tf.reshape(bboxes,[B,-1,4]),width=W,height=H),
#name="box0")
tl_tag = wmlt.batch_gather(datas['tag_tl'],tl_inds)
br_tag = wmlt.batch_gather(datas['tag_br'],br_inds)
tl_tag = tf.expand_dims(tl_tag,axis=2)
br_tag = tf.expand_dims(br_tag,axis=1)
tl_tag = tf.tile(tl_tag,[1,1,K,1])
br_tag = tf.tile(br_tag,[1,K,1,1])
dists = tf.abs(tl_tag-br_tag)
dists = tf.squeeze(dists,axis=-1)
dis_inds = (dists>self.dis_threshold)
tl_scores = tf.tile(tf.reshape(tl_scores,[B,K,1]),[1,1,K])
br_scores = tf.tile(tf.reshape(br_scores,[B,1,K]),[1,K,1])
scores = (tl_scores+br_scores)/2
tl_clses = tf.tile(tf.reshape(tl_clses,[B,K,1]),[1,1,K])
br_clses = tf.tile(tf.reshape(br_clses,[B,1,K]),[1,K,1])
cls_inds = tf.not_equal(tl_clses,br_clses)
width_inds = (br_xs<tl_xs)
height_inds = (br_ys<tl_ys)
all_inds = tf.logical_or(cls_inds,dis_inds)
all_inds = tf.logical_or(all_inds,width_inds)
all_inds = tf.logical_or(all_inds,height_inds)
#all_inds = cls_inds
scores = tf.where(all_inds,tf.zeros_like(scores),scores)
scores,inds = tf.nn.top_k(tf.reshape(scores,[B,-1]),num_dets)
wsummary.variable_summaries_v2(scores,"scores")
wsummary.variable_summaries_v2(tl_scores,"tl_scores")
wsummary.variable_summaries_v2(br_scores,"br_scores")
bboxes = tf.reshape(bboxes,[B,-1,4])
bboxes = wmlt.batch_gather(bboxes,inds)
#bboxes = tf.Print(bboxes,["box1",tf.reduce_max(bboxes),tf.reduce_min(bboxes),W,H],summarize=100)
#wsummary.detection_image_summary(self.inputs[IMAGE],
# boxes=odbox.tfabsolutely_boxes_to_relative_boxes(tf.reshape(bboxes,[B,-1,4]),width=W,height=H),
# name="box1")
clses = tf.reshape(tl_clses,[B,-1])
clses = wmlt.batch_gather(clses,inds)
'''tl_scores = tf.reshape(tl_scores,[B,-1,1])
tl_scores = wmlt.batch_gather(tl_scores,inds)
br_scores = tf.reshape(br_scores,[B,-1,1])
br_scores = wmlt.batch_gather(br_scores,inds)'''
ct = tf.stack([ct_ys/tf.to_float(H), ct_xs/tf.to_float(W)], axis=-1)
bboxes = odbox.tfabsolutely_boxes_to_relative_boxes(bboxes,width=W,height=H)
sizes = tf.convert_to_tensor(self.size_threshold,dtype=tf.float32)
relative_size = sizes*tf.rsqrt(tf.cast(img_size[0]*img_size[1],tf.float32))
_,box_nr,_ = wmlt.combined_static_and_dynamic_shape(bboxes)
length = tf.ones([B],tf.int32)*box_nr
#bboxes = tf.Print(bboxes,["bboxes",tf.reduce_min(bboxes),tf.reduce_max(bboxes),tf.reduce_min(ct),tf.reduce_max(ct)],summarize=100)
center_index = tfop.center_boxes_filter(bboxes=bboxes,
bboxes_clses=clses,
center_points=ct,
center_clses=ct_clses,
size_threshold=relative_size,
bboxes_length=length,
nrs=[3,5])
def fn(bboxes,scores,clses,ct_score,c_index):
ct_score = tf.gather(ct_score,tf.nn.relu(c_index))
scores = (scores*2+ct_score)/3 #average over the three keypoints (top-left, bottom-right, centre)
mask = tf.logical_and(tf.greater_equal(c_index,0),tf.greater(scores,self.score_threshold))
mask = tf.logical_and(tf.greater_equal(ct_score,0.001),mask)
bboxes = tf.boolean_mask(bboxes,mask)
scores = tf.boolean_mask(scores,mask)
clses = tf.boolean_mask(clses,mask)
len = tf.reduce_sum(tf.cast(mask,tf.int32))
bboxes = tf.pad(bboxes,[[0,box_nr-len],[0,0]])
scores = tf.pad(scores,[[0,box_nr-len]])
clses = tf.pad(clses,[[0,box_nr-len]])
return bboxes,scores,clses,len
bboxes,scores,clses,length = tf.map_fn(lambda x:fn(x[0],x[1],x[2],x[3],x[4]),
elems=(bboxes,scores,clses,ct_scores,center_index),
dtype=(tf.float32,tf.float32,tf.int32,tf.int32))
#bboxes = tf.Print(bboxes,["box2",tf.reduce_max(bboxes),tf.reduce_min(bboxes),W,H],summarize=100)
#wsummary.detection_image_summary(self.inputs[IMAGE],
# boxes=tf.reshape(bboxes,[B,-1,4]),lengths=length,
# name="box2")
return bboxes,scores,clses,length
| [
"[email protected]"
] | |
3612b5e072cd20efa4dddf1146c44558b40b97d6 | 0258e0c9595406ceb3de32067aff776bc2a58fa8 | /test1.py | 8d7172f2fc5867f453187b5d1e84acd26e2737c4 | [] | no_license | akromibn37/python_code | 72c016c361b3ba2e04c83e1d1a703171b0bd8819 | 41d1a09f8ec8696e37ad83c1a0cb6506c7f0f4f6 | refs/heads/master | 2020-03-21T22:57:25.111642 | 2018-06-29T14:14:33 | 2018-06-29T14:14:33 | 139,157,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | s = "PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP-PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP-PPPP"
c = s.rfind("-")
print(c) | [
"[email protected]"
] | |
312b1e2bce53e43eed3cdd5faca54bc8a98d4c90 | ce9d90ac5dfd61cc1d3ec57378186a4895323abb | /tests/filters/helpers.py | 0bd99d08eb31da8ac72380689341d36b801acad4 | [
"Apache-2.0"
] | permissive | min-a-youn/plaso | 61ee7f280c471a2e9fcc3407445ddd4f3c5eb125 | 082ff564872f7dd1d0d5d68cca23b3f7b8cdedcb | refs/heads/master | 2020-09-20T22:07:34.631095 | 2019-08-28T06:06:48 | 2019-08-28T06:06:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,455 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the event filter helper functions and classes."""
from __future__ import unicode_literals
import unittest
from plaso.filters import helpers
from tests import test_lib as shared_test_lib
class CopyValueToDateTimeTest(shared_test_lib.BaseTestCase):
"""Tests the CopyValueToDateTime helper function."""
def testCopyValueToDateTime(self):
"""Tests the CopyValueToDateTime function."""
date_time = helpers.CopyValueToDateTime('2009-07-13T23:29:02.849131')
self.assertIsNotNone(date_time)
self.assertEqual(date_time.timestamp, 1247527742849131)
date_time = helpers.CopyValueToDateTime('2009-07-13')
self.assertIsNotNone(date_time)
self.assertEqual(date_time.timestamp, 1247443200000000)
date_time = helpers.CopyValueToDateTime('2009-07-13 23:29:02')
self.assertIsNotNone(date_time)
self.assertEqual(date_time.timestamp, 1247527742000000)
date_time = helpers.CopyValueToDateTime('2009-07-13 23:29:02.849131')
self.assertIsNotNone(date_time)
self.assertEqual(date_time.timestamp, 1247527742849131)
date_time = helpers.CopyValueToDateTime('1247527742849131')
self.assertIsNotNone(date_time)
self.assertEqual(date_time.timestamp, 1247527742849131)
date_time = helpers.CopyValueToDateTime(1247527742849131)
self.assertIsNotNone(date_time)
self.assertEqual(date_time.timestamp, 1247527742849131)
with self.assertRaises(ValueError):
helpers.CopyValueToDateTime(None)
class GetUnicodeStringTest(shared_test_lib.BaseTestCase):
"""Tests the GetUnicodeString helper function."""
def testGetUnicodeString(self):
"""Tests the GetUnicodeString function."""
string = helpers.GetUnicodeString(['1', '2', '3'])
self.assertEqual(string, '123')
string = helpers.GetUnicodeString([1, 2, 3])
self.assertEqual(string, '123')
string = helpers.GetUnicodeString(123)
self.assertEqual(string, '123')
string = helpers.GetUnicodeString(b'123')
self.assertEqual(string, '123')
string = helpers.GetUnicodeString('123')
self.assertEqual(string, '123')
class TimeRangeCacheTest(shared_test_lib.BaseTestCase):
"""Tests the TimeRangeCache helper."""
# pylint: disable=protected-access
def testGetTimeRange(self):
"""Tests the GetTimeRange function."""
if hasattr(helpers.TimeRangeCache, '_lower'):
del helpers.TimeRangeCache._lower
if hasattr(helpers.TimeRangeCache, '_upper'):
del helpers.TimeRangeCache._upper
first, last = helpers.TimeRangeCache.GetTimeRange()
self.assertEqual(first, helpers.TimeRangeCache._INT64_MIN)
self.assertEqual(last, helpers.TimeRangeCache._INT64_MAX)
def testSetLowerTimestamp(self):
"""Tests the SetLowerTimestamp function."""
helpers.TimeRangeCache.SetLowerTimestamp(1247527742849131)
first, last = helpers.TimeRangeCache.GetTimeRange()
self.assertEqual(first, 1247527742849131)
self.assertEqual(last, helpers.TimeRangeCache._INT64_MAX)
del helpers.TimeRangeCache._lower
def testSetUpperTimestamp(self):
"""Tests the SetUpperTimestamp function."""
helpers.TimeRangeCache.SetUpperTimestamp(1247527742849131)
first, last = helpers.TimeRangeCache.GetTimeRange()
self.assertEqual(first, helpers.TimeRangeCache._INT64_MIN)
self.assertEqual(last, 1247527742849131)
del helpers.TimeRangeCache._upper
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
d15fd0a6853b2f38a462ef37d462539753a53184 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_089/ch12_2020_03_09_12_58_17_378201.py | a48a67c58fe14187f66e923fd33e0299ab4d37f9 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | def resolve_equacao_1o_grau(x):
# the original body assigned to the expression a*x + b, which is a syntax error;
# assuming the intent is the root of a*x + b = 0, with the two coefficients passed
# in together as the argument
a, b = x
return -b / a
| [
"[email protected]"
] | |
7bb4179aa4bbbb1f06631d13ab17c3564a767a29 | 3e7b2ebb64e9e324ce47d19def21ae62cc1e56a6 | /My Thought/reverse a string.py | 296417613cb9272f294f6413f7eee55df1ccb0ce | [] | no_license | shuvo14051/python-data-algo | 9b6622d9260e95ca9ffabd39b02996f13bdf20d1 | 8f66ff6f2bd88a0ae48dac72e4ea6c5382a836ec | refs/heads/master | 2023-02-03T03:04:01.183093 | 2020-12-13T10:13:15 | 2020-12-13T10:13:15 | 274,106,480 | 0 | 0 | null | 2020-07-05T06:33:28 | 2020-06-22T10:24:05 | Python | UTF-8 | Python | false | false | 188 | py | # this is a complete pythonnic way
# print(a[::-1])
a = "Shuvo"
li = []
for i in a:
li.append(i)
reverse = ''
for i in range(len(li)-1,-1,-1):
reverse += li[i]
print(reverse)
| [
"[email protected]"
] | |
4097dc4928226ce67c6d923fff671e5ca2b9b7d3 | e20ed90b9be7a0bcdc1603929d65b2375a224bf6 | /generated-libraries/python/netapp/job/job_private_delete_iter_key_td.py | 7dde8422f159e87e74cc57255dc0b70f0ea0cce6 | [
"MIT"
] | permissive | radekg/netapp-ontap-lib-gen | 530ec3248cff5ead37dc2aa47ced300b7585361b | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | refs/heads/master | 2016-09-06T17:41:23.263133 | 2015-01-14T17:40:46 | 2015-01-14T17:40:46 | 29,256,898 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | from netapp.netapp_object import NetAppObject
class JobPrivateDeleteIterKeyTd(NetAppObject):
"""
Key typedef for table jm_local_jobs_table_remove
"""
_key_2 = None
@property
def key_2(self):
"""
Field vserver
"""
return self._key_2
@key_2.setter
def key_2(self, val):
if val != None:
self.validate('key_2', val)
self._key_2 = val
_key_1 = None
@property
def key_1(self):
"""
Field id
"""
return self._key_1
@key_1.setter
def key_1(self, val):
if val != None:
self.validate('key_1', val)
self._key_1 = val
_key_0 = None
@property
def key_0(self):
"""
Field node
"""
return self._key_0
@key_0.setter
def key_0(self, val):
if val != None:
self.validate('key_0', val)
self._key_0 = val
@staticmethod
def get_api_name():
return "job-private-delete-iter-key-td"
@staticmethod
def get_desired_attrs():
return [
'key-2',
'key-1',
'key-0',
]
def describe_properties(self):
return {
'key_2': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_1': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_0': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
| [
"[email protected]"
] | |
b374191a7cf732d53d219ab1e5838ac5a74b3ab2 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/cherrypy/test/test_virtualhost.py | e9b88bd297cb6047933124c32d619fd6c0d22cc0 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 3,718 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\cherrypy\test\test_virtualhost.py
import os
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
import cherrypy
from cherrypy.test import helper
class VirtualHostTest(helper.CPWebCase):
def setup_server():
class Root:
def index(self):
return 'Hello, world'
index.exposed = True
def dom4(self):
return 'Under construction'
dom4.exposed = True
def method(self, value):
return 'You sent %s' % repr(value)
method.exposed = True
class VHost:
def __init__(self, sitename):
self.sitename = sitename
def index(self):
return 'Welcome to %s' % self.sitename
index.exposed = True
def vmethod(self, value):
return 'You sent %s' % repr(value)
vmethod.exposed = True
def url(self):
return cherrypy.url('nextpage')
url.exposed = True
static = cherrypy.tools.staticdir.handler(section='/static', dir=curdir)
root = Root()
root.mydom2 = VHost('Domain 2')
root.mydom3 = VHost('Domain 3')
hostmap = {'www.mydom2.com': '/mydom2',
'www.mydom3.com': '/mydom3',
'www.mydom4.com': '/dom4'}
cherrypy.tree.mount(root, config={'/': {'request.dispatch': cherrypy.dispatch.VirtualHost(**hostmap)},
'/mydom2/static2': {'tools.staticdir.on': True,
'tools.staticdir.root': curdir,
'tools.staticdir.dir': 'static',
'tools.staticdir.index': 'index.html'}})
setup_server = staticmethod(setup_server)
def testVirtualHost(self):
self.getPage('/', [('Host', 'www.mydom1.com')])
self.assertBody('Hello, world')
self.getPage('/mydom2/', [('Host', 'www.mydom1.com')])
self.assertBody('Welcome to Domain 2')
self.getPage('/', [('Host', 'www.mydom2.com')])
self.assertBody('Welcome to Domain 2')
self.getPage('/', [('Host', 'www.mydom3.com')])
self.assertBody('Welcome to Domain 3')
self.getPage('/', [('Host', 'www.mydom4.com')])
self.assertBody('Under construction')
self.getPage('/method?value=root')
self.assertBody("You sent u'root'")
self.getPage('/vmethod?value=dom2+GET', [('Host', 'www.mydom2.com')])
self.assertBody("You sent u'dom2 GET'")
self.getPage('/vmethod', [('Host', 'www.mydom3.com')], method='POST', body='value=dom3+POST')
self.assertBody("You sent u'dom3 POST'")
self.getPage('/vmethod/pos', [('Host', 'www.mydom3.com')])
self.assertBody("You sent 'pos'")
self.getPage('/url', [('Host', 'www.mydom2.com')])
self.assertBody('%s://www.mydom2.com/nextpage' % self.scheme)
def test_VHost_plus_Static(self):
self.getPage('/static/style.css', [('Host', 'www.mydom2.com')])
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/css;charset=utf-8')
self.getPage('/static2/dirback.jpg', [('Host', 'www.mydom2.com')])
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'image/jpeg')
self.getPage('/static2/', [('Host', 'www.mydom2.com')])
self.assertStatus('200 OK')
self.assertBody('Hello, world\r\n')
self.getPage('/static2', [('Host', 'www.mydom2.com')])
self.assertStatus(301)
| [
"[email protected]"
] | |
152c01b7254082a6295aa8c64ce3f0600ca33d97 | be134c181703b95aca1e48b6a31bcfdb7bcfcc76 | /site/mezzanine_old/galleries/migrations/0001_initial.py | 11f1937e16fbf9cff1135c9e2c992c658bbfd803 | [] | permissive | aldenjenkins/ThiccGaming | 0245955a797394bcfeedb2cfb385f633653ba55d | 4790d2568b019438d1569d0fe4e9f9aba008b737 | refs/heads/master | 2022-12-16T02:43:36.532981 | 2021-11-17T04:15:21 | 2021-11-17T04:15:21 | 154,858,818 | 0 | 0 | BSD-3-Clause | 2022-12-08T02:58:44 | 2018-10-26T15:52:39 | Python | UTF-8 | Python | false | false | 1,837 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('pages', '__first__'),
]
operations = [
migrations.CreateModel(
name='Gallery',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')),
('content', mezzanine.core.fields.RichTextField(verbose_name='Content')),
('zip_import', models.FileField(help_text="Upload a zip file containing images, and they'll be imported into this gallery.", upload_to='galleries', verbose_name='Zip import', blank=True)),
],
options={
'ordering': ('_order',),
'verbose_name': 'Gallery',
'verbose_name_plural': 'Galleries',
},
bases=('pages.page', models.Model),
),
migrations.CreateModel(
name='GalleryImage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('_order', models.IntegerField(null=True, verbose_name='Order')),
('file', mezzanine.core.fields.FileField(max_length=200, verbose_name='File')),
('description', models.CharField(max_length=1000, verbose_name='Description', blank=True)),
('gallery', models.ForeignKey(related_name='images', to='galleries.Gallery')),
],
options={
'ordering': ('_order',),
'verbose_name': 'Image',
'verbose_name_plural': 'Images',
},
bases=(models.Model,),
),
]
| [
"[email protected]"
] | |
b08be16b6f55bbb29dd93651676a710322f99cdd | 2fcb5da42f0aff62c88189bd36fc5f61a40eb604 | /vardautomation/timeconv.py | 3b84b24deda8187b48a85d3ae7948559d45a7404 | [
"MIT"
] | permissive | tomato39/vardautomation | d45ec446a1cd06c2e7b7ec5378772953fa7b4caa | efa24d9420d6a6f732e8b0a846874a289a7cb095 | refs/heads/master | 2023-08-23T01:44:00.014196 | 2021-10-21T23:05:52 | 2021-10-21T23:09:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,647 | py | """Conversion time module"""
from fractions import Fraction
from .status import Status
class Convert:
"""Collection of methods to perform time conversion"""
@classmethod
def ts2f(cls, ts: str, fps: Fraction, /) -> int:
"""
Convert a timestamp hh:mm:ss.xxxx in number of frames
:param ts: Timestamp
:param fps: Framerate Per Second
:return: Frames
"""
s = cls.ts2seconds(ts)
f = cls.seconds2f(s, fps)
return f
@classmethod
def f2ts(cls, f: int, fps: Fraction, /, *, precision: int = 3) -> str:
"""
Convert frames in timestamp hh:mm:ss.xxxx
:param f: Frames
:param fps: Framerate Per Second
:param precision: Precision number, defaults to 3
:return: Timestamp
"""
s = cls.f2seconds(f, fps)
ts = cls.seconds2ts(s, precision=precision)
return ts
@classmethod
def seconds2ts(cls, s: float, /, *, precision: int = 3) -> str:
"""
Convert seconds in timestamp hh:mm:ss.xxx
:param s: Seconds
:param precision: Precision number, defaults to 3
:return: Timestamp
"""
m = s // 60
s %= 60
h = m // 60
m %= 60
return cls.composets(h, m, s, precision=precision)
@classmethod
def f2assts(cls, f: int, fps: Fraction, /) -> str:
"""
Convert frames to .ass timestamp hh:mm:ss.xx properly
by removing half of one frame per second of the specified framerate
:param f: Frames
:param fps: Framerate Per Second
:return: ASS timestamp
"""
s = cls.f2seconds(f, fps)
s -= fps ** -1 * 0.5
ts = cls.seconds2ts(max(0, s), precision=3)
return ts[:-1]
@classmethod
def assts2f(cls, assts: str, fps: Fraction, /) -> int:
"""
Convert .ass timestamp hh:mm:ss.xx to frames properly
by adding half of one frame per second of the specified framerate
:param assts: ASS timestamp
:param fps: Framerate Per Second
:return: Frames
"""
s = cls.ts2seconds(assts)
if s > 0:
s += fps ** -1 * 0.5
return cls.seconds2f(s, fps)
@staticmethod
def f2seconds(f: int, fps: Fraction, /) -> float:
"""
Convert frames to seconds
:param f: Frames
:param fps: Framerate Per Second
:return: Seconds
"""
if f == 0:
return 0.0
t = round(float(10 ** 9 * f * fps ** -1))
s = t / 10 ** 9
return s
@staticmethod
def ts2seconds(ts: str, /) -> float:
"""
Convert timestamp hh:mm:ss.xxxx to seconds
:param ts: Timestamp
:return: Seconds
"""
h, m, s = map(float, ts.split(':'))
return h * 3600 + m * 60 + s
@staticmethod
def seconds2f(s: float, fps: Fraction, /) -> int:
"""
Convert seconds to frames
:param s: Seconds
:param fps: Framerate Per Second
:return: Frames
"""
return round(s * fps)
@staticmethod
def samples2seconds(num_samples: int, sample_rate: int, /) -> float:
"""
Convert samples to seconds
:param num_samples: Samples
:param sample_rate: Playback sample rate
:return: Seconds
"""
return num_samples / sample_rate
@staticmethod
def seconds2samples(s: float, sample_rate: int, /) -> int:
"""
Convert seconds to samples
:param s: Seconds
:param sample_rate: Playback sample rate
:return: Samples
"""
return round(s * sample_rate)
@classmethod
def f2samples(cls, f: int, fps: Fraction, sample_rate: int) -> int:
"""
Convert frames to samples
:param f: Frames
:param fps: Framerate Per Second
:param sample_rate: Playback sample rate
:return: Samples
"""
s = cls.f2seconds(f, fps)
return cls.seconds2samples(s, sample_rate)
@classmethod
def samples2f(cls, num_samples: int, sample_rate: int, fps: Fraction) -> int:
"""
Convert sample to frames
:param num_samples: Samples
:param sample_rate: Playback sample rate
:param fps: Framerate Per Second
:return: Frame
"""
s = cls.samples2seconds(num_samples, sample_rate)
return cls.seconds2f(s, fps)
@staticmethod
def composets(h: float, m: float, s: float, /, *, precision: int = 3) -> str:
"""
Make a timestamp based on given hours, minutes and seconds
:param h: Hours
:param m: Minutes
:param s: Seconds
:param precision: Precision number, defaults to 3
:return: Timestamp
"""
if precision == 0:
out = f"{h:02.0f}:{m:02.0f}:{round(s):02}"
elif precision == 3:
out = f"{h:02.0f}:{m:02.0f}:{s:06.3f}"
elif precision == 6:
out = f"{h:02.0f}:{m:02.0f}:{s:09.6f}"
elif precision == 9:
out = f"{h:02.0f}:{m:02.0f}:{s:012.9f}"
else:
Status.fail(f'composets: the precision {precision} must be a multiple of 3 (including 0)')
return out
| [
"[email protected]"
] | |
c4cc3eae8ce8dc40427cfc6263c0d8d9207e33ce | e2590e0a78046a22131b69c76ebde21bf042cdd1 | /ABC201_300/ABC275/A.py | 6bc5a95d16891d1502a3adf5fbd2ff8aa0b3a6a3 | [] | no_license | masato-sso/AtCoderProblems | b8e23941d11881860dcf2942a5002a2b19b1f0c8 | fbc02e6b7f8c6583e5a4e5187463e0001fc5f4d8 | refs/heads/main | 2023-01-22T23:57:58.509585 | 2023-01-21T14:07:47 | 2023-01-21T14:07:47 | 170,867,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py |
N = int(input())
H = list(map(int, input().split()))
maxValue = max(H)
ans = 0
for idx,h in enumerate(H):
if(h == maxValue):
ans = idx + 1
break
print(ans) | [
"[email protected]"
] | |
a0d3caee1fbf6c2afadd6139c75f0fb247dbe328 | b24e45267a8d01b7d3584d062ac9441b01fd7b35 | /Usuario/.history/views_20191102195546.py | 879e6589a3c510e2404c8ff9b59bed87520c898f | [] | no_license | slalbertojesus/merixo-rest | 1707b198f31293ced38930a31ab524c0f9a6696c | 5c12790fd5bc7ec457baad07260ca26a8641785d | refs/heads/master | 2022-12-10T18:56:36.346159 | 2020-05-02T00:42:39 | 2020-05-02T00:42:39 | 212,175,889 | 0 | 0 | null | 2022-12-08T07:00:07 | 2019-10-01T18:56:45 | Python | UTF-8 | Python | false | false | 3,630 | py | from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework_simplejwt.tokens import RefreshToken
from .models import Usuario
from .serializers import UsuarioSerializer
SUCCESS = 'exito'
ERROR = 'error'
DELETE_SUCCESS = 'eliminado'
UPDATE_SUCCESS = 'actualizado'
CREATE_SUCCESS = 'creado'
@api_view(['GET', ])
def api_detail_usuario_view(request, identificador):
try:
usuario = Usuario.objects.get(identificador = identificador)
except Usuario.DoesNotExist:  # the exception lives on the model class, not on the (still unbound) instance
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = UsuarioSerializer(usuario)
return Response(serializer.data)
@api_view(['PUT',])
def api_update_usuario_view(request, identificador):
try:
usuario = Usuario.objects.get(identificador = identificador)
except Usuario.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'PUT':
serializer = UsuarioSerializer(usuario, data=request.data)
data = {}
if serializer.is_valid():
serializer.save()
data[SUCCESS] = UPDATE_SUCCESS
return Response(data=data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE',])
def api_delete_usuario_view(request, identificador):
try:
usuario = Usuario.objects.get(identificador=identificador)
except Usuario.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'DELETE':
operation = usuario.delete()
data = {}
if operation:
data[SUCCESS] = DELETE_SUCCESS
return Response(data=data)
@api_view(['POST',])
@permission_classes([AllowAny,])
def api_create_usuario_view(request):
if request.method == 'POST':
serializer = UsuarioSerializer(data=request.data)
data = {}
if serializer.is_valid():
usuario = serializer.save()
data['response'] = "se registró de forma exitosa"
data['nombre'] = usuario.nombre
data['usuario'] = usuario.usuario
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
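# A request body the endpoint above expects looks roughly like the following (field
# names taken from the serializer usage above; the exact schema is defined by
# UsuarioSerializer):
#   {"usuario": "jdoe", "nombre": "John Doe", "contraseña": "..."}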
@api_view(["POST"])
@permission_classes([AllowAny,])
def api_login(request):
usuario = request.data.get("usuario")
contraseña = request.data.get("contraseña")
if usuario is None or contraseña is None:
return Response({'error': 'No existen contraseña ni usuario'},
status=HTTP_400_BAD_REQUEST)
usuario = authenticate(usuario=usuario, contraseña=contraseña)
get_tokens_for_user(usuario)
return {
'refresh': str(token),
'access': str(token.access_token),
}
def for_user(cls, user):
"""
Returns an authorization token for the given user that will be provided
after authenticating the user's credentials.
"""
user_id = getattr(user, api_settings.USER_ID_FIELD)
if not isinstance(user_id, int):
user_id = str(user_id)
token = cls()
token[api_settings.USER_ID_CLAIM] = user_id
return token
refresh = RefreshToken.for_user(user)
def authenticate(usuario, contraseña):
usuario = Usuario.objects.get(usuario= usuario, contraseña=contraseña)
if not usuario:
raise serializers.ValidationError({'error': 'Usuario no existe'},
status=HTTP_404_NOT_FOUND)
return usuario | [
"[email protected]"
] | |
561487ce846747b6d7fb0034befaeceaa9bf589e | 4ae6e54a01e25d370929b49bbaa91c51b003d41a | /wwwroot/app/cgi-bin/AutograderBackEnd.py | a5ec7da5cc2a9cd932b912fdd77e998cb02ccbfb | [] | no_license | rdasxy/programming-autograder | 8197a827236dc5384f6f3ceeaf2fbadefdd5506c | f885c1cd37721e1cd0b3bf3b49cc44b9adb64d92 | refs/heads/master | 2021-01-22T05:33:28.971055 | 2012-12-27T21:53:24 | 2012-12-27T21:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,367 | py |
# 4th major iteration - refactoring to deal with changed authentication procedures
# and to deal with each problem in parallel.
import os, sys
import subprocess
import random
import string
import winprocess
import win32pipe
import win32file
import pickle
import autograde_utilities
import thread
import Queue
import time
import datetime
import smtplib
import collections
import zipfile
import autograder
def ArchiveResults(JobTriple):
''' Record this attempt in archive.
Gets 3-tuple: Job (itself a named tuple), result (string), error (string, possibly empty)
'''
D = dict()
D['UserID'] = JobTriple[0].UserID
D['CourseNum'] = JobTriple[0].CourseNum
D['ProblemNum'] = JobTriple[0].ProblemNum
D['ProblemID']= JobTriple[0].ProblemID
D['Timestamp'] = JobTriple[0].Timestamp
D['Files']= JobTriple[0].Files
D['Result'] = JobTriple[1]
Path = 'c:/users/public/archive'
Fname = JobTriple[0].UserID + JobTriple[0].CourseNum + "%04d"%JobTriple[0].ProblemID + str(JobTriple[0].Timestamp).replace(' ', '').replace(':','')
Fname = Fname +'.pkl'
Fullname = os.path.join(Path, Fname)
Zipname = os.path.join(Path, 'archive.zip')
F = open(Fullname, 'wb')
pickle.dump(D, F)
F.close()
Z = zipfile.ZipFile(Zipname, 'a', zipfile.ZIP_DEFLATED)
Z.write(Fullname, os.path.basename(Fullname))
Z.close()
os.remove(Fullname)
def EmailResults(AJob, Result, Error):
# includes code from: http://www.mkyong.com/python/how-do-send-email-in-python-via-smtplib/
# setup login information
#print "Emailing results."
prefix = AJob.UserID
if prefix in ('hareb', 'spatzs'):
suffix = '@umkc.edu'
else:
suffix = '@mail.umkc.edu'
Addy = prefix + suffix
gmail_acct = '[email protected]'
gmail_pwd = 'SaulAndBrian'
# build message
Body = "\nThis is an automatically generated email from the autograder. Do not reply to this address. "
Body += "Contact the course instructor if you have questions."
Body += "\nHere are the results from your submission for problem %s, %s:\n" % (AJob.ProblemNum, AJob.CourseNum)
Body += Result + '\n' + Error + '\n'
header = 'To:' + Addy + '\n' + 'From: ' + gmail_acct + '\n' + 'Subject:Autograder results \n'
msg = header + Body
# Now deal with the smtp server
smtpserver = smtplib.SMTP("smtp.gmail.com",587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo
smtpserver.login(gmail_acct, gmail_pwd)
#print header
smtpserver.sendmail(gmail_acct, Addy, msg)
#print 'done!'
smtpserver.close()
def PostResults(ResultQueue):
''' pull results from queue, deal w/ logging etc.
This function is called as a separate thread. It blocks waiting for things to
be added to the queue; if nothing is added, it blocks until the main process
dies after a 30-sec or so timeout, taking this thread with it.
Queue contains 3-tuples: Job (namedtuple), Result (str), ErrMsg (str, may be empty)'''
# collections.namedtuple(JobType, ['UserID', 'CourseNum', 'ProblemNum', 'ProblemID', 'Timestamp', 'Files'])
while not ResultQueue.empty():
#print "Posting results, line 90"
# TODO: Add code to save student's submission in archive.
NextJob = ResultQueue.get() # this blocks as long as necessary.
ArchiveResults(NextJob)
# NextJob[0].Files = None
autograder.ReportGradeStatus(NextJob[0], NextJob[1])
EmailResults(NextJob[0], NextJob[1], NextJob[2])
def Grade(JobList):
''' Called by the cron job -- gets a list of named tuples representing the pending jobs.
Processes each job in turn (the threaded version is commented out below), posts the
results, and finally cleans up the per-job sandbox directories.'''
ResultsQueue = Queue.Queue()
SandboxList = list()
while JobList:
Settings = dict()
ProblemDict = dict()
Job = JobList.pop(0)
if not Job.Files: # Student didn't turn anything in
    ResultsQueue.put( (Job, 'SubmissionError', 'No files submitted'))
    continue # nothing to grade, move on to the next pending job
Settings, ProblemDict = SetUpSubmission(Job)
if not Settings: # Can't set up the problem
    ResultsQueue.put( (Job, 'SystemError', "Can't set up problem; see administrator"))
    return # and we're out of here.
SandboxList.append(ProblemDict['SandboxDir']) # only record the sandbox once setup succeeded
# Otherwise paths are set up & sandbox is ready.
Settings['cs101'] = HandleSubmission
Settings['cs282'] = HandleMARSSubmission
try:
#IOFiles = ProblemDict['IOPairs']
ProblemDict['FileToRun']=os.path.join(ProblemDict['SandboxDir'], ProblemDict['Run'])
if 'ExtraFiles' in ProblemDict:
Extras = ProblemDict['ExtraFiles']
else:
Extras = []
except KeyError:
os.rmdir(ProblemDict['SandboxDir'])
ResultsQueue.put( (Job, 'SystemError', 'Misread configuration data; see administrator'))
return
#NextJob = JobList.pop(0)
## ReportGradeStatus(NextJob.UserID, NextJob.CourseNum, NextJob.ProblemNum,
## NextJob.Timestamp, 'Submitted')
try:
    FuncToRun = Settings[Job.CourseNum]
except KeyError:
    print "Course number not found, don't know which language to run."
    ResultsQueue.put( (Job, 'SystemError', 'Unknown course number; see administrator'))
    continue # no handler registered for this course, so there is nothing to run
FuncToRun(Job, Settings, ProblemDict, ResultsQueue)
#thread.start_new_thread(HandleSubmission, (Job, Settings, ProblemDict, ResultQueue))
# HandleSubmission will post results to queue. Start 1 thread to handle
# results by pulling them off queue & dealing with them.
PostResults(ResultsQueue)
#thread.start_new_thread(PostResults, (ResultQueue,))
#time.sleep(15) # which should be more than enough for everything to finish.
# When this function ends, all threads and the queue they're operating on
# go away. In the vast majority of cases, they're long since done anyway;
# the producer threads (HandleSubmission) are done and the consumer
# (PostResults) is waiting for results that will never come. But just in case
# something was left over & blocked, the end of function will clean them up.
for Dir in SandboxList:
try:
autograde_utilities.Cleanup(Dir)
os.rmdir(Dir)
except Exception, e: # if anything goes wrong, ignore it; utility script will fix later.
#print e
os.chdir('..')
try:
os.rmdir(Dir)
except Exception, e:
pass #print "Still didn't work.", e
def ReadSystemConfig():
try:
F = open('c:/autograder.ini')
Stuff = dict()
for line in F:
Setting = line.split('=')
if Setting[0]:
Key = Setting[0].strip()
Val=Setting[1].strip()
Stuff[Key] = Val
F.close()
except IOError:
pass
except KeyError:
return None
return Stuff
def ReadProblemINI(ProblemPath):
try:
F=open(os.path.join(ProblemPath, 'template.txt'))
except IOError:
return False
ProblemDict=dict()
for line in F:
if len(line) > 2:
thingy = line.split(':')
if thingy[0]:
Key = thingy[0].strip()
Val=thingy[1].strip()
ProblemDict[Key]=Val
F.close()
# Note: Some things might be lists. Convert them.
try:
SubmitList=[F.lower().strip() for F in ProblemDict['SubmissionFiles'].split()]
ProblemDict['SubmissionFiles']=SubmitList
except KeyError:
pass
try:
ExtraList=[F.lower().strip() for F in ProblemDict['ExtraFiles'].split()]
ExtraPath=os.path.join(ProblemPath, 'ExtraFiles')
Extras = [os.path.join(ExtraPath, F) for F in ExtraList]
ProblemDict['ExtraFiles']=Extras
except KeyError:
pass
try:
SubmitList=[F.lower().strip() for F in ProblemDict['IOPairs'].split()]
TupList = list()
while SubmitList:
try:
(i, o) = SubmitList[0], SubmitList[1]
SubmitList.pop(0)
SubmitList.pop(0)
except IndexError:
pass
else:
TupList.append((i, o))
ProblemDict['IOPairs']=TupList
except KeyError:
pass
try:
IOPath=ProblemDict['IOPath']
except KeyError:
IOPath=''
ProblemDict['IOPath'] = os.path.join(ProblemPath, IOPath)
return ProblemDict
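# A template.txt that ReadProblemINI above accepts looks roughly like this
# (the file names are illustrative, not taken from a real course):
#   Run: solution.py
#   SubmissionFiles: solution.py
#   IOPairs: input1.txt output1.txt input2.txt output2.txt
#   IOPath: io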
def SetUpSubmission(Job):
Settings = ReadSystemConfig()
if not Settings:
return False, "Can't read system configuration"
ProblemPath=os.path.join(Settings['ProblemPath'], Job.CourseNum, '%04d' % Job.ProblemID)
if not os.path.isdir(ProblemPath):
return False, "Can't find problem directory"
else:
Settings['ProblemPath'] = ProblemPath
ProblemDict=ReadProblemINI(ProblemPath)
if not ProblemDict:
return False, "Can't read problem configuration"
TimeStr = str(Job.Timestamp)
# Sandbox dir looks something like:
# Sandbox\abcxyz02072012-01-17120102030000\stuff goes here
# for problem 0207 submitted by student 'abcxyz' on 2012-01-17 at 12:01:02.030000 PM
# Timestamp is a datetime object, and the string version of it has characters
# that can't be part of a directory path. So fix it.
TempDir = Job.UserID + ('%04d' % Job.ProblemNum) + TimeStr
for ch in ' :.,':
TempDir = TempDir.replace(ch, '')
ProblemDict['SandboxDir'] = os.path.join(Settings['SandboxDir'], TempDir)
try:
os.mkdir(ProblemDict['SandboxDir'])
except WindowsError:
ProblemDict['SandboxDir'] = None
return False, "Can't configure problem."
return Settings, ProblemDict
def HandleSubmission(Job, Settings, ProblemDict, ResultsQueue):
''' handle the traffic-cop aspects of a submission.
Parameters:
Job : The job that we're about to process. a named tuple
ResultsQueue: The queue that we should post results to for later processing.
Actions:
For this problem, retrieve the list of system supplied files (if any) and list of (input,output) tuples.
Feed the HandleFile function the problem, submission, and single (i, o) pairs until either:
All input cases have been handled successfully; or
Any submission has returned anything other than 'Correct.'
If any case returned anything other than 'Correct':
Post this job, Status, ErrMsg to results queue.
Example: job, 'SyntaxError', traceback
or: job, 'OutputError', 'Excessive output detected.'
otherwise:
Post this job, 'Correct', '' to results queue
Returns: Nothing
'''
#InputDir = ProblemDict['IOPath']
# Now process each set of I/O files; continue until all done, or an error is hit.
for IOTuple in ProblemDict['IOPairs']:
if 'Extras' not in ProblemDict:
ProblemDict['Extras'] = None
Res, Err = HandleFile(Job,
os.path.join(ProblemDict['IOPath'], IOTuple[0]),
os.path.join(ProblemDict['IOPath'], IOTuple[1]),
ProblemDict)
if Res != 'Correct':
ResultsQueue.put((Job, Res, Err)) # Post results & exit early
#os.rmdir(ProblemDict['SandboxDir'])
return
# If we're here, then all files were processed correctly.
#autograde_utilities.ReportGradeStatus(StudentID, ProblemID, Res)
ResultsQueue.put( (Job, 'Correct', ''))
#os.rmdir(ProblemDict['SandboxDir'])
return
def HandleMARSSubmission(Job, Settings, ProblemDict, ResultsQueue):
'''
Process one student's submission on one set of input data using MARS.
Parameters:
Job: The named tuple containing, among other things, the files submitted by the student and their contents.
Settings: The system configuration dictionary (paths plus the per-course handlers).
ProblemDict: The problem configuration read from template.txt; 'Run' names the ONE
MARS assembly file to execute, 'IOPairs'/'IOPath' give the test data, and the
optional 'Extras' list holds system-supplied support files.
ResultsQueue: The results queue passed in by Grade (unused here; results are returned instead).
Returns:
tuple of strings (Res, Err). Res is a brief description ('Correct',
'Runtime exceeded', etc), and Err is an error message (possibly empty
string).
'''
# set up some labels for later (exit codes)
ExitMsg = {1:'Translation Error', 2:'Time Limit Exceeded', 3:'Windows Error', \
4:'Excessive Output', 5:'Submission Error', 6:'Assembly Error',\
7:'Runtime Error'}
# Make sure we've got everything we're expecting; if we don't, skip all this.
ExpectedFiles = [Filename for (Filename, contents) in Job.Files]
try:
ExpectedFiles += ProblemDict['Extras'] # SystemSuppliedFileList
except (TypeError, KeyError): # if there was no list of other needed files.
pass
Expected = [os.path.basename(name).lower().strip() for name in ExpectedFiles]
if os.path.basename(ProblemDict['Run']).lower().strip() not in Expected:
Res = "File " + ProblemDict['Run'] + " was expected, but not found."
Err = ExitMsg[5]
return Err, Res
# even if we're going ahead, we can free up some memory
del(ExpectedFiles)
del(Expected)
# Create working (temporary) directory, copy files into it
ProblemDict['WritePath'] = os.path.dirname(ProblemDict['FileToRun']) #FileNameToRun)
try:
for f in Job.Files:
Fname = f[0]
Code = f[1]
open(ProblemDict['WritePath']+'/'+os.path.basename(Fname),'w').write(Code)
try:
if ProblemDict['Extras']: # SystemSuppliedFileList:
for f in ProblemDict['Extras']:
Code = open(f).read()
open(ProblemDict['WritePath']+'/'+os.path.basename(f),'w').write(Code)
except KeyError:
pass
except IOError:
return ('SystemError', 'Contact Administrator or Instructor')
# Setup I/O for the program we're testing.  This handler receives the whole ProblemDict,
# so take the input/expected-output pair from the problem configuration (assumption:
# MARS problems are checked against the first pair listed in ProblemDict['IOPairs']).
InputFileName, CorrectOutputFileName = [os.path.join(ProblemDict['IOPath'], name) for name in ProblemDict['IOPairs'][0]]
Input = open(InputFileName).read()
os.chdir(ProblemDict['WritePath'])
open(os.path.join(ProblemDict['WritePath'], 'input.txt'),'w').write(Input)
In = open('input.txt')
Out = open('output.txt', 'w')
#Err = open('error.txt', 'w')
# Run that sucker!
try:
ExitCode = winprocess.run('java -jar c:\\Mars.jar nc p sm ae6 se7 %s' % ProblemDict['Run'], stdin=In, \
stdout=Out, mSec=5000, desktop='')
except WindowsError, msg:
    if 'timeout exceeded' in str(msg):
        ExitCode = 2 # time out
    else:
        ExitCode = 3 # some other Windows error (ExitCode is still unbound here, so it cannot be inspected)
# Exit code of 0 indicates no error, as usual.
# Exit code 6 indicates assembly error
# Exit code 7 indicates runtime error
#Done with files.
In.close()
Out.close()
#Err.close()
# Grab output
if os.path.getsize('output.txt') < 5.0e6:
Out = open('output.txt').read()
else: # more than 5 megabytes output, something's wrong
ExitCode = 4 # so set error flag
Out = '' # & set Out to a safe value, but don't touch file.
# grab error message if any.
#Err = open('error.txt').read()
# Cleanup temporary directory
autograde_utilities.Cleanup(ProblemDict['WritePath'])
#os.chdir(StartPath)
# os.rmdir(WritePath)
# Check output for validity.
Correct = str(open(CorrectOutputFileName).read())
Out = Out.replace('\r', '')
Correct = Correct.replace('\r', '')
try:
Result = ExitMsg[ExitCode]
except KeyError:
Result = autograde_utilities.CompareWithFormatting(Correct, Out)
return Result, ''
def HandleFile(Job, InputFileName, CorrectOutputFileName, ProblemDict): #FileNameToRun, SystemSuppliedFileList=None):
'''
Process one student's submission on one set of input data.
Parameters:
Job: The named tuple containing, among other things, the files submitted by the student and their contents.
InputFileName: The name (including path if needed) of the ONE file with sample input for this test.
CorrectOutputFileName: The name (including path if needed) of the ONE file with correct output for
the specified input.
ProblemDict: The problem configuration read from template.txt; 'Run' names the ONE
file that is executed to test the student's code (it must be present in Job.Files
or in the optional 'Extras' list of system-supplied support files such as class
files and driver programs).
Returns:
tuple of strings (Res, Err). Res is a brief description ('Correct',
'Runtime exceeded', etc), and Err is an error message (possibly empty
string).
'''
# set up some labels for later (exit codes)
ExitMsg = {1:'Translation Error', 2:'Time Limit Exceeded', 3:'Windows Error', \
4:'Excessive Output', 5:'Submission Error'}
# Make sure we've got everything we're expecting; if we don't, skip all this.
ExpectedFiles = [Filename for (Filename, contents) in Job.Files]
try:
ExpectedFiles += ProblemDict['Extras'] # SystemSuppliedFileList
except (TypeError, KeyError): # if there was no list of other needed files.
pass
Expected = [os.path.basename(name).lower().strip() for name in ExpectedFiles]
if os.path.basename(ProblemDict['Run']).lower().strip() not in Expected:
Res = "File " + ProblemDict['Run'] + " was expected, but not found."
Err = ExitMsg[5]
return Err, Res
# even if we're going ahead, we can free up some memory
del(ExpectedFiles)
del(Expected)
# Create working (temporary) directory, copy files into it
ProblemDict['WritePath'] = os.path.dirname(ProblemDict['FileToRun']) #FileNameToRun)
try:
for f in Job.Files:
Fname = f[0]
Code = f[1]
open(ProblemDict['WritePath']+'/'+os.path.basename(Fname),'w').write(Code)
if ProblemDict['Extras']: # SystemSuppliedFileList:
for f in ProblemDict['Extras']:
Code = open(f).read()
open(ProblemDict['WritePath']+'/'+os.path.basename(f),'w').write(Code)
except IOError:
return ('SystemError', 'Contact Administrator or Instructor')
# Setup I/O for program we're testing.
Input = open(InputFileName).read()
os.chdir(ProblemDict['WritePath'])
open(os.path.join(ProblemDict['WritePath'], 'input.txt'),'w').write(Input)
In = open('input.txt')
Out = open('output.txt', 'w')
Err = open('error.txt', 'w')
# Run that sucker!
try:
ExitCode = winprocess.run('python %s' % ProblemDict['Run'], stdin=In, \
stdout=Out, stderr=Err, mSec=5000, desktop='')
except WindowsError, msg:
if 'timeout exceeded' in str(msg):
ExitCode = 2 # time out
else:
ExitCode = 3 # some other Windows error
# Exit code of 0 indicates no error, as usual.
#Done with files.
In.close()
Out.close()
Err.close()
# Grab output
if os.path.getsize('output.txt') < 5.0e6:
Out = open('output.txt').read()
else: # more than 5 megabytes output, something's wrong
ExitCode = 4 # so set error flag
Out = '' # & set Out to a safe value, but don't touch file.
# grab error message if any.
Err = open('error.txt').read()
# Cleanup temporary directory
autograde_utilities.Cleanup(ProblemDict['WritePath'])
#os.chdir(StartPath)
# os.rmdir(WritePath)
# Check output for validity.
Correct = str(open(CorrectOutputFileName).read())
Out = Out.replace('\r', '')
Correct = Correct.replace('\r', '')
try:
Result = ExitMsg[ExitCode]
except KeyError:
Result = autograde_utilities.CompareWithFormatting(Correct, Out)
return Result, Err
def RunTest():
JobType = collections.namedtuple('JobType', ['UserID', 'CourseNum', 'ProblemNum', 'ProblemID', 'Timestamp', 'Files'])
JobList = list()
UserID = 'hareb'
CourseNum="CS101"
ProblemNum='1'
ProblemID='0102'
Timestamp=str(time.localtime())
f = open('c:/users/public/problems/cs101/0102/example0102.py').read()
Files = list()
Files.append( ('solution.py', f))
Job = JobType(UserID, CourseNum, ProblemNum, ProblemID, Timestamp, Files)
JobList.append(Job)
f = open('c:/users/public/problems/cs101/0103/example0103.py').read()
Files = list()
Files.append( ('example0103.py', f) )
Timestamp = str(time.localtime())
Job = JobType(UserID, CourseNum, '002', '0103', Timestamp, Files)
JobList.append(Job)
Grade( JobList )
# print "Done."
if __name__ == '__main__':
connection = autograder.getConnection()
Cursor = connection.cursor()
cmd = """UPDATE Jobs SET Status = 'pending' WHERE SequenceNumber = 21"""
Cursor.execute(cmd)
connection.commit()
connection.close()
Jobs = autograder.getJobs()
Grade(Jobs)
#RunTest()
##
## OK, Res, Err = HandleSubmission(1, '0102', ['example0102.py'])
## print "Your result:", Res
## if Err:
## print "Error message:\n", Err
##
## if OK:
## print '\tNeed to update database if this is first success on this problem.'
## else:
## print '\tNeed to update database if this is first attempt on this problem.'
##
| [
"[email protected]"
] | |
ae8caa3e5755b5b934074980647e9b8a044a2e9a | 2d930aadf19b2ad6ea49725099d2f37475cd57f8 | /test/functional/wallet-dump.py | c3f723a19bbd46584fb33bce6dba37487abcdcbe | [
"MIT"
] | permissive | stratton-oakcoin/oakcoin | ea83774c9f6ea64adb8832770e6219ffb31edef6 | fe53193a50bd3674211448f1dcc39c6f9f042bb2 | refs/heads/master | 2021-01-20T13:22:05.877005 | 2017-05-07T10:09:57 | 2017-05-07T10:09:57 | 90,477,972 | 1 | 2 | null | 2017-05-07T10:09:57 | 2017-05-06T16:58:05 | C++ | UTF-8 | Python | false | false | 4,770 | py | #!/usr/bin/env python3
# Copyright (c) 2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the dumpwallet RPC."""
from test_framework.test_framework import OakcoinTestFramework
from test_framework.util import (start_nodes, start_node, assert_equal, oakcoind_processes)
def read_dump(file_name, addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
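A (shortened) dump line that this parser understands looks like:
<WIF-private-key> 1970-01-01T00:00:01Z label= # addr=<address> hdkeypath=m/0'/0'/0'
where the third space-separated token carries the key type and the text after
" addr=" carries the address and, optionally, the HD key path.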
"""
with open(file_name, encoding='utf8') as inputfile:
found_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
# only read non comment lines
if line[0] != "#" and len(line) > 10:
# split out some data
key_label, comment = line.split("#")
# key = key_label.split(" ")[0]
keytype = key_label.split(" ")[2]
if len(comment) > 1:
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]
keypath = None
if keytype == "inactivehdmaster=1":
# ensure the old master is still available
assert(hd_master_addr_old == addr)
elif keytype == "hdmaster=1":
# ensure we have generated a new hd master key
assert(hd_master_addr_old != addr)
hd_master_addr_ret = addr
else:
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
# count key types
for addrObj in addrs:
if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
found_addr += 1
break
elif keytype == "change=1":
found_addr_chg += 1
break
elif keytype == "reserve=1":
found_addr_rsv += 1
break
return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(OakcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [["-keypool=90"]]
def setup_network(self, split=False):
# Use 1 minute timeout because the initial getnewaddress RPC can take
# longer than the default 30 seconds due to an expensive
# CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
# the test often takes even longer.
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60)
def run_test (self):
tmpdir = self.options.tmpdir
# generate 20 addresses to compare against the dump
test_addr_count = 20
addrs = []
for i in range(0,test_addr_count):
addr = self.nodes[0].getnewaddress()
vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
addrs.append(vaddr)
# Should be a no-op:
self.nodes[0].keypoolrefill()
# dump unencrypted wallet
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
assert_equal(found_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_addr_chg, 50) # 50 blocks were mined
assert_equal(found_addr_rsv, 90*2) # 90 keys plus 100% internal keys
#encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
oakcoind_processes[0].wait()
self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
self.nodes[0].walletpassphrase('test', 10)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
assert_equal(found_addr, test_addr_count)
assert_equal(found_addr_chg, 90*2 + 50) # old reserve keys are marked as change now
assert_equal(found_addr_rsv, 90*2)
if __name__ == '__main__':
WalletDumpTest().main ()
| [
"[email protected]"
] | |
4cf799ae31dfe4802a0d9299a2f9c9087c10afe6 | 0add969034a82912bc6e19abc427abe883ee65bb | /theta_en_time_polar.py | a9683111bde6bafb250a54492723f599975e5624 | [] | no_license | Michael-Gong/New_LPI_python_script | eefd162fdbbc3c614c66e2b157ea5296e3bc8492 | 9de109c6f19aa60bdeaf102e9a1ec0baff5669ad | refs/heads/master | 2020-03-28T16:06:09.631550 | 2020-02-01T08:21:17 | 2020-02-01T08:21:17 | 148,659,608 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,511 | py | #%matplotlib inline
#import sdf
import matplotlib
import matplotlib as mpl
#mpl.style.use('https://raw.githubusercontent.com/Michael-Gong/DLA_project/master/style')
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
from optparse import OptionParser
import os
from mpl_toolkits.mplot3d import Axes3D
import random
from mpl_toolkits import mplot3d
from matplotlib import rc
import matplotlib.transforms as mtransforms
import sys
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
font = {'family' : 'monospace',
'color' : 'black',
'weight' : 'normal',
'size' : 28,
}
font2 = {'family' : 'monospace',
'color' : 'black',
'weight' : 'normal',
'size' : 15,
}
font_size = 28
font_size_2 = 15
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
upper = matplotlib.cm.jet(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_jet = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
upper = matplotlib.cm.viridis(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_viridis = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
upper = matplotlib.cm.rainbow(np.arange(256))
lower = np.ones((int(256/4),4))
for i in range(3):
lower[:,i] = np.linspace(1, upper[0,i], lower.shape[0])
cmap = np.vstack(( lower, upper ))
mycolor_rainbow = matplotlib.colors.ListedColormap(cmap, name='myColorMap', N=cmap.shape[0])
def pxpy_to_energy(gamma, weight):
binsize = 200
en_grid = np.linspace(50,19950,200)
en_bin = np.linspace(0,20000.0,201)
en_value = np.zeros_like(en_grid)
for i in range(binsize):
# if i == binsize-1:
# en_value[i] = sum(weight[en_bin[i]<=gamma])
# else:
en_value[i] = sum(weight[ (en_bin[i]<=gamma) & (gamma<en_bin[i+1]) ])
return (en_grid, en_value)
def theta_to_grid(theta, weight):
binsize = 240
theta_grid = np.linspace(-119.5,119.5,240)
theta_bin = np.linspace(-120,120,241)
theta_value = np.zeros_like(theta_grid)
for i in range(binsize):
# if i == binsize-1:
# en_value[i] = sum(weight[en_bin[i]<=gamma])
# else:
theta_value[i] = sum(weight[ (theta_bin[i]<=theta) & (theta<theta_bin[i+1]) ])
return (theta_grid, theta_value)
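# Both helpers above are hand-rolled weighted histograms; np.histogram with the same
# bin edges and weights gives essentially the same result in one call, e.g.
#   en_value, _ = np.histogram(gamma, bins=np.linspace(0, 20000.0, 201), weights=weight)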
if __name__ == "__main__":
part_number = 50000
from_path = './p50000_no_T150/'
nsteps = int(sum(1 for line in open(from_path+'t_tot_s.txt'))/part_number)
ntheta = 270
ngg = 120
from_path_list = ['./p50000_no_T150/','./p50000_rr_T150/','./p50000_qe_T150/']
#from_path_list = ['./Data_qe_T500_p50000_try/']
for i in range(np.size(from_path_list)):
from_path = from_path_list[i] #'./Data_qe_T050_p50000/'
to_path = from_path
t0 = np.loadtxt(from_path+'t_tot_s.txt')/2/np.pi
px0 = np.loadtxt(from_path+'px_tot_s.txt')
py0 = np.loadtxt(from_path+'py_tot_s.txt')
t0 = np.reshape(t0,(part_number,nsteps))
px0 = np.reshape(px0,(part_number,nsteps))
py0 = np.reshape(py0,(part_number,nsteps))
gg0 = (px0**2+py0**2+1)**0.5*0.51e-3
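# gg0 is the particle energy in GeV: the Lorentz factor sqrt(px^2 + py^2 + 1)
# (momenta normalised to m_e*c) multiplied by the electron rest energy 0.511 MeV ~ 0.51e-3 GeV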
ww0 = np.zeros_like(gg0)+1
ww0 = np.zeros_like(gg0)+gg0
theta0 = np.arctan2(py0,px0)
theta_edges = np.linspace(-np.pi,np.pi, ntheta +1)
gg_edges = np.linspace(0.1, 6, ngg +1)
theta_edges_1 = np.linspace(-np.pi,np.pi,ntheta)
gg_edges_1 = np.linspace(0.1, 6, ngg)
for n in range(np.size(t0[0,:])):
H, _, _ = np.histogram2d(gg0[:,n], theta0[:,n], [gg_edges, theta_edges], weights=gg0[:,n])
print('Max H:',np.max(H))
Theta, R = np.meshgrid(theta_edges_1,gg_edges_1)
H_temp = np.sum(H[:,:]*R,0)
print('averaged |theta|=',np.sum(H_temp*abs(theta_edges_1))/np.sum(H_temp)/np.pi*180)
fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection='polar'))
ax.set_facecolor('whitesmoke')
levels = np.logspace(1,5, 101)
H[H<0.01] = np.nan
img=ax.pcolormesh(Theta, R, H, norm=colors.LogNorm(vmin=0.01, vmax=1e3), cmap='viridis')
# cax = fig.add_axes([0.68,0.97,0.25,0.02])
# cbar=fig.colorbar(img,cax=cax, ticks=[1e3,1e5],orientation='horizontal')
# cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(), fontsize=font_size_2)
# cbar.set_label(r'dI/d$\theta$dE [A.U.]',fontdict=font2)
# ax.tick_params(axis="y", pad=25)
ax.tick_params(axis="x", pad=10)
# ax.set_xticks([])
if (i%3 != 2):
ax.set_xticklabels([])
#ax.set_xlim(10,50)
#ax.set_ylim(0.,1.)
ax.set_xlabel(r'$\theta\ [^o]$',fontdict=font)
# ax.set_rlim(1e-1,1e3)
# ax.set_rmax(1e3)
l_r = np.array([0,1,2,3])
ax.set_rticks(l_r+1)
ax.set_yticklabels([])
# ax.set_yticklabels(['$10^%d$' % x for x in (l_r+1)])
ax.set_rlim(0, 6)
ax.set_rlabel_position(90)
# ax.set_rscale('log')
# ax.set_rscale('log')
# ax.set_thetamin(-90)
# ax.set_thetamax(90)
# ax.set_yticklabels([0.1,1,10,100,1000])
ax.set_xticklabels([0,90,180,270])
#ax.set_theta_zero_location('N')
# ax.set_ylabel(r'$\theta\ [^o]$',fontdict=font)
ax.tick_params(axis='x',labelsize=font_size)
ax.tick_params(axis='y',labelsize=font_size_2)
#ax.set_title('proton_angular_time='+str(time1), va='bottom', y=1., fontsize=20)
# plt.text(-100,650,' t = '++' fs',fontdict=font)
ax.grid(True,linestyle='--',linewidth=1.5,color='grey')
#plt.pcolormesh(x, y, ex.T, norm=mpl.colors.Normalize(vmin=0,vmax=100,clip=True), cmap=cm.cubehelix_r)
# plt.axis([x.min(), x.max(), y.min(), y.max()])
#### manifesting colorbar, changing label and axis properties ####
# cbar=plt.colorbar(pad=0.01)#ticks=[np.min(ex), -eee/2, 0, eee/2, np.min()])
# cbar.ax.set_yticklabels(cbar.ax.get_yticklabels(), fontsize=font_size)
# cbar.set_label('dN/dE [A.U.]',fontdict=font)
# a0=200.0
# alpha=np.linspace(-3.5,0.5,501)
# plt.xlabel(r'$\theta$'+' [degree]',fontdict=font)
# plt.ylabel('time [fs]',fontdict=font)
# plt.xticks([-135,-90,-45,0,45,90,135],fontsize=font_size);
#plt.yticks([0,500,1000,1500],fontsize=font_size);
# plt.title(r'$dN/d\theta$'+' for no RR', fontsize=font_size)
# plt.xlim(-120,120)
# plt.ylim(0,1650)
#plt.title('electron at y='+str(round(y[n,0]/2/np.pi,4)),fontdict=font)
plt.subplots_adjust(top=0.90, bottom=0.11, left=0.1, right=0.93, hspace=0.10, wspace=0.05)
fig = plt.gcf()
fig.set_size_inches(6., 6.)
#fig.set_size_inches(5, 4.5)
fig.savefig(to_path+'theta_en_dist_'+to_path[7:-1]+'_'+str(n).zfill(4)+'.png',format='png',dpi=160)
plt.close("all")
| [
"[email protected]"
] | |
373f9f9cd537df8df9fb85fee9220607f78f2be6 | de5adea6b67660bfc45150ee56b6cf4957c8c4e7 | /main_app/migrations/0001_initial.py | f522eb7c2263895a61cc3153af186e867e0d5fdf | [] | no_license | arthuroe/treasure_gram | 70049a25009318d947488dea28505f65816d9d84 | 5ce93ed21284fee17640b15546011848de3115ac | refs/heads/develop | 2020-03-18T02:16:19.413381 | 2018-05-23T17:18:58 | 2018-05-23T17:24:16 | 134,182,468 | 0 | 0 | null | 2018-05-28T18:52:48 | 2018-05-20T20:02:49 | Python | UTF-8 | Python | false | false | 824 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-20 21:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Treasure',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('value', models.DecimalField(decimal_places=2, max_digits=10)),
('materials', models.CharField(max_length=100)),
('location', models.CharField(max_length=100)),
('img_url', models.CharField(max_length=100)),
],
),
]
| [
"[email protected]"
] | |
3b1a469d9c82b2869b62462652c2a0c924e3bb31 | 470e0a9dc07edfe13ca68f2a1b6d60d0e395e095 | /3-2.py | b67172d7abbc097ec46a4caa894c73eba80c02c4 | [] | no_license | mj08021/ThisIsCodingTestforGetaJob | 77ce8edab2bd855db9b96597982f58251d0bd31e | ad98b368956937065c6c396b2806351a4eaf12a2 | refs/heads/main | 2023-04-28T10:51:02.012344 | 2021-05-16T05:51:58 | 2021-05-16T05:51:58 | 316,853,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # N, M, K를 공백으로 구분하여 입력받기
n, m, k = map(int, input().split())
# Read the N numbers, separated by spaces
data = list(map(int, input().split()))
data.sort() # sort the input numbers
first = data[n - 1] # largest number
second = data[n - 2] # second-largest number
# Count how many times the largest number gets added
count = int(m / (k + 1)) * k
count += m % (k + 1)
result = 0
result += (count) * first # add the largest number
result += (m - count) * second # add the second-largest number
print(result) # print the final answer
# ex) input
# 5 8 3
# 2 4 5 4 6 | [
"[email protected]"
] | |
f2a7ee60c707d01abd0cb97f85cf647ce9ebf4e3 | a6df74bc7c139734bd9ce9f48d51e08fdc7d7efb | /article/migrations/0006_auto_20210311_1721.py | 116d4f2900f9f0f393ad9eb58894d557a6c11b5c | [] | no_license | Erlan1998/python_group_7_homework_68_Erlan_Kurbanaliev | 5a7f210e51f1998e5d52cdeb42538f2786af3f9f | fdc92be2c5187c78fecdc713f58e0e3e9fc62cb1 | refs/heads/master | 2023-05-03T17:01:59.066596 | 2021-05-26T13:28:41 | 2021-05-26T13:28:41 | 368,165,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # Generated by Django 3.1.6 on 2021-03-11 17:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('article', '0005_auto_20210311_1319'),
]
operations = [
migrations.RenameField(
model_name='article',
old_name='tags',
new_name='tags_old',
),
]
| [
"[email protected]"
] | |
6fe7640c64822df4cca889a856f9099d33231595 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02860/s554783475.py | ba781c1a512917a311a200fc59b2e495d4dab5c5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | n = int(input())
s = input()
if (n%2 == 1):
print("No")
else:
c = 0
for i in range(int(n/2)):
if (s[i] != s[i + int(n/2)]):
c = 1
if (c == 0):
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
866fcd777ed57198ecc587fa85d3a71e6974ea99 | 9d1491368c5e87760131ba27d252ee2d10620433 | /gammapy/spectrum/powerlaw.py | 39edaeca1329962422682f6d153c6cf79d653ff1 | [
"BSD-3-Clause"
] | permissive | cnachi/gammapy | f9295306a8e81d0b7f4d2111b3fa3679a78da3f7 | 3d3fc38c111d2f490d984082750f8003580fe06c | refs/heads/master | 2021-01-20T23:37:59.409914 | 2016-06-09T08:36:33 | 2016-06-09T08:36:33 | 60,764,807 | 0 | 0 | null | 2016-06-09T09:55:54 | 2016-06-09T09:55:54 | null | UTF-8 | Python | false | false | 6,540 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Power law spectrum helper functions.
Convert differential and integral fluxes with error propagation.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
__all__ = [
'power_law_evaluate',
'power_law_pivot_energy',
'df_over_f',
'power_law_flux',
'power_law_integral_flux',
'g_from_f',
'g_from_points',
'I_from_points',
'f_from_points',
'f_with_err',
'I_with_err',
'compatibility',
]
E_INF = 1e10 # practically infinitely high flux
g_DEFAULT = 2
def power_law_evaluate(energy, norm, gamma, energy_ref):
r"""Differential flux at a given energy.
.. math:: f(energy) = N (E / E_0) ^ - \Gamma
with norm ``N``, energy ``E``, reference energy ``E0`` and spectral index :math:`\Gamma`.
Parameters
----------
energy : array_like
Energy at which to compute the differential flux
gamma : array_like
Power law spectral index
"""
return norm * (energy / energy_ref) ** (-gamma)
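# Hedged usage sketch (illustrative values, not part of the original module):
# with norm 3.8e-11 at reference energy 1 and spectral index 2.6, the flux at
# energy 10 is suppressed by a factor 10**-2.6:
# >>> power_law_evaluate(energy=10.0, norm=3.8e-11, gamma=2.6, energy_ref=1.0)
# ~9.5e-14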
def power_law_pivot_energy(energy_ref, f0, d_gamma, cov):
"""Compute pivot (a.k.a. decorrelation) energy.
Defined as smallest df / f.
Reference: http://arxiv.org/pdf/0910.4881
"""
pivot_energy = energy_ref * np.exp(cov / (f0 * d_gamma ** 2))
return pivot_energy
def df_over_f(e, e0, f0, df0, dg, cov):
"""Compute relative flux error at any given energy.
Used to draw butterflies.
Reference: http://arxiv.org/pdf/0910.4881 Equation (1)
"""
term1 = (df0 / f0) ** 2
term2 = 2 * cov / f0 * np.log(e / e0)
term3 = (dg * np.log(e / e0)) ** 2
return np.sqrt(term1 - term2 + term3)
def _conversion_factor(g, e, e1, e2):
"""Conversion factor between differential and integral flux."""
# In gamma-ray astronomy only falling power-laws are used.
# Here we force this, i.e. give "correct" input even if the
# user gives a spectral index with an incorrect sign.
g = np.abs(g)
term1 = e / (-g + 1)
term2 = (e2 / e) ** (-g + 1) - (e1 / e) ** (-g + 1)
return term1 * term2
def power_law_flux(I=1, g=g_DEFAULT, e=1, e1=1, e2=E_INF):
"""Compute differential flux for a given integral flux.
Parameters
----------
I : array_like
Integral flux in ``energy_min``, ``energy_max`` band
g : array_like
Power law spectral index
e : array_like
Energy at which to compute the differential flux
e1 : array_like
Energy band minimum
e2 : array_like
Energy band maximum
Returns
-------
flux : `numpy.array`
Differential flux at ``energy``.
"""
return I / _conversion_factor(g, e, e1, e2)
def power_law_integral_flux(f=1, g=g_DEFAULT, e=1, e1=1, e2=E_INF):
"""Compute integral flux for a given differential flux.
Parameters
----------
f : array_like
Differential flux at ``energy``
g : array_like
Power law spectral index
e : array_like
Energy at which the differential flux is given
e1 : array_like
Energy band minimum
e2 : array_like
Energy band maximum
Returns
-------
flux : `numpy.array`
Integral flux in ``energy_min``, ``energy_max`` band
"""
return f * _conversion_factor(g, e, e1, e2)
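# Hedged example (arbitrary values): the two helpers above are exact inverses
# of each other for the same (g, e, e1, e2), since both go through
# _conversion_factor:
# >>> I = power_law_integral_flux(f=1e-11, g=2.5, e=1, e1=1, e2=10)
# >>> power_law_flux(I=I, g=2.5, e=1, e1=1, e2=10)
# 1e-11  (up to float rounding)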
def g_from_f(e, f, de=1):
"""Spectral index at a given energy e for a given function f(e)"""
e1, e2 = e, e + de
f1, f2 = f(e1), f(e2)
return g_from_points(e1, e2, f1, f2)
def g_from_points(e1, e2, f1, f2):
"""Spectral index for two given differential flux points"""
return -np.log(f2 / f1) / np.log(e2 / e1)
def I_from_points(e1, e2, f1, f2):
"""Integral flux in energy bin for power law"""
g = g_from_points(e1, e2, f1, f2)
pl_int_flux = (f1 * e1 / (-g + 1) *
((e2 / e1) ** (-g + 1) - 1))
return pl_int_flux
def f_from_points(e1, e2, f1, f2, e):
"""Linear interpolation"""
e1 = np.asarray(e1, float)
e2 = np.asarray(e2, float)
f1 = np.asarray(f1, float)
f2 = np.asarray(f2, float)
e = np.asarray(e, float)
logdy = np.log(f2 / f1)
logdx = np.log(e2 / e1)
logy = np.log(f1) + np.log(e / e1) * (logdy / logdx)
return np.exp(logy)
def f_with_err(I_val=1, I_err=0, g_val=g_DEFAULT, g_err=0,
e=1, e1=1, e2=E_INF):
"""Wrapper for f so the user doesn't have to know about
the uncertainties module"""
from uncertainties import unumpy
I = unumpy.uarray(I_val, I_err)
g = unumpy.uarray(g_val, g_err)
_f = power_law_flux(I, g, e, e1, e2)
f_val = unumpy.nominal_values(_f)
f_err = unumpy.std_devs(_f)
return f_val, f_err
def I_with_err(f_val=1, f_err=0, g_val=g_DEFAULT, g_err=0,
e=1, e1=1, e2=E_INF):
"""Wrapper for f so the user doesn't have to know about
the uncertainties module"""
from uncertainties import unumpy
f = unumpy.uarray(f_val, f_err)
g = unumpy.uarray(g_val, g_err)
_I = power_law_integral_flux(f, g, e, e1, e2)
I_val = unumpy.nominal_values(_I)
I_err = unumpy.std_devs(_I)
return I_val, I_err
def compatibility(par_low, par_high):
"""Quantify spectral compatibility of power-law
measurements in two energy bands.
Reference: 2008ApJ...679.1299F Equation (2)
Compute spectral compatibility parameters for the
situation where two power laws were measured in a low
and a high spectral energy band.
par_low and par_high are the measured parameters,
which must be lists in the following order:
e, f, f_err, g, g_err
where e is the pivot energy, f is the flux density
and g the spectral index
"""
    # Unpack power-law parameters
e_high, f_high, f_err_high, g_high, g_err_high = par_high
e_low, f_low, f_err_low, g_low, g_err_low = par_low
log_delta_e = np.log10(e_high) - np.log10(e_low)
log_delta_f = np.log10(f_high) - np.log10(f_low)
# g_match is the index obtained by connecting the two points
# with a power law, i.e. a straight line in the log_e, log_f plot
g_match = -log_delta_f / log_delta_e
    # sigma is the number of standard deviations by which the match index
    # differs from the measured index in one band.
# (see Funk et al. (2008ApJ...679.1299F) eqn. 2)
sigma_low = (g_match - g_low) / g_err_low
sigma_high = (g_match - g_high) / g_err_high
sigma_comb = np.sqrt(sigma_low ** 2 + sigma_high ** 2)
return g_match, sigma_low, sigma_high, sigma_comb
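# Worked illustration of compatibility() with made-up numbers: for pivot
# energies 1 and 10 (same units) and flux densities 1e-11 and 2e-13, the
# connecting index is g_match = -log10(2e-13 / 1e-11) / log10(10 / 1) ~= 1.70;
# sigma_low and sigma_high then express how far 1.70 sits from each band's
# fitted index, in units of that band's index error.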
| [
"[email protected]"
] | |
ddcaf6e28b533963df17ac8f9f13f4ce3c77631f | 1581f1d66d6835b2c271295e3251c2dde239fec8 | /payment_gateway/pg_utils.py | 6036c701e7036016bef878326b20e168433fab8a | [] | no_license | abinash-kumar/pythod | 527659e3bdd161f9abcaaa9182dfe58044b3ff66 | 1469dc0cd9d6d72b2fe2e69f99542e470bea807b | refs/heads/master | 2023-01-30T02:54:10.729606 | 2020-02-24T07:18:51 | 2020-02-24T07:18:51 | 242,670,715 | 0 | 0 | null | 2023-01-25T13:57:52 | 2020-02-24T07:16:02 | Python | UTF-8 | Python | false | false | 2,318 | py | from motor_product import prod_utils as mpu
from health_product import prod_utils as hpu
HEALTH_INSURER_SLUG = {
'the-oriental-insurance-company-ltd': 'oriental'
}
def resolve_utils(transaction):
if transaction.product_type == 'motor':
return mpu
elif transaction.product_type == 'health':
return hpu
else:
return None
def process_payment_response(request, response, transaction):
if transaction.product_type == 'motor':
return mpu.process_payment_response(
request,
mpu.VEHICLE_TYPE_SLUG[transaction.vehicle_type],
get_insurer_slug(transaction),
response,
transaction.transaction_id
)
elif transaction.product_type == 'health':
return hpu.process_payment_response(
transaction.slab.health_product.insurer.id,
response,
transaction
)
else:
return None
def get_insurer_slug(transaction):
if transaction.product_type == 'motor':
return transaction.insurer.slug
elif transaction.product_type == 'health':
return HEALTH_INSURER_SLUG[transaction.slab.health_product.insurer.slug]
else:
return None
def get_error_url(transaction):
if transaction.product_type == 'motor':
vehicle_type = mpu.VEHICLE_TYPE_SLUG[transaction.vehicle_type]
return '/motor/' + vehicle_type + '/product/failure/'
elif transaction.product_type == 'health':
return '/health-plan/payment/transaction/%s/failure/' % transaction.transaction_id
else:
return None
def todict(obj, classkey=None):
if isinstance(obj, dict):
data = {}
for (k, v) in obj.items():
data[k] = todict(v, classkey)
return data
elif hasattr(obj, "_ast"):
return todict(obj._ast())
elif hasattr(obj, "__iter__"):
return [todict(v, classkey) for v in obj]
elif hasattr(obj, "__dict__"):
data = dict([(key, todict(value, classkey))
for key, value in obj.__dict__.iteritems()
if not callable(value) and not key.startswith('_')])
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
else:
return obj
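# Hedged usage sketch for todict (illustrative only; note that `iteritems`
# above means this helper, as written, targets Python 2):
#     class Point(object):
#         def __init__(self):
#             self.x, self.y = 1, 2
#     todict(Point())                  # -> {'x': 1, 'y': 2}
#     todict(Point(), classkey='cls')  # -> {'x': 1, 'y': 2, 'cls': 'Point'}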
| [
"[email protected]"
] | |
0e188befbac224d8224dc6e6649007c2d0ccc5b5 | 8b1dcac39acfcee0f573dc71d608671dea2062a2 | /tools/hikyuu/interactive/draw/__init__.py | fcdb11396c845625805c5eebb3c406cd9deb7ab1 | [
"MIT"
] | permissive | eightwind/hikyuu | 4c876170b1e298105e7eaf9675b310ad378dd9a4 | 4dab98a93e2a9847f77d615d6900067fbf90b73d | refs/heads/master | 2021-08-26T05:32:39.813080 | 2017-11-21T18:59:16 | 2017-11-21T18:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | __version__ = "Only for pip dist" | [
"[email protected]"
] | |
1d6007a5ebcba5fca71c8d3808860c34ac1f9ede | 0f0f8b3b027f412930ca1890b0666538358a2807 | /dotop/addons/base/ir/ir_filters.py | 7e792068539ec5262791dfa23e1034b0a6500c7e | [] | no_license | konsoar/dotop_pos_v11 | 741bd5ca944dfd52eb886cab6f4b17b6d646e131 | 576c860917edd25661a72726d0729c769977f39a | refs/heads/master | 2021-09-06T13:25:34.783729 | 2018-02-07T02:11:12 | 2018-02-07T02:11:12 | 111,168,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,584 | py | # -*- coding: utf-8 -*-
# Part of dotop. See LICENSE file for full copyright and licensing details.
import ast
from dotop import api, fields, models, _
from dotop.exceptions import UserError
class IrFilters(models.Model):
_name = 'ir.filters'
_description = 'Filters'
_order = 'model_id, name, id desc'
name = fields.Char(string='Filter Name', translate=True, required=True)
user_id = fields.Many2one('res.users', string='User', ondelete='cascade', default=lambda self: self._uid,
help="The user this filter is private to. When left empty the filter is public "
"and available to all users.")
domain = fields.Text(default='[]', required=True)
context = fields.Text(default='{}', required=True)
sort = fields.Text(default='[]', required=True)
model_id = fields.Selection(selection='_list_all_models', string='Model', required=True)
is_default = fields.Boolean(string='Default filter')
action_id = fields.Many2one('ir.actions.actions', string='Action', ondelete='cascade',
help="The menu action this filter applies to. "
"When left empty the filter applies to all menus "
"for this model.")
active = fields.Boolean(default=True)
@api.model
def _list_all_models(self):
self._cr.execute("SELECT model, name FROM ir_model ORDER BY name")
return self._cr.fetchall()
@api.multi
def copy(self, default=None):
self.ensure_one()
default = dict(default or {}, name=_('%s (copy)') % self.name)
return super(IrFilters, self).copy(default)
@api.multi
def _get_eval_domain(self):
self.ensure_one()
return ast.literal_eval(self.domain)
@api.model
def _get_action_domain(self, action_id=None):
"""Return a domain component for matching filters that are visible in the
same context (menu/view) as the given action."""
if action_id:
# filters specific to this menu + global ones
return [('action_id', 'in', [action_id, False])]
# only global ones
return [('action_id', '=', False)]
@api.model
def get_filters(self, model, action_id=None):
"""Obtain the list of filters available for the user on the given model.
:param action_id: optional ID of action to restrict filters to this action
plus global filters. If missing only global filters are returned.
The action does not have to correspond to the model, it may only be
a contextual action.
:return: list of :meth:`~osv.read`-like dicts containing the
``name``, ``is_default``, ``domain``, ``user_id`` (m2o tuple),
``action_id`` (m2o tuple) and ``context`` of the matching ``ir.filters``.
"""
# available filters: private filters (user_id=uid) and public filters (uid=NULL),
# and filters for the action (action_id=action_id) or global (action_id=NULL)
action_domain = self._get_action_domain(action_id)
filters = self.search(action_domain + [('model_id', '=', model), ('user_id', 'in', [self._uid, False])])
user_context = self.env.user.context_get()
return filters.with_context(user_context).read(['name', 'is_default', 'domain', 'context', 'user_id', 'sort'])
@api.model
def _check_global_default(self, vals, matching_filters):
""" _check_global_default(dict, list(dict), dict) -> None
Checks if there is a global default for the model_id requested.
If there is, and the default is different than the record being written
(-> we're not updating the current global default), raise an error
to avoid users unknowingly overwriting existing global defaults (they
have to explicitly remove the current default before setting a new one)
This method should only be called if ``vals`` is trying to set
``is_default``
:raises dotop.exceptions.UserError: if there is an existing default and
we're not updating it
"""
domain = self._get_action_domain(vals.get('action_id'))
defaults = self.search(domain + [
('model_id', '=', vals['model_id']),
('user_id', '=', False),
('is_default', '=', True),
])
if not defaults:
return
if matching_filters and (matching_filters[0]['id'] == defaults.id):
return
raise UserError(_("There is already a shared filter set as default for %(model)s, delete or change it before setting a new default") % {'model': vals.get('model_id')})
@api.model
@api.returns('self', lambda value: value.id)
def create_or_replace(self, vals):
action_id = vals.get('action_id')
current_filters = self.get_filters(vals['model_id'], action_id)
matching_filters = [f for f in current_filters
if f['name'].lower() == vals['name'].lower()
# next line looks for matching user_ids (specific or global), i.e.
# f.user_id is False and vals.user_id is False or missing,
# or f.user_id.id == vals.user_id
if (f['user_id'] and f['user_id'][0]) == vals.get('user_id')]
if vals.get('is_default'):
if vals.get('user_id'):
# Setting new default: any other default that belongs to the user
# should be turned off
domain = self._get_action_domain(action_id)
defaults = self.search(domain + [
('model_id', '=', vals['model_id']),
('user_id', '=', vals['user_id']),
('is_default', '=', True),
])
if defaults:
defaults.write({'is_default': False})
else:
self._check_global_default(vals, matching_filters)
# When a filter exists for the same (name, model, user) triple, we simply
# replace its definition (considering action_id irrelevant here)
if matching_filters:
matching_filter = self.browse(matching_filters[0]['id'])
matching_filter.write(vals)
return matching_filter
return self.create(vals)
_sql_constraints = [
# Partial constraint, complemented by unique index (see below). Still
# useful to keep because it provides a proper error message when a
# violation occurs, as it shares the same prefix as the unique index.
('name_model_uid_unique', 'unique (name, model_id, user_id, action_id)', 'Filter names must be unique'),
]
@api.model_cr_context
def _auto_init(self):
result = super(IrFilters, self)._auto_init()
# Use unique index to implement unique constraint on the lowercase name (not possible using a constraint)
self._cr.execute("DROP INDEX IF EXISTS ir_filters_name_model_uid_unique_index") # drop old index w/o action
self._cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_filters_name_model_uid_unique_action_index'")
if not self._cr.fetchone():
self._cr.execute("""CREATE UNIQUE INDEX "ir_filters_name_model_uid_unique_action_index" ON ir_filters
(lower(name), model_id, COALESCE(user_id,-1), COALESCE(action_id,-1))""")
return result
| [
"Administrator@20nuo003-PC"
] | Administrator@20nuo003-PC |
e5131ff29aa41698036707a61a86466d77e7d3b9 | 6c50bced6fb4474e4eb2e4f3c27a5ce38b0e6048 | /manage.py | e1fbda688388d8db4449c6abeb1423356d40d79b | [] | no_license | NMShihab/WebChatApp | 0d5651fe38baccfee186e59e32c2c79de2bb39a4 | 2dda4e750c370e74bbfbc42dce02432268194d46 | refs/heads/master | 2023-02-01T22:57:53.738222 | 2020-12-15T17:09:14 | 2020-12-15T17:09:14 | 319,082,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ChatApi.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f0558330618b47efd52ea7dae4624354fe0c32ac | 89b45e528f3d495f1dd6f5bcdd1a38ff96870e25 | /pyneng/exercises/09_functions/task_9_2.py | e2a25f74f4ea48dd6a5f51879221d1048f8a5c94 | [] | no_license | imatyukin/python | 2ec6e712d4d988335fc815c7f8da049968cc1161 | 58e72e43c835fa96fb2e8e800fe1a370c7328a39 | refs/heads/master | 2023-07-21T13:00:31.433336 | 2022-08-24T13:34:32 | 2022-08-24T13:34:32 | 98,356,174 | 2 | 0 | null | 2023-07-16T02:31:48 | 2017-07-25T22:45:29 | Python | UTF-8 | Python | false | false | 2,935 | py | # -*- coding: utf-8 -*-
"""
Task 9.2
Create a function generate_trunk_config that generates
the configuration for trunk ports.
The function should take these parameters:
- intf_vlan_mapping: expects as an argument a dictionary mapping interfaces
to VLANs, of this form:
{'FastEthernet0/1': [10, 20],
'FastEthernet0/2': [11, 30],
'FastEthernet0/4': [17]}
- trunk_template: expects as an argument a trunk-port configuration template
as a list of commands (the trunk_mode_template list)
The function should return a list of configuration commands based on the given
ports and the trunk_mode_template template. Lines in the list must not end
with a newline character.
Test the function on the trunk_config dictionary
and the trunk_mode_template command list.
If the previous check passes, test the function again on the trunk_config_2
dictionary and make sure the resulting list contains the correct interface
and VLAN numbers.
Example of the resulting list (each element is placed on its own line
for readability):
[
'interface FastEthernet0/1',
'switchport mode trunk',
'switchport trunk native vlan 999',
'switchport trunk allowed vlan 10,20,30',
'interface FastEthernet0/2',
'switchport mode trunk',
'switchport trunk native vlan 999',
'switchport trunk allowed vlan 11,30',
...]
Restriction: All tasks must be solved using only the topics covered so far.
"""
from pprint import pprint
trunk_mode_template = [
"switchport mode trunk",
"switchport trunk native vlan 999",
"switchport trunk allowed vlan",
]
trunk_config = {
"FastEthernet0/1": [10, 20, 30],
"FastEthernet0/2": [11, 30],
"FastEthernet0/4": [17],
}
trunk_config_2 = {
"FastEthernet0/11": [120, 131],
"FastEthernet0/15": [111, 130],
"FastEthernet0/14": [117],
}
def generate_trunk_config(intf_vlan_mapping, trunk_template):
cfg = []
for intf, vlans in intf_vlan_mapping.items():
cfg.append("interface " + intf)
for s in trunk_template:
if s.endswith('allowed vlan'):
s = s + ' ' + str(vlans)[1:-1].replace(" ", "")
cfg.append(s)
return cfg
pprint(generate_trunk_config(trunk_config, trunk_mode_template))
pprint(generate_trunk_config(trunk_config_2, trunk_mode_template))
| [
"[email protected]"
] | |
0a7ff4211eaca98470e2742585ac72c1dbe492de | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02415/s303347384.py | f7612caa107b4023d41f174a9952151845dbb81a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | word = input()
print(str.swapcase(word))
| [
"[email protected]"
] | |
ac9c7f15ea1547bd32a8c41e2f64470813bf0d52 | 70054615f56be28373b00c9df96544ec822be683 | /res/scripts/client/gui/scaleform/daapi/view/meta/questswindowmeta.py | 66a92293420cda94a63d878facfa96ffceb268d2 | [] | no_license | wanyancan/WOTDecompiled | c646ad700f5ec3fb81fb4e87862639ce0bdf0000 | 9ffb09007a61d723cdb28549e15db39c34c0ea1e | refs/heads/master | 2020-04-17T23:13:15.649069 | 2013-11-15T16:37:10 | 2013-11-15T16:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | from gui.Scaleform.framework.entities.DAAPIModule import DAAPIModule
class QuestsWindowMeta(DAAPIModule):
pass
| [
"[email protected]"
] | |
17fa82a9093701e46b8648bd51b5684c11c5f8c9 | 5d6365f4cc81272f8c481ee31f1111e8eca6dca5 | /alipay/aop/api/domain/BizActionLogDTO.py | bdaee8dcf4791f2ea8f5f6ac64c0cb3184f154de | [
"Apache-2.0"
] | permissive | barrybbb/alipay-sdk-python-all | 9e99b56138e6ca9c0b236707c79899d396ac6f88 | 1b63620431d982d30d39ee0adc4b92463cbcee3c | refs/heads/master | 2023-08-22T20:16:17.242701 | 2021-10-11T08:22:44 | 2021-10-11T08:22:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,378 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BizActionLogDTO(object):
def __init__(self):
self._amount = None
self._biz_budget_apply_code = None
self._biz_budget_id = None
self._biz_name = None
self._biz_type = None
self._biz_uk_id = None
self._gmt_create = None
self._gmt_modified = None
self._id = None
self._modify_type = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def biz_budget_apply_code(self):
return self._biz_budget_apply_code
@biz_budget_apply_code.setter
def biz_budget_apply_code(self, value):
self._biz_budget_apply_code = value
@property
def biz_budget_id(self):
return self._biz_budget_id
@biz_budget_id.setter
def biz_budget_id(self, value):
self._biz_budget_id = value
@property
def biz_name(self):
return self._biz_name
@biz_name.setter
def biz_name(self, value):
self._biz_name = value
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def biz_uk_id(self):
return self._biz_uk_id
@biz_uk_id.setter
def biz_uk_id(self, value):
self._biz_uk_id = value
@property
def gmt_create(self):
return self._gmt_create
@gmt_create.setter
def gmt_create(self, value):
self._gmt_create = value
@property
def gmt_modified(self):
return self._gmt_modified
@gmt_modified.setter
def gmt_modified(self, value):
self._gmt_modified = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def modify_type(self):
return self._modify_type
@modify_type.setter
def modify_type(self, value):
self._modify_type = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.biz_budget_apply_code:
if hasattr(self.biz_budget_apply_code, 'to_alipay_dict'):
params['biz_budget_apply_code'] = self.biz_budget_apply_code.to_alipay_dict()
else:
params['biz_budget_apply_code'] = self.biz_budget_apply_code
if self.biz_budget_id:
if hasattr(self.biz_budget_id, 'to_alipay_dict'):
params['biz_budget_id'] = self.biz_budget_id.to_alipay_dict()
else:
params['biz_budget_id'] = self.biz_budget_id
if self.biz_name:
if hasattr(self.biz_name, 'to_alipay_dict'):
params['biz_name'] = self.biz_name.to_alipay_dict()
else:
params['biz_name'] = self.biz_name
if self.biz_type:
if hasattr(self.biz_type, 'to_alipay_dict'):
params['biz_type'] = self.biz_type.to_alipay_dict()
else:
params['biz_type'] = self.biz_type
if self.biz_uk_id:
if hasattr(self.biz_uk_id, 'to_alipay_dict'):
params['biz_uk_id'] = self.biz_uk_id.to_alipay_dict()
else:
params['biz_uk_id'] = self.biz_uk_id
if self.gmt_create:
if hasattr(self.gmt_create, 'to_alipay_dict'):
params['gmt_create'] = self.gmt_create.to_alipay_dict()
else:
params['gmt_create'] = self.gmt_create
if self.gmt_modified:
if hasattr(self.gmt_modified, 'to_alipay_dict'):
params['gmt_modified'] = self.gmt_modified.to_alipay_dict()
else:
params['gmt_modified'] = self.gmt_modified
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.modify_type:
if hasattr(self.modify_type, 'to_alipay_dict'):
params['modify_type'] = self.modify_type.to_alipay_dict()
else:
params['modify_type'] = self.modify_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BizActionLogDTO()
if 'amount' in d:
o.amount = d['amount']
if 'biz_budget_apply_code' in d:
o.biz_budget_apply_code = d['biz_budget_apply_code']
if 'biz_budget_id' in d:
o.biz_budget_id = d['biz_budget_id']
if 'biz_name' in d:
o.biz_name = d['biz_name']
if 'biz_type' in d:
o.biz_type = d['biz_type']
if 'biz_uk_id' in d:
o.biz_uk_id = d['biz_uk_id']
if 'gmt_create' in d:
o.gmt_create = d['gmt_create']
if 'gmt_modified' in d:
o.gmt_modified = d['gmt_modified']
if 'id' in d:
o.id = d['id']
if 'modify_type' in d:
o.modify_type = d['modify_type']
return o
| [
"[email protected]"
] | |
93a759dd1d4ce068810fd67a473fd7f242615fd5 | f2fcf807b441aabca1ad220b66770bb6a018b4ae | /coderbyte/StringMerge.py | aee27511c52f7fc9c13b05cde0262bec9a847235 | [] | no_license | gokou00/python_programming_challenges | 22d1c53ccccf1f438754edad07b1d7ed77574c2c | 0214d60074a3b57ff2c6c71a780ce5f9a480e78c | refs/heads/master | 2020-05-17T15:41:07.759580 | 2019-04-27T16:36:56 | 2019-04-27T16:36:56 | 183,797,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | def StringMerge(string):
stringArr = string.split("*")
arr1 = stringArr[0]
arr2 = stringArr[1]
strBuild = ""
for i in range(len(arr1)):
strBuild+= arr1[i]
strBuild+= arr2[i]
return strBuild
print(StringMerge("123hg*aaabb"))
| [
"[email protected]"
] | |
b69ca6b786925c7020c263729f5d7bd1e74e3d05 | 35cf6fc79b8d6c335add8e55e0f4dca6f2816d1d | /Python_Study/第七模块学习/Day04/EdmureBlog/web/forms/base.py | ab198421829eb1b2c3ebc96a9c1743d571cc884e | [] | no_license | KongChan1988/51CTO-Treasure | 08b4ca412ad8a09d67c1ea79c7149f8573309ca4 | edb2e4bd11d39ac24cd240f3e815a88361867621 | refs/heads/master | 2021-07-04T15:57:56.164446 | 2019-07-24T15:28:36 | 2019-07-24T15:28:36 | 97,453,749 | 5 | 8 | null | 2019-10-30T22:05:12 | 2017-07-17T08:34:59 | Python | UTF-8 | Python | false | false | 208 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
class BaseForm(object):
def __init__(self, request, *args, **kwargs):
self.request = request
super(BaseForm, self).__init__(*args, **kwargs)
| [
"[email protected]"
] | |
8d16a7b317c421b41cb6db551f09e5d6d244cff9 | 3d8d874ebba15fd065c0a9e74c05e8cd2a24dbe8 | /Week 6 - Joining Data with pandas/19-Concatenate and merge to find common songs.py | 9ad795f5e20ab5a06eff3519aec9c340843f3813 | [] | no_license | RomuloMileris/UCD_Professional_Certificate_in_Data_Analytics | db3e583a6e607e74f3d26b65ba0de59cff64e5a3 | a4a77df69a2440132cfa3e89c4a1674e3e02d086 | refs/heads/master | 2023-02-22T12:48:50.039440 | 2021-01-15T17:06:07 | 2021-01-15T17:06:07 | 319,717,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # Concatenate the classic tables vertically
classic_18_19 = pd.concat([classic_18, classic_19], ignore_index=True)
# Concatenate the pop tables vertically
pop_18_19 = pd.concat([pop_18, pop_19], ignore_index=True)
# Merge classic_18_19 with pop_18_19
classic_pop = classic_18_19.merge(pop_18_19, on='tid')
# Using .isin(), filter classic_18_19 rows where tid is in classic_pop
popular_classic = classic_18_19[classic_18_19['tid'].isin(classic_pop['tid'])]
# Print popular chart
print(popular_classic) | [
"[email protected]"
] | |
fc56269afc1a9b27972e6ba65f1634e38ca3c907 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/volatil.py | da3fffbd742a2e39d77bda58f2168f2a493c7200 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 586 | py | ii = [('EmerRN.py', 1), ('RogePAV2.py', 2), ('GodwWSL2.py', 1), ('FerrSDO3.py', 1), ('WilbRLW.py', 1), ('ProuWCM.py', 5), ('PettTHE.py', 3), ('PeckJNG.py', 1), ('WilbRLW2.py', 7), ('CarlTFR.py', 2), ('CrokTPS.py', 1), ('ClarGE.py', 1), ('BuckWGM.py', 1), ('GilmCRS.py', 1), ('WestJIT2.py', 1), ('SoutRD2.py', 1), ('MedwTAI2.py', 1), ('BuckWGM2.py', 1), ('WestJIT.py', 2), ('FitzRNS4.py', 2), ('EdgeMHT.py', 1), ('LyttELD3.py', 1), ('BellCHM.py', 1), ('WilbRLW3.py', 1), ('AinsWRR2.py', 1), ('BrewDTO.py', 4), ('FitzRNS2.py', 1), ('LyelCPG3.py', 1), ('BeckWRE.py', 1), ('WordWYR.py', 1)] | [
"[email protected]"
] | |
ba8d9485f114b77345b5bdc786cacf2516b8dba0 | b29dcbf879166592b59e34f0e2bc4918c3ac94a0 | /cart/views.py | 4dfc522e62c9c9e4cc9b815d50b1184bbe3d6954 | [] | no_license | samdasoxide/myshop | ce6d4553af04f1ddf5de1cbfa38ef2ff33ac6b11 | 21115de7748862c8a44ef4dc5a61511ad67746dd | refs/heads/master | 2022-12-14T07:39:13.803686 | 2017-06-20T11:42:30 | 2017-06-20T11:42:30 | 92,954,076 | 0 | 0 | null | 2022-12-07T23:58:40 | 2017-05-31T14:23:18 | JavaScript | UTF-8 | Python | false | false | 1,067 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from shop.models import Product
from .cart import Cart
from .forms import CartAddProductFrom
@require_POST
def cart_add(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
form = CartAddProductFrom(request.POST)
if form.is_valid():
cd = form.cleaned_data
cart.add(product=product,
quantity=cd['quantity'],
update_quantity=cd['update'])
return redirect('cart:cart_detail')
def cart_remove(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
cart.remove(product)
return redirect('cart:cart_detail')
def cart_detail(request):
cart = Cart(request)
for item in cart:
item['update_quantity_form'] = CartAddProductFrom(
initial={'quantity': item['quantity'], 'update': True}
)
return render(request, 'cart/detail.html', {'cart': cart})
| [
"[email protected]"
] | |
b3743862fc7b8de3b6dca5344e37f61f50a634eb | b97a608517f024b81db0bdc4094d143ba87c8af4 | /src/oceandata/export_production/mouw.py | 5922a9fe193338af1b8d507473dce963eb6aaa90 | [
"MIT"
] | permissive | brorfred/oceandata | ff008042cc993a07d9db1de3fa72e70f70d44219 | 831e0691223da1aa6a6e97175e8c2d7874bf60cd | refs/heads/master | 2022-02-14T11:48:13.401206 | 2022-01-27T17:01:56 | 2022-01-27T17:01:56 | 175,451,337 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,519 | py | """
Global ocean particulate organic carbon flux.
Ref: https://doi.org/10.1594/PANGAEA.855594,
"""
import os, pathlib
import warnings
import pandas as pd
import numpy as np
import requests
DATADIR = pathlib.PurePath(pathlib.Path.home(), ".oceandata")
pathlib.Path(DATADIR).mkdir(parents=True, exist_ok=True)
DATAURL = "https://doi.pangaea.de/10.1594/PANGAEA.855594"
"""
def load():
df = pd.read_hdf("h5files/ep_mouw_with_sat.h5")
df["Zeu"] = 4.6/df.kd490
df["ep_obs"] = df.POC_flux
df["chl"] = df["chl"] * df["Zeu"]
#lh = ecoregions.Longhurst()
#longh = lh.match("regions", lonvec=dfm.lon, latvec=dfm.lat, jdvec=dfm.lat*0)
#dfm["longhurst"] = longh
return df
"""
def load(datadir=DATADIR, filename="GO_flux.tab", with_std=False):
"""Load tab file and fix some columns"""
fn = os.path.join(datadir, filename)
if not os.path.isfile(fn):
download(datadir=datadir, filename=filename)
with open(fn ,"r") as fH:
while 1:
line = fH.readline()
if "*/" in line:
break
df = pd.read_csv(fH, sep="\t", parse_dates=[1,])
if not with_std:
df.drop(columns=['Flux std dev [±]', 'C flux [mg/m**2/day]',
'C flux std dev [±]', 'POC flux std dev [±]',
'PIC flux std dev [±]', 'PON flux std dev [±]',
'POP flux std dev [±]', 'PSi flux std dev [±]',
'PAl std dev [±]', 'CaCO3 flux std dev [±]',
'Reference'], inplace=True)
df.rename(columns={'ID (Reference identifier)':"ref_ID",
'ID (Unique location identifier)':"UUID",
'Type (Data type)':"sampling_type",
'Latitude':"lat",
'Longitude':"lon",
'Flux tot [mg/m**2/day]':"tot_flux",
'POC flux [mg/m**2/day]':"POC_flux",
'PIC flux [mg/m**2/day]':"PIC_flux",
'PON flux [mg/m**2/day]':"PON_flux",
'POP flux [mg/m**2/day]':"POP_flux",
'PSi flux [mg/m**2/day]':"PSi_flux",
'PSiO2 flux [mg/m**2/day]':"PSiO2_flux",
'PSi(OH)4 flux [mg/m**2/day]':"PSiOH4_flux",
'PAl [mg/m**2/day]':"PAl_flux",
'Chl flux [mg/m**2/day]':"Chl_flux",
'Pheop flux [µg/m**2/day]':"Pheop_flux",
'CaCO3 flux [mg/m**2/day]':"CaCO3_flux",
'Fe flux [mg/m**2/day]':"Fe_flux",
'Mn flux [µg/m**2/day]':"Mn_flux",
'Ba flux [µg/m**2/day]':"Ba_flux",
'Detrital flux [mg/m**2/day]':"Detr_flux",
'Ti flux [µg/m**2/day]':"Ti_flux",
'Bathy depth [m] (ETOPO1 bathymetry)':"bathy",
'Depth water [m] (Sediment trap deployment depth)':"depth",
'Area [m**2]':"area",
'Duration [days]':"duration",
'Date/Time (Deployed)':"start_time",
'Date/time end (Retrieved)':"end_time",
'Area [m**2] (Surface area of trap)':"trap_area",
},
inplace=True)
df.drop(columns=['Type (Sediment trap type)',
'Elevation [m a.s.l.] (Total water depth)'],
inplace=True)
df["start_time"] = pd.DatetimeIndex(df["start_time"])
df["end_time"] = pd.DatetimeIndex(df["end_time"])
df.set_index("end_time", inplace=True)
return df
def download(datadir=DATADIR, filename="GO_flux.tab"):
"""Download txt file from BATS server
Refs
----
"""
local_filename = os.path.join(datadir, filename)
try:
os.unlink(local_filename)
except FileNotFoundError:
pass
try:
r = requests.get(DATAURL, stream=True, timeout=6, params={"format":"textfile"})
except requests.ReadTimeout:
warnings.warn("Connection to server timed out.")
return False
if r.ok:
if local_filename is None:
return r.text
else:
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
else:
raise IOError(f"Could not download file from server, Error {r.status_code}")
| [
"[email protected]"
] | |
11e303d4c69ca7bcedd509112ad6562b91d12bdc | 6a562077f79213f6b2bb89e92d6a16d931268089 | /frappe/core/doctype/data_import/importer_new.py | 6fccbc89ef1f32fc83abe5d05da1ba572513dd91 | [
"MIT"
] | permissive | libracore/frappe | 74fe917b75aa1cfad38c71519914180d5d5f1366 | 92d94a73a3445a252a2828de0053dcce86a18f17 | refs/heads/v12 | 2023-07-17T04:58:08.622228 | 2023-06-28T17:27:33 | 2023-06-28T17:27:33 | 89,392,790 | 6 | 8 | MIT | 2023-08-29T16:29:03 | 2017-04-25T18:19:40 | Python | UTF-8 | Python | false | false | 27,077 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import io
import os
import json
import timeit
import frappe
from datetime import datetime
from frappe import _
from frappe.utils import cint, flt, update_progress_bar
from frappe.utils.csvutils import read_csv_content
from frappe.utils.xlsxutils import (
read_xlsx_file_from_attached_file,
read_xls_file_from_attached_file,
)
from frappe.model import no_value_fields, table_fields
INVALID_VALUES = ["", None]
MAX_ROWS_IN_PREVIEW = 10
# pylint: disable=R0201
class Importer:
def __init__(
self, doctype, data_import=None, file_path=None, content=None, console=False
):
self.doctype = doctype
self.template_options = frappe._dict({"remap_column": {}})
self.console = console
if data_import:
self.data_import = data_import
if self.data_import.template_options:
template_options = frappe.parse_json(self.data_import.template_options)
self.template_options.update(template_options)
else:
self.data_import = None
self.header_row = None
self.data = None
# used to store date formats guessed from data rows per column
self._guessed_date_formats = {}
# used to store eta during import
self.last_eta = 0
# used to collect warnings during template parsing
# and show them to user
self.warnings = []
self.meta = frappe.get_meta(doctype)
self.prepare_content(file_path, content)
self.parse_data_from_template()
def prepare_content(self, file_path, content):
extension = None
if self.data_import and self.data_import.import_file:
file_doc = frappe.get_doc("File", {"file_url": self.data_import.import_file})
content = file_doc.get_content()
extension = file_doc.file_name.split(".")[1]
if file_path:
content, extension = self.read_file(file_path)
if not extension:
extension = "csv"
if content:
self.read_content(content, extension)
self.validate_template_content()
self.remove_empty_rows_and_columns()
def read_file(self, file_path):
extn = file_path.split(".")[1]
file_content = None
with io.open(file_path, mode="rb") as f:
file_content = f.read()
return file_content, extn
def read_content(self, content, extension):
if extension == "csv":
data = read_csv_content(content)
elif extension == "xlsx":
data = read_xlsx_file_from_attached_file(fcontent=content)
elif extension == "xls":
data = read_xls_file_from_attached_file(content)
self.header_row = data[0]
self.data = data[1:]
def validate_template_content(self):
column_count = len(self.header_row)
if any([len(row) != column_count and len(row) != 0 for row in self.data]):
frappe.throw(
_("Number of columns does not match with data"), title=_("Invalid Template")
)
def remove_empty_rows_and_columns(self):
self.row_index_map = []
removed_rows = []
removed_columns = []
# remove empty rows
data = []
for i, row in enumerate(self.data):
if all(v in INVALID_VALUES for v in row):
# empty row
removed_rows.append(i)
else:
data.append(row)
self.row_index_map.append(i)
# remove empty columns
# a column with a header and no data is a valid column
# a column with no header and no data will be removed
header_row = []
for i, column in enumerate(self.header_row):
column_values = [row[i] for row in data]
values = [column] + column_values
if all(v in INVALID_VALUES for v in values):
# empty column
removed_columns.append(i)
else:
header_row.append(column)
data_without_empty_columns = []
# remove empty columns from data
for i, row in enumerate(data):
new_row = [v for j, v in enumerate(row) if j not in removed_columns]
data_without_empty_columns.append(new_row)
self.data = data_without_empty_columns
self.header_row = header_row
def get_data_for_import_preview(self):
out = frappe._dict()
out.data = list(self.rows)
out.columns = self.columns
out.warnings = self.warnings
if len(out.data) > MAX_ROWS_IN_PREVIEW:
out.data = out.data[:MAX_ROWS_IN_PREVIEW]
out.max_rows_exceeded = True
out.max_rows_in_preview = MAX_ROWS_IN_PREVIEW
return out
def parse_data_from_template(self):
columns = self.parse_columns_from_header_row()
columns, data = self.add_serial_no_column(columns, self.data)
self.columns = columns
self.rows = data
def parse_columns_from_header_row(self):
remap_column = self.template_options.remap_column
columns = []
df_by_labels_and_fieldnames = self.build_fields_dict_for_column_matching()
for i, header_title in enumerate(self.header_row):
header_row_index = str(i)
column_number = str(i + 1)
skip_import = False
fieldname = remap_column.get(header_row_index)
if fieldname and fieldname != "Don't Import":
df = df_by_labels_and_fieldnames.get(fieldname)
self.warnings.append(
{
"col": column_number,
"message": _("Mapping column {0} to field {1}").format(
frappe.bold(header_title or "<i>Untitled Column</i>"), frappe.bold(df.label)
),
"type": "info",
}
)
else:
df = df_by_labels_and_fieldnames.get(header_title)
if not df:
skip_import = True
else:
skip_import = False
if fieldname == "Don't Import":
skip_import = True
self.warnings.append(
{
"col": column_number,
"message": _("Skipping column {0}").format(frappe.bold(header_title)),
"type": "info",
}
)
elif header_title and not df:
self.warnings.append(
{
"col": column_number,
"message": _("Cannot match column {0} with any field").format(
frappe.bold(header_title)
),
"type": "info",
}
)
elif not header_title and not df:
self.warnings.append(
{"col": column_number, "message": _("Skipping Untitled Column"), "type": "info"}
)
columns.append(
frappe._dict(
df=df,
skip_import=skip_import,
header_title=header_title,
column_number=column_number,
index=i,
)
)
return columns
def build_fields_dict_for_column_matching(self):
"""
Build a dict with various keys to match with column headers and value as docfield
The keys can be label or fieldname
{
'Customer': df1,
'customer': df1,
'Due Date': df2,
'due_date': df2,
'Item Code (Sales Invoice Item)': df3,
'Sales Invoice Item:item_code': df3,
}
"""
out = {}
table_doctypes = [df.options for df in self.meta.get_table_fields()]
doctypes = table_doctypes + [self.doctype]
for doctype in doctypes:
# name field
name_key = "ID" if self.doctype == doctype else "ID ({})".format(doctype)
name_df = frappe._dict(
{
"fieldtype": "Data",
"fieldname": "name",
"label": "ID",
"reqd": self.data_import.import_type == "Update Existing Records",
"parent": doctype,
}
)
out[name_key] = name_df
out["name"] = name_df
# other fields
meta = frappe.get_meta(doctype)
fields = self.get_standard_fields(doctype) + meta.fields
for df in fields:
fieldtype = df.fieldtype or "Data"
parent = df.parent or self.doctype
if fieldtype not in no_value_fields:
# label as key
label = (
df.label if self.doctype == doctype else "{0} ({1})".format(df.label, parent)
)
out[label] = df
# fieldname as key
if self.doctype == doctype:
out[df.fieldname] = df
else:
key = "{0}:{1}".format(doctype, df.fieldname)
out[key] = df
# if autoname is based on field
# add an entry for "ID (Autoname Field)"
autoname_field = self.get_autoname_field(self.doctype)
if autoname_field:
out["ID ({})".format(autoname_field.label)] = autoname_field
# ID field should also map to the autoname field
out["ID"] = autoname_field
out["name"] = autoname_field
return out
def get_standard_fields(self, doctype):
meta = frappe.get_meta(doctype)
if meta.istable:
standard_fields = [
{"label": "Parent", "fieldname": "parent"},
{"label": "Parent Type", "fieldname": "parenttype"},
{"label": "Parent Field", "fieldname": "parentfield"},
{"label": "Row Index", "fieldname": "idx"},
]
else:
standard_fields = [
{"label": "Owner", "fieldname": "owner"},
{"label": "Document Status", "fieldname": "docstatus", "fieldtype": "Int"},
]
out = []
for df in standard_fields:
df = frappe._dict(df)
df.parent = doctype
out.append(df)
return out
def add_serial_no_column(self, columns, data):
columns_with_serial_no = [
frappe._dict({"header_title": "Sr. No", "skip_import": True})
] + columns
# update index for each column
for i, col in enumerate(columns_with_serial_no):
col.index = i
data_with_serial_no = []
for i, row in enumerate(data):
data_with_serial_no.append([self.row_index_map[i] + 1] + row)
return columns_with_serial_no, data_with_serial_no
def parse_value(self, value, df):
# convert boolean values to 0 or 1
if df.fieldtype == "Check" and value.lower().strip() in ["t", "f", "true", "false"]:
value = value.lower().strip()
value = 1 if value in ["t", "true"] else 0
if df.fieldtype in ["Int", "Check"]:
value = cint(value)
elif df.fieldtype in ["Float", "Percent", "Currency"]:
value = flt(value)
elif df.fieldtype in ["Date", "Datetime"]:
value = self.parse_date_format(value, df)
return value
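	# Illustrative conversions performed by parse_value (hedged, not exhaustive):
	# a "Check" field turns "t"/"true"/"f"/"false" into 1 or 0 via cint, numeric
	# fieldtypes go through cint/flt, and Date/Datetime strings are parsed with
	# the per-column format guessed in guess_date_format_for_column below.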
def parse_date_format(self, value, df):
date_format = self.guess_date_format_for_column(df.fieldname)
if date_format:
return datetime.strptime(value, date_format)
return value
def guess_date_format_for_column(self, fieldname):
""" Guesses date format for a column by parsing the first 10 values in the column,
getting the date format and then returning the one which has the maximum frequency
"""
PARSE_ROW_COUNT = 10
if not self._guessed_date_formats.get(fieldname):
column_index = -1
for i, field in enumerate(self.header_row):
if self.meta.has_field(field) and field == fieldname:
column_index = i
break
if column_index == -1:
self._guessed_date_formats[fieldname] = None
date_values = [
row[column_index] for row in self.data[:PARSE_ROW_COUNT] if row[column_index]
]
date_formats = [guess_date_format(d) for d in date_values]
if not date_formats:
return
max_occurred_date_format = max(set(date_formats), key=date_formats.count)
self._guessed_date_formats[fieldname] = max_occurred_date_format
return self._guessed_date_formats[fieldname]
def import_data(self):
# set user lang for translations
frappe.cache().hdel("lang", frappe.session.user)
frappe.set_user_lang(frappe.session.user)
if not self.console:
self.data_import.db_set("template_warnings", "")
# set flags
frappe.flags.in_import = True
frappe.flags.mute_emails = self.data_import.mute_emails
# prepare a map for missing link field values
self.prepare_missing_link_field_values()
# parse docs from rows
payloads = self.get_payloads_for_import()
# dont import if there are non-ignorable warnings
warnings = [w for w in self.warnings if w.get("type") != "info"]
if warnings:
if self.console:
self.print_grouped_warnings(warnings)
else:
self.data_import.db_set("template_warnings", json.dumps(warnings))
frappe.publish_realtime(
"data_import_refresh", {"data_import": self.data_import.name}
)
return
# setup import log
if self.data_import.import_log:
import_log = frappe.parse_json(self.data_import.import_log)
else:
import_log = []
# remove previous failures from import log
import_log = [l for l in import_log if l.get("success") == True]
# get successfully imported rows
imported_rows = []
for log in import_log:
log = frappe._dict(log)
if log.success:
imported_rows += log.row_indexes
# start import
total_payload_count = len(payloads)
batch_size = frappe.conf.data_import_batch_size or 1000
for batch_index, batched_payloads in enumerate(
frappe.utils.create_batch(payloads, batch_size)
):
for i, payload in enumerate(batched_payloads):
doc = payload.doc
row_indexes = [row[0] for row in payload.rows]
current_index = (i + 1) + (batch_index * batch_size)
if set(row_indexes).intersection(set(imported_rows)):
print("Skipping imported rows", row_indexes)
if total_payload_count > 5:
frappe.publish_realtime(
"data_import_progress",
{
"current": current_index,
"total": total_payload_count,
"skipping": True,
"data_import": self.data_import.name,
},
)
continue
try:
start = timeit.default_timer()
doc = self.process_doc(doc)
processing_time = timeit.default_timer() - start
eta = self.get_eta(current_index, total_payload_count, processing_time)
if total_payload_count > 5:
frappe.publish_realtime(
"data_import_progress",
{
"current": current_index,
"total": total_payload_count,
"docname": doc.name,
"data_import": self.data_import.name,
"success": True,
"row_indexes": row_indexes,
"eta": eta,
},
)
if self.console:
update_progress_bar(
"Importing {0} records".format(total_payload_count),
current_index,
total_payload_count,
)
import_log.append(
frappe._dict(success=True, docname=doc.name, row_indexes=row_indexes)
)
# commit after every successful import
frappe.db.commit()
except Exception:
import_log.append(
frappe._dict(
success=False,
exception=frappe.get_traceback(),
messages=frappe.local.message_log,
row_indexes=row_indexes,
)
)
frappe.clear_messages()
# rollback if exception
frappe.db.rollback()
# set status
failures = [l for l in import_log if l.get("success") == False]
if len(failures) == total_payload_count:
status = "Pending"
elif len(failures) > 0:
status = "Partial Success"
else:
status = "Success"
if self.console:
self.print_import_log(import_log)
else:
self.data_import.db_set("status", status)
self.data_import.db_set("import_log", json.dumps(import_log))
frappe.flags.in_import = False
frappe.flags.mute_emails = False
frappe.publish_realtime("data_import_refresh", {"data_import": self.data_import.name})
return import_log
def get_payloads_for_import(self):
payloads = []
# make a copy
data = list(self.rows)
while data:
doc, rows, data = self.parse_next_row_for_import(data)
payloads.append(frappe._dict(doc=doc, rows=rows))
return payloads
def parse_next_row_for_import(self, data):
"""
		Parses rows that make up a doc. A doc may be built from a single row or multiple rows.
Returns the doc, rows, and data without the rows.
"""
doctypes = set([col.df.parent for col in self.columns if col.df and col.df.parent])
# first row is included by default
first_row = data[0]
rows = [first_row]
# if there are child doctypes, find the subsequent rows
if len(doctypes) > 1:
# subsequent rows either dont have any parent value set
# or have the same value as the parent row
# we include a row if either of conditions match
parent_column_indexes = [
col.index
for col in self.columns
if not col.skip_import and col.df and col.df.parent == self.doctype
]
parent_row_values = [first_row[i] for i in parent_column_indexes]
data_without_first_row = data[1:]
for row in data_without_first_row:
row_values = [row[i] for i in parent_column_indexes]
# if the row is blank, it's a child row doc
if all([v in INVALID_VALUES for v in row_values]):
rows.append(row)
continue
# if the row has same values as parent row, it's a child row doc
if row_values == parent_row_values:
rows.append(row)
continue
# if any of those conditions dont match, it's the next doc
break
def get_column_indexes(doctype):
return [
col.index
for col in self.columns
if not col.skip_import and col.df and col.df.parent == doctype
]
def validate_value(value, df):
if df.fieldtype == "Select":
select_options = df.get_select_options()
if select_options and value not in select_options:
options_string = ", ".join([frappe.bold(d) for d in select_options])
msg = _("Value must be one of {0}").format(options_string)
self.warnings.append(
{
"row": row_number,
"field": df.as_dict(convert_dates_to_str=True),
"message": msg,
}
)
return False
elif df.fieldtype == "Link":
d = self.get_missing_link_field_values(df.options)
if value in d.missing_values and not d.one_mandatory:
msg = _("Value {0} missing for {1}").format(
frappe.bold(value), frappe.bold(df.options)
)
self.warnings.append(
{
"row": row_number,
"field": df.as_dict(convert_dates_to_str=True),
"message": msg,
}
)
return value
return value
def parse_doc(doctype, docfields, values, row_number):
# new_doc returns a dict with default values set
doc = frappe.new_doc(doctype, as_dict=True)
# remove standard fields and __islocal
for key in frappe.model.default_fields + ("__islocal",):
doc.pop(key, None)
for df, value in zip(docfields, values):
if value in INVALID_VALUES:
value = None
value = validate_value(value, df)
if value:
doc[df.fieldname] = self.parse_value(value, df)
check_mandatory_fields(doctype, doc, row_number)
return doc
def check_mandatory_fields(doctype, doc, row_number):
# check if mandatory fields are set (except table fields)
meta = frappe.get_meta(doctype)
fields = [
df
for df in meta.fields
if df.fieldtype not in table_fields
and df.reqd
and doc.get(df.fieldname) in INVALID_VALUES
]
if not fields:
return
if len(fields) == 1:
self.warnings.append(
{
"row": row_number,
"message": _("{0} is a mandatory field").format(fields[0].label),
}
)
else:
fields_string = ", ".join([df.label for df in fields])
self.warnings.append(
{"row": row_number, "message": _("{0} are mandatory fields").format(fields_string)}
)
parsed_docs = {}
for row in rows:
for doctype in doctypes:
if doctype == self.doctype and parsed_docs.get(doctype):
# if parent doc is already parsed from the first row
# then skip
continue
row_number = row[0]
column_indexes = get_column_indexes(doctype)
values = [row[i] for i in column_indexes]
if all(v in INVALID_VALUES for v in values):
# skip values if all of them are empty
continue
columns = [self.columns[i] for i in column_indexes]
docfields = [col.df for col in columns]
doc = parse_doc(doctype, docfields, values, row_number)
parsed_docs[doctype] = parsed_docs.get(doctype, [])
parsed_docs[doctype].append(doc)
# build the doc with children
doc = {}
for doctype, docs in parsed_docs.items():
if doctype == self.doctype:
doc.update(docs[0])
else:
table_dfs = self.meta.get(
"fields", {"options": doctype, "fieldtype": ["in", table_fields]}
)
if table_dfs:
table_field = table_dfs[0]
doc[table_field.fieldname] = docs
# check if there is at least one row for mandatory table fields
mandatory_table_fields = [
df
for df in self.meta.fields
if df.fieldtype in table_fields and df.reqd and len(doc.get(df.fieldname, [])) == 0
]
if len(mandatory_table_fields) == 1:
self.warnings.append(
{
"row": first_row[0],
"message": _("There should be atleast one row for {0} table").format(
mandatory_table_fields[0].label
),
}
)
elif mandatory_table_fields:
fields_string = ", ".join([df.label for df in mandatory_table_fields])
self.warnings.append(
{
"row": first_row[0],
"message": _("There should be atleast one row for the following tables: {0}").format(fields_string),
}
)
return doc, rows, data[len(rows) :]
def process_doc(self, doc):
import_type = self.data_import.import_type
if import_type == "Insert New Records":
return self.insert_record(doc)
elif import_type == "Update Existing Records":
return self.update_record(doc)
def insert_record(self, doc):
self.create_missing_linked_records(doc)
new_doc = frappe.new_doc(self.doctype)
new_doc.update(doc)
# name shouldn't be set when inserting a new record
new_doc.set("name", None)
new_doc.insert()
if self.meta.is_submittable and self.data_import.submit_after_import:
new_doc.submit()
return new_doc
def create_missing_linked_records(self, doc):
"""
Finds fields that are of type Link, and creates the corresponding
document automatically if it has only one mandatory field
"""
link_values = []
def get_link_fields(doc, doctype):
for fieldname, value in doc.items():
meta = frappe.get_meta(doctype)
df = meta.get_field(fieldname)
if not df:
continue
if df.fieldtype == "Link" and value not in INVALID_VALUES:
link_values.append([df.options, value])
elif df.fieldtype in table_fields:
for row in value:
get_link_fields(row, df.options)
get_link_fields(doc, self.doctype)
for link_doctype, link_value in link_values:
d = self.missing_link_values.get(link_doctype)
if d and d.one_mandatory and link_value in d.missing_values:
# find the autoname field
autoname_field = self.get_autoname_field(link_doctype)
name_field = autoname_field.fieldname if autoname_field else "name"
new_doc = frappe.new_doc(link_doctype)
new_doc.set(name_field, link_value)
new_doc.insert()
d.missing_values.remove(link_value)
def update_record(self, doc):
id_fieldname = self.get_id_fieldname()
id_value = doc[id_fieldname]
existing_doc = frappe.get_doc(self.doctype, id_value)
existing_doc.flags.via_data_import = self.data_import.name
existing_doc.update(doc)
existing_doc.save()
return existing_doc
def export_errored_rows(self):
from frappe.utils.csvutils import build_csv_response
if not self.data_import:
return
import_log = frappe.parse_json(self.data_import.import_log or "[]")
failures = [l for l in import_log if l.get("success") == False]
row_indexes = []
for f in failures:
row_indexes.extend(f.get("row_indexes", []))
# de-duplicate
row_indexes = list(set(row_indexes))
row_indexes.sort()
header_row = [col.header_title for col in self.columns[1:]]
rows = [header_row]
rows += [row[1:] for row in self.rows if row[0] in row_indexes]
build_csv_response(rows, self.doctype)
def get_missing_link_field_values(self, doctype):
return self.missing_link_values.get(doctype, {})
def prepare_missing_link_field_values(self):
columns = self.columns
rows = self.rows
link_column_indexes = [
col.index for col in columns if col.df and col.df.fieldtype == "Link"
]
self.missing_link_values = {}
for index in link_column_indexes:
col = columns[index]
column_values = [row[index] for row in rows]
values = set([v for v in column_values if v not in INVALID_VALUES])
doctype = col.df.options
missing_values = [value for value in values if not frappe.db.exists(doctype, value)]
if self.missing_link_values.get(doctype):
self.missing_link_values[doctype].missing_values += missing_values
else:
self.missing_link_values[doctype] = frappe._dict(
missing_values=missing_values,
one_mandatory=self.has_one_mandatory_field(doctype),
df=col.df,
)
def get_id_fieldname(self):
autoname_field = self.get_autoname_field(self.doctype)
if autoname_field:
return autoname_field.fieldname
return "name"
def get_eta(self, current, total, processing_time):
remaining = total - current
eta = processing_time * remaining
if not self.last_eta or eta < self.last_eta:
self.last_eta = eta
return self.last_eta
def has_one_mandatory_field(self, doctype):
meta = frappe.get_meta(doctype)
# get mandatory fields with default not set
mandatory_fields = [df for df in meta.fields if df.reqd and not df.default]
mandatory_fields_count = len(mandatory_fields)
if meta.autoname and meta.autoname.lower() == "prompt":
mandatory_fields_count += 1
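# Added note: an autoname of "Prompt" means the record name itself must be supplied,
# so it is counted here as one more mandatory field.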
return mandatory_fields_count == 1
def get_autoname_field(self, doctype):
meta = frappe.get_meta(doctype)
if meta.autoname and meta.autoname.startswith("field:"):
fieldname = meta.autoname[len("field:") :]
return meta.get_field(fieldname)
def print_grouped_warnings(self, warnings):
warnings_by_row = {}
other_warnings = []
for w in warnings:
if w.get("row"):
warnings_by_row.setdefault(w.get("row"), []).append(w)
else:
other_warnings.append(w)
for row_number, warnings in warnings_by_row.items():
print("Row {0}".format(row_number))
for w in warnings:
print(w.get("message"))
for w in other_warnings:
print(w.get("message"))
def print_import_log(self, import_log):
failed_records = [l for l in import_log if not l.success]
successful_records = [l for l in import_log if l.success]
if successful_records:
print(
"Successfully imported {0} records out of {1}".format(
len(successful_records), len(import_log)
)
)
if failed_records:
print("Failed to import {0} records".format(len(failed_records)))
file_name = '{0}_import_on_{1}.txt'.format(self.doctype, frappe.utils.now())
print('Check {0} for errors'.format(os.path.join('sites', file_name)))
text = ""
for w in failed_records:
text += "Row Indexes: {0}\n".format(str(w.get('row_indexes', [])))
text += "Messages:\n{0}\n".format('\n'.join(w.get('messages', [])))
text += "Traceback:\n{0}\n\n".format(w.get('exception'))
with open(file_name, 'w') as f:
f.write(text)
DATE_FORMATS = [
r"%d-%m-%Y",
r"%m-%d-%Y",
r"%Y-%m-%d",
r"%d-%m-%y",
r"%m-%d-%y",
r"%y-%m-%d",
r"%d/%m/%Y",
r"%m/%d/%Y",
r"%Y/%m/%d",
r"%d/%m/%y",
r"%m/%d/%y",
r"%y/%m/%d",
r"%d.%m.%Y",
r"%m.%d.%Y",
r"%Y.%m.%d",
r"%d.%m.%y",
r"%m.%d.%y",
r"%y.%m.%d",
]
TIME_FORMATS = [
r"%H:%M:%S.%f",
r"%H:%M:%S",
r"%H:%M",
r"%I:%M:%S.%f %p",
r"%I:%M:%S %p",
r"%I:%M %p",
]
def guess_date_format(date_string):
date_string = date_string.strip()
_date = None
_time = None
if " " in date_string:
_date, _time = date_string.split(" ", 1)
else:
_date = date_string
date_format = None
time_format = None
for f in DATE_FORMATS:
try:
# if date is parsed without any exception
# capture the date format
datetime.strptime(_date, f)
date_format = f
break
except ValueError:
pass
if _time:
for f in TIME_FORMATS:
try:
# if time is parsed without any exception
# capture the time format
datetime.strptime(_time, f)
time_format = f
break
except ValueError:
pass
full_format = date_format
if time_format:
full_format += " " + time_format
return full_format
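# Examples (added comment): guess_date_format("31.12.2020 23:05") returns
# "%d.%m.%Y %H:%M", a bare date such as "2020-12-31" returns "%Y-%m-%d",
# and when nothing matches any known format the function falls through and returns None.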
def import_data(doctype, file_path):
i = Importer(doctype, file_path)
i.import_data()
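# Usage sketch (added comment): typically run inside an initialized site context,
# e.g. `bench --site mysite console`, with illustrative arguments:
#
#     import_data("ToDo", "/private/files/todos.csv")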
# --- File: /python/pyfiles/算术运算符.py (repo: AndyFlower/zixin, no license) ---
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 1 22:57:02 2020
@author: sanglp
"""
# + operator
print(3+5)
print(3.4+4.5)
print((3+4j)+(4+5j))
print('abc'+'def')
print([1,2]+[3,4])
print((1,2)+(3,))
# - operator
print(7.9 - 4.5)  # floating-point arithmetic carries rounding error
print(5-3)
num = 3
print(-num)
print(--num)
print({1,2,3}-{3,4,5})  # set difference
# * operator
print(3333*5555)
print((3+4j)*(5+6j))
print('重要的事情说3遍'*3)
print([0]*5)
print((0,)*3)
# / and // operators
print(17 / 4)
print(17 // 4) #4
print((-17) / 4)
print((-17) // 4) #-5
# % operator
print(365 %7)
print(365 %2)
print('%c,%c,%c' % (65,97,48))  # %c formats integers as characters: A,a,0
# ** operator
print(2 ** 4)
print(3 ** 3 ** 3)
print(3 ** (3**3))
print((3**3)**3)
print(9**0.5)
print((-1)**0.5)  # a fractional power of a negative number yields a complex result (approximately 1j), not a negative real
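# Added examples (not in the original script):
# divmod() bundles floor division and modulo into a single call
print(divmod(17, 4))   # (4, 1)
# for an explicit complex square root of a negative number, use cmath
import cmath
print(cmath.sqrt(-1))  # 1j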
"[email protected]"
] | |
# --- File: /venv/css_selectors/sports_bet_page_locators.py (repo: Swingyboy/pronet_design_testing, no license) ---
from selenium.webdriver.common.by import By
class SportsBetPageLocators:
    UPCOMING_EVENTS_BAR = (By.CSS_SELECTOR, 'upcoming-events > div > div.modul-header')
    LIVE_BET_BAR = (By.CSS_SELECTOR, 'live-at-now > div > div.modul-header')
    ESPORTS_BAR = (By.CSS_SELECTOR, 'app-esports > div > div.modul-header')
    TODAY_EVENT_BAR = (By.CSS_SELECTOR, 'todays-sport-types > div > div.modul-header')
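# Usage sketch (added, hypothetical): how a test or page object might consume these
# locators. The URL and driver setup are assumptions, not part of the original project.
#
#     from selenium import webdriver
#
#     driver = webdriver.Chrome()
#     driver.get("https://example.com/sports-bets")   # hypothetical URL
#     live_bar = driver.find_element(*SportsBetPageLocators.LIVE_BET_BAR)
#     assert live_bar.is_displayed()
#     driver.quit()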
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.