Dataset schema for the records below (⌀ marks a nullable column):

| Column | Type | Observed range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
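Each row below is one flattened record of this table, with a complete source file embedded in its content column. As a minimal, hedged sketch of how such a dump can be loaded and inspected (the Parquet layout and file glob are assumptions about how the export is stored, not stated by the dump itself):

from datasets import load_dataset  # Hugging Face `datasets` library

# Hypothetical local export of this dump; adjust the glob to your copy.
ds = load_dataset("parquet", data_files="data/*.parquet", split="train")

row = ds[0]
# Metadata columns from the schema above...
print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
# ...and the embedded source file itself.
print(row["content"][:200])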
29d497b0958f5306ca1af7ce54ce68796eaabfc5 | 0486b6ccf883e9cd7a24bbd89b5420e7de2172b9 | /DRF Study Material/Django REST Code/gs40/gs40/settings.py | 385459f2976108c2c7956f10aac8a7fd6bed0e5f | [] | no_license | ajitexl/restfrmaework | 2980203d7faa6c8364288283758d32c8f2a37817 | 9ab203748e623516365d9924dcc68acc786a66e1 | refs/heads/main | 2023-02-03T08:52:00.672047 | 2020-12-10T09:50:51 | 2020-12-10T09:50:51 | 320,222,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,211 | py | """
Django settings for gs40 project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#n6u1#xqh2!drsp9790#cbu5p4tms9p4sod=x(051^82j8a*w1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gs40.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gs40.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
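Given the "keep the secret key used in production secret!" warning above, a common hardening sketch (the environment-variable name is an assumption, not part of this project):

import os
# Prefer an environment variable in production; fall back to the dev key above.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)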
| [
"[email protected]"
] | |
9a9dfa7493053bf7d54ab1b2f0a6907ca4e2210b | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-css/huaweicloudsdkcss/v1/model/show_cluster_volume_rsp.py | 67727eed536ac36df3ddb9ef1537a0c6c9c17134 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,714 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowClusterVolumeRsp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'type': 'str',
'size': 'int'
}
attribute_map = {
'type': 'type',
'size': 'size'
}
def __init__(self, type=None, size=None):
"""ShowClusterVolumeRsp
The model defined in huaweicloud sdk
        :param type: Instance disk type.
:type type: str
        :param size: Instance disk size.
:type size: int
"""
self._type = None
self._size = None
self.discriminator = None
if type is not None:
self.type = type
if size is not None:
self.size = size
@property
def type(self):
"""Gets the type of this ShowClusterVolumeRsp.
        Instance disk type.
:return: The type of this ShowClusterVolumeRsp.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ShowClusterVolumeRsp.
        Instance disk type.
:param type: The type of this ShowClusterVolumeRsp.
:type type: str
"""
self._type = type
@property
def size(self):
"""Gets the size of this ShowClusterVolumeRsp.
        Instance disk size.
:return: The size of this ShowClusterVolumeRsp.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this ShowClusterVolumeRsp.
        Instance disk size.
:param size: The size of this ShowClusterVolumeRsp.
:type size: int
"""
self._size = size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowClusterVolumeRsp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
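A short usage sketch of the model above (the field values are made up for illustration):

volume = ShowClusterVolumeRsp(type='HIGH', size=100)
print(volume.to_dict())  # {'type': 'HIGH', 'size': 100}
print(volume == ShowClusterVolumeRsp(type='HIGH', size=100))  # True: __eq__ compares __dict__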
| [
"[email protected]"
] | |
0d93842ad2b9ac62adc555b2ee2d299216b6633c | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_5/gdljam001/mymath.py | 16fc26c21b16d5f866005d3e2ae831c8a1999015 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | #get Integer
#James Godlonton
#12 April 2014
def get_integer(choice):
ans=input("Enter "+choice+":\n")
while not ans.isdigit():
ans=input("Enter "+choice+":\n")
return eval(ans)
def calc_factorial(x):
retVal=1
for i in range(1,x+1):
retVal=retVal*i
return retVal
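A quick check of the pure helper above (get_integer is interactive, so only calc_factorial is exercised here; note that int(ans) would be a safer conversion than eval(ans), since isdigit() already guarantees the string is all digits):

print(calc_factorial(5))  # 120
print(calc_factorial(0))  # 1 (empty product)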
| [
"[email protected]"
] | |
7f8d9c571f666fc5e9028d7c61e5a706929ef742 | ab5392cc0fc17cdc3feca3eb7b32e79b6be60ef7 | /hrp/internal/scaffold/templates/testcases/demo_ref_testcase_test.py | 714030cdab14a9b2a13fba8363f41bb01715bb3d | [
"Apache-2.0"
] | permissive | Jason-Fu/HttpRunner | 3ad5def3e5aa49f073d8cef75e75a4654ac3ec38 | 69495c9eb3e19eaf6f7af438b6a0437050f32315 | refs/heads/master | 2022-06-04T07:28:47.570096 | 2022-05-05T13:52:01 | 2022-05-05T13:52:01 | 195,747,138 | 0 | 0 | null | 2019-07-08T06:06:36 | 2019-07-08T06:06:36 | null | UTF-8 | Python | false | false | 1,678 | py | # NOTE: Generated By HttpRunner v4.0.0
# FROM: testcases/demo_ref_testcase.yml
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
from httprunner import HttpRunner, Config, Step, RunRequest, RunTestCase
from testcases.demo_requests_test import TestCaseDemoRequests as DemoRequests
class TestCaseDemoRefTestcase(HttpRunner):
config = (
Config("request methods testcase: reference testcase")
.variables(
**{
"foo1": "testsuite_config_bar1",
"expect_foo1": "testsuite_config_bar1",
"expect_foo2": "config_bar2",
}
)
.base_url("https://postman-echo.com")
.verify(False)
)
teststeps = [
Step(
RunTestCase("request with functions")
.with_variables(
**{"foo1": "testcase_ref_bar1", "expect_foo1": "testcase_ref_bar1"}
)
.call(DemoRequests)
.export(*["foo3"])
),
Step(
RunRequest("post form data")
.with_variables(**{"foo1": "bar1"})
.post("/post")
.with_headers(
**{
"User-Agent": "funplugin/${get_version()}",
"Content-Type": "application/x-www-form-urlencoded",
}
)
.with_data("foo1=$foo1&foo2=$foo3")
.validate()
.assert_equal("status_code", 200)
.assert_equal("body.form.foo1", "bar1")
.assert_equal("body.form.foo2", "bar21")
),
]
if __name__ == "__main__":
TestCaseDemoRefTestcase().test_start()
| [
"[email protected]"
] | |
a3422ab27af13b221285949eb7b8f385e80b3318 | 7cc0e0ae806a4f580100a1ae0d120cab37ccddff | /Atividades1/At1Q45.py | a7ef1b6a4c6ab72526f6383ee1548fc08c44860c | [] | no_license | osmarsalesjr/AtividadesProfFabioGomesEmPython3 | 2ac10cebb7887798a39d9029fe205619f3fd481a | a8f2536e34ed8897011536135a1937689d6c3144 | refs/heads/master | 2021-01-01T06:43:21.301461 | 2017-07-17T16:22:09 | 2017-07-17T16:22:09 | 97,496,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py |
def main():
valor = float(input("Qual o Valor a Sacar em R$? "))
conta_notas(valor)
def conta_notas(valor):
notas_cem, notas_cinquenta, notas_vinte, notas_dez = 0, 0, 0, 0
notas_cinco, notas_dois, notas_um = 0, 0, 0
while valor >= 100:
valor = valor - 100
notas_cem = notas_cem + 1
while valor >= 50:
valor = valor - 50
notas_cinquenta = notas_cinquenta + 1
while valor >= 20:
valor = valor - 20
notas_vinte = notas_vinte + 1
while valor >= 10:
valor = valor - 10
notas_dez = notas_dez + 1
while valor >= 5:
valor = valor - 5
notas_cinco = notas_cinco + 1
while valor >= 2:
valor = valor - 2
notas_dois = notas_dois + 1
notas_um = int(valor)
print(">> Quantidade de notas a serem recebidas: ")
print("Notas de R$ 100: %d\nNotas de R$ 50: %d\nNotas de R$ 20: %d"%(notas_cem, notas_cinquenta, notas_vinte))
print("Notas de R$ 10: %d\nNotas de R$ 5: %d\nNotas de R$ 1: %d"%(notas_dez, notas_cinco, notas_um))
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
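The note-counting loops in At1Q45.py above can be collapsed with divmod; a compact sketch of the same greedy change-making idea (the rewrite is illustrative, not part of the original exercise):

def conta_notas_divmod(valor):
    valor = int(valor)
    for nota in (100, 50, 20, 10, 5, 2, 1):
        quantidade, valor = divmod(valor, nota)  # how many notes fit, and what remains
        print("Notas de R$ %d: %d" % (nota, quantidade))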
bccdece2cd099fc2dc4805d24691218c61918883 | 55a2a89fe752dc72ca353c091a47628c830e4117 | /classification/tests/test_classifier.py | 3a92278b2c73de077df8f2a1e59980057667a367 | [
"Apache-2.0"
] | permissive | hercules261188/serverless-transformers-on-aws-lambda | 28df74886154906494f7315298d534000f784b41 | d48caab0e07ae8326d4b37ab730faf2cffd02f7d | refs/heads/master | 2023-07-09T01:37:49.042169 | 2021-08-20T08:39:46 | 2021-08-20T08:39:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from src.classifier import Classifier
pipeline = Classifier()
def test_response(requests, response):
assert response == pipeline(requests)
| [
"[email protected]"
] | |
2d0d1a621fa5ff4224c806a23ae0828c5a4408ae | 999f3f3da1cb70cb5872f99a09d65d7c4df71cf7 | /src/data/1061.py | 51dbb3f209cfabf9eaecdccf40b8420c755a3ff3 | [
"MIT"
] | permissive | NULLCT/LOMC | 0f0d1f01cce1d5633e239d411565ac7f0c687955 | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | refs/heads/main | 2023-07-27T17:03:46.703022 | 2021-09-04T08:58:45 | 2021-09-04T08:58:45 | 396,290,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | import sys
input = sys.stdin.readline
from collections import defaultdict, deque
def II():
return int(input())
def IS():
return input().rstrip()
def MI():
return map(int, input().split())
def LI():
return list(map(int, input().split()))
def MS():
return input().rstrip().split()
def LS():
return list(input().rstrip())
n, Q = MI()
d = defaultdict(list)
for i in range(n - 1):
a, b = MI()
a -= 1
b -= 1
d[a].append(b)
d[b].append(a)
def biper(n: int, d: defaultdict(list)):
seen = [0] * n
parity = [0] * n
q = deque()
q.append((0, 0))
while q:
v, p = q.pop()
if seen[v] == 0:
seen[v] = 1
parity[v] = p
else:
continue
for to in d[v]:
if seen[to] == 0:
if p == 0:
q.appendleft((to, 1))
else:
q.appendleft((to, 0))
else:
continue
return parity
l = biper(n, d)
# print(l)
for i in range(Q):
C, D = MI()
C -= 1
D -= 1
if l[C] == l[D]:
print('Town')
else:
print('Road')
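A small worked example of the parity logic above (hypothetical input, not from the dump): the script prints 'Town' exactly when the two queried nodes are an even distance apart in the tree.

# Tree with edges 1-2 and 2-3 gives the 0-indexed parity list [0, 1, 0]:
# query (1, 3): parity 0 == 0 -> even distance -> 'Town'
# query (1, 2): parity 0 != 1 -> odd distance  -> 'Road'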
| [
"[email protected]"
] | |
429d79300bcd0ab017f398e7e54a70ad643630b9 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_jimmy17_B.py | 02e02091c0d5cf8bb9357e91027a4f4de051b7df | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 556 | py | tests = int(raw_input())
for i in range(tests):
result = 0
pancakes = raw_input()
state = pancakes[0]
has_changed = False
plus_exists = False
for ch in pancakes:
if ch != state:
has_changed = True
if state == '+':
plus_exists = True
result += 2
else:
if not plus_exists:
result += 1
state = ch
if has_changed == False and state == '-':
result = 1
print 'Case #'+ str(i+1)+': '+ str(result)
| [
"[[email protected]]"
] | |
feaa101319f2ec0937022c8f51000b88ddd84e02 | 78ca13fcd5a7556227136814703d2536880591b7 | /list_iteration/py/py3_list_iter_02.py | dace7dcba9da6b3a7164190c1d1bebfe56b02e4a | [] | no_license | dheerajs0346/PYPL | 24a7658607eb905e012d49f86a66216b37398918 | ebfadd75d44ef17e78e4cf1daf8f9e2f66c5ee30 | refs/heads/master | 2023-03-10T04:56:37.056033 | 2021-02-17T18:50:14 | 2021-02-17T18:50:14 | 375,725,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | ## list iteration methods in Py
## author: Vladimir Kulyukin
lst = [1, 2, 3, 4]
## use lazy range from 0 upto the length of lst - 1.
## index into a specific spot in lst.
for i in range(0, len(lst)):
print(lst[i])
print('i=', i) # i is bound to its last value in for-loop
## the above for-loop and print produce the following output:
## 1
## 2
## 3
## 4
## i= 3
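In the spirit of the iteration notes above, two more idiomatic variants (a sketch, not from the original file):

for item in lst:                  # direct iteration, no index needed
    print(item)
for i, item in enumerate(lst):    # index and value together
    print(i, item)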
| [
"[email protected]"
] | |
57a1603b2f96aa4b651f5e650ca820a55bba227b | 7bdca6fb5f972586efcfb379cded7a5e3891d27c | /pymaster/tests/test_masking_flat.py | f056e2a287de98f84cf497b0d467039f9c08d1e2 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | LSSTDESC/NaMaster | 0c31705a5ca5d57d0ad8e7af80dc071811cdfc81 | b45317f840320855b7e38799fa411782e2886289 | refs/heads/master | 2023-08-11T11:57:13.177268 | 2023-08-01T12:11:21 | 2023-08-01T12:11:21 | 142,736,704 | 47 | 25 | BSD-3-Clause | 2023-08-01T12:11:23 | 2018-07-29T06:29:34 | C | UTF-8 | Python | false | false | 2,280 | py | import pytest
import numpy as np
import pymaster as nmt
class MaskingTesterFlat(object):
def __init__(self):
self.nx = self.ny = 200
self.lx = self.ly = np.radians(10.)
self.msk = np.zeros([self.ny, self.nx])
self.msk[:self.ny//2, :] = 1.
self.aposize = 1.
self.inv_xthr = 1./np.radians(self.aposize)
self.ioff = self.ny//2-int(np.radians(self.aposize)/(self.ly/self.ny))
MT = MaskingTesterFlat()
def test_mask_flat_errors():
with pytest.raises(ValueError): # Badly shaped input
nmt.mask_apodization_flat(MT.msk[0], MT.lx,
MT.ly, MT.aposize,
apotype="C1")
with pytest.raises(RuntimeError): # Negative apodization
nmt.mask_apodization_flat(MT.msk, MT.lx, MT.ly,
-MT.aposize, apotype="C1")
with pytest.raises(RuntimeError): # Wrong apodization type
nmt.mask_apodization_flat(MT.msk, MT.lx, MT.ly,
MT.aposize, apotype="C3")
def test_mask_flat_c1():
msk_apo = nmt.mask_apodization_flat(MT.msk, MT.lx, MT.ly,
MT.aposize, apotype="C1")
# Below transition
assert (msk_apo[MT.ny//2:, :] < 1E-10).all()
# Above transition
assert (np.fabs(msk_apo[:MT.ioff, :]-1.) < 1E-10).all()
# Within transition
ind_transition = np.arange(MT.ioff, MT.ny//2, dtype=int)
x = MT.inv_xthr*np.fabs((MT.ny/2.-ind_transition)*MT.ly/MT.ny)
f = x-np.sin(x*2*np.pi)/(2*np.pi)
assert (np.fabs(msk_apo[ind_transition, :] - f[:, None])
< 1E-10).all()
def test_mask_flat_c2():
msk_apo = nmt.mask_apodization_flat(MT.msk, MT.lx,
MT.ly, MT.aposize,
apotype="C2")
# Below transition
assert (msk_apo[MT.ny//2:, :] < 1E-10).all()
# Above transition
assert (np.fabs(msk_apo[:MT.ioff, :]-1.) < 1E-10).all()
# Within transition
ind_transition = np.arange(MT.ioff, MT.ny//2, dtype=int)
x = MT.inv_xthr*np.fabs((MT.ny/2.-ind_transition)*MT.ly/MT.ny)
f = 0.5*(1-np.cos(x*np.pi))
assert (np.fabs(msk_apo[ind_transition, :] -
f[:, None]) < 1E-10).all()
| [
"[email protected]"
] | |
e84af738a00cae662826ffe2c63e25e47325c97c | e854337c828f355a1c916c3adffcff56b069e4c2 | /flights/migrations/0003_passenger.py | 175a8cd40a837cff8c71430748aedc3938a81ab1 | [] | no_license | viralsir/djangoProject_evening | a6c90e4e4a1d689de53f46d53366db0a7fc410f5 | 1fb05e66df1373c8bf23e7344f388f9ace1ea7cf | refs/heads/master | 2023-04-24T11:54:41.338160 | 2021-05-21T12:15:52 | 2021-05-21T12:15:52 | 361,741,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | # Generated by Django 3.2 on 2021-05-10 12:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('flights', '0002_auto_20210510_1715'),
]
operations = [
migrations.CreateModel(
name='passenger',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=40)),
('flights', models.ManyToManyField(related_name='passenger', to='flights.flight')),
],
),
]
| [
"[email protected]"
] | |
23153ae789f6cb8f400fa348bf266dffb1b07264 | c56ddcc2807151a5c44d3a1d65a1984bc8fd9b84 | /4 кю/Strip Comments.py | ac850d313c2f546cb6f74af406e94c9aa4b26948 | [] | no_license | kelpasa/Code_Wars_Python | 2cd18dd404603a6535887e8e6ed2d08da19562ba | 939ec1dd08ffc7939bb9a139bf42901d6f24fbdd | refs/heads/master | 2022-12-17T02:00:28.319351 | 2020-09-23T09:11:20 | 2020-09-23T09:11:20 | 246,642,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | import re
def solution(s, markers):
if not markers:
return s.strip()
pattern = re.compile(
' *[{}].*\n'
.format(''.join([m if m not in '-^' else '\\' + m for m in markers]))
)
return re.sub(pattern, '\n', s + '\n')[:-1]
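A quick check of the kata solution above using its canonical example (the character class escapes only '-' and '^', which is enough for markers like '#' and '!'):

print(solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"]))
# apples, pears
# grapes
# bananas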
| [
"[email protected]"
] | |
5b76f8005aa35ad3b95eaf1d9c01c4b33046a647 | ef198b8a5625626773351ca8de3da6bd7969be25 | /0x0F-python-object_relational_mapping/1-filter_states.py | d90b3afd9dde3c46e9169e77c1a160d564a5f998 | [] | no_license | fdetun/holbertonschool-higher_level_programming | efde2762a55066f9c571a3f6ea4b724af96be6a8 | 3733fc52fddab9df3bc51e6ea2905dad0eefe9ae | refs/heads/master | 2022-12-26T10:28:15.381013 | 2020-09-26T02:12:48 | 2020-09-26T02:12:48 | 259,254,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | #!/usr/bin/python3
"""
Lists the states from the given database whose name starts with an uppercase N.
"""
import MySQLdb as mdb
import sys
def byfoued():
"""
    Queries the states table and prints every row whose name starts with N (by foued).
"""
fdb = mdb.connect(host='localhost',
port=3306,
user=sys.argv[1],
passwd=sys.argv[2],
db=sys.argv[3]
)
cursor = fdb.cursor()
cursor.execute("""SELECT * FROM states WHERE name
LIKE BINARY 'N%' ORDER BY id ASC""")
fd = cursor.fetchall()
for i in fd:
print(i)
cursor.close()
fdb.close()
if __name__ == "__main__":
byfoued()
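The query above is static, so the inlined 'N%' pattern is safe here; for variable patterns, MySQLdb's parameter substitution is the safer form (a sketch, not part of the original task):

cursor.execute("""SELECT * FROM states WHERE name
               LIKE BINARY %s ORDER BY id ASC""", ("N%",))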
| [
"[email protected]"
] | |
01c1a755a35000e35640feb4f78c75a47abd78a0 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/network/v20210201/express_route_circuit_connection.py | 9705f4f1c841b996e62a96d821c1fe0952cb8ad2 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,870 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ExpressRouteCircuitConnectionArgs', 'ExpressRouteCircuitConnection']
@pulumi.input_type
class ExpressRouteCircuitConnectionArgs:
def __init__(__self__, *,
circuit_name: pulumi.Input[str],
peering_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
address_prefix: Optional[pulumi.Input[str]] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
express_route_circuit_peering: Optional[pulumi.Input['SubResourceArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
ipv6_circuit_connection_config: Optional[pulumi.Input['Ipv6CircuitConnectionConfigArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
peer_express_route_circuit_peering: Optional[pulumi.Input['SubResourceArgs']] = None):
"""
The set of arguments for constructing a ExpressRouteCircuitConnection resource.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] address_prefix: /29 IP address space to carve out Customer addresses for tunnels.
:param pulumi.Input[str] authorization_key: The authorization key.
:param pulumi.Input[str] connection_name: The name of the express route circuit connection.
:param pulumi.Input['SubResourceArgs'] express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input['Ipv6CircuitConnectionConfigArgs'] ipv6_circuit_connection_config: IPv6 Address PrefixProperties of the express route circuit connection.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input['SubResourceArgs'] peer_express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the peered circuit.
"""
pulumi.set(__self__, "circuit_name", circuit_name)
pulumi.set(__self__, "peering_name", peering_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if address_prefix is not None:
pulumi.set(__self__, "address_prefix", address_prefix)
if authorization_key is not None:
pulumi.set(__self__, "authorization_key", authorization_key)
if connection_name is not None:
pulumi.set(__self__, "connection_name", connection_name)
if express_route_circuit_peering is not None:
pulumi.set(__self__, "express_route_circuit_peering", express_route_circuit_peering)
if id is not None:
pulumi.set(__self__, "id", id)
if ipv6_circuit_connection_config is not None:
pulumi.set(__self__, "ipv6_circuit_connection_config", ipv6_circuit_connection_config)
if name is not None:
pulumi.set(__self__, "name", name)
if peer_express_route_circuit_peering is not None:
pulumi.set(__self__, "peer_express_route_circuit_peering", peer_express_route_circuit_peering)
@property
@pulumi.getter(name="circuitName")
def circuit_name(self) -> pulumi.Input[str]:
"""
The name of the express route circuit.
"""
return pulumi.get(self, "circuit_name")
@circuit_name.setter
def circuit_name(self, value: pulumi.Input[str]):
pulumi.set(self, "circuit_name", value)
@property
@pulumi.getter(name="peeringName")
def peering_name(self) -> pulumi.Input[str]:
"""
The name of the peering.
"""
return pulumi.get(self, "peering_name")
@peering_name.setter
def peering_name(self, value: pulumi.Input[str]):
pulumi.set(self, "peering_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> Optional[pulumi.Input[str]]:
"""
/29 IP address space to carve out Customer addresses for tunnels.
"""
return pulumi.get(self, "address_prefix")
@address_prefix.setter
def address_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address_prefix", value)
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> Optional[pulumi.Input[str]]:
"""
The authorization key.
"""
return pulumi.get(self, "authorization_key")
@authorization_key.setter
def authorization_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorization_key", value)
@property
@pulumi.getter(name="connectionName")
def connection_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the express route circuit connection.
"""
return pulumi.get(self, "connection_name")
@connection_name.setter
def connection_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "connection_name", value)
@property
@pulumi.getter(name="expressRouteCircuitPeering")
def express_route_circuit_peering(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
"""
return pulumi.get(self, "express_route_circuit_peering")
@express_route_circuit_peering.setter
def express_route_circuit_peering(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "express_route_circuit_peering", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="ipv6CircuitConnectionConfig")
def ipv6_circuit_connection_config(self) -> Optional[pulumi.Input['Ipv6CircuitConnectionConfigArgs']]:
"""
IPv6 Address PrefixProperties of the express route circuit connection.
"""
return pulumi.get(self, "ipv6_circuit_connection_config")
@ipv6_circuit_connection_config.setter
def ipv6_circuit_connection_config(self, value: Optional[pulumi.Input['Ipv6CircuitConnectionConfigArgs']]):
pulumi.set(self, "ipv6_circuit_connection_config", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="peerExpressRouteCircuitPeering")
def peer_express_route_circuit_peering(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
Reference to Express Route Circuit Private Peering Resource of the peered circuit.
"""
return pulumi.get(self, "peer_express_route_circuit_peering")
@peer_express_route_circuit_peering.setter
def peer_express_route_circuit_peering(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "peer_express_route_circuit_peering", value)
class ExpressRouteCircuitConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_prefix: Optional[pulumi.Input[str]] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ipv6_circuit_connection_config: Optional[pulumi.Input[pulumi.InputType['Ipv6CircuitConnectionConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Express Route Circuit Connection in an ExpressRouteCircuitPeering resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address_prefix: /29 IP address space to carve out Customer addresses for tunnels.
:param pulumi.Input[str] authorization_key: The authorization key.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[str] connection_name: The name of the express route circuit connection.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[pulumi.InputType['Ipv6CircuitConnectionConfigArgs']] ipv6_circuit_connection_config: IPv6 Address PrefixProperties of the express route circuit connection.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] peer_express_route_circuit_peering: Reference to Express Route Circuit Private Peering Resource of the peered circuit.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ExpressRouteCircuitConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Express Route Circuit Connection in an ExpressRouteCircuitPeering resource.
:param str resource_name: The name of the resource.
:param ExpressRouteCircuitConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ExpressRouteCircuitConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_prefix: Optional[pulumi.Input[str]] = None,
authorization_key: Optional[pulumi.Input[str]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ipv6_circuit_connection_config: Optional[pulumi.Input[pulumi.InputType['Ipv6CircuitConnectionConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_express_route_circuit_peering: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ExpressRouteCircuitConnectionArgs.__new__(ExpressRouteCircuitConnectionArgs)
__props__.__dict__["address_prefix"] = address_prefix
__props__.__dict__["authorization_key"] = authorization_key
if circuit_name is None and not opts.urn:
raise TypeError("Missing required property 'circuit_name'")
__props__.__dict__["circuit_name"] = circuit_name
__props__.__dict__["connection_name"] = connection_name
__props__.__dict__["express_route_circuit_peering"] = express_route_circuit_peering
__props__.__dict__["id"] = id
__props__.__dict__["ipv6_circuit_connection_config"] = ipv6_circuit_connection_config
__props__.__dict__["name"] = name
__props__.__dict__["peer_express_route_circuit_peering"] = peer_express_route_circuit_peering
if peering_name is None and not opts.urn:
raise TypeError("Missing required property 'peering_name'")
__props__.__dict__["peering_name"] = peering_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["circuit_connection_status"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20210201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20180801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20181001:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20181101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20181201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20190901:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20191101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20191201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200301:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200501:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitConnection"), 
pulumi.Alias(type_="azure-native:network/v20200601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20200801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-native:network/v20201101:ExpressRouteCircuitConnection"), pulumi.Alias(type_="azure-nextgen:network/v20201101:ExpressRouteCircuitConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitConnection, __self__).__init__(
'azure-native:network/v20210201:ExpressRouteCircuitConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitConnection':
"""
Get an existing ExpressRouteCircuitConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ExpressRouteCircuitConnectionArgs.__new__(ExpressRouteCircuitConnectionArgs)
__props__.__dict__["address_prefix"] = None
__props__.__dict__["authorization_key"] = None
__props__.__dict__["circuit_connection_status"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["express_route_circuit_peering"] = None
__props__.__dict__["ipv6_circuit_connection_config"] = None
__props__.__dict__["name"] = None
__props__.__dict__["peer_express_route_circuit_peering"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return ExpressRouteCircuitConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
/29 IP address space to carve out Customer addresses for tunnels.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter(name="authorizationKey")
def authorization_key(self) -> pulumi.Output[Optional[str]]:
"""
The authorization key.
"""
return pulumi.get(self, "authorization_key")
@property
@pulumi.getter(name="circuitConnectionStatus")
def circuit_connection_status(self) -> pulumi.Output[str]:
"""
Express Route Circuit connection state.
"""
return pulumi.get(self, "circuit_connection_status")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="expressRouteCircuitPeering")
def express_route_circuit_peering(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
Reference to Express Route Circuit Private Peering Resource of the circuit initiating connection.
"""
return pulumi.get(self, "express_route_circuit_peering")
@property
@pulumi.getter(name="ipv6CircuitConnectionConfig")
def ipv6_circuit_connection_config(self) -> pulumi.Output[Optional['outputs.Ipv6CircuitConnectionConfigResponse']]:
"""
IPv6 Address PrefixProperties of the express route circuit connection.
"""
return pulumi.get(self, "ipv6_circuit_connection_config")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerExpressRouteCircuitPeering")
def peer_express_route_circuit_peering(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
Reference to Express Route Circuit Private Peering Resource of the peered circuit.
"""
return pulumi.get(self, "peer_express_route_circuit_peering")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the express route circuit connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the resource.
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
3203eda17fc7fab819b461afd5e033a342bf2850 | eb215d9387a8aa006fbb3c1339cf34bdba82ec32 | /app/controllers/Favorites.py | 064bbdbc6dc74d744312d9c547e1e99eb9cd98b8 | [] | no_license | elliotsyoung/crime-dodger | ba7d98e593e650c3937565bd47a95fdca7129203 | 268fa82250c98ab1765eed547a515d08c581911a | refs/heads/master | 2021-01-22T05:27:45.296838 | 2016-09-01T22:30:43 | 2016-09-01T22:30:43 | 67,058,461 | 0 | 0 | null | 2016-08-31T17:22:26 | 2016-08-31T17:22:26 | null | UTF-8 | Python | false | false | 431 | py | from system.core.controller import *
class Favorites(Controller):
def __init__(self, action):
super(Favorites, self).__init__(action)
self.load_model('Crime')
self.load_model('Favorite')
self.load_model('User')
self.db = self._app.db
def edit(self,id):
favorite=self.models['Favorite'].get_favorite(id)
return self.load_view('/favorites/edit.html',favorite=favorite[0]) | [
"[email protected]"
] | |
ad0da0a7a513c06187cec186221cfab19e7cc10a | 1e21f0939d4c46db8eeca9fa8ef034ed14b7a549 | /PhotonIDSFs/TnP_76X/test/fitterWithManyTemplates_DataMVA_fitRange.py | c72472006fee506735238be1e18a7700247c22ef | [] | no_license | Ming-Yan/photonTnp | 4e46286998d4e2806e423e2e27893c0a8675494f | 5468bea3eff51b21eed2701cda4f3e5d2ad9e6bf | refs/heads/master | 2021-10-08T20:33:55.910375 | 2018-10-22T09:12:26 | 2018-10-22T09:12:26 | 162,109,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,689 | py | import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
import PhysicsTools.TagAndProbe.baseline.commonFitMVA_fitRange as common
options = VarParsing('analysis')
options.register(
"isMC",
# True,
False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"Compute efficiency for MC"
)
options.register(
"inputFileName",
#"/afs/cern.ch/work/i/ishvetso/public/for_Matteo/TnPTree_mc-powheg.root",
"/data2/pwang/TnP/76X_v2/TnP_Data.root",
#"TnP_Data.root",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"Input filename"
)
options.register(
"outputFileName",
"",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"Output filename"
)
options.register(
"idName",
"passingMVA",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"ID variable name as in the fitter_tree"
)
options.register(
"dirName",
"PhotonToRECO",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"Folder name containing the fitter_tree"
)
options.register(
"doCutAndCount",
False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"Do not compute fitting, just cut and count"
)
options.parseArguments()
process = cms.Process("TagProbe")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.destinations = ['cout', 'cerr']
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
################################################
InputFileName = options.inputFileName
OutputFile = "efficiency-mc-"+options.idName
if (not options.isMC):
OutputFile = "efficiency-data-"+options.idName
if (options.outputFileName != ""):
OutputFile = OutputFile+"-"+options.outputFileName+".root"
else:
OutputFile = OutputFile+".root"
################################################
#specifies the binning of parameters
EfficiencyBins = cms.PSet(
# probe_Ele_et = cms.vdouble(20. ,40. ,60. ,100.),
# probe_sc_eta = cms.vdouble(-2.5, -1.0, 0.0, 1.0, 2.5),
probe_sc_eta = cms.vdouble(-2.5,-1.566,-1.4442,-1.0,0.0, 1.0, 1.4442, 1.566, 2.5),
probe_sc_et = cms.vdouble(20. ,30, 40. ,50., 200.),
)
DataBinningSpecification = cms.PSet(
UnbinnedVariables = cms.vstring("mass"),
BinnedVariables = cms.PSet(EfficiencyBins),
BinToPDFmap = cms.vstring(
"mva_20p0To30p0_0p0To1p0",
#"mva_40p0To50p0_1p0To1p4442",
"*et_bin0*eta_bin0*","mva_20p0To30p0_m2p5Tom1p566",
"*et_bin0*eta_bin1*","mva_20p0To30p0_m1p566Tom1p4442",
"*et_bin0*eta_bin2*","mva_20p0To30p0_m1p4442Tom1p0",
"*et_bin0*eta_bin3*","mva_20p0To30p0_m1p0To0p0",
"*et_bin0*eta_bin4*","mva_20p0To30p0_0p0To1p0",
"*et_bin0*eta_bin5*","mva_20p0To30p0_1p0To1p4442",
"*et_bin0*eta_bin6*","mva_20p0To30p0_1p4442To1p566",
"*et_bin0*eta_bin7*","mva_20p0To30p0_1p566To2p5",
"*et_bin1*eta_bin0*","mva_30p0To40p0_m2p5Tom1p566",
"*et_bin1*eta_bin1*","mva_30p0To40p0_m1p566Tom1p4442",
"*et_bin1*eta_bin2*","mva_30p0To40p0_m1p4442Tom1p0",
"*et_bin1*eta_bin3*","mva_30p0To40p0_m1p0To0p0",
"*et_bin1*eta_bin4*","mva_30p0To40p0_0p0To1p0",
"*et_bin1*eta_bin5*","mva_30p0To40p0_1p0To1p4442",
"*et_bin1*eta_bin6*","mva_30p0To40p0_1p4442To1p566",
"*et_bin1*eta_bin7*","mva_30p0To40p0_1p566To2p5",
"*et_bin2*eta_bin0*","mva_40p0To50p0_m2p5Tom1p566",
"*et_bin2*eta_bin1*","mva_40p0To50p0_m1p566Tom1p4442",
"*et_bin2*eta_bin2*","mva_40p0To50p0_m1p4442Tom1p0",
"*et_bin2*eta_bin3*","mva_40p0To50p0_m1p0To0p0",
"*et_bin2*eta_bin4*","mva_40p0To50p0_0p0To1p0",
"*et_bin2*eta_bin5*","mva_40p0To50p0_1p0To1p4442",
"*et_bin2*eta_bin6*","mva_40p0To50p0_1p4442To1p566",
"*et_bin2*eta_bin7*","mva_40p0To50p0_1p566To2p5",
"*et_bin3*eta_bin0*","mva_50p0To200p0_m2p5Tom1p566",
"*et_bin3*eta_bin1*","mva_50p0To200p0_m1p566Tom1p4442",
"*et_bin3*eta_bin2*","mva_50p0To200p0_m1p4442Tom1p0",
"*et_bin3*eta_bin3*","mva_50p0To200p0_m1p0To0p0",
"*et_bin3*eta_bin4*","mva_50p0To200p0_0p0To1p0",
"*et_bin3*eta_bin5*","mva_50p0To200p0_1p0To1p4442",
"*et_bin3*eta_bin6*","mva_50p0To200p0_1p4442To1p566",
"*et_bin3*eta_bin7*","mva_50p0To200p0_1p566To2p5",
)
)
McBinningSpecification = cms.PSet(
UnbinnedVariables = cms.vstring("mass", "totWeight"),
BinnedVariables = cms.PSet(EfficiencyBins, mcTrue = cms.vstring("true")),
BinToPDFmap = cms.vstring(
"mva_20p0To40p0_0p0To1p5",
"*et_bin0*eta_bin0*","mva_20p0To40p0_0p0To1p5",
"*et_bin1*eta_bin0*","mva_40p0To60p0_0p0To1p5",
"*et_bin2*eta_bin0*","mva_60p0To100p0_0p0To1p5",
"*et_bin0*eta_bin1*","mva_20p0To40p0_1p5To2p5",
"*et_bin1*eta_bin1*","mva_40p0To60p0_1p5To2p5",
"*et_bin2*eta_bin1*","mva_60p0To100p0_1p5To2p5",
)
)
########################
process.TnPMeasurement = cms.EDAnalyzer("TagProbeFitTreeAnalyzer",
InputFileNames = cms.vstring(InputFileName),
InputDirectoryName = cms.string(options.dirName),
InputTreeName = cms.string("fitter_tree"),
OutputFileName = cms.string(OutputFile),
NumCPU = cms.uint32(2),
SaveWorkspace = cms.bool(False), #VERY TIME CONSUMING FOR MC
doCutAndCount = cms.bool(options.doCutAndCount),
floatShapeParameters = cms.bool(True),
binnedFit = cms.bool(True),
binsForFit = cms.uint32(60),
WeightVariable = cms.string("totWeight"),
# defines all the real variables of the probes available in the input tree and intended for use in the efficiencies
Variables = cms.PSet(
#mass = cms.vstring("Tag-Probe Mass", "60.0", "120.0", "GeV/c^{2}"),
mass = cms.vstring("Tag-Probe Mass", "60.0", "140.0", "GeV/c^{2}"),
probe_sc_et = cms.vstring("Probe E_{T}", "0", "100", "GeV/c"),
probe_sc_eta = cms.vstring("Probe #eta", "-2.5", "2.5", ""),
totWeight = cms.vstring("totWeight", "-1000000", "100000000", ""),
#event_met_pfmet = cms.vstring("event_met_pfmet", "-1000000", "100000000", "GeV")
#event_met_pfphi = cms.vstring("event_met_pfphi", "-6", "6", "")
),
# defines all the discrete variables of the probes available in the input tree and intended for use in the efficiency calculation
Expressions = cms.PSet(),
Categories = cms.PSet(),
PDFs = common.all_pdfs,
Efficiencies = cms.PSet(),
#Expressions = cms.PSet(myMT = cms.vstring("myMT", "sqrt(2*event_met_pfmet*tag_Pho_et*(1-cos(tag_Pho_phi-event_met_phi)))", "event_met_pfmet", "tag_Pho_et","tag_Pho_phi","event_met_phi")),
###SJ
Cuts = cms.PSet(
#MTcut = cms.vstring("myMT", "50.", "below"),
#mvacut = cms.vstring("tag_Pho_mva","0.95","above")
tagEt = cms.vstring("tag_Pho_et","30","above") ###new
),
###SJ
)
setattr(process.TnPMeasurement.Categories, options.idName, cms.vstring(options.idName, "dummy[pass=1,fail=0]"))
setattr(process.TnPMeasurement.Categories, "mcTrue", cms.vstring("MC true", "dummy[true=1,false=0]"))
if (not options.isMC):
delattr(process.TnPMeasurement, "WeightVariable")
process.TnPMeasurement.Variables = cms.PSet(
#mass = cms.vstring("Tag-Probe Mass", "60.0", "120.0", "GeV/c^{2}"),
mass = cms.vstring("Tag-Probe Mass", "60.0", "140.0", "GeV/c^{2}"),
probe_sc_et = cms.vstring("Probe E_{T}", "20", "1000", "GeV/c"),
probe_sc_eta = cms.vstring("Probe #eta", "-2.5", "2.5", ""),
event_met_pfmet = cms.vstring("event_met_pfmet", "0", "100000000", "GeV"),
#event_met_phi = cms.vstring("event_met_phi", "-10", "10", ""),
#tag_Pho_phi = cms.vstring("tag_Pho_phi", "-10", "10", ""),
###SJ
tag_Pho_et = cms.vstring("Tag E_{T}", "20", "1000", "GeV/c"),
tag_Pho_mva = cms.vstring("Tag MVA", "-1.5", "1.5", "GeV/c")
###SJ
#event_met_pfsumet = cms.vstring("event_met_pfsumet", "0", "1000", ""),
)
for pdf in process.TnPMeasurement.PDFs.__dict__:
param = process.TnPMeasurement.PDFs.getParameter(pdf)
if (type(param) is not cms.vstring):
continue
for i, l in enumerate(getattr(process.TnPMeasurement.PDFs, pdf)):
if l.find("signalFractionInPassing") != -1:
getattr(process.TnPMeasurement.PDFs, pdf)[i] = l.replace("[1.0]","[0.5,0.,1.]")
setattr(process.TnPMeasurement.Efficiencies, options.idName, DataBinningSpecification)
setattr(getattr(process.TnPMeasurement.Efficiencies, options.idName) , "EfficiencyCategoryAndState", cms.vstring(options.idName, "pass"))
else:
setattr(process.TnPMeasurement.Efficiencies, "MCtruth_" + options.idName, McBinningSpecification)
setattr(getattr(process.TnPMeasurement.Efficiencies, "MCtruth_" + options.idName), "EfficiencyCategoryAndState", cms.vstring(options.idName, "pass"))
for pdf in process.TnPMeasurement.PDFs.__dict__:
param = process.TnPMeasurement.PDFs.getParameter(pdf)
if (type(param) is not cms.vstring):
continue
for i, l in enumerate(getattr(process.TnPMeasurement.PDFs, pdf)):
if l.find("backgroundPass") != -1:
getattr(process.TnPMeasurement.PDFs, pdf)[i] = "RooPolynomial::backgroundPass(mass, a[0.0])"
if l.find("backgroundFail") != -1:
getattr(process.TnPMeasurement.PDFs, pdf)[i] = "RooPolynomial::backgroundFail(mass, a[0.0])"
process.fit = cms.Path(
process.TnPMeasurement
)
| [
"[email protected]"
] | |
be4a55087db33754f407e24b68ea1708c532ea30 | 89b45e528f3d495f1dd6f5bcdd1a38ff96870e25 | /pyneng/exercises/09_functions/task_9_1.py | f3fea8eb574faa2713a4238fc517d66e031c07fd | [] | no_license | imatyukin/python | 2ec6e712d4d988335fc815c7f8da049968cc1161 | 58e72e43c835fa96fb2e8e800fe1a370c7328a39 | refs/heads/master | 2023-07-21T13:00:31.433336 | 2022-08-24T13:34:32 | 2022-08-24T13:34:32 | 98,356,174 | 2 | 0 | null | 2023-07-16T02:31:48 | 2017-07-25T22:45:29 | Python | UTF-8 | Python | false | false | 3,458 | py | # -*- coding: utf-8 -*-
"""
Task 9.1
Create a function generate_access_config that generates the configuration
for access ports.
The function expects these arguments:
- a dictionary mapping interface to VLAN, like this:
{'FastEthernet0/12': 10,
'FastEthernet0/14': 11,
'FastEthernet0/16': 17}
- the access-port configuration template as a list of commands (the access_mode_template list)
The function must return a list of all ports in access mode, configured
from the access_mode_template template. Strings in the list must not end
with a newline character.
In this task the function skeleton is already prepared; only the function
body needs to be written.
Example of the resulting list (the line break after each element is for
readability):
[
'interface FastEthernet0/12',
'switchport mode access',
'switchport access vlan 10',
'switchport nonegotiate',
'spanning-tree portfast',
'spanning-tree bpduguard enable',
'interface FastEthernet0/17',
'switchport mode access',
'switchport access vlan 150',
'switchport nonegotiate',
'spanning-tree portfast',
'spanning-tree bpduguard enable',
...]
Check the function on the access_config dictionary
and the access_mode_template command list.
If the previous check succeeded, check the function once more on the
access_config_2 dictionary and make sure the resulting list contains the
correct interface and VLAN numbers.
Restriction: All tasks must be done using only the topics covered so far.
"""
from pprint import pprint
access_mode_template = [
"switchport mode access",
"switchport access vlan",
"switchport nonegotiate",
"spanning-tree portfast",
"spanning-tree bpduguard enable",
]
access_config = {"FastEthernet0/12": 10, "FastEthernet0/14": 11, "FastEthernet0/16": 17}
access_config_2 = {
"FastEthernet0/03": 100,
"FastEthernet0/07": 101,
"FastEthernet0/09": 107,
}
def generate_access_config(intf_vlan_mapping, access_template):
"""
    intf_vlan_mapping - a dictionary mapping interface to VLAN, like this:
    {'FastEthernet0/12':10,
    'FastEthernet0/14':11,
    'FastEthernet0/16':17}
    access_template - list of commands for a port in access mode
    Returns a list of all access-mode ports configured from the template
"""
cfg = []
for intf, vlan in intf_vlan_mapping.items():
cfg.append("interface " + intf)
for s in access_template:
if s.endswith('vlan'):
s = s + ' ' + str(vlan)
cfg.append(s)
return cfg
cfg = generate_access_config(access_config, access_mode_template)
pprint(cfg)
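The task text also asks to re-check the function on access_config_2; a sketch of that second check:

cfg2 = generate_access_config(access_config_2, access_mode_template)
pprint(cfg2)  # expect FastEthernet0/03, 0/07, 0/09 with VLANs 100, 101, 107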
| [
"[email protected]"
] | |
17e86708d5b2a0476756c63ab8d0cd12a77eba92 | 610ac1da64200c109b9ac48d162058fdd85801aa | /initmethd1.py | 0611c42bbb6351ad0642f65bd482c172afe1023f | [] | no_license | rajdharmkar/Python2.7 | 3d88e7c76c92bbba7481bce7a224ccc8670b3abb | 9c6010e8afd756c16e426bf8c3a40ae2cefdadfe | refs/heads/master | 2021-05-03T18:56:36.249812 | 2019-10-08T00:17:46 | 2019-10-08T00:17:46 | 120,418,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,676 | py | class Human:
# def __init__(self): pass # init method initializing instance variables
#
# def __str__(): pass # ??
#
# def __del__(self): pass # ?? ...these three def stubs are commented out with the IDE's Code > Comment with Line Comment toggle
    def __init__(self, name, age, gender): # self refers to the instance being initialized (inline comment example)
# type: (object, object, object) -> object
self.name = name # creating and assigning new variable
self.age = age
self.gender = gender
def speak_name(self): # a method defined here with a print statement to execute as well
print "my name is %s" % self.name
def speak(self, text):
        # def speak(text): without self this raises "TypeError: speak() takes exactly 1 argument (2 given)", because the instance is passed implicitly as the first argument
print text
def perform_math(self, operation, *args):
print "%s performed math and the result was %f" % (self.name, operation(*args))
def add(a, b):#this is an example of function not an object method and is callable everywhere
return a + b
rhea = Human('Rhea', 20, 'female')
bill = Human('William', '24',
             'male')  # creating a new object 'bill', an instance of class Human, with its attributes assigned
# (referencing bill before this assignment raised: NameError: name 'bill' is not defined)
print bill.name
print bill.age
print bill.gender
bill.speak_name()
bill.speak("Love")
rhea.perform_math(add, 34,45)
# speak_name above is reached through the object 'bill': that is a method call
# a method differs from a function: it is bound to the class and is called through an instance
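# a small sketch of the bound-method point above: calling through the class
# with the instance passed explicitly is the same as the usual bound call
Human.speak_name(bill)   # unbound call, instance passed explicitly
bill.speak_name()        # bound call, self is supplied automatically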
# ===== /celery_app/__init__.py | repo: huazhz/food_master | license: none =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
from celery import Celery
# from celery_app.celeryconfig import broker, backend
app = Celery('tasks')
app.config_from_object('celery_app.celeryconfig')
pool = redis.ConnectionPool(host='127.0.0.1', port=6379)
r = redis.Redis(connection_pool=pool)
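# A minimal task sketch, assuming the celery_app.celeryconfig module loaded
# above exists on the import path; the task name and body are illustrative only.
@app.task
def ping():
    r.set('ping', 'pong')  # round-trip a value through the Redis pool above
    return r.get('ping')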
# ===== /venv/Lib/site-packages/nltk/corpus/reader/crubadan.py | repo: msainTesting/TwitterAnalysis | license: none =====
# Natural Language Toolkit: An Crubadan N-grams Reader
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Avital Pekker <[email protected]>
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
An NLTK interface for the n-gram statistics gathered from
the corpora for each language using An Crubadan.
There are multiple potential applications for the data but
this reader was created with the goal of using it in the
context of language identification.
For details about An Crubadan, this data, and its potential uses, see:
http://borel.slu.edu/crubadan/index.html
"""
import re
from os import path
from nltk.corpus.reader import CorpusReader
from nltk.data import ZipFilePathPointer
from nltk.probability import FreqDist
class CrubadanCorpusReader(CorpusReader):
"""
A corpus reader used to access language An Crubadan n-gram files.
"""
_LANG_MAPPER_FILE = "table.txt"
_all_lang_freq = {}
def __init__(self, root, fileids, encoding="utf8", tagset=None):
super().__init__(root, fileids, encoding="utf8")
self._lang_mapping_data = []
self._load_lang_mapping_data()
def lang_freq(self, lang):
"""Return n-gram FreqDist for a specific language
given ISO 639-3 language code"""
if lang not in self._all_lang_freq:
self._all_lang_freq[lang] = self._load_lang_ngrams(lang)
return self._all_lang_freq[lang]
def langs(self):
"""Return a list of supported languages as ISO 639-3 codes"""
return [row[1] for row in self._lang_mapping_data]
def iso_to_crubadan(self, lang):
"""Return internal Crubadan code based on ISO 639-3 code"""
for i in self._lang_mapping_data:
if i[1].lower() == lang.lower():
return i[0]
def crubadan_to_iso(self, lang):
"""Return ISO 639-3 code given internal Crubadan code"""
for i in self._lang_mapping_data:
if i[0].lower() == lang.lower():
return i[1]
def _load_lang_mapping_data(self):
"""Load language mappings between codes and description from table.txt"""
if isinstance(self.root, ZipFilePathPointer):
raise RuntimeError(
"Please install the 'crubadan' corpus first, use nltk.download()"
)
mapper_file = path.join(self.root, self._LANG_MAPPER_FILE)
if self._LANG_MAPPER_FILE not in self.fileids():
raise RuntimeError("Could not find language mapper file: " + mapper_file)
with open(mapper_file, encoding="utf-8") as raw:
strip_raw = raw.read().strip()
self._lang_mapping_data = [row.split("\t") for row in strip_raw.split("\n")]
def _load_lang_ngrams(self, lang):
"""Load single n-gram language file given the ISO 639-3 language code
and return its FreqDist"""
if lang not in self.langs():
raise RuntimeError("Unsupported language.")
crubadan_code = self.iso_to_crubadan(lang)
ngram_file = path.join(self.root, crubadan_code + "-3grams.txt")
if not path.isfile(ngram_file):
raise RuntimeError("No N-gram file found for requested language.")
counts = FreqDist()
with open(ngram_file, encoding="utf-8") as f:
for line in f:
data = line.split(" ")
ngram = data[1].strip("\n")
freq = int(data[0])
counts[ngram] = freq
return counts
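# A usage sketch, assuming the corpus files have been fetched first with
# nltk.download('crubadan'); 'eng' is only an example ISO 639-3 code.
# from nltk.corpus import crubadan
# print(crubadan.langs()[:5])
# print(crubadan.lang_freq('eng').most_common(5))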
# ===== /Lesson_08/hw8/DmitryBirulin_DZ_magic_v2.py | repo: DoctorSad/_Course | license: none =====
"""
Implement the Magic game (hw3/magic.py) with a few additions.
1. On startup the program asks for the player's name.
2. Keep the player's data in the player_data dictionary and update it after
every game played. Work with the following fields:
name - the player's name
games - total number of games played
record - the best (minimum) number of attempts
avg_attempts - average number of attempts per game
3. On exit, write the player's data to a file (txt or json).
**4. On startup, after the user enters a name, read the file; if the player's
data is present in the file, load it into player_data.
"""
import random
from pathlib import Path
import json
def main():
BASE_DIR = Path(__file__).resolve().parent
FILES_DIR = BASE_DIR / "Files"
FILES_DIR.mkdir(exist_ok=True)
file_path = FILES_DIR / "player_data.json"
player_data = {}
    print("Enter the player's name: ")
player_name = input()
    # tolerate the very first run, when player_data.json does not exist yet
    try:
        with open(file_path) as f:
            player_data_in = json.load(f)
    except FileNotFoundError:
        player_data_in = []
for i in player_data_in:
if i['name'] == player_name:
player_data = i
new_player = False
break
else:
player_data = {'name': player_name, 'games': 0, 'record': 9999, 'avg_attempts': float(1)}
new_player = True
while True:
tmp_data = magic()
player_data_out = []
player_data['games'] += 1
if player_data['record'] > tmp_data['record']:
player_data['record'] = tmp_data['record']
player_data['avg_attempts'] = round(((player_data['avg_attempts'] * (player_data['games'] - 1) +
tmp_data['attempts']) / player_data['games']), 2)
if new_player:
player_data_in.append(player_data)
player_data_out = player_data_in
else:
for i in player_data_in:
if i['name'] != player_name:
player_data_out.append(i)
else:
player_data_out.append(player_data)
out = input(' Continue (Y/n)?: ')
if out == 'n':
print('Bye!')
break
with open(file_path, "w") as f:
data = json.dumps(player_data_out, indent=4)
f.write(data)
def magic() -> dict:
while True:
count = 1
guess = None
record = 1000000
        print('Enter the lower bound: ', end='')
        min_ = input()
        print('Enter the upper bound: ', end='')
        max_ = input()
        try:
            min_ = int(min_)
            max_ = int(max_)
        except ValueError:
            print('The bounds you entered are not integers (<int>)')
        else:
            magic_number = random.randint(min_, max_)
            while not guess:
                print('Enter a number: ', end='')
                try:
                    number = int(input())
                except ValueError:
                    print('You did not enter an integer (<int>)')
                    break
                if number > magic_number:
                    print('Your number is greater than the secret number')
                    count += 1
                elif number < magic_number:
                    print('Your number is less than the secret number')
                    count += 1
                elif number == magic_number:
                    print('You guessed the secret number <', magic_number, '>. Attempts used:', count)
                    if count < record:
                        print('You set a new record!')
                        record = count
                    guess = 1
return {'record': record, 'attempts': count}
if __name__ == "__main__":
main()
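# For reference, Files/player_data.json ends up shaped like this (the values
# below are made up for illustration):
# [
#     {
#         "name": "Alice",
#         "games": 3,
#         "record": 4,
#         "avg_attempts": 5.33
#     }
# ]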
# ===== /solutions_python/Problem_35/344.py | repo: dr-dos-ok/Code_Jam_Webscraper | license: none =====
#vim: fileencoding=utf-8 :
import sys, string
sinks=[]
alpha, map, matrix=None, None, None
MAX_LATTITUDE=20000
def read_n():
return [int(n) for n in sys.stdin.readline().split()]
def read_map():
h,w=read_n()
map=[]
for j in range(h):
map.append(read_n())
assert len(map)==h
return h,w,map
def main():
for casenum in range(read_n()[0]):
one_case(casenum)
def one_case(casenum):
global alpha, ialpha, matrix, map, W, H
H,W,map=read_map()
ialpha=alpha_iter()
matrix=make_matrix(W,H)
alpha=ialpha.next()
for i in range(H):
for j in range(W):
flow(i, j, [])
print 'Case #%d:' % (casenum+1)
print_matrix()
def flow(i, j, trails):
global alpha, ialpha
#print_matrix()
try:
mark(i,j,alpha)
except ValueError:
#print 'meet drainage basins'
        # we ran into a cell that already belongs to a labeled drainage basin
v=matrix[i][j]
for row,col in trails:
matrix[row][col]=v
return
smallest=map[i][j]
dir=None
dirs=[
('N', (i-1, j)),
('W', (i, j-1)),
('E', (i, j+1)),
('S', (i+1, j)),
]
for d, (row, col) in dirs:
if (row>=0 and col>=0) and (row<H and col<W):
#print 'W, H, row, col, len(map), len(map[0])', W, H, row, col, len(map), len(map[0])
altitude=map[row][col]
if altitude < smallest:
#print 'd, altitude, smallest', d, altitude, smallest
smallest=altitude
dir=d
if dir:
row,col=dict(dirs)[dir]
#print 'dir, row, col: ', dir, row, col
trails.append((i,j))
flow(row, col, trails)
else:
sinks.append((i,j))
alpha=ialpha.next()
def mark(row, col, alpha):
assert row>=0 and col>=0
#print 'matrix size, row, col', len(matrix), len(matrix[row]), row, col
if matrix[row][col]!=0:
raise ValueError('already marked')
matrix[row][col]=alpha
def make_matrix(w, h):
return [[0]*w for i in xrange(h)]
def alpha_iter():
for c in string.lowercase:
yield c
def print_matrix():
for i in range(len(matrix)):
for j in range(len(matrix[i])):
print matrix[i][j],
print
if __name__=='__main__':
main()
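# Example session with a made-up 1x3 map (not an official Code Jam case):
# stdin:                 stdout:
#   1                      Case #1:
#   1 3                    a a a
#   9 6 3
# every cell drains toward the single sink at altitude 3, so all three cells
# share drainage basin 'a'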
# ===== /200-number-of-islands/number-of-islands.py | repo: privateHmmmm/leetcode | license: none =====
# -*- coding:utf-8 -*-
# Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
#
# Example 1:
#   11110
#   11010
#   11000
#   00000
# Answer: 1
# Example 2:
#   11000
#   11000
#   00100
#   00011
# Answer: 3
#
# Credits: Special thanks to @mithmatt for adding this problem and creating all test cases.
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
"""
# BFS approach
n = len(grid)
if n == 0: return 0
m = len(grid[0])
if m == 0: return 0
ans = 0
step = [(-1, 0), (1, 0), (0, -1), (0, 1)]
queue = [-1 for i in range(0, n*m)]
def floodfill(i, j):
h = 0
queue[0] = [i, j]
grid[i][j] = '0'
r = 1 # [h, r)
while h<r:
ii, jj = queue[h]
h +=1
for s in step:
newi = ii + s[0]
newj = jj + s[1]
if 0<=newi<n and 0<=newj<m and grid[newi][newj] == '1':
grid[newi][newj] = '0'
queue[r] = [newi, newj]
r +=1
for i in range(0, n):
for j in range(0,m):
if grid[i][j] == '1':
ans +=1
floodfill(i, j)
return ans
"""
"""
# union-find
n = len(grid)
if n == 0: return 0
m = len(grid[0])
if m == 0: return 0
def merge(x, y):
fx = getfa(x)
fy = getfa(y)
if fx != fy:
fa[fx] = fy
def getfa(x):
if fa[x]!=x:
fa[x]=getfa(fa[x])
return fa[x]
step = [(1, 0), (0, 1)]
fa = range(0, n*m)
for i in range(0, n):
for j in range(0, m):
if grid[i][j]=='1':
for s in step:
newI = i + s[0]
newJ = j + s[1]
if 0<=newI<n and 0<=newJ<m and grid[newI][newJ]=='1':
merge(i*m+j, newI*m+newJ)
Set = set()
for i in range(0, n):
for j in range(0, m):
if grid[i][j] == '1':
Set.add(getfa(i*m+j))
return len(Set)
"""
# DFS
m = len(grid)
if m == 0: return 0
n = len(grid[0])
if n == 0: return 0
steps = [(-1, 0), (1, 0), (0, -1), (0, 1)]
def DFS(i, j):
grid[i][j] = '0'
for s in steps:
newI = i + s[0]
newJ = j + s[1]
if 0<=newI<m and 0<=newJ<n and grid[newI][newJ] == '1':
DFS(newI, newJ)
res = 0
for i in range(0, m):
for j in range(0, n):
if grid[i][j] == '1':
res +=1
DFS(i, j)
return res
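# A quick self-check sketch using the grid from Example 2 above; the method
# expects a list of lists of '1'/'0' characters and mutates it in place.
if __name__ == "__main__":
    grid = [list(row) for row in ["11000", "11000", "00100", "00011"]]
    print(Solution().numIslands(grid))  # expected: 3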
# ===== /abc121/a.py | repo: wataoka/atcoder | license: none =====
H, W = map(int, input().split())
h, w = map(int, input().split())
print(H*W - w*H - W*h + w*h)  # equivalent to (H-h) * (W-w)
# ===== /manage.py | repo: bloogrox/ssp-cabinet | license: none =====
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cabinet.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
# ===== /packages/python/plotly/plotly/validators/histogram2dcontour/_colorbar.py | repo: OGVGdev/plotly.py | license: MIT =====
import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="colorbar", parent_name="histogram2dcontour", **kwargs
):
super(ColorbarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "ColorBar"),
data_docs=kwargs.pop(
"data_docs",
"""
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.histogr
am2dcontour.colorbar.Tickformatstop` instances
or dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.histogram2dcontour.colorbar.tickformatstopdef
aults), sets the default property values to use
for elements of
histogram2dcontour.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.histogram2dcontour
.colorbar.Title` instance or dict with
compatible properties
titlefont
Deprecated: Please use
histogram2dcontour.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
histogram2dcontour.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
""",
),
**kwargs
)
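# A minimal sketch of the trace attribute this validator governs, assuming a
# working plotly installation; the data values are illustrative only.
# import plotly.graph_objects as go
# go.Histogram2dContour(
#     x=[1, 2, 2, 3], y=[1, 1, 2, 3],
#     colorbar=dict(title="count", thickness=15),
# )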
# ===== /generate_data.py | repo: isabella232/de_tech_test_pyspark | license: none =====
import random
from functools import partial
import csv
from itertools import product
import string
from datetime import datetime
import os
def generate_data(size=1000000):
"""
    Generates csvs under the data/ directory. The csvs are:
    data/main/<date>/test_data.csv
    - field1, field2, field3, field4 are random ints in range(1,20)
    - val1, val2, val3 are random floats
    data/lookup.csv
    - field1, f2 pairs mapped to a random letter lookup_val
Args:
size (int, optional): The number of rows required in the
main test data csv.
Raises:
FileExistsError: Raised if a file has already been generated
with today's date.
"""
def _randomly_nullify(series, n):
"Replaces n entires in series with None"
indices = random.choices(range(size),k=n)
return [v if i not in indices else None for i,v in enumerate(series)]
date = datetime.today().strftime('%Y-%m-%d')
part_choices = partial(random.choices, range(1,20), k=size)
field1 = _randomly_nullify(
part_choices(weights=[i**2/2 for i in range(1,20)]), 5
) # end weighted
field2 = _randomly_nullify(
part_choices(weights=[(20-i)/i for i in range(1,20)]), 30
) # start weighted
field3 = part_choices(weights=[1/(1+abs(i - 10)) for i in range(1,20)]) # mid weighted
field4 = part_choices() # uniform
val1 = (random.gauss(1000, 100) for i in range(size)) # normal random
val2 = (random.random()*1000*i if i else 0 for i in field1) # random correlated with field1
val3 = _randomly_nullify(
[random.random()*1000*i for i in field4],10
) # random correlated with field4
combined = zip(field1, field2, field3, field4, val1, val2, val3)
path = os.path.join(os.getcwd(), f'data/main/{date}/test_data.csv')
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'x', newline='') as f:
writer = csv.writer(f)
writer.writerow(['field1','field2','field3','field4','val1','val2','val3'])
writer.writerows(combined)
# lookup csv
field = [i for i in range(1,20) if i != 10]
group = product(field, field)
lookup = list([x, y, random.choice(string.ascii_letters)] for x,y in group)
try:
with open('data/lookup.csv', 'x', newline='') as f:
writer = csv.writer(f)
writer.writerow(['field1','f2','lookup_val'])
writer.writerows(lookup)
except FileExistsError:
pass
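# A usage sketch: a small run writes data/main/<today>/test_data.csv and
# data/lookup.csv under the current working directory; rerunning on the same
# day raises FileExistsError, as documented above.
if __name__ == "__main__":
    generate_data(size=1000)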
# ===== /examples/websites.py | repo: ieaves/tenzing | license: public-domain / MIT =====
import pandas as pd
from visions.core.implementations import visions_complete_set
from visions.core.functional import type_cast, type_inference
from visions.core.summaries.summary import CompleteSummary
# Load dataset
df = pd.read_csv(
"https://raw.githubusercontent.com/berkmancenter/url-lists/master/lists/et.csv",
parse_dates=["date_added"],
)
# Type
typeset = visions_complete_set()
# Type inference
inferred_types = type_inference(df, typeset)
print(inferred_types)
# Type cast
cast_df, cast_types = type_cast(df, typeset)
print(cast_types)
# Summarization
summary = CompleteSummary()
summaries = summary.summarize(cast_df, cast_types)
for key, variable_summary in summaries["series"].items():
print(key, variable_summary)
# ===== /entry_point_inspector/group.py | repo: dhellmann/entry_point_inspector | license: Apache-2.0 =====
import logging
from cliff import lister
import pkg_resources
LOG = logging.getLogger(__name__)
class GroupList(lister.Lister):
"""Shows the groups for which plugins are available.
"""
def take_action(self, parsed_args):
names = set()
for dist in pkg_resources.working_set:
LOG.debug('checking distribution "%s"', dist)
entry_map = pkg_resources.get_entry_map(dist)
names.update(set(entry_map.keys()))
return (
('Name',),
((n,) for n in sorted(names)),
)
class GroupShow(lister.Lister):
"""Shows the members of a specific group.
"""
def get_parser(self, prog_name):
p = super(GroupShow, self).get_parser(prog_name)
p.add_argument(
'group',
help='the name of the group to show',
)
return p
def take_action(self, parsed_args):
results = []
for ep in pkg_resources.iter_entry_points(parsed_args.group):
try:
ep.load()
except Exception as err:
load_error = str(err) # unicode?
else:
load_error = ''
attr = '.'.join(ep.attrs)
results.append((
ep.name,
ep.module_name,
attr,
str(ep.dist), # unicode?
load_error,
))
return (
('Name', 'Module', 'Member', 'Distribution', 'Error'),
results,
)
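# A rough interactive sketch; in normal use cliff wires these classes to the
# 'epi group list' and 'epi group show <name>' console commands, so the direct
# instantiation below is for illustration only.
# cmd = GroupShow(None, None)
# parser = cmd.get_parser('epi group show')
# columns, rows = cmd.take_action(parser.parse_args(['console_scripts']))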
# ===== /Code/CodeRecords/2928/60717/271819.py | repo: AdamZhouSE/pythonHomework | license: none =====
v = int(input())
list1 = input().split()
for i in range(9):
    list1[i] = int(list1[i])
if min(list1) > v:
    print(-1)
else:
    # start with as many copies of the cheapest digit as the budget v allows
    n = int(v / min(list1))
    tmp = min(list1)
    index = list1.index(tmp)
    list2 = [index + 1 for i in range(n)]
    index += 1
    # greedily upgrade digits to larger ones while the total cost stays within v
    while index < 9:
        summ = 0
        for i in range(len(list2)):
            summ += list1[list2[i] - 1]
        for i in range(len(list2)):
            if summ - list1[list2[i] - 1] + list1[index] <= v:
                summ = summ - list1[list2[i] - 1] + list1[index]
                list2[i] = index + 1
        index += 1
    output = ''
    for i in range(len(list2)):
        output += str(list2[i])
    print(output)
# ===== /thinkpython/ex4_md5sum.py | repo: MeetLuck/works | license: none =====
''' Exercise 4 '''
# 1. Write a program that searches a directory and all of its subdirectories,
# recursively, and returns a list of complete paths for all files with a given suffix (like mp3).
# 2. To recognize duplicates, you can use md5sum to compute a 'checksum' for each file.
# If two files have the same checksum, they probably have the same contents.
# 3. To double-check, you can use the Unix command diff.
import os
def walk(dirname):
''' Finds the names of all files in dirname and its sub-directories. '''
names = list()
for name in os.listdir(dirname):
path = os.path.join(dirname, name)
if os.path.isfile(path):
names.append(path)
else:
names.extend( walk(path) )
return names
def pipe(cmd):
''' Runs a command in a subprocess
Returns (res,stat), the output of the subprocess and the exit status
'''
fp = os.popen(cmd)
res = fp.read()
stat = fp.close()
assert stat is None
return res, stat
def compute_checksum(filename):
''' Computes the MD5 checksum of the contents of a file '''
cmd = 'md5sum ' + filename
return pipe(cmd)
def check_diff(name1,name2):
''' Computes the difference between the contents of two files
name1, name2 : string of filenames
'''
cmd = 'diff %s %s' %(name1,name2)
return pipe(cmd)
def compute_checksums(dirname, suffix):
''' Computes checksums for all files with the given suffix
dirname: string name of directory to search
suffix: string suffix to match
Returns map from checksum to list of files with that checksum
'''
names = walk(dirname)
d = dict()
for name in names:
if name.endswith(suffix):
res, stat = compute_checksum(name)
checksum, _ = res.split()
if checksum in d:
d[checksum].append(name)
else:
d[checksum] = [name]
return d
def check_pairs(names):
''' Checks whether any in a list of files differs from others
names: [ list of string filenames ]
'''
for name1 in names:
for name2 in names:
if name1 < name2:
res, stat = check_diff(name1,name2)
if res:
return False
return True
def print_duplicates(d):
''' checks for duplicate files
Reports any files with the same checksum and checks whether they are, in fact, identical
d: map from checksum to list of files with that checksum
'''
for key, names in d.iteritems():
print key, names
if len(names) > 1:
print 'The following files have the same checksum:'
for name in names:
print name
if check_pairs(names):
print 'And they are identical'
if __name__ == '__main__':
# dirname = os.path.abspath(__file__)
dirname = os.path.dirname(__file__)
print dirname
d = compute_checksums(dirname=dirname, suffix='.py')
print_duplicates(d)
# ===== /bot_redis/storeges.py | repo: unitorzero/SearchBot | license: none =====
from db.bot_orm.tables.user import User
import logging
class Users:
log = logging
def __init__(self):
self.UserTable = User
self.users = []
self.user_ids = []
self.user_telegram_ids = []
self.admins = []
self.admin_ids = []
self.admin_telegram_ids = []
self.update()
def __str__(self):
return '\r\nusers: %s \r\n\r\nadmins: %s\r\n' % (list(map(str, self.users)), list(map(str, self.admins)))
def update(self):
self.users = self.UserTable.all()
self.admins = list(filter(lambda x: x.admin, self.users))
self.user_ids = list(map(lambda x: x.id, self.users))
self.user_telegram_ids = list(map(lambda x: x.telegram_id, self.users))
self.admin_ids = list(map(lambda x: x.id, self.admins))
self.admin_telegram_ids = list(map(lambda x: x.telegram_id, self.admins))
self.log.info('Storage Users was updated. \r\n%s' % self)
return self
def get_user_by_telegram_id(self, _id):
items = list(filter(lambda x: x.telegram_id == _id, self.users))
return items[0] if len(items) else False
def get_user_by_id(self, _id):
items = list(filter(lambda x: x.id == int(_id), self.users))
return items[0] if len(items) else False
def get_admin_by_id(self, _id):
items = list(filter(lambda x: x.id == _id, self.admins))
return items[0] if len(items) else False
def get_admin_by_telegram_id(self, _id):
items = list(filter(lambda x: x.telegram_id == _id, self.admins))
return items[0] if len(items) else False
def delete_user(self, _id):
User.delete(_id)
user = self.get_user_by_id(_id)
self.log.warning('User |%s| was deleted.' % user)
self.update()
return user
def set_description(self, _id, description):
User.add_description(_id, description)
self.update()
user = self.get_user_by_id(_id)
        self.log.warning('User |%s| set description %s.' % (user, description))
return user
users = Users()
# ===== /mbm/string.py | repo: asmark/mafia-by-mail | license: none =====
import re
import string
class Template(string.Template):
def __init__(self, template):
super().__init__(template)
self.vars = tuple(self._extract_vars())
def _extract_vars(self):
for match in self.pattern.finditer(self.template):
if match.group('invalid') is not None:
                self._invalid(match)
if match.group('escaped'):
continue
yield match.group('braced') or match.group('named')
def substitute_with_name(self, lookup=lambda name: name):
return self.substitute(**{v: lookup(v) for v in self.vars})
def reformat_lines(s):
return re.sub(r'\n+', lambda m: '\n' if len(m.group(0)) > 1 else ' ', s)
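# A small usage sketch of the helpers above (the example strings are made up):
if __name__ == "__main__":
    t = Template('Hello $name, welcome to $place')
    print(t.vars)                             # ('name', 'place')
    print(t.substitute_with_name(str.upper))  # Hello NAME, welcome to PLACE
    print(repr(reformat_lines('one\ntwo\n\nthree')))  # 'one two\nthree'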
# ===== /Python-3/multiprocessing_examples/multiprocessing_cpu_count.py | repo: ayanakshi/journaldev | license: MIT =====
import multiprocessing
print("Number of cpu : ", multiprocessing.cpu_count()) | [
"[email protected]"
] | |
# ===== /env/bin/easy_install-2.7 | repo: raultr/pypersonaljs | license: none =====
#!/home/raultr-hp/proyectos/PyPersonalJs/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
# ===== /example/Schema.py | repo: eddiedb6/schema | license: none =====
{
SchemaConfigRoot: {
SchemaType: SchemaTypeDict,
SchemaRule: [
HasKey(Const.UI, Const.Action)
]
},
Const.UI: {
SchemaType: SchemaTypeDict,
SchemaRule: [
HasKey(Const.Name, Const.Type),
CheckAsTypeFromKey(Const.Type)
]
},
Const.UIRoot: {
SchemaType: SchemaTypeDict
},
Const.UIApp: {
SchemaType: SchemaTypeDict,
SchemaRule: [
HasKey(Const.Path)
]
},
Const.UIWeb: {
SchemaInherit: Const.UIApp
},
Const.AppRoot: {
SchemaType: SchemaTypeDict
},
Const.AppButton: {
SchemaInherit: Const.AppRoot
},
Const.AppForm: {
SchemaInherit: Const.AppRoot
},
Const.Path: {
SchemaType: SchemaTypeString,
SchemaRule: [
NotEmpty(SchemaTypeString)
]
},
Const.Name: {
SchemaType: SchemaTypeString,
SchemaRule: [
NotEmpty(SchemaTypeString)
]
},
Const.Type: {
SchemaType: SchemaTypeString,
SchemaRule: [
ValueIn(Const.UIType)
]
},
Const.SubUI: {
SchemaType: SchemaTypeArray,
SchemaRule: [
CheckForeachAsType(Const.UI)
]
},
Const.Caption: {
SchemaType: SchemaTypeString
},
Const.BreakTime: {
SchemaType: SchemaTypeInteger
},
Const.Text: {
SchemaType: SchemaTypeString
},
Const.Script: {
SchemaType: SchemaTypeString,
SchemaRule: [
NotEmpty(SchemaTypeString)
]
},
Const.Action: {
SchemaType: SchemaTypeDict
},
Const.SubAction: {
SchemaType: SchemaTypeArray,
SchemaRule: [
CheckForeachAsType(Const.Action)
]
},
Const.Ignore: {
SchemaType: SchemaTypeDict,
SchemaRule: [
IgnoreChildSchema()
]
}
}
# ===== /logic/components/velocity.py | repo: Zireael07/Flask-roguelike | license: none =====
class Velocity():
def __init__(self, dx=0, dy=0):
self.dx = dx
self.dy = dy
# readable representation
def __str__(self):
return 'Vel(dx='+str(self.dx)+', dy='+str(self.dy)+ ')'
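# A tiny usage sketch of the component by itself:
if __name__ == "__main__":
    vel = Velocity(1, -2)
    print(vel)  # Vel(dx=1, dy=-2)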
# ===== /Data Set/bug-fixing-4/f21307da911b0d81896bfb77092744a6ae4275d1-<set_action>-bug.py | repo: wsgan001/PyFPattern | license: none =====
def set_action(self):
super(GalaxyCLI, self).set_action()
if (self.action == 'delete'):
self.parser.set_usage('usage: %prog delete [options] github_user github_repo')
elif (self.action == 'import'):
self.parser.set_usage('usage: %prog import [options] github_user github_repo')
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help="Don't wait for import results.")
self.parser.add_option('--branch', dest='reference', help="The name of a branch to import. Defaults to the repository's default branch (usually master)")
self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.')
elif (self.action == 'info'):
self.parser.set_usage('usage: %prog info [options] role_name[,version]')
elif (self.action == 'init'):
self.parser.set_usage('usage: %prog init [options] role_name')
self.parser.add_option('--init-path', dest='init_path', default='./', help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option('--type', dest='role_type', action='store', default='default', help="Initialize using an alternate role type. Valid types include: 'container', 'apb' and 'network'.")
self.parser.add_option('--role-skeleton', dest='role_skeleton', default=C.GALAXY_ROLE_SKELETON, help='The path to a role skeleton that the new role should be based upon.')
elif (self.action == 'install'):
self.parser.set_usage('usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]')
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help="Don't download roles listed as dependencies")
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
self.parser.add_option('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true', default=False, help='Use tar instead of the scm archive option when packaging the role')
elif (self.action == 'remove'):
self.parser.set_usage('usage: %prog remove role1 role2 ...')
elif (self.action == 'list'):
self.parser.set_usage('usage: %prog list [role_name]')
elif (self.action == 'login'):
self.parser.set_usage('usage: %prog login [options]')
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif (self.action == 'search'):
self.parser.set_usage('usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]')
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
elif (self.action == 'setup'):
self.parser.set_usage('usage: %prog setup [options] source github_user github_repo secret')
self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest='setup_list', action='store_true', default=False, help='List all of your integrations.')
if (self.action in ['init', 'info']):
self.parser.add_option('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
if (self.action not in ('delete', 'import', 'init', 'login', 'setup')):
self.parser.add_option('-p', '--roles-path', dest='roles_path', action='callback', callback=CLI.unfrack_paths, default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured)', type='str')
if (self.action in ('init', 'install')):
        self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
# ===== /moodledata/vpl_data/429/usersdata/309/98523/submittedfiles/jogoDaVelha_BIB.py | repo: rafaelperazzo/programacao-web | license: none =====
# -*- coding: utf-8 -*-
from datetime import datetime
# validates the symbol the human player will use
def solicitaSimboloDoHumano():
    # nome = input('What is your name (or nickname)? ')
    simbH = input("Which symbol do you want to use in the game? ")
    while simbH != "X" and simbH != "x" and simbH != "O" and simbH != "o":
        print("Oops! Invalid symbol")
        simbH = input("Enter a valid symbol to use for the match: X or O: ")
    return simbH
# coin toss for the first move
def sorteioPrimeiraJogada():
    now = datetime.now()
    a = now.second
    if a % 2 == 0:
        print("Winner of the draw to start the game: Computer")
        # call the printartab function with the computer's move
    else:
        print("Winner of the draw to start the game: Player")
        # after that, ask for the desired position and print the board
# Function to print the board:
def mostraTabuleiro(tabuleiro):
    print(tabuleiro[0][0] + '|' + tabuleiro[0][1] + '|' + tabuleiro[0][2])
    print(tabuleiro[1][0] + '|' + tabuleiro[1][1] + '|' + tabuleiro[1][2])
    print(tabuleiro[2][0] + '|' + tabuleiro[2][1] + '|' + tabuleiro[2][2])
# Human move function
def jogadaHumana(nome, simbH, tabuleiro):
    casa = int(input(" What is your move, %s? " % nome))
    tabuleiro.insert(casa, simbH)
# Function to validate a move
# def validarJogada():
# Computer move function
# def jogadaComputador():
# Function that checks for the winner
# def verificaVencedor():
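# A tiny wiring sketch of the functions above (the 3x3 board of one-character
# strings is an assumption; the module itself never builds one):
if __name__ == "__main__":
    tabuleiro = [[' ', ' ', ' '] for _ in range(3)]
    simbolo = solicitaSimboloDoHumano()
    sorteioPrimeiraJogada()
    mostraTabuleiro(tabuleiro)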
# ===== /exps-gsn-edf/gsn-edf_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=4/params.py | repo: ricardobtxr/experiment-scripts | license: none =====
{'cpus': 4,
'duration': 30,
'final_util': '3.524833',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 4,
'utils': 'uni-medium-3'}
# ===== /moodledata/vpl_data/117/usersdata/206/26191/submittedfiles/al2.py | repo: rafaelperazzo/programacao-web | license: none =====
from __future__ import division
n = 10.2017
inteiro = int(n)
fracionario = n - inteiro
print inteiro
print fracionario
# ===== /sdBs/AllRun/pg_1323+042/sdB_pg_1323+042_coadd.py | repo: tboudreaux/SummerSTScICode | license: none =====
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[201.583167,3.965192], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_1323+042/sdB_pg_1323+042_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_1323+042/sdB_pg_1323+042_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
# ===== /setup.py | repo: deeso/fiery-snap | license: MIT =====
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
data_files = [(d, [os.path.join(d, f) for f in files])
for d, folders, files in os.walk(os.path.join('src', 'config'))]
setup(name='fiery-snap',
version='1.0',
description='connect together data flows',
author='Brad Antoniewicz, Adam Pridgen',
author_email='[email protected], [email protected]',
install_requires=['toml', 'kombu', 'redis', 'validators',
'web.py', 'regex', 'python-twitter',
'bs4', 'pymongo', 'requests'],
packages=find_packages('src'),
package_dir={'': 'src'},
)
# ===== /backup/Ignorância Zero-backup/Ignorância Zero/059Exercício1.py | repo: G-itch/Projetos | license: none =====
class Quadrado(object):
def __init__(self,l):
self.lado = l
def mudar_valor(self,lado):
self.lado = lado
return self.lado
    def retornar_lado(self):
        return f"The side of the square equals {self.lado}"
    def área(self):
        return f"The area of the square equals {self.lado*self.lado}"
Q = Quadrado(12)
# Q.mudar_valor(16)
print(Q.retornar_lado())
print(Q.área())
# ===== /nodes/VanderPlas17Python/C_Chapter2/I_StructuredData/D_Onto/index.py | repo: nimra/module_gen | license: none =====
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
# RecordArrays: Structured Arrays with a Twist
# NumPy also provides the np.recarray class, which is almost identical to the struc‐
# tured arrays just described, but with one additional feature: fields can be accessed as
# attributes rather than as dictionary keys. Recall that we previously accessed the ages
# by writing:
# In[15]: data['age']
# Out[15]: array([25, 45, 37, 19], dtype=int32)
# If we view our data as a record array instead, we can access this with slightly fewer
# keystrokes:
# In[16]: data_rec = data.view(np.recarray)
# data_rec.age
# Out[16]: array([25, 45, 37, 19], dtype=int32)
# The downside is that for record arrays, there is some extra overhead involved in
# accessing the fields, even when using the same syntax. We can see this here:
# In[17]: %timeit data['age']
# %timeit data_rec['age']
# %timeit data_rec.age
# 1000000 loops, best of 3: 241 ns per loop
# 100000 loops, best of 3: 4.61 µs per loop
# 100000 loops, best of 3: 7.27 µs per loop
# Whether the more convenient notation is worth the additional overhead will depend
# on your own application.
#
# On to Pandas
# This section on structured and record arrays is purposely at the end of this chapter,
# because it leads so well into the next package we will cover: Pandas. Structured arrays
# like the ones discussed here are good to know about for certain situations, especially
# in case you’re using NumPy arrays to map onto binary data formats in C, Fortran, or
# another language. For day-to-day use of structured data, the Pandas package is a
# much better choice, and we’ll dive into a full discussion of it in the next chapter.
#
#
#
#
# 96 | Chapter 2: Introduction to NumPy
#
# CHAPTER 3
# Data Manipulation with Pandas
#
#
#
#
# In the previous chapter, we dove into detail on NumPy and its ndarray object, which
# provides efficient storage and manipulation of dense typed arrays in Python. Here
# we’ll build on this knowledge by looking in detail at the data structures provided by
# the Pandas library. Pandas is a newer package built on top of NumPy, and provides an
# efficient implementation of a DataFrame. DataFrames are essentially multidimen‐
# sional arrays with attached row and column labels, and often with heterogeneous
# types and/or missing data. As well as offering a convenient storage interface for
# labeled data, Pandas implements a number of powerful data operations familiar to
# users of both database frameworks and spreadsheet programs.
# As we saw, NumPy’s ndarray data structure provides essential features for the type of
# clean, well-organized data typically seen in numerical computing tasks. While it
# serves this purpose very well, its limitations become clear when we need more flexi‐
# bility (attaching labels to data, working with missing data, etc.) and when attempting
# operations that do not map well to element-wise broadcasting (groupings, pivots,
# etc.), each of which is an important piece of analyzing the less structured data avail‐
# able in many forms in the world around us. Pandas, and in particular its Series and
# DataFrame objects, builds on the NumPy array structure and provides efficient access
# to these sorts of “data munging” tasks that occupy much of a data scientist’s time.
# In this chapter, we will focus on the mechanics of using Series, DataFrame, and
# related structures effectively. We will use examples drawn from real datasets where
# appropriate, but these examples are not necessarily the focus.
#
# Installing and Using Pandas
# Installing Pandas on your system requires NumPy to be installed, and if you’re build‐
# ing the library from source, requires the appropriate tools to compile the C and
#
#
# 97
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"On to Pandas",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
[self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Onto(HierNode):
def __init__(self):
super().__init__("On to Pandas")
self.add(Content())
# eof
# ===== /pypureclient/flashblade/FB_2_7/models/array_connection_key_get_response.py | repo: PureStorage-OpenConnect/py-pure-client | license: BSD-2-Clause =====
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.7, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_7 import models
class ArrayConnectionKeyGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[ArrayConnectionKey]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.ArrayConnectionKey]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[ArrayConnectionKey])
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ArrayConnectionKeyGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ArrayConnectionKeyGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ArrayConnectionKeyGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
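# Illustrative usage (attribute names as declared in `swagger_types` above):
#   resp = ArrayConnectionKeyGetResponse(total_item_count=1, items=[])
#   resp.to_dict()  # -> {'total_item_count': 1, 'items': []}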
| [
"[email protected]"
] | |
6bd956775f12f250875477f676345e6b0e234bf8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_legion.py | 5090b650106bfa138215037cce9864eaddf0603c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py |
# class header
class _LEGION():
def __init__(self,):
self.name = "LEGION"
self.definitions = [u'very large in number: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
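# Illustrative behavior: _LEGION().run('x', 'target') returns
# {'target': {'properties': 'legion'}}.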
| [
"[email protected]"
] | |
1c07af723bbd25d1d1d976b25d84c6a4956b4734 | 84c9a6fb5e18741f14a55d0d737e2a556383770d | /venv/Lib/site-packages/w3af/plugins/audit/phishing_vector.py | 4b0519748eb2af2346eb67a13bcaf81251014def | [] | no_license | AravindChan96/Vulcan | 638a1db2f84df08bc50dd76c7f142014d529fbec | 5548a6f36f04108ac1a6ed8e707930f9821f0bd9 | refs/heads/master | 2022-11-05T15:05:54.224578 | 2020-06-19T20:44:14 | 2020-06-19T20:44:14 | 273,396,348 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,808 | py | """
phishing_vector.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from __future__ import with_statement
import w3af.core.controllers.output_manager as om
import w3af.core.data.constants.severity as severity
import w3af.core.data.parsers.parser_cache as parser_cache
from w3af.core.data.fuzzer.fuzzer import create_mutants
from w3af.core.controllers.plugins.audit_plugin import AuditPlugin
from w3af.core.data.kb.vuln import Vuln
class phishing_vector(AuditPlugin):
"""
Find phishing vectors.
:author: Andres Riancho ([email protected])
"""
TAGS = ('iframe', 'frame')
# I test this with different URL handlers because the developer may have
# blacklisted http:// and https:// but missed ftp://.
#
# I also use hTtp instead of http because I want to evade some (stupid)
# case sensitive filters
TEST_URLS = ('hTtp://w3af.org/',
'htTps://w3af.org/',
'fTp://w3af.org/',
'//w3af.org')
def audit(self, freq, orig_response, debugging_id):
"""
Find those phishing vectors!
:param freq: A FuzzableRequest
:param orig_response: The HTTP response associated with the fuzzable request
:param debugging_id: A unique identifier for this call to audit()
"""
mutants = create_mutants(freq, self.TEST_URLS)
self._send_mutants_in_threads(self._uri_opener.send_mutant,
mutants,
self._analyze_result,
debugging_id=debugging_id)
om.out.debug('Finished audit.phishing_vector (did=%s)' % debugging_id)
def _contains_payload(self, response):
"""
get_tags_by_filter is CPU-intensive (but cached whenever possible), and
we want to prevent calls to it, so we first check if the HTTP response
body contains the payloads we sent.
:param response: The HTTP response body
:return: True if the response body contains at least one of the payloads
"""
body = response.body
body = body.lower()
for test_url in self.TEST_URLS:
if test_url.lower() in body:
return True
return False
def _analyze_result(self, mutant, response):
"""
Analyze results of the _send_mutant method.
"""
if not response.is_text_or_html():
return
if self._has_bug(mutant):
return
# Performance improvement to prevent calling the CPU-expensive
# get_tags_by_filter
if not self._contains_payload(response):
return
for tag in parser_cache.dpc.get_tags_by_filter(response, self.TAGS):
# pylint: disable=E1101
src_attr = tag.attrib.get('src', None)
# pylint: enable=E1101
if src_attr is None:
continue
for url in self.TEST_URLS:
if not src_attr.startswith(url):
continue
# Vuln vuln!
desc = 'A phishing vector was found at: %s'
desc %= mutant.found_at()
v = Vuln.from_mutant('Phishing vector', desc, severity.LOW,
response.id, self.get_name(), mutant)
v.add_to_highlight(src_attr)
self.kb_append_uniq(self, 'phishing_vector', v)
break
msg = ('Performed HTTP response analysis at audit.phishing_vector URL %s,'
' HTTP response ID %s.')
args = (response.get_uri(), response.id)
om.out.debug(msg % args)
def get_long_desc(self):
"""
:return: A DETAILED description of the plugin functions and features.
"""
return """
This plugins identifies phishing vectors in web applications, a bug of
this type is found if the victim requests the URL
"http://site.tld/asd.asp?info=http://attacker.tld" and the HTTP response
contains:
...
<iframe src="http://attacker.tld">
...
"""
| [
"[email protected]"
] | |
f60c1eb3c8a6cb9f4f440d9385e77ee379db0e27 | f594560136416be39c32d5ad24dc976aa2cf3674 | /mmdet/models/utils/__init__.py | e74ba89e8c2101360d921a5f8437da48d0250e9a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | ShiqiYu/libfacedetection.train | bd9eb472c2599cbcb2f028fe7b51294e76868432 | dce01651d44d2880bcbf4e296ad5ef383a5a611e | refs/heads/master | 2023-07-14T02:37:02.517740 | 2023-06-12T07:42:00 | 2023-06-12T07:42:00 | 245,094,849 | 732 | 206 | Apache-2.0 | 2023-06-12T07:42:01 | 2020-03-05T07:19:23 | Python | UTF-8 | Python | false | false | 1,809 | py | # Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU',
'get_uncertain_point_coords_with_randomness', 'get_uncertainty'
]
| [
"[email protected]"
] | |
52c36b71575238f0e602221a8231091745c3f7e7 | 63e2bed7329c79bf67279f9071194c9cba88a82c | /SevOneApi/python-client/test/test_flow_device_mapping_dto.py | f5a0fb8a90151ec40ab8e9ae1adadd21f084d03a | [] | no_license | jsthomason/LearningPython | 12422b969dbef89578ed326852dd65f65ab77496 | 2f71223250b6a198f2736bcb1b8681c51aa12c03 | refs/heads/master | 2021-01-21T01:05:46.208994 | 2019-06-27T13:40:37 | 2019-06-27T13:40:37 | 63,447,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | # coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.flow_device_mapping_dto import FlowDeviceMappingDto # noqa: E501
from swagger_client.rest import ApiException
class TestFlowDeviceMappingDto(unittest.TestCase):
"""FlowDeviceMappingDto unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFlowDeviceMappingDto(self):
"""Test FlowDeviceMappingDto"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.flow_device_mapping_dto.FlowDeviceMappingDto() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
04bf6220e402915f8d1332a869e2c2ba6796980f | 1dd7fecaa182c1d7a29460dc5385066b68bcf676 | /Inheritance - example 1/inheritance_ex_1.py | c18d95665de1909c9d64948bc046485bd709560e | [] | no_license | mainka1f/PythonUtilities | f081df31e6ea4311d4973ef7ba6bc0ff6be75fb1 | f310d088a7a7a5f2c95c27cba3a7985207568d62 | refs/heads/master | 2021-12-02T19:21:11.915510 | 2012-05-01T21:43:57 | 2012-05-01T21:43:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,670 | py | # Can a class talk to another class?
#
# DWB 052808
def func1(value):
print " From func1, value is ", value
class Class1():
class1Data=5
print " This is Class1(), class1Data = ", class1Data
def testPrint1(self):
print " 888888 This is def testPrint1"
# call_2_from_1=Class2()
class Class2(Class1):
def __init__(self,print_value):
self.print_value=print_value
self.class2Data=15
self.class3Data=100
if self.print_value == 1:
print " print_value = %d " % self.print_value
# if self.print_value == 2:
# print " print_value = %d, class2Data = " % (self.print_value, class2Data)
class2Data=10
func1(3)
# def testPrint(self):
# print " *** Inside testPrint, class2Data = ", self.class2Data
# print ' *** Inside testPrint, class3Data = ', self.class3Data
# if self.print_value == 2:
# print " print_value = %d, class2Data = " % (self.print_value, class2Data)
print " This is Class2(), class2Data =", class2Data
# call_1_from_2=Class1()
# call_1_from_2.testPrint1()
# class1Data is not defined within the scope of Class2
# print " class1Data before instantiate Class2 = ",class1Data
# Proof you can instantiate Class1 from within Class2 even though Class2 also inherits from Class1
# testclass=Class1()
# testclass.testPrint1()
# print " class1Data after instantiate Class2 = ",class1Data
print " This is the third print statement"
#print " *** class2Data = ", class2Data
class2=Class2(1)
class22=Class2(2)
#class22.testPrint()
class2.testPrint1()
class22.testPrint1() | [
"[email protected]"
] | |
2d9fa1e779bb18f4c5a6cdc8952046e41c32841e | cabe35a027a4c26a6360f60b00b176235d79c98b | /others/13.py | 89cefbd8b028eab64906e16e7c34a8e66aab9aae | [] | no_license | rishikant42/Python-TheHardWay | e3ac9c903be5065277095827a7e31662a1d56cbf | 5c1c7ff6c376627bc6b1abf1fc7a8d7f3ef40176 | refs/heads/master | 2022-07-23T15:12:32.572778 | 2022-06-25T10:29:52 | 2022-06-25T10:29:52 | 70,502,885 | 0 | 1 | null | 2017-02-26T12:51:23 | 2016-10-10T15:42:42 | Python | UTF-8 | Python | false | false | 282 | py | # recursive approach
def fact1(n):
if n == 1:
return 1
else:
return n * fact1(n-1)
# iterative approach
def fact2(n):
res = 1
for i in range(1,n+1):
res *= i
return res
print fact1(6)
print fact2(6)
import math
print math.factorial(6)
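# Each of the three calls above prints 720 (= 6!).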
| [
"[email protected]"
] | |
9ec48f0b256ba4be2a48b69990488c3634b4b7d3 | 5b1ff6054c4f60e4ae7315db9f20a334bc0b7634 | /APC20/consts.py | a80a1506e7a792c468d852b8ff12a2d980fc556d | [] | no_license | maratbakirov/AbletonLive9_RemoteScripts | 2869122174634c75405a965401aa97a2dae924a1 | 4a1517c206353409542e8276ebab7f36f9bbd4ef | refs/heads/master | 2021-06-05T14:38:27.959025 | 2021-05-09T11:42:10 | 2021-05-09T11:42:10 | 13,348,327 | 3 | 4 | null | 2016-10-16T13:51:11 | 2013-10-05T16:27:04 | Python | UTF-8 | Python | false | false | 184 | py | #Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/APC20/consts.py
MANUFACTURER_ID = 71
ABLETON_MODE = 65
NOTE_MODE = 67 | [
"[email protected]"
] | |
43bf58661d6e0ea6a9cd7d718c36496cdac79967 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02696/s249129827.py | 607d524390d78fa01a8e97afe32cade758efaa2e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | a,b,n = list(map(int,input().split()))
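# f(x) = A*x//B - A*(x//B) over 0 <= x <= N is maximized at x = min(N, B-1),
# since only x mod B matters and f grows with that remainder.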
i = min(n,b-1)
print(a*i//b) | [
"[email protected]"
] | |
66d338740fdb20e03fa286110358ce655f6f26da | e7031386a884ae8ed568d8c219b4e5ef1bb06331 | /ram/serializers.py | b87c62ef863a93dcee31c43b9f95f7700fede589 | [] | no_license | ikbolpm/ultrashop-backend | a59c54b8c4d31e009704c3bf0e963085477092cf | 290fa0ecdad40ec817867a019bff2ce82f08d6fe | refs/heads/dev | 2022-11-30T21:49:17.965273 | 2020-09-24T10:16:12 | 2020-09-24T10:16:12 | 147,561,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from rest_framework import serializers
from .models import Ram
class RamSerializer(serializers.ModelSerializer):
class Meta:
model = Ram
fields = [
'id',
'generation',
]
| [
"[email protected]"
] | |
9a55141c5b1f4d7d1780699da7390a14e77ea789 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/23833417cfe83723f088bea08e63844cae6f1121-<convert_dense_weights_data_format>-fix.py | 69a055d556bb6f7f95b4123fc14c444fa5fbfd70 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,728 | py |
import numpy as np


def convert_dense_weights_data_format(dense, previous_feature_map_shape, target_data_format='channels_first'):
    """Utility useful when changing a convnet's `data_format`.

    When porting the weights of a convnet from one data format to the other,
    if the convnet includes a `Flatten` layer
    (applied to the last convolutional feature map)
    followed by a `Dense` layer, the weights of that `Dense` layer
    should be updated to reflect the new dimension ordering.

    # Arguments
        dense: The target `Dense` layer.
        previous_feature_map_shape: A shape tuple of 3 integers,
            e.g. `(512, 7, 7)`. The shape of the convolutional
            feature map right before the `Flatten` layer that
            came before the target `Dense` layer.
        target_data_format: One of "channels_last", "channels_first".
            Set it "channels_last"
            if converting a "channels_first" model to "channels_last",
            or reciprocally.
    """
assert (target_data_format in {'channels_last', 'channels_first'})
(kernel, bias) = dense.get_weights()
for i in range(kernel.shape[1]):
if (target_data_format == 'channels_first'):
(c, h, w) = previous_feature_map_shape
original_fm_shape = (h, w, c)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (2, 0, 1))
else:
(h, w, c) = previous_feature_map_shape
original_fm_shape = (c, h, w)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (1, 2, 0))
kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
dense.set_weights([kernel, bias])
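# Illustrative usage (a Keras-style `Dense` layer object is assumed):
#   convert_dense_weights_data_format(dense_layer, (512, 7, 7), 'channels_last')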
| [
"[email protected]"
] | |
c382de0265f1f1ec89213eb1b3fdfd2e350d1205 | eeb469954b768095f2b8ad2376f1a114a3adb3fa | /399.py | 5b8d40d1b0b9c2f4aa9770d6b7d9621200cdd1c4 | [
"MIT"
] | permissive | RafaelHuang87/Leet-Code-Practice | ef18dda633932e3cce479f7d5411552d43da0259 | 7754dcee38ffda18a5759113ef06d7becf4fe728 | refs/heads/master | 2020-07-18T20:09:10.311141 | 2020-02-11T09:56:39 | 2020-02-11T09:56:39 | 206,305,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | class Solution:
def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
import collections
def dfs(x, y, graph, visited):
if x not in graph or y not in graph:
return -1
if x == y: return 1
for n in graph[x]:
if n in visited:
continue
visited.add(n)
d = dfs(n, y, graph, visited)
if d != -1:
return graph[x][n] * d
return -1
graph = collections.defaultdict(dict)
for (x, y), val in zip(equations, values):
graph[x][y] = val
graph[y][x] = 1.0 / val
return [dfs(x, y, graph, set()) for x, y in queries]
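# Illustrative check, using the classic example from the problem statement:
#   equations = [["a","b"],["b","c"]], values = [2.0, 3.0]
#   queries = [["a","c"],["b","a"],["a","e"],["a","a"],["x","x"]]
#   Solution().calcEquation(equations, values, queries)  # -> [6.0, 0.5, -1.0, 1.0, -1.0]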
| [
"[email protected]"
] | |
05bf8a2b61905ec7be7a20f74f1fac33bed20718 | 6a3af6fe669b2e17db1fa7d0751cbc4e04948079 | /fn_utilities/fn_utilities/components/utilities_xml_transformation.py | 4045c55e0ea7cf1fa36decd6a8d1b5931e341ba4 | [
"MIT"
] | permissive | jjfallete/resilient-community-apps | 5f0a728fe0be958acc44d982bf0289959f84aa20 | 2e3c4b6102555517bad22bf87fa4a06341714166 | refs/heads/master | 2022-04-17T13:20:36.961976 | 2020-04-13T07:03:54 | 2020-04-13T07:03:54 | 169,295,943 | 1 | 0 | MIT | 2020-04-13T07:03:56 | 2019-02-05T19:06:57 | Python | UTF-8 | Python | false | false | 3,313 | py | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
from lxml import etree
import os
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'utilities_xml_transformation"""
XML_DIR = "xml_stylesheet_dir"
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts.get("fn_utilities", {})
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get("fn_utilities", {})
@function("utilities_xml_transformation")
def _utilities_xml_transformation_function(self, event, *args, **kwargs):
"""Function: Perform a transformation of an xml document based on a given stylesheet"""
try:
# Get the function parameters:
xml_source = kwargs.get("xml_source") # text
xml_stylesheet = kwargs.get("xml_stylesheet") # text
validate_fields(("xml_source", "xml_stylesheet"), kwargs)
# confirm that our required parameter exists and is a directory
if not (self.options.get(FunctionComponent.XML_DIR) and os.path.isdir(self.options.get(FunctionComponent.XML_DIR))):
raise ValueError("missing or incorrectly specified configuration property: {}".format(FunctionComponent.XML_DIR))
log = logging.getLogger(__name__)
log.info("xml_source: %s", xml_source)
log.info("xml_stylesheet: %s", xml_stylesheet)
# get the stylesheet
stylesheet = os.path.join(self.options.get(FunctionComponent.XML_DIR), xml_stylesheet)
if not (os.path.exists(stylesheet) and os.path.isfile(stylesheet)):
raise ValueError("stylesheet file not found: {}".format(stylesheet))
yield StatusMessage("starting...")
parser = etree.XMLParser(ns_clean=True, recover=True, encoding="utf-8")
# read xsl file
xsl = open(stylesheet, mode="rb").read()
xsl_root = etree.fromstring(xsl, parser=parser)
transform = etree.XSLT(xsl_root)
# read xml
xml_root = etree.fromstring(xml_source.encode("utf-8"), parser=parser)
# transform xml with xslt
transformation_doc = transform(xml_root)
# return transformation result
result = etree.tostring(transformation_doc)
results = {
"content": result.decode("utf-8")
}
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception:
yield FunctionError()
def validate_fields(fieldList, kwargs):
"""
ensure required fields are present. Throw ValueError if not
:param fieldList:
:param kwargs:
:return: no return
"""
for field in fieldList:
if field not in kwargs or kwargs.get(field) == '':
raise ValueError('Required field is missing or empty: '+field) | [
"[email protected]"
] | |
2f50e3dff496deca069c7a1bdc2f9db071839fe6 | fc29ccdcf9983a54ae2bbcba3c994a77282ae52e | /Leetcode_By_Topic/backtrack-037.py | 86ec511949455de352be7b227edc243671f39727 | [] | no_license | linnndachen/coding-practice | d0267b197d9789ab4bcfc9eec5fb09b14c24f882 | 5e77c3d7a0632882d16dd064f0aad2667237ef37 | refs/heads/master | 2023-09-03T19:26:25.545006 | 2021-10-16T16:29:50 | 2021-10-16T16:29:50 | 299,794,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,559 | py | from typing import List
class Solution:
def solveSudoku(self, board: List[List[str]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
self.backtrack(board, 0, 0)
def backtrack(self, board, r, c):
# Go to next empty space
while board[r][c] != '.':
c += 1
if c == 9:
c, r = 0, r+1
if r == 9: # base case
return True
# for all possibilities,
for i in range(1, 10):
# if one of them works
if self.isValidSudokuMove(board, r, c, str(i)):
board[r][c] = str(i)
#continue to test if it will fit the rest
if self.backtrack(board, r, c):
return True
# backtracking if it doesn't work and continue
# with another possibility
board[r][c] = '.'
return False
def isValidSudokuMove(self, board, r, c, n):
# Check row
if any(board[r][j] == n for j in range(9)):
return False
# Check col
if any(board[i][c] == n for i in range(9)):
return False
# Check block
br, bc = 3*(r//3), 3*(c//3)
if any(board[i][j] == n for i in range(br, br+3) for j in range(bc, bc+3)):
return False
return True
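# Illustrative usage: `board` is a 9x9 list of lists holding '1'-'9' or '.';
# Solution().solveSudoku(board) fills the board in place.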
| [
"[email protected]"
] | |
bc91f06774edb26b8793277101e26cf610a1def6 | d29293cbaef904a8b1cae69b94ff215fe5e52af3 | /website-sosokan/sosokan/migrations/0056_auto_20161217_1903.py | e15639273abd6aff023c399eb51617ef08a2d02c | [] | no_license | Sosoking328/BeautyContest | e00e22a8b8539846e5f39802fab927804bf81e3e | 5689640be30d92a81a9f4d50e348fec26705a149 | refs/heads/master | 2022-12-11T22:53:31.419871 | 2017-07-21T20:56:39 | 2017-07-21T20:56:39 | 97,427,120 | 1 | 0 | null | 2022-12-07T23:57:43 | 2017-07-17T02:29:38 | Python | UTF-8 | Python | false | false | 1,514 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-12-18 00:03
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sosokan', '0055_auto_20161215_2006'),
]
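    # Note: the float/datetime defaults below were evaluated once when
    # makemigrations ran; they are frozen literals, not runtime defaults.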
operations = [
migrations.AlterField(
model_name='ad',
name='createdAt',
field=models.FloatField(default=1482019404.335),
),
migrations.AlterField(
model_name='ad',
name='created_on',
field=models.DateTimeField(default=datetime.datetime(2016, 12, 17, 19, 3, 24, 335000)),
),
migrations.AlterField(
model_name='ad',
name='descendingTime',
field=models.FloatField(default=-1482019404.335),
),
migrations.AlterField(
model_name='ad',
name='updatedAt',
field=models.FloatField(default=1482019404.335),
),
migrations.AlterField(
model_name='adimage',
name='createdAt',
field=models.FloatField(default=1482019404.336),
),
migrations.AlterField(
model_name='adimage',
name='descendingTime',
field=models.FloatField(default=-1482019404.336),
),
migrations.AlterField(
model_name='adimage',
name='updatedAt',
field=models.FloatField(default=1482019404.336),
),
]
| [
"[email protected]"
] | |
96b40d4b78c509377f89326c15ed85475f2edc54 | 87fdda531054e0bbbaa0c88fc8bb21426d8466c4 | /blog/views.py | 7e0f8857efdcd090ed7284d8c281e5d3206bdb87 | [] | no_license | fanyang668/mysite | ef771586f4ed192844875ff050047960bf63c9cd | 1d8e02997f90d4f0f4b46590a398d9f12083a210 | refs/heads/master | 2021-08-30T00:47:02.019599 | 2017-12-15T12:17:31 | 2017-12-15T12:17:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | from django.shortcuts import render, get_object_or_404
from .models import BlogArticles
# Create your views here.
def blog_title(request):
blogs = BlogArticles.objects.all()
return render(request, 'blog/titles.html', {'blogs': blogs})
def article_title(request, article_id):
# article = BlogArticles.objects.get(id=article_id)
article = get_object_or_404(BlogArticles, id=article_id)
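    # Unlike .get(), get_object_or_404 raises Http404 (rendered as an HTTP 404
    # response) instead of DoesNotExist when no matching row exists.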
pub = article.publish
return render(request, "blog/content.html", {"article": article, "publish": pub}) | [
"[email protected]"
] | |
679092e71078cb45dd798bcddb85b4492c792f93 | 5aa8671494c09de995259e3f1473f134ea7b93fd | /lithops/localhost/localhost.py | 3c0c882f604db59faa0b482b0c343cdc075f83e8 | [
"Apache-2.0"
] | permissive | notras/lithops | 440d6d9d70bb71e7360dcd82bb81f261975ca51b | 84b6b5cc66354fa4747d7cf4c8b4990c0201e400 | refs/heads/master | 2023-08-29T23:09:14.554607 | 2021-11-11T08:04:36 | 2021-11-11T08:04:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,284 | py | #
# (C) Copyright Cloudlab URV 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import shlex
import queue
import signal
import lithops
import logging
import shutil
import threading
import subprocess as sp
from shutil import copyfile
from pathlib import Path
from lithops.constants import RN_LOG_FILE, TEMP, LITHOPS_TEMP_DIR, COMPUTE_CLI_MSG, JOBS_PREFIX
from lithops.utils import is_lithops_worker, is_unix_system
logger = logging.getLogger(__name__)
RUNNER = os.path.join(LITHOPS_TEMP_DIR, 'runner.py')
LITHOPS_LOCATION = os.path.dirname(os.path.abspath(lithops.__file__))
class LocalhostHandler:
"""
A localhostHandler object is used by invokers and other components to access
underlying localhost backend without exposing the implementation details.
"""
def __init__(self, localhost_config):
logger.debug('Creating Localhost compute client')
self.config = localhost_config
self.env = {} # dict to store environments
self.job_queue = queue.Queue()
self.job_manager = None
self.should_run = True
msg = COMPUTE_CLI_MSG.format('Localhost compute')
logger.info("{}".format(msg))
def init(self):
"""
Init tasks for localhost
"""
pass
def start_manager(self):
"""
Starts manager thread to keep order in tasks
"""
def job_manager():
logger.debug('Staring localhost job manager')
self.should_run = True
while self.should_run:
job_payload, job_filename = self.job_queue.get()
if job_payload is None and job_filename is None:
break
executor_id = job_payload['executor_id']
job_id = job_payload['job_id']
runtime_name = job_payload['runtime_name']
env = self.get_env(runtime_name)
process = env.run(job_payload, job_filename)
process.communicate() # blocks until the process finishes
logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Execution finished')
if self.job_queue.empty():
break
self.job_manager = None
logger.debug("Localhost job manager stopped")
if not self.job_manager:
self.job_manager = threading.Thread(target=job_manager)
self.job_manager.start()
def _get_env_type(self, runtime_name):
"""
Gets the environment type based on the runtime name
"""
return 'default' if '/' not in runtime_name else 'docker'
def get_env(self, runtime_name):
"""
Generates the proper runtime environment based on the runtime name
"""
if runtime_name not in self.env:
if '/' not in runtime_name:
env = DefaultEnv()
else:
pull_runtime = self.config.get('pull_runtime', False)
env = DockerEnv(runtime_name, pull_runtime)
env.setup()
self.env[runtime_name] = env
return self.env[runtime_name]
def create_runtime(self, runtime_name, *args):
"""
Extract the runtime metadata and preinstalled modules
"""
logger.info(f"Extracting preinstalled Python modules from {runtime_name}")
env = self.get_env(runtime_name)
runtime_metadata = env.preinstalls()
return runtime_metadata
def invoke(self, job_payload):
"""
Run the job description against the selected environment
"""
executor_id = job_payload['executor_id']
job_id = job_payload['job_id']
runtime_name = job_payload['runtime_name']
logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Putting job into localhost queue')
self.start_manager()
env = self.get_env(runtime_name)
job_filename = env._prepare_job_file(job_payload)
self.job_queue.put((job_payload, job_filename))
def get_runtime_key(self, runtime_name, *args):
"""
Generate the runtime key that identifies the runtime
"""
env_type = self._get_env_type(runtime_name)
runtime_key = os.path.join('localhost', env_type, runtime_name.strip("/"))
return runtime_key
def get_backend_type(self):
"""
Wrapper method that returns the type of the backend (Batch or FaaS)
"""
return 'batch'
def clean(self, **kwargs):
"""
Deletes all local runtimes
"""
pass
def clear(self, job_keys=None):
"""
Kills all running jobs processes
"""
self.should_run = False
while not self.job_queue.empty():
try:
self.job_queue.get(False)
except Exception:
pass
for runtime_name in self.env:
self.env[runtime_name].stop(job_keys)
if self.job_manager:
self.job_queue.put((None, None))
self.should_run = True
class BaseEnv():
"""
Base environment class for shared methods
"""
def __init__(self, runtime):
self.runtime = runtime
self.jobs = {} # dict to store executed jobs (job_keys) and PIDs
def _copy_lithops_to_tmp(self):
if is_lithops_worker() and os.path.isfile(RUNNER):
return
os.makedirs(LITHOPS_TEMP_DIR, exist_ok=True)
try:
shutil.rmtree(os.path.join(LITHOPS_TEMP_DIR, 'lithops'))
except FileNotFoundError:
pass
shutil.copytree(LITHOPS_LOCATION, os.path.join(LITHOPS_TEMP_DIR, 'lithops'))
src_handler = os.path.join(LITHOPS_LOCATION, 'localhost', 'runner.py')
copyfile(src_handler, RUNNER)
def _prepare_job_file(self, job_payload):
"""
Creates the job file that contains the job payload to be executed
"""
job_key = job_payload['job_key']
storage_bucket = job_payload['config']['lithops']['storage_bucket']
local_job_dir = os.path.join(LITHOPS_TEMP_DIR, storage_bucket, JOBS_PREFIX)
docker_job_dir = f'/tmp/lithops/{storage_bucket}/{JOBS_PREFIX}'
job_file = f'{job_key}-job.json'
os.makedirs(local_job_dir, exist_ok=True)
local_job_filename = os.path.join(local_job_dir, job_file)
with open(local_job_filename, 'w') as jl:
json.dump(job_payload, jl, default=str)
if isinstance(self, DockerEnv):
job_filename = '{}/{}'.format(docker_job_dir, job_file)
else:
job_filename = local_job_filename
return job_filename
def stop(self, job_keys=None):
"""
Stops running processes
"""
def kill_job(job_key):
if self.jobs[job_key].poll() is None:
logger.debug(f'Killing job {job_key} with PID {self.jobs[job_key].pid}')
PID = self.jobs[job_key].pid
if is_unix_system():
PGID = os.getpgid(PID)
os.killpg(PGID, signal.SIGKILL)
else:
os.kill(PID, signal.SIGTERM)
del self.jobs[job_key]
to_delete = job_keys or list(self.jobs.keys())
for job_key in to_delete:
try:
if job_key in self.jobs:
kill_job(job_key)
except Exception:
pass
class DockerEnv(BaseEnv):
"""
Docker environment uses a docker runtime image
"""
def __init__(self, docker_image, pull_runtime):
logger.debug(f'Starting Docker Environment for {docker_image}')
super().__init__(runtime=docker_image)
self.pull_runtime = pull_runtime
self.uid = os.getuid() if is_unix_system() else None
self.gid = os.getuid() if is_unix_system() else None
def setup(self):
logger.debug('Setting up Docker environment')
self._copy_lithops_to_tmp()
if self.pull_runtime:
logger.debug('Pulling Docker runtime {}'.format(self.runtime))
sp.run(shlex.split(f'docker pull {self.runtime}'), check=True,
stdout=sp.PIPE, universal_newlines=True)
def preinstalls(self):
if not os.path.isfile(RUNNER):
self.setup()
tmp_path = Path(TEMP).as_posix()
cmd = 'docker run '
cmd += f'--user {self.uid}:{self.gid} ' if is_unix_system() else ''
cmd += f'--rm -v {tmp_path}:/tmp --entrypoint "python3" {self.runtime} /tmp/lithops/runner.py preinstalls'
process = sp.run(shlex.split(cmd), check=True, stdout=sp.PIPE,
universal_newlines=True, start_new_session=True)
runtime_meta = json.loads(process.stdout.strip())
return runtime_meta
def run(self, job_payload, job_filename):
"""
Runs a job
"""
executor_id = job_payload['executor_id']
job_id = job_payload['job_id']
total_calls = len(job_payload['call_ids'])
job_key = job_payload['job_key']
logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Running '
f'{total_calls} activations in the localhost worker')
if not os.path.isfile(RUNNER):
self.setup()
tmp_path = Path(TEMP).as_posix()
cmd = f'docker run --name lithops_{job_key} '
cmd += f'--user {self.uid}:{self.gid} ' if is_unix_system() else ''
cmd += f'--rm -v {tmp_path}:/tmp --entrypoint "python3" {self.runtime} /tmp/lithops/runner.py run {job_filename}'
log = open(RN_LOG_FILE, 'a')
process = sp.Popen(shlex.split(cmd), stdout=log, stderr=log, start_new_session=True)
self.jobs[job_key] = process
return process
def stop(self, job_keys=None):
"""
Stops running containers
"""
if job_keys:
for job_key in job_keys:
sp.Popen(shlex.split(f'docker rm -f lithops_{job_key}'),
stdout=sp.DEVNULL, stderr=sp.DEVNULL)
else:
for job_key in self.jobs:
sp.Popen(shlex.split(f'docker rm -f lithops_{job_key}'),
stdout=sp.DEVNULL, stderr=sp.DEVNULL)
super().stop(job_keys)
class DefaultEnv(BaseEnv):
"""
Default environment uses current python3 installation
"""
def __init__(self):
logger.debug(f'Starting Default Environment for {sys.executable}')
super().__init__(runtime=sys.executable)
def setup(self):
logger.debug('Setting up Default environment')
self._copy_lithops_to_tmp()
def preinstalls(self):
if not os.path.isfile(RUNNER):
self.setup()
cmd = [self.runtime, RUNNER, 'preinstalls']
process = sp.run(cmd, check=True, stdout=sp.PIPE, universal_newlines=True,
start_new_session=True)
runtime_meta = json.loads(process.stdout.strip())
return runtime_meta
def run(self, job_payload, job_filename):
"""
Runs a job
"""
executor_id = job_payload['executor_id']
job_id = job_payload['job_id']
total_calls = len(job_payload['call_ids'])
job_key = job_payload['job_key']
logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Running '
f'{total_calls} activations in the localhost worker')
if not os.path.isfile(RUNNER):
self.setup()
cmd = [self.runtime, RUNNER, 'run', job_filename]
log = open(RN_LOG_FILE, 'a')
process = sp.Popen(cmd, stdout=log, stderr=log, start_new_session=True)
self.jobs[job_key] = process
return process
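# Minimal usage sketch (assumes a localhost backend config dict as built by the
# Lithops config loader; 'pull_runtime' is the only key read directly here):
#
#   handler = LocalhostHandler({'pull_runtime': False})
#   handler.init()
#   handler.invoke(job_payload)  # job_payload as produced by the Lithops invoker
#   handler.clear()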
| [
"[email protected]"
] | |
78e0b151b1426fb3a47c554d33ae1df1193ee67f | 7f771a20879dab8bb48309d98ffe6f1125204abb | /users/views.py | bcb4b9ec473582737beabc6e0e500e6d409a4b23 | [] | no_license | sergiy-chumachenko/all-auth | 34269aadada2d8d7dbc32c64ec5435ba9c68bbae | 79ec735c702f535360f8b547b71e9e14d5be6e0c | refs/heads/master | 2022-12-12T00:38:02.378627 | 2020-07-12T09:05:21 | 2020-07-12T09:05:21 | 193,956,902 | 1 | 0 | null | 2022-04-22T21:53:14 | 2019-06-26T18:21:24 | Python | UTF-8 | Python | false | false | 270 | py | from django.views.generic import CreateView
from django.urls import reverse_lazy
from .forms import CustomUserCreationForm
class SignUpView(CreateView):
template_name = 'signup.html'
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
| [
"[email protected]"
] | |
484e7b413bbeb560929b680ac097f71c1dd5e2d9 | 26fc334777ce27d241c67d97adc1761e9d23bdba | /tests/django_tests/tests/staticfiles_tests/cases.py | 918ec4f99e43c2544f29a71013c544dcea1e3953 | [
"BSD-3-Clause"
] | permissive | alihoseiny/djongo | 1434c9e78c77025d7e0b3330c3a40e9ea0029877 | e2edf099e398573faa90e5b28a32c3d7f1c5f1e9 | refs/heads/master | 2020-03-27T23:27:02.530397 | 2018-08-30T14:44:37 | 2018-08-30T14:44:37 | 147,317,771 | 2 | 1 | BSD-3-Clause | 2018-09-04T09:00:53 | 2018-09-04T09:00:53 | null | UTF-8 | Python | false | false | 4,385 | py | import codecs
import os
import shutil
import tempfile
from django.conf import settings
from django.core.management import call_command
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from .settings import TEST_SETTINGS
class BaseStaticFilesMixin:
"""
Test case with a couple utility assertions.
"""
def assertFileContains(self, filepath, text):
self.assertIn(
text,
self._get_file(filepath),
"'%s' not in '%s'" % (text, filepath),
)
def assertFileNotFound(self, filepath):
with self.assertRaises(IOError):
self._get_file(filepath)
def render_template(self, template, **kwargs):
if isinstance(template, str):
template = Template(template)
return template.render(Context(**kwargs)).strip()
def static_template_snippet(self, path, asvar=False):
if asvar:
return "{%% load static from static %%}{%% static '%s' as var %%}{{ var }}" % path
return "{%% load static from static %%}{%% static '%s' %%}" % path
def assertStaticRenders(self, path, result, asvar=False, **kwargs):
template = self.static_template_snippet(path, asvar)
self.assertEqual(self.render_template(template, **kwargs), result)
def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
with self.assertRaises(exc):
self.assertStaticRenders(path, result, **kwargs)
@override_settings(**TEST_SETTINGS)
class StaticFilesTestCase(BaseStaticFilesMixin, SimpleTestCase):
pass
@override_settings(**TEST_SETTINGS)
class CollectionTestCase(BaseStaticFilesMixin, SimpleTestCase):
"""
Tests shared by all file finding features (collectstatic,
findstatic, and static serve view).
This relies on the asserts defined in BaseStaticFilesTestCase, but
is separated because some test cases need those asserts without
all these tests.
"""
def setUp(self):
super().setUp()
temp_dir = tempfile.mkdtemp()
# Override the STATIC_ROOT for all tests from setUp to tearDown
# rather than as a context manager
self.patched_settings = self.settings(STATIC_ROOT=temp_dir)
self.patched_settings.enable()
self.run_collectstatic()
# Same comment as in runtests.teardown.
self.addCleanup(shutil.rmtree, temp_dir)
def tearDown(self):
self.patched_settings.disable()
super().tearDown()
def run_collectstatic(self, *, verbosity=0, **kwargs):
call_command('collectstatic', interactive=False, verbosity=verbosity,
ignore_patterns=['*.ignoreme'], **kwargs)
def _get_file(self, filepath):
assert filepath, 'filepath is empty.'
filepath = os.path.join(settings.STATIC_ROOT, filepath)
with codecs.open(filepath, "r", "utf-8") as f:
return f.read()
class TestDefaults:
"""
A few standard test cases.
"""
def test_staticfiles_dirs(self):
"""
Can find a file in a STATICFILES_DIRS directory.
"""
self.assertFileContains('test.txt', 'Can we find')
self.assertFileContains(os.path.join('prefix', 'test.txt'), 'Prefix')
def test_staticfiles_dirs_subdir(self):
"""
Can find a file in a subdirectory of a STATICFILES_DIRS
directory.
"""
self.assertFileContains('subdir/test.txt', 'Can we find')
def test_staticfiles_dirs_priority(self):
"""
File in STATICFILES_DIRS has priority over file in app.
"""
self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')
def test_app_files(self):
"""
Can find a file in an app static/ directory.
"""
self.assertFileContains('test/file1.txt', 'file1 in the app dir')
def test_nonascii_filenames(self):
"""
Can find a file with non-ASCII character in an app static/ directory.
"""
self.assertFileContains('test/⊗.txt', '⊗ in the app dir')
def test_camelcase_filenames(self):
"""
Can find a file with capital letters.
"""
self.assertFileContains('test/camelCase.txt', 'camelCase')
def test_filename_with_percent_sign(self):
self.assertFileContains('test/%2F.txt', '%2F content')
| [
"[email protected]"
] | |
a47718f6027b7994870b738ed4ba9c3887c47f3b | 993ff3e6eb510b5083db5f15f2c0299c05a2c0f6 | /hr_leave_calculation/models/hr_payslip_leave.py | 249a42b9c4bfe8dcaf554da7e24cc2ec02be2c22 | [] | no_license | Raghupathy15/Sigma | 36b24e76e81bad4ae5435508018f1c960878901d | 42eed3b211a7be8d8c9b70dc359a432c02be07f1 | refs/heads/main | 2023-01-31T18:25:09.712666 | 2020-12-09T09:22:17 | 2020-12-09T09:22:17 | 319,904,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | import base64
import logging
from odoo import api, fields, models
from odoo import tools, _
from odoo.exceptions import ValidationError, AccessError
from odoo.modules.module import get_module_resource
from datetime import datetime
class HrPayslipLeave(models.Model):
_name='hr.payslip.leave'
    # Many2one link back to hr.payslip (the "many" side of the payslip's one2many)
leave_payslip_ids = fields.Many2one('hr.payslip')
leaves_taken = fields.Float(string="Leaves Taken")
leave_type_id = fields.Many2one('hr.leave.allocation',string="Leave Type")
employee_id = fields.Many2one('hr.employee',string="Employee Name",related="leave_payslip_ids.employee_id",store=True)
name = fields.Many2one('hr.employee',related="employee_id")
date_from = fields.Date(string="From Date",related="leave_payslip_ids.date_from",store=True)
date_to = fields.Date(string="To Date",related="leave_payslip_ids.date_to",store=True)
current_year = fields.Integer(string="Current Year",compute="check_year")
@api.multi
@api.depends('date_from')
def check_year(self):
for line in self:
present_year = line.date_from
line.current_year = present_year.year
@api.multi
def unlink(self):
return super(HrPayslipLeave, self).unlink() | [
"[email protected]"
] | |
650dd79043efc6560641569792262a2d69200509 | cbc5e26bb47ae69e80a3649c90275becf25ce404 | /xlsxwriter/test/styles/test_write_cell_xfs.py | 3dc4700ec4158cc5e3987704539c462907e5ea23 | [
"BSD-2-Clause-Views",
"BSD-3-Clause",
"MIT"
] | permissive | mst-solar-car/kicad-bom-generator | c3549409c3139f787ad28391372b5cb03791694a | 2aae905056d06f3d25343a8d784049c141d05640 | refs/heads/master | 2021-09-07T14:00:40.759486 | 2018-02-23T23:21:13 | 2018-02-23T23:21:13 | 107,868,801 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ...styles import Styles
from ...format import Format
class TestWriteCellXfs(unittest.TestCase):
"""
Test the Styles _write_cell_xfs() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_cell_xfs(self):
"""Test the _write_cell_xfs() method"""
xf_format = Format()
xf_format.has_font = 1
self.styles._set_style_properties([[xf_format], None, 1, 0, 0, 0, [], []])
self.styles._write_cell_xfs()
exp = """<cellXfs count="1"><xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/></cellXfs>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
| [
"[email protected]"
] | |
3ff9e02698530cd2acb3ebe154d35b9db080f78c | 2fed297f777ac6a01f21870d74e4259ed0c17dfd | /examples/distributions/continuous/comparisons.py | f3741b9fc6332da0e64e6626ef4e8762716053cb | [
"MIT"
] | permissive | vahndi/probability | 1bf4e5e7835d5dc57b5a5a43d76ded2b23975ec5 | ff3f5434d3da0d46b127b02cf733699e5a43c904 | refs/heads/master | 2023-05-24T17:04:00.430326 | 2023-05-22T13:53:41 | 2023-05-22T13:53:41 | 231,456,036 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | import matplotlib.pyplot as plt
from math import sqrt
from numpy import arange
from examples.colors import ML_APP_DARK_BLUE
from probability.distributions import Normal, Laplace
from probability.distributions.continuous.students_t import StudentsT
x = arange(-4, 4.01, 0.05)
def plot_normal_students_t_laplace():
"""
Machine Learning: A Probabilistic Perspective. Figure 2.7
"""
_, axes = plt.subplots(ncols=2, figsize=(16, 9))
# define distributions
normal = Normal(mu=0, sigma=1)
students_t = StudentsT(nu=1)
laplace = Laplace(mu=0, b=1 / sqrt(2))
# plot pdfs
ax = axes[0]
normal.plot(x=x, ls=':', color='black', ax=ax)
students_t.plot(x=x, ls='--', color=ML_APP_DARK_BLUE, ax=ax)
laplace.plot(x=x, ls='-', color='red', ax=ax)
ax.set_ylim(0, 0.8)
ax.legend(loc='upper right')
# plot log-pdfs
ax = axes[1]
normal.log_pdf().plot(x=x, ls=':', color='black', ax=ax)
students_t.log_pdf().plot(x=x, ls='--', color=ML_APP_DARK_BLUE, ax=ax)
laplace.log_pdf().plot(x=x, ls='-', color='red', ax=ax)
ax.set_ylim(-9, 0)
ax.legend(loc='upper right')
plt.show()
if __name__ == '__main__':
plot_normal_students_t_laplace()
| [
"[email protected]"
] | |
efa5ecd7bb2cc2f159e12d3e479f215733507223 | 7afcf3cf0f55ecc255aabdda3b90c44528f53b50 | /Crawler/tg/tg/settings.py | a930596fa6bee0605dcf347a66676d581f174813 | [] | no_license | entscheidsuche/scraper | 368c6ac8fd14e15116c26f936f32d2ed0acac2ae | b9fafd3f1c2600a78471d4e4c466250ab11a8f33 | refs/heads/master | 2023-04-05T22:09:20.270314 | 2021-04-18T19:29:24 | 2021-04-18T19:29:24 | 264,894,732 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,128 | py | # -*- coding: utf-8 -*-
# Scrapy settings for tg project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tg'
SPIDER_MODULES = ['tg.spiders']
NEWSPIDER_MODULE = 'tg.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tg (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
MYFILESPIPELINE_FILES_EXPIRES = 365000
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tg.middlewares.TgSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tg.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'tg.pipelines.TgPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"[email protected]"
] | |
9f52785f1e128273a432d4fd107afdce23cc2482 | 4142b8c513d87361da196631f7edd82f11465abb | /python/globalround17/1610A.py | 43f737746b3709c50f36fc2d47e6019e07c30e65 | [] | no_license | npkhanhh/codeforces | b52b66780426682ea1a3d72c66aedbe6dc71d7fe | 107acd623b0e99ef0a635dfce3e87041347e36df | refs/heads/master | 2022-02-08T17:01:01.731524 | 2022-02-07T10:29:52 | 2022-02-07T10:29:52 | 228,027,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | from sys import stdin
for _ in range(int(stdin.readline())):
a, b = list(map(int, stdin.readline().split()))
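    # A 1x1 grid needs 0 (handled below); a single row or column needs 1;
    # anything larger needs 2, which is exactly min(a, b, 2).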
if a == 1 and b == 1:
print(0)
else:
print(min([a, b, 2]))
| [
"[email protected]"
] | |
6f61be457dacf2ea995176cc6b5ab19159a4408c | 0d87906ca32b68965c3aa5b4cb829383276b13c8 | /tests/extension/thread_/axi_dma_long_wide/thread_axi_dma_long_wide.py | b470e2299c027001f75aea514eca00311c865250 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | tanbour/veriloggen | 301beea3d9419c2d63d1d1159a2ec52ed316ef20 | 858fbc872be78964cfc7e5a23e1491b2c3d5cf52 | refs/heads/master | 2020-03-18T20:38:24.653119 | 2018-05-19T04:49:01 | 2018-05-19T04:49:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,275 | py | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed(memory_datawidth=128):
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
datawidth = 32
addrwidth = 10
myaxi = vthread.AXIM(m, 'myaxi', clk, rst, memory_datawidth)
myram = vthread.RAM(m, 'myram', clk, rst, datawidth, addrwidth)
all_ok = m.TmpReg(initval=0)
def blink(size):
all_ok.value = True
# Test for 4KB boundary check
offset = myaxi.boundary_size - 16
body(size, offset)
if all_ok:
print('ALL OK')
def body(size, offset):
# write
for i in range(size):
wdata = i + 100
myram.write(i, wdata)
laddr = 0
gaddr = offset
myaxi.dma_write(myram, laddr, gaddr, size)
print('dma_write: [%d] -> [%d]' % (laddr, gaddr))
# write
for i in range(size):
wdata = i + 1000
myram.write(i, wdata)
laddr = 0
gaddr = (size + size) * 4 + offset
myaxi.dma_write(myram, laddr, gaddr, size)
print('dma_write: [%d] -> [%d]' % (laddr, gaddr))
# read
laddr = 0
gaddr = offset
myaxi.dma_read(myram, laddr, gaddr, size)
print('dma_read: [%d] <- [%d]' % (laddr, gaddr))
for i in range(size):
rdata = myram.read(i)
if vthread.verilog.NotEql(rdata, i + 100):
print('rdata[%d] = %d' % (i, rdata))
all_ok.value = False
# read
laddr = 0
gaddr = (size + size) * 4 + offset
myaxi.dma_read(myram, laddr, gaddr, size)
print('dma_read: [%d] <- [%d]' % (laddr, gaddr))
for i in range(size):
rdata = myram.read(i)
if vthread.verilog.NotEql(rdata, i + 1000):
print('rdata[%d] = %d' % (i, rdata))
all_ok.value = False
th = vthread.Thread(m, 'th_blink', clk, rst, blink)
fsm = th.start(256 + 256 + 64)
return m
def mkTest(memory_datawidth=128):
m = Module('test')
# target instance
led = mkLed(memory_datawidth)
# copy paras and ports
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memory_datawidth)
memory.connect(ports, 'myaxi')
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000000),
Systask('finish'),
)
return m
if __name__ == '__main__':
test = mkTest()
verilog = test.to_verilog('tmp.v')
print(verilog)
sim = simulation.Simulator(test)
rslt = sim.run()
print(rslt)
| [
"[email protected]"
] | |
b179a283949bcad747cb9b49e48b4d422e022aaa | 4b68243d9db908945ee500174a8a12be27d150f9 | /pogoprotos/networking/requests/messages/use_item_move_reroll_message_pb2.py | 273b1820e6a4533e1e3bde4d1ba0975d24b6c332 | [] | no_license | ykram/pogoprotos-py | 7285c86498f57dcbbec8e6c947597e82b2518d80 | a045b0140740625d9a19ded53ece385a16c4ad4a | refs/heads/master | 2020-04-20T10:19:51.628964 | 2019-02-02T02:58:03 | 2019-02-02T02:58:03 | 168,787,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,705 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/use_item_move_reroll_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.inventory.item import item_id_pb2 as pogoprotos_dot_inventory_dot_item_dot_item__id__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/use_item_move_reroll_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nJpogoprotos/networking/requests/messages/use_item_move_reroll_message.proto\x12\'pogoprotos.networking.requests.messages\x1a\'pogoprotos/inventory/item/item_id.proto\"\x80\x01\n\x18UseItemMoveRerollMessage\x12\x32\n\x07item_id\x18\x01 \x01(\x0e\x32!.pogoprotos.inventory.item.ItemId\x12\x12\n\npokemon_id\x18\x02 \x01(\x06\x12\x1c\n\x14reroll_unlocked_move\x18\x03 \x01(\x08\x62\x06proto3')
,
dependencies=[pogoprotos_dot_inventory_dot_item_dot_item__id__pb2.DESCRIPTOR,])
_USEITEMMOVEREROLLMESSAGE = _descriptor.Descriptor(
name='UseItemMoveRerollMessage',
full_name='pogoprotos.networking.requests.messages.UseItemMoveRerollMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='item_id', full_name='pogoprotos.networking.requests.messages.UseItemMoveRerollMessage.item_id', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pokemon_id', full_name='pogoprotos.networking.requests.messages.UseItemMoveRerollMessage.pokemon_id', index=1,
number=2, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reroll_unlocked_move', full_name='pogoprotos.networking.requests.messages.UseItemMoveRerollMessage.reroll_unlocked_move', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=161,
serialized_end=289,
)
_USEITEMMOVEREROLLMESSAGE.fields_by_name['item_id'].enum_type = pogoprotos_dot_inventory_dot_item_dot_item__id__pb2._ITEMID
DESCRIPTOR.message_types_by_name['UseItemMoveRerollMessage'] = _USEITEMMOVEREROLLMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UseItemMoveRerollMessage = _reflection.GeneratedProtocolMessageType('UseItemMoveRerollMessage', (_message.Message,), dict(
DESCRIPTOR = _USEITEMMOVEREROLLMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.use_item_move_reroll_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.UseItemMoveRerollMessage)
))
_sym_db.RegisterMessage(UseItemMoveRerollMessage)
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
e9cb1df974800fca9dbdcdd57dfd6d44af0d781e | a74cabbe1b11fc8ef575ea86f2543cd95db78ec9 | /python_program/q813_Largest_Sum_of_Averages.py | ba6895f075efa57a8d1917bfbb87dbf17a1b3760 | [] | no_license | tszandy/leetcode | 87e3ccf291b2879637d2d8238935a455b401a78a | f1f4361541dcffbb291285663c8820d7ffb37d2f | refs/heads/master | 2023-04-06T15:34:04.847875 | 2023-03-26T12:22:42 | 2023-03-26T12:22:42 | 204,069,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | from typing import List
from collections import Counter,defaultdict,deque
from math import *
from functools import reduce,lru_cache,total_ordering
import numpy as np
from heapq import *
from bisect import bisect_left,bisect_right
from itertools import count,zip_longest
import queue
class Solution:
    def largestSumOfAverages(self, nums: List[int], k: int) -> float:
        # clear the memo first: self.nums changes between calls but is not part
        # of the lru_cache key, so stale entries from a previous call would be reused
        self.dp.cache_clear()
        self.n = len(nums)
        self.nums = nums
        return self.dp(0,k)
@lru_cache(None)
def dp(self,i,k):
if k==1:
return self.average(self.nums[i:])
max_score = 0
for j in range(i+1,self.n-1-(k-3)):
max_score = max(max_score,self.average(self.nums[i:j])+self.dp(j,k-1))
return max_score
def average(self,arr):
return sum(arr)/len(arr)
sol = Solution()
# input
nums = [9,1,2,3,9,1,2,3,4,1,4,21,4,69,2,3,3,2,15,1,32,2,3,1,70,5,2,3,2,2,1,1,5,2,1,45]
k = 20
# output
output = sol.largestSumOfAverages(nums,k)
# answer
answer = 303.53333
print(output, answer, answer == output)
# input
nums = [1,2,3,4,5,6,7]
k = 4
# output
output = sol.largestSumOfAverages(nums,k)
# answer
answer = 20.50000
print(output, answer, answer == output)
# input
nums = [9,1,2,3,9]
k = 3
# output
output = sol.largestSumOfAverages(nums,k)
# answer
answer = 20.00000
print(output, answer, answer == output)
| [
"[email protected]"
] | |
675804bc3995971da75d7f7dc54fd7b0cfca0c94 | 9ac35a2327ca9fddcf55077be58a1babffd23bdd | /cadence/replay_interceptor.py | 7a511b6fc7c5bd8161e0861eb3de409232798476 | [
"MIT"
] | permissive | meetchandan/cadence-python | f1eb987c135f620607a62495096a89494216d847 | cfd7a48e6da7c289c9ae0c29c94d12d2b05986e4 | refs/heads/master | 2022-12-14T12:46:32.364375 | 2020-09-16T15:50:55 | 2020-09-16T15:50:55 | 260,763,097 | 1 | 0 | MIT | 2020-09-16T15:48:14 | 2020-05-02T19:47:56 | Python | UTF-8 | Python | false | false | 719 | py | import inspect
from typing import Callable
def get_replay_aware_interceptor(fn: Callable):
def interceptor(*args, **kwargs):
from cadence.decision_loop import ITask
task: ITask = ITask.current()
if not task.decider.decision_context.is_replaying():
return fn(*args, **kwargs)
return interceptor
def make_replay_aware(target: object):
# TODO: Consider using metaclasses instead
if hasattr(target, "_cadence_python_intercepted"):
return target
for name, fn in inspect.getmembers(target):
if inspect.ismethod(fn):
setattr(target, name, get_replay_aware_interceptor(fn))
target._cadence_python_intercepted = True
return target
| [
"[email protected]"
] | |
9fa1df29f6ebdaf4a3ff97f7ab176ba0af22ab0b | a71582e89e84a4fae2595f034d06af6d8ad2d43a | /tensorflow/python/keras/engine/sequential_test.py | afd7d230f9a8e69ed45e374de90216580de5a367 | [
"Apache-2.0"
] | permissive | tfboyd/tensorflow | 5328b1cabb3e24cb9534480fe6a8d18c4beeffb8 | 865004e8aa9ba630864ecab18381354827efe217 | refs/heads/master | 2021-07-06T09:41:36.700837 | 2019-04-01T20:21:03 | 2019-04-01T20:26:09 | 91,494,603 | 3 | 0 | Apache-2.0 | 2018-07-17T22:45:10 | 2017-05-16T19:06:01 | C++ | UTF-8 | Python | false | false | 14,560 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests specific to `Sequential` model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class TestSequential(keras_parameterized.TestCase):
"""Most Sequential model API tests are covered in `training_test.py`.
"""
@keras_parameterized.run_all_keras_modes
def test_basic_methods(self):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=2))
model.add(keras.layers.Dropout(0.3, name='dp'))
model.add(keras.layers.Dense(2, kernel_regularizer='l2',
kernel_constraint='max_norm'))
self.assertEqual(len(model.layers), 3)
self.assertEqual(len(model.weights), 2 * 2)
self.assertEqual(model.get_layer(name='dp').name, 'dp')
@keras_parameterized.run_all_keras_modes
def test_input_defined_first_layer(self):
model = keras.models.Sequential()
model.add(keras.Input(shape=(2,), name='input_layer'))
model.add(keras.layers.Dense(1))
model.add(keras.layers.Dropout(0.3, name='dp'))
model.add(keras.layers.Dense(2, kernel_regularizer='l2',
kernel_constraint='max_norm'))
self.assertLen(model.layers, 3)
self.assertLen(model.weights, 2 * 2)
self.assertEqual(model.get_layer(name='dp').name, 'dp')
@keras_parameterized.run_all_keras_modes
def test_sequential_pop(self):
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
model = testing_utils.get_small_sequential_mlp(
num_hidden, num_classes, input_dim)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
x = np.random.random((batch_size, input_dim))
y = np.random.random((batch_size, num_classes))
model.fit(x, y, epochs=1)
model.pop()
self.assertEqual(len(model.layers), 1)
self.assertEqual(model.output_shape, (None, num_hidden))
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
y = np.random.random((batch_size, num_hidden))
model.fit(x, y, epochs=1)
# Test popping single-layer model
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.pop()
self.assertEqual(model.layers, [])
self.assertEqual(model.outputs, None)
# Invalid use case
model = keras.models.Sequential()
with self.assertRaises(TypeError):
model.pop()
@keras_parameterized.run_all_keras_modes
def test_sequential_deferred_build_with_np_arrays(self):
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(len(model.layers), 2)
self.assertEqual(len(model.weights), 0)
self.assertFalse(model.built)
x = np.random.random((batch_size, input_dim))
y = np.random.random((batch_size, num_classes))
model.fit(x, y, epochs=1)
self.assertTrue(model.built)
self.assertFalse(model._is_graph_network)
self.assertEqual(len(model.weights), 2 * 2)
@keras_parameterized.run_all_keras_modes
def test_sequential_deferred_build_with_dataset_iterators(self):
num_hidden = 5
input_dim = 3
num_classes = 2
num_samples = 50
steps_per_epoch = 10
model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(len(model.layers), 2)
self.assertEqual(len(model.weights), 0)
self.assertFalse(model.built)
x = array_ops.ones((num_samples, input_dim))
y = array_ops.zeros((num_samples, num_classes))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset_ops.make_one_shot_iterator(dataset)
model.fit(iterator, epochs=1, steps_per_epoch=steps_per_epoch)
self.assertTrue(model.built)
self.assertEqual(len(model.weights), 2 * 2)
self.assertFalse(model._is_graph_network)
# TODO(kaftan) This test fails w/ run_with_all_keras_modes. File ticket
@parameterized.parameters((True,), (False,))
@tf_test_util.run_deprecated_v1
def test_training_and_eval_methods_on_symbolic_tensors(self, deferred):
with self.cached_session():
def get_model():
if deferred:
model = testing_utils.get_small_sequential_mlp(10, 4)
else:
model = testing_utils.get_small_sequential_mlp(10, 4, input_dim=3)
model.compile(
optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
inputs = keras.backend.zeros(shape=(10, 3))
targets = keras.backend.zeros(shape=(10, 4))
model = get_model()
model.fit(inputs, targets, epochs=10, steps_per_epoch=30)
model = get_model()
model.evaluate(inputs, targets, steps=2, verbose=0)
model = get_model()
model.predict(inputs, steps=2)
model = get_model()
model.train_on_batch(inputs, targets)
model = get_model()
model.test_on_batch(inputs, targets)
model = get_model()
model.fit(
inputs,
targets,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_data=(inputs, targets),
validation_steps=2)
@keras_parameterized.run_all_keras_modes
def test_invalid_use_cases(self):
# Added objects must be layer instances
with self.assertRaises(TypeError):
model = keras.models.Sequential()
model.add(None)
# Added layers cannot have multiple outputs
class MyLayer(keras.layers.Layer):
def call(self, inputs):
return [3 * inputs, 2 * inputs]
def compute_output_shape(self, input_shape):
return [input_shape, input_shape]
with self.assertRaises(ValueError):
model = keras.models.Sequential()
model.add(MyLayer(input_shape=(3,)))
with self.assertRaises(TypeError):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=1))
model.add(MyLayer())
@keras_parameterized.run_all_keras_modes
def test_nested_sequential_trainability(self):
input_dim = 20
num_units = 10
num_classes = 2
inner_model = keras.models.Sequential()
inner_model.add(keras.layers.Dense(num_units, input_shape=(input_dim,)))
model = keras.models.Sequential()
model.add(inner_model)
model.add(keras.layers.Dense(num_classes))
self.assertEqual(len(model.layers), 2)
self.assertEqual(len(model.trainable_weights), 4)
inner_model.trainable = False
self.assertEqual(len(model.trainable_weights), 2)
inner_model.trainable = True
self.assertEqual(len(model.trainable_weights), 4)
def test_sequential_update_disabling(self):
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.BatchNormalization(input_shape=(4,)))
assert model.updates
model.trainable = False
assert not model.updates
model.compile('sgd', 'mse')
assert not model.updates
x1 = model.predict(val_a)
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
model.trainable = True
model.compile('sgd', 'mse')
assert model.updates
model.train_on_batch(val_a, val_out)
x2 = model.predict(val_a)
assert np.abs(np.sum(x1 - x2)) > 1e-5
@keras_parameterized.run_all_keras_modes
def test_sequential_deferred_build_serialization(self):
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly())
self.assertFalse(model.built)
x = np.random.random((batch_size, input_dim))
y = np.random.random((batch_size, num_classes))
model.train_on_batch(x, y)
self.assertTrue(model.built)
config = model.get_config()
self.assertIn('build_input_shape', config)
new_model = keras.models.Sequential.from_config(config)
self.assertEqual(len(new_model.layers), 2)
self.assertEqual(len(new_model.weights), 4)
@keras_parameterized.run_all_keras_modes
def test_sequential_shape_inference_deferred(self):
model = testing_utils.get_small_sequential_mlp(4, 5)
output_shape = model.compute_output_shape((None, 7))
self.assertEqual(tuple(output_shape.as_list()), (None, 5))
@keras_parameterized.run_all_keras_modes
def test_sequential_build_deferred(self):
model = testing_utils.get_small_sequential_mlp(4, 5)
model.build((None, 10))
self.assertTrue(model.built)
self.assertEqual(len(model.weights), 4)
# Test with nested model
model = testing_utils.get_small_sequential_mlp(4, 3)
inner_model = testing_utils.get_small_sequential_mlp(4, 5)
model.add(inner_model)
model.build((None, 10))
self.assertTrue(model.built)
self.assertEqual(len(model.weights), 8)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_sequential_deferred_manual_build(self):
model = testing_utils.get_small_sequential_mlp(4, 5)
self.assertFalse(model.built)
model(array_ops.zeros([1, 2]))
self.assertTrue(model.built)
self.assertEqual(len(model.outputs), 0)
model.compile('rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
self.assertEqual(len(model.outputs), 0)
model.train_on_batch(np.zeros((1, 2)), np.zeros((1, 5)))
self.assertEqual(len(model.outputs), 1)
@keras_parameterized.run_all_keras_modes
def test_sequential_nesting(self):
model = testing_utils.get_small_sequential_mlp(4, 3)
inner_model = testing_utils.get_small_sequential_mlp(4, 5)
model.add(inner_model)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
x = np.random.random((2, 6))
y = np.random.random((2, 5))
model.fit(x, y, epochs=1)
@keras_parameterized.run_all_keras_modes
def test_variable_names(self):
model = keras.models.Sequential([keras.layers.Dense(3)])
model.add(keras.layers.Dense(2))
model(array_ops.ones([2, 4]))
self.assertEqual(
['sequential/dense/kernel:0', 'sequential/dense/bias:0',
'sequential/dense_1/kernel:0', 'sequential/dense_1/bias:0'],
[v.name for v in model.variables])
@keras_parameterized.run_all_keras_modes
def test_input_assumptions_propagation(self):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1))
if context.executing_eagerly():
with self.assertRaisesRegexp(ValueError,
'expected min_ndim=2, found ndim=0'):
model(1.0)
class TestSequentialEagerIntegration(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_defun_on_call(self):
# Check that one can subclass Sequential and place the `call` in a `defun`.
class MySequential(keras.Sequential):
def __init__(self, name=None):
super(MySequential, self).__init__(name=name)
self.call = function.defun(self.call)
model = MySequential()
model.add(keras.layers.Dense(4, activation='relu'))
model.add(keras.layers.Dense(5, activation='softmax'))
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
x = np.random.random((2, 6))
y = np.random.random((2, 5))
model.fit(x, y, epochs=1)
@keras_parameterized.run_all_keras_modes
def test_build_before_fit(self):
# Fix for b/112433577
model = testing_utils.get_small_sequential_mlp(4, 5)
model.compile(
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
model.build((None, 6))
x = np.random.random((2, 6))
y = np.random.random((2, 5))
model.fit(x, y, epochs=1)
@keras_parameterized.run_all_keras_modes
def test_sequential_model_fails_with_dict_inputs(self):
num_classes = 5
model = testing_utils.get_small_sequential_mlp(
num_hidden=10, num_classes=num_classes)
model.compile(
'rmsprop',
metrics=['acc'],
weighted_metrics=['mae'],
loss='categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
x = {'dense_input': np.random.random((10, 1))}
y = np.random.randint(num_classes, size=(10, 1))
with self.assertRaisesRegexp(
ValueError, 'Passing a dictionary input to a Sequential Model which '
'doesn\'t have FeatureLayer as the first layer is an error'):
model.fit(x, y, batch_size=5, epochs=1)
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
64518c69af49d5bd076e7957e75967dd8de06063 | ded10c2f2f5f91c44ec950237a59225e8486abd8 | /.history/2/matrix_squaring_20200420015947.py | a91ddf0d68123108ba4379dfff7a7cce9614810c | [] | no_license | jearistiz/Statistical-Physics-Projects | 276a86407b32ded4e06b32efb2fadbd8eff8daed | d9c5b16a50856e148dc8604d92b6de3ea21fc552 | refs/heads/master | 2022-11-05T03:41:23.623050 | 2020-06-28T06:36:05 | 2020-06-28T06:36:05 | 254,909,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,593 | py | # -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
# Author: Juan Esteban Aristizabal-Zuluaga
# date: 20200414
def rho_free(x,xp,beta):
"""Uso: devuelve elemento de matriz dsnsidad para el caso de una partícula libre en un toro infinito."""
return (2.*np.pi*beta)**(-0.5) * np.exp(-(x-xp)**2 / (2 * beta) )
def harmonic_potential(x):
"""Devuelve valor del potencial armónico para una posición x dada"""
return 0.5*x**2
def anharmonic_potential(x):
"""Devuelve valor de potencial anarmónico para una posición x dada"""
# return np.abs(x)*(1+np.cos(x)) #el resultado de este potencial es interesante
return 0.5*x**2 - x**3 + x**4
def QHO_canonical_ensemble(x,beta):
"""
    Usage: computes the theoretical quantum probability of finding the harmonic
    oscillator (immersed in a thermal bath at inverse temperature beta) at position x.
    Args:
        x: float -> position
        beta: float -> inverse temperature in reduced units, beta = 1/T.
    Returns:
        theoretical quantum probability at position x for inverse temperature beta.
"""
return (np.tanh(beta/2.)/np.pi)**0.5 * np.exp(- x**2 * np.tanh(beta/2.))
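# Quick sanity sketch (illustrative only): pi(x;beta) above is normalized, e.g.
#   x = np.linspace(-10, 10, 10001)
#   np.trapz(QHO_canonical_ensemble(x, 2.0), x)   # ~ 1.0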
def rho_trotter(x_max = 5., nx = 101, beta=1, potential=harmonic_potential):
"""
    Usage: returns the density matrix in the high-temperature Trotter approximation
    under the influence of the potential "potential".
    Args:
        x_max: float -> the x values lie in the interval (-x_max,x_max).
        nx: int -> number of x values considered (equally spaced).
        beta: float -> inverse temperature in reduced units.
        potential: func -> interaction potential. Must be a function of x.
    Returns:
        rho: numpy array, shape=(nx,nx) -> density matrix in the high-temperature
                                           Trotter approximation for the given potential.
        grid_x: numpy array, shape=(nx,) -> values of x at which rho is evaluated.
        dx: float -> spacing between consecutive grid_x values
"""
    # Position discretization step given the input x_max and nx
dx = 2. * x_max / (nx - 1)
    # List of x values given the discretization and x_max
grid_x = np.array([i*dx for i in range(-int((nx-1)/2), int(nx/2 + 1))])
    # Build the density matrix in the Trotter approximation: rho(x,xp;beta) = rho_free(x,xp;beta) * exp(-0.5*beta*(V(x)+V(xp)))
rho = np.array([ [ rho_free(x , xp, beta) * np.exp(-0.5*beta*(potential(x)+potential(xp))) for x in grid_x] for xp in grid_x])
return rho, grid_x, dx
def density_matrix_squaring(rho, grid_x, N_iter = 1, beta_ini = 1, print_steps=True):
"""
    Usage: returns the density matrix after applying the matrix squaring algorithm
           N_iter times. The first iteration uses the density matrix given by the
           input rho (at inverse temperature beta_ini); subsequent iterations use
           the density matrix generated by the immediately preceding iteration.
           The system associated with the resulting density matrix (after the
           algorithm finishes) is at inverse temperature
           beta_fin = beta_ini * 2**(N_iter).
    Args:
        rho: numpy array, shape=(nx,nx) -> density matrix discretized at the values
                                           given by grid_x.
        grid_x: numpy array, shape=(nx,) -> values of x at which rho is evaluated.
        N_iter: int -> number of iterations of the algorithm.
        beta_ini: float -> inverse temperature associated with the density
                           matrix rho given as input.
        print_steps: bool -> whether to print the beta values at each
                             iteration.
    Returns:
        rho: numpy array, shape=(nx,nx) -> density matrix of the state at inverse
                                           temperature beta_fin.
        trace_rho: float -> trace of the density matrix at inverse temperature
                            beta_fin. With the definition of rho used here, this
                            is equivalent to the partition function at that
                            temperature.
        beta_fin: float -> inverse temperature of the system associated with rho.
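    Example:
        Illustrative: with beta_ini = 0.25 and N_iter = 4, each squaring doubles
        the inverse temperature, so beta_fin = 0.25 * 2**4 = 4.0.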
"""
    # Discretization step of the positions
dx = grid_x[1] - grid_x[0]
    # Compute beta_fin from the beta_ini and N_iter given as input
beta_fin = beta_ini * 2 ** N_iter
    # Print relevant information
print('\nbeta_ini = %.3f'%beta_ini,
'\n----------------------------------------------------------------')
    # Iterate the matrix squaring algorithm
for i in range(N_iter):
rho = dx * np.dot(rho,rho)
        # Print relevant information
if print_steps==True:
            print(u'Iteration %d) 2^%d * beta_ini --> 2^%d * beta_ini'%(i, i, i+1))
    # Compute the trace of rho
trace_rho = np.trace(rho)*dx
return rho, trace_rho, beta_fin
def save_pi_x_csv(grid_x, x_weights, file_name, relevant_info, print_data=True):
"""
    Usage: saves the probability distribution pi(x;beta) to a .csv file
    Args:
        grid_x: numpy array, shape=(nx,) -> values of x at which pi(x;beta) is evaluated.
        x_weights: numpy array, shape=(nx,) -> values of pi(x;beta) for each x in grid_x
        file_name: str -> name of the file in which the data will be saved.
        relevant_info: list of str -> information added as comments in the first
                                      lines of the file. Each element of this
                                      list is added as a new line.
        print_data: bool -> whether to print the saved data to the screen.
    Returns:
        pi_x_data: pd.DataFrame -> values of pi(x;beta) for x in grid_x in
                                   "pandas" format.
"""
    # Store the probability data in a dictionary: grid_x for positions and x_weights
    # for probability density values.
pi_x_data = {'position_x': grid_x,
'prob_density': x_weights}
    # Convert the data to a pandas DataFrame.
pi_x_data = pd.DataFrame(data=pi_x_data)
    # Create the .csv file and add the relevant comments given as input
with open(file_name,mode='w') as rho_csv:
for info in list(relevant_info):
rho_csv.write('# '+info+'\n')
rho_csv.close()
    # Use pandas to write the file in csv format.
with open(file_name,mode='a') as rho_csv:
pi_x_data.to_csv(rho_csv)
rho_csv.close()
    # Print position and probability data to the screen.
if print_data==True:
print(pi_x_data)
return pi_x_data
def run_pi_x_sq_trotter(x_max=5., nx=201, N_iter=7, beta_fin=4, potential=harmonic_potential,
potential_string = 'harmonic_potential', print_steps=True,
save_data=True, plot=True, save_plot=True, show_plot=True):
"""
    Usage: runs the matrix squaring algorithm iteratively (N_iter times). The first
           iteration uses a density matrix in the Trotter approximation at inverse
           temperature beta_ini = beta_fin * 2**(-N_iter) for the potential given
           by "potential"; subsequent iterations use the density matrix generated
           by the immediately preceding iteration. This function also saves
           pi(x;beta) vs. x data to a text file and plots pi(x;beta), comparing it
           with the theory for the quantum harmonic oscillator.
    Args:
        x_max: float -> the x values lie in the interval (-x_max,x_max).
        nx: int -> number of x values considered.
        N_iter: int -> number of iterations of the matrix squaring algorithm.
        beta_fin: float -> inverse temperature we want at the end of applying the
                           matrix squaring algorithm iteratively.
        potential: func -> interaction potential used in the Trotter approximation.
                           Must be a function of x.
        potential_string: str -> name of the potential (used to name the files
                                 that are generated).
        print_steps: bool -> whether to print the matrix squaring steps.
        save_data: bool -> whether to save the data to a .csv file.
        plot: bool -> whether to plot.
        save_plot: bool -> whether to save the figure.
        show_plot: bool -> whether to show the figure on screen.
    Returns:
        rho: numpy array, shape=(nx,nx) -> density matrix of the state at inverse
                                           temperature beta_fin.
        trace_rho: float -> trace of the density matrix at inverse temperature
                            beta_fin. With the definition of "rho" used here, this
                            is equivalent to the partition function at that
                            temperature.
        grid_x: numpy array, shape=(nx,) -> values of x at which rho is evaluated.
"""
    # Compute beta_ini from the beta_fin and N_iter given as input
beta_ini = beta_fin * 2**(-N_iter)
    # Compute rho in the Trotter approximation
rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
    # Approximate rho by iterating matrix squaring N_iter times.
rho, trace_rho, beta_fin_2 = density_matrix_squaring(rho, grid_x, N_iter,
beta_ini, print_steps)
print('----------------------------------------------------------------\n' + \
u'beta_fin = %.3f Z(beta_fin) = Tr(rho(beta_fin)) = %.3E \n'%(beta_fin_2,trace_rho))
    # Normalize rho to 1 and compute probability densities at the grid_x values.
rho_normalized = np.copy(rho)/trace_rho
x_weights = np.diag(rho_normalized)
    # Save data to a .csv file.
    script_dir = os.path.dirname(os.path.abspath(__file__)) # full path for this script
if save_data==True:
        # Name of the .csv file in which the pi(x;beta_fin) values are stored.
file_name = script_dir+u'/pi_x-ms-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.csv'\
%(potential_string,x_max,nx,N_iter,beta_fin)
        # Relevant information to add as comments to the csv file.
relevant_info = [ 'pi(x;beta_fin) computed using matrix squaring algorithm and' + \
' Trotter approximation. Parameters:',
u'%s x_max = %.3f nx = %d '%(potential_string,x_max,nx) + \
u'N_iter = %d beta_ini = %.3f '%(N_iter,beta_ini,) + \
u'beta_fin = %.3f'%beta_fin ]
        # Save the pi(x;beta_fin) values to the csv file.
pi_x_data = save_pi_x_csv(grid_x, x_weights, file_name, relevant_info, print_data=0)
    # Plot and compare with theory
if plot == True:
plt.figure(figsize=(8,5))
        plt.plot(grid_x, x_weights, label = 'Matrix squaring +\nTrotter formula.\n$N=%d$ iterations\n$dx=%.3E$'%(N_iter,dx))
        plt.plot(grid_x, QHO_canonical_ensemble(grid_x,beta_fin), label=u'QHO theoretical value')
plt.xlabel(u'x')
plt.ylabel(u'$\pi^{(Q)}(x;\\beta)$')
plt.legend(loc='best',title=u'$\\beta=%.2f$'%beta_fin)
plt.tight_layout()
if save_plot==True:
plot_name = script_dir+u'/pi_x-ms-plot-%s-x_max_%.3f-nx_%d-N_iter_%d-beta_fin_%.3f.eps'\
%(potential_string,x_max,nx,N_iter,beta_fin)
plt.savefig(plot_name)
if show_plot==True:
plt.show()
plt.close()
return rho, trace_rho, grid_x
# Enlarge the font size in generated figures
plt.rcParams.update({'font.size':15})
# Run the algorithm
# rho, trace_rho, grid_x = run_pi_x_sq_trotter( potential = harmonic_potential,
# potential_string = 'harmonic_potential',
# save_data=True, save_plot=True, show_plot=True)
# Internal energy calculation
avg_energy = False
if avg_energy:
beta_array = np.linspace(10,2,300)
Z = []
for beta_fin in beta_array:
rho, trace_rho, grid_x = run_pi_x_sq_trotter(x_max=7, nx=301, beta_fin = beta_fin, potential = harmonic_potential,
potential_string = 'harmonic_potential', print_steps=False,
save_data=False, save_plot=False, show_plot=False)
Z.append(trace_rho)
Z_data = {'beta':beta_array.copy(),'temperature':1./beta_array.copy(),'Z':Z}
Z_data = pd.DataFrame(Z_data)
    script_dir = os.path.dirname(os.path.abspath(__file__)) # full path for this script
    Z_file_name = script_dir+'/'+'partition_function.csv'
Z_data.to_csv(Z_file_name)
# READ DATA IS OK
Z_file_read = pd.read_csv(Z_file_name, index_col=0, comment='#')
beta_read = Z_file_read['beta']
beta_read = beta_read.to_numpy()
temp_read = Z_file_read['temperature']
temp_read = temp_read.to_numpy()
Z_read = Z_file_read['Z']
Z_read = Z_read.to_numpy()
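    # <E> = -d(ln Z)/d(beta), evaluated numerically on the beta grid (the same
    # formula is applied to the analytic Z below for the theory curve)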
E_avg = np.gradient(-np.log(Z_read),beta_read)
def Z_QHO(beta):
return (2*np.sinh(beta/2))**-1
E_avg_theo = np.gradient(-np.log(Z_QHO(beta_read)),beta_read)
plt.figure()
plt.plot(temp_read,E_avg,'o-',label=u'$< E > Path Integral$')
plt.plot(temp_read,E_avg_theo,'^-',label=u'$< E > theory$')
plt.plot(temp_read,Z_read,'v-',label=u'$ Z(T) $')
plt.legend(loc='best')
plt.xlabel(u'$T$')
plt.ylabel(u'$< E >$ or $Z(T)$')
plt.show()
plt.close() | [
"[email protected]"
] | |
8ed2c23107f1e3c65b51da65f817da0e32039f3b | 6b37deabac3116e65bc869035cf8cfa50f22590c | /past/past3/c_geometric_progression/main.py | 5f19e1e8cdefd25049f0dda8767837bd2e333831 | [] | no_license | hiromichinomata/atcoder | 92122a2a2a8b9327f4c8dc0e40889e8dc0321079 | 82216622d9040e95239b4a21e973cb12e59d7f6e | refs/heads/master | 2022-10-05T04:00:44.509719 | 2022-08-14T04:46:49 | 2022-08-14T04:46:49 | 176,891,471 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | #!/bin/python3
# pypy3
import sys
input = sys.stdin.readline
def main():
a, r, n = list(map(int, input().strip().split()))
v = a
LIMIT = 10**9
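    # multiply step by step and stop as soon as the running value exceeds the
    # limit: r**(n-1) can be astronomically large, so computing it directly
    # would be wasteful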
for _ in range(n-1):
v *= r
if v > LIMIT:
print('large')
sys.exit()
print(v)
main()
| [
"[email protected]"
] | |
e67f148f30ae95e4a342a9e23a9135c865b157f3 | 786de89be635eb21295070a6a3452f3a7fe6712c | /ParCorAna/tags/V00-00-04/src/CommSystem.py | 3e65eb32f29eed038a819a1285e4659e4b0362e0 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,380 | py | ### system
from mpi4py import MPI
import os
import numpy as np
import time
import StringIO
import traceback
import copy
import collections
import logging
## this package
import CommSystemUtil
import PsanaUtil
from MessageBuffers import SM_MsgBuffer, MVW_MsgBuffer
import Timing
from XCorrBase import XCorrBase
import Counter120hz
########## for Timing.timecall
VIEWEREVT = 'update'
CSPADEVT = 'cspadevt'
timingorder = []  # list of names in order inserted
timingdict = {}
###########
def roundRobin(n, dictData):
'''returns list from dict by round robin over keys
Args:
n (int): length of list to return
dictData (dict): values are lists to select return items from
Returns:
list: n items from values of dictData
Examples:
>>> roundRobin(5, {'keyA':[1,2,3], 'keyB':[10,20,30]})
[1,10,2,20,3]
'''
if n==0:
return []
keys = dictData.keys()
keys.sort()
    assert len(keys)>0, "roundRobin will fail to find n=%d items from empty dict" % n
nextVal = dict([(ky,0) for ky in keys])
results = []
keyIndex = 0
keysWithAllValuesUsed = set()
while len(results)<n:
ky = keys[keyIndex]
if nextVal[ky] < len(dictData[ky]):
results.append(dictData[ky][nextVal[ky]])
nextVal[ky] += 1
else:
keysWithAllValuesUsed.add(ky)
if len(keysWithAllValuesUsed)==len(keys):
break
keyIndex += 1
keyIndex %= len(keys)
if len(results)!=n:
raise Exception("roundRobin did not get n=%d values from dictData=%r" % (n, dictData))
return results
def identifyServerRanks(comm, numServers, serverHosts=None):
'''returns ranks to be the servers, puts servers on distinct hosts if possible.
Server ranks will be picked in a round robin fashion among the distinct hosts
in the MPI world.
Args:
        comm (MPI.Comm): communicator from which MPI world hostnames are identified.
*IMPORTANT* This function may do blocking collective communication
with comm. All ranks in comm should call this during initialization.
numServers (int): number of servers to find
serverHosts (list, optional): None or empty means use default host assignment. Otherwise
list must be a set of unique hostnames.
Returns:
(tuple): tuple containing:
            * serverRanks (list)- a list of ints, the ranks in comm to use as servers
* hostmsg (str)- a logging string about the hosts chosen
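    Example:
        Illustrative sketch (hostnames and the 6-rank layout are made up): if
        ranks 0-2 run on host 'hostA' and ranks 3-5 on host 'hostB', then
        identifyServerRanks(comm, 2) round-robins over the sorted hostnames
        and returns [0, 3] as the server ranks.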
'''
assert comm.Get_size() >= numServers, "More servers requested than are in the MPI World. numServers=%d > MPI World Size=%d" % \
(numServers, comm.Get_size())
if serverHosts is None:
serverHosts = []
## identify host -> rank map through collective MPI communication
localHostName = MPI.Get_processor_name()
allHostNames = []
allHostNames = comm.allgather(localHostName, None)
assert len(allHostNames) == comm.Get_size(), 'allgather failed - did not get one host per rank'
serverHost2ranks = collections.defaultdict(list)
for ii, hostName in enumerate(allHostNames):
serverHost2ranks[hostName].append(ii)
for host in serverHosts:
assert host in serverHost2ranks.keys(), "specified host: %s not in MPI world hosts: %r" % (host, serverHost2ranks.keys())
if len(serverHosts) == 0:
ranksForRoundRobin = serverHost2ranks
else:
ranksForRoundRobin = dict()
for host in serverHosts:
ranksForRoundRobin[host]=serverHost2ranks[host]
serverRanks = roundRobin(numServers, ranksForRoundRobin)
    hostmsg = 'server host assignment: '
    rank2host = collections.defaultdict(list)
    for host, rankList in serverHost2ranks.iteritems():
        for rank in rankList:
            rank2host[rank].append(host)
    hostmsg += ", ".join(["rnk=%d->host=%s" % (rank, rank2host[rank]) for rank in serverRanks])
return serverRanks, hostmsg
class MPI_Communicators:
'''Keeps track of the different communicators for collective
communications. Includes many parameters that identify the ranks
and the communicators they are in.
    Call identifyCommSubsystems to return an initialized instance, or
    getTestingMPIObject() for the non-MPI test mode.
Then call the method setMask.
'''
def __init__(self):
pass
def setLogger(self, verbosity):
self.logger = CommSystemUtil.makeLogger(self.testMode, self.isMaster, \
self.isViewer, self.isServer, self.rank, verbosity)
def notWorker2toN(self):
if self.isWorker and not self.isFirstWorker:
return False
return True
# these functions make it simpler to exclude logging from all the workers.
# all workers generally do the same thing. If they all logged it gets noisy.
def logInfo(self, msg, allWorkers=False):
        '''By default, among the workers only the first worker logs this message; non-worker ranks always log.
'''
if allWorkers or self.notWorker2toN():
self.logger.info(msg)
def logWarning(self, msg, allWorkers=False):
        '''By default, among the workers only the first worker logs this message; non-worker ranks always log.
'''
if allWorkers or self.notWorker2toN():
self.logger.warning(msg)
def logDebug(self, msg, allWorkers=False):
        '''By default, among the workers only the first worker logs this message; non-worker ranks always log.
'''
if allWorkers or self.notWorker2toN():
self.logger.debug(msg)
def logError(self, msg, allWorkers=False):
        '''By default, among the workers only the first worker logs this message; non-worker ranks always log.
'''
if allWorkers or self.notWorker2toN():
self.logger.error(msg)
def setMask(self, maskNdarrayCoords):
'''sets scatterv parameters and stores mask
Args:
maskNdarrayCoords (numpy.ndarray): integer array, 1 for elements that should be processed.
It must have the same shape as the NDArray for the detector.
Notes:
sets the following attributes
* totalElements: number of pixels that are 1 in the mask
            * workerWorldRankToCount[rank]: number of elements that worker processes
* workerWorldRankToOffset[rank]: offset of where those elements start in
a flattened version of the ndarray
* maskNdarrayCoords: the mask as a logical True/False array
shape has not been changed
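        Example:
            Illustrative sketch (assumes 2 workers and that
            CommSystemUtil.divideAmongWorkers splits the elements as evenly as
            possible): a mask np.array([[1,0,1],[0,1,1]]) has totalElements=4,
            so each worker gets count 2, with offsets 0 and 2 into the
            flattened masked data.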
'''
mask_flat = maskNdarrayCoords.flatten()
maskValues = set(mask_flat)
assert maskValues.union(set([0,1])) == set([0,1]), "mask contains values other than 0 and 1." + \
(" mask contains %d distinct values" % len(maskValues))
assert 1 in maskValues, "The mask does not have the value 1, it is all 0. Elements marked with 1 are processed"
self.totalElements = np.sum(mask_flat)
self.maskNdarrayCoords = maskNdarrayCoords == 1
if self.logger.isEnabledFor(logging.DEBUG):
self.logDebug("MPIParams.setMask: loaded and stored mask with shape=%s elements included=%d excluded=%s" % \
(self.maskNdarrayCoords.shape, np.sum(self.maskNdarrayCoords),
np.sum(0==self.maskNdarrayCoords)))
workerOffsets, workerCounts = CommSystemUtil.divideAmongWorkers(self.totalElements,
self.numWorkers)
assert self.numWorkers == len(self.workerRanks)
assert len(workerCounts)==self.numWorkers
self.workerWorldRankToCount = {}
self.workerWorldRankToOffset = {}
for workerRank, workerOffset, workerCount in zip(self.workerRanks,
workerOffsets,
workerCounts):
self.workerWorldRankToCount[workerRank] = workerCount
self.workerWorldRankToOffset[workerRank] = workerOffset
for serverRank in self.serverRanks:
serverCommDict = self.serverWorkers[serverRank]
serverRankInComm = serverCommDict['serverRankInComm']
scatterCounts = copy.copy(workerCounts)
serverCount = 0
scatterCounts.insert(serverRankInComm, serverCount)
scatterOffsets = copy.copy(workerOffsets)
# the value we use for the server offset is not important, but checkCountsOffsets
# which we call below checks for offset[i+1]=offset[i]+count[i]
if serverRankInComm == 0:
serverOffset = 0
else:
serverOffset = workerOffsets[serverRankInComm-1]+workerCounts[serverRankInComm-1]
scatterOffsets.insert(serverRankInComm, serverOffset)
self.serverWorkers[serverRank]['groupScattervCounts'] = tuple(scatterCounts)
self.serverWorkers[serverRank]['groupScattervOffsets'] = tuple(scatterOffsets)
CommSystemUtil.checkCountsOffsets(scatterCounts, scatterOffsets, self.totalElements)
def getTestingMPIObject():
    '''Mocks up an MPI_Communicators object for test_alt mode.
    Simulates an MPI_Communicators in which a single rank plays the server,
    worker, and viewer roles at once.
'''
mp = MPI_Communicators()
mp.isServer = True
mp.isWorker = True
mp.isFirstWorker = True
mp.isViewer = True
mp.isMaster = False
mp.testMode = True
mp.rank = MPI.COMM_WORLD.rank
assert mp.rank == 0, "test MPI object is for non-MPI environment, but MPI world rank != 0"
mp.workerRanks = [mp.rank]
mp.numWorkers = 1
mp.serverRanks = [0]
mp.serverWorkers = {}
mp.serverWorkers[0]={'serverRankInComm':0}
mp.viewerRankInViewerWorkersComm = 0
return mp
def identifyCommSubsystems(serverRanks, worldComm=None):
    '''Return a fully initialized instance of an MPI_Communicators object
The object will contain the following attributes::
serverRanks - ranks that are servers in COMM_WORLD
comm - duplicate of the COMM_WORLD
rank - rank in COMM_WORLD
worldNumProcs - size of COMM_WORLD
masterRank - master rank in COMM_WORLD
viewerRank - viewer rank in COMM_WORLD
workerRanks - list of worker ranks in COMM_WORLD
firstWorkerRank - first worker rank in COMM_WORLD
numWorkers - number of workers
# these parameters identify which group this rank is
isMaster
isViewer
isServer
isFirstWorker
isWorker
        testMode = False
masterWorkersComm - intra communicator for master/workers collective communication
viewerWorkersComm - intra communicator for viewer/workers collective communication
viewerRankInViewerWorkersComm - viewer rank in the above intra-communicator
firstWorkerRankInViewerWorkersComm - first worker rank in the above intra-communicator
# the following is a dict with one key for each server rank
serverWorkers[serverRank]['comm'] - intra communicator, this server and all workers
        serverWorkers[serverRank]['serverRankInComm'] - this server's rank within the above 'comm'
serverWorkers[serverRank]['workerRanksInCommDict'] - a key for this dict is a
worker rank in the world space. The value is the rank in the 'comm' value
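    Example:
        Illustrative layout: in an 8-rank world with serverRanks=[0, 1], the
        remaining ranks are assigned in order, so masterRank=2, viewerRank=3,
        and workerRanks=[4, 5, 6, 7].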
'''
assert len(serverRanks) > 0, "need at least one server"
assert min(serverRanks) >= 0, "cannot have negative server ranks"
if worldComm is None:
worldComm = MPI.COMM_WORLD
mc = MPI_Communicators()
mc.testMode = False
mc.serverRanks = serverRanks
mc.comm = worldComm.Dup()
mc.rank = mc.comm.Get_rank()
mc.worldNumProcs = mc.comm.Get_size()
assert mc.worldNumProcs >= 4, "need at least 4 ranks for comm system (server/master/viewer/workers)"
assert mc.worldNumProcs - len(mc.serverRanks) >= 3, "With %d servers but only %d ranks in world, not enough ranks for worker/viewer/master" % \
(len(mc.serverRanks), mc.worldNumProcs)
availRanks = [rank for rank in range(mc.worldNumProcs) \
if rank not in mc.serverRanks]
    assert len(availRanks)>=3, "Too many servers for world size. " + \
("Only %d ranks left for master/viewer/workers" % len(availRanks))
mc.masterRank = min(availRanks)
availRanks.remove(mc.masterRank)
mc.viewerRank = min(availRanks)
availRanks.remove(mc.viewerRank)
mc.workerRanks = availRanks
mc.firstWorkerRank = min(mc.workerRanks)
mc.isMaster = mc.rank == mc.masterRank
mc.isViewer = mc.rank == mc.viewerRank
mc.isServer = mc.rank in mc.serverRanks
mc.isFirstWorker = mc.rank == mc.firstWorkerRank
mc.isWorker = mc.rank not in ([mc.masterRank, mc.viewerRank] + mc.serverRanks)
mc.numWorkers = len(mc.workerRanks)
worldGroup = mc.comm.Get_group()
masterWorkersGroup = worldGroup.Excl([mc.viewerRank] + mc.serverRanks)
viewerWorkersGroup = worldGroup.Excl([mc.masterRank] + mc.serverRanks)
mc.masterWorkersComm = mc.comm.Create(masterWorkersGroup) # will be an invalid group on proc with viewer
mc.viewerWorkersComm = mc.comm.Create(viewerWorkersGroup) # will be an invalid group on proc with master
mc.serverWorkers = dict()
for serverRank in mc.serverRanks:
otherServers = [rank for rank in mc.serverRanks if rank != serverRank]
serverWorkersGroup = worldGroup.Excl([mc.viewerRank, mc.masterRank]+otherServers)
serverRankInComm = MPI.Group.Translate_ranks(worldGroup, [serverRank],
serverWorkersGroup)[0]
workerRanksInComm = MPI.Group.Translate_ranks(worldGroup, mc.workerRanks,
serverWorkersGroup)
workerRanksInCommDict = dict(zip(mc.workerRanks,workerRanksInComm))
serverWorkersComm = mc.comm.Create(serverWorkersGroup)
mc.serverWorkers[serverRank]={'comm':serverWorkersComm,
'serverRankInComm':serverRankInComm,
'workerRanksInCommDict':workerRanksInCommDict,
}
tmp1,tmp2 = MPI.Group.Translate_ranks(worldGroup, [mc.firstWorkerRank, mc.viewerRank],
viewerWorkersGroup)
mc.firstWorkerRankInViewerWorkersComm,mc.viewerRankInViewerWorkersComm = tmp1,tmp2
tmp1,tmp2 = MPI.Group.Translate_ranks(worldGroup, [mc.firstWorkerRank, mc.masterRank],
masterWorkersGroup)
mc.firstWorkerRankInMasterWorkersComm, mc.masterRankInMasterWorkersComm = tmp1,tmp2
return mc
class RunServer(object):
    '''Runs the server rank.
    This class carries out the server side of the communication in this package.
    It does the following
    ::
      * iterates through the data in the Python generator provided by dataIter
      * for each datum from the generator:
      ** sends the event id (sec/nsec/fiducials) to the master
      ** gets OkForWorkers or Abort from the master
      ** upon OkForWorkers calls the .sendToWorkers(datum) method of dataIter
    The dataIter handles details such as scattering detector data to the workers.
    Args:
      dataIter: instance of a callback class. Must provide these methods:
                .dataGenerator() a Python generator. Each returned object
                   must have an eventId() method returning (sec, nsec, fiducials)
                .sendToWorkers(datum) receives a datum returned by dataGenerator.
                   user can now send data to workers
                   workers will know the upcoming event id already
                .abortFromMaster() called to clean up if the master aborts the run
comm: MPI intra-communicator for server ranks and master rank.
rank: rank of this server
masterRank: rank of master
logger: Python logging logger
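    Example:
      A minimal dataIter sketch (illustrative only; the in-memory event list is a
      hypothetical stand-in for a real psana-backed iterator):
      >>> class MinimalDataIter(object):
      ...     def __init__(self, events):
      ...         self.events = events          # each event implements eventId()
      ...     def dataGenerator(self):
      ...         for evt in self.events:
      ...             yield evt
      ...     def sendToWorkers(self, datum):
      ...         pass  # e.g. Scatterv the detector array to the workers
      ...     def abortFromMaster(self):
      ...         pass  # clean up any partially sent data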
'''
def __init__(self,dataIter, comm, rank, masterRank, logger):
self.dataIter = dataIter
self.comm = comm
self.rank = rank
self.masterRank = masterRank
self.logger = logger
def recordTimeToGetData(self, startTime, endTime):
global timingdict
global timingorder
if startTime is None: return
key = 'ServerTimeToGetData'
if key not in timingdict:
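            # assumed entry layout (to match the Timing module's bookkeeping):
            # [accumulated seconds, call count, per-unit label, display scale, display unit]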
timingdict[key]=[0.0, 0, 'event', 1e-3, 'ms']
timingorder.append(key)
timingdict[key][0] += endTime-startTime
timingdict[key][1] += 1
def run(self):
sendEventReadyBuffer = SM_MsgBuffer(rank=self.rank)
sendEventReadyBuffer.setEvt()
receiveOkForWorkersBuffer = SM_MsgBuffer(rank=self.rank)
abortFromMaster = False
dataGen = self.dataIter.dataGenerator()
t0 = None
for datum in dataGen:
self.recordTimeToGetData(startTime=t0, endTime=time.time())
sec, nsec, fiducials = datum.eventId()
sendEventReadyBuffer.setEventId(sec, nsec, fiducials)
if self.logger.isEnabledFor(logging.DEBUG):
debugMsg = "CommSystem.run: server has data to scatter."
debugMsg += " Event Id: sec=%d nsec=%d fid=0x%8.8X." % (sec, nsec, fiducials)
debugMsg += " Before Send EVT"
self.logger.debug(debugMsg)
self.comm.Send([sendEventReadyBuffer.getNumpyBuffer(),
sendEventReadyBuffer.getMPIType()],
dest=self.masterRank)
self.logger.debug("CommSystem.run: After Send, before Recv")
self.comm.Recv([receiveOkForWorkersBuffer.getNumpyBuffer(),
receiveOkForWorkersBuffer.getMPIType()],
source=self.masterRank)
if receiveOkForWorkersBuffer.isSendToWorkers():
self.logger.debug("CommSystem.run: After Recv. is Send to workers")
self.dataIter.sendToWorkers(datum)
elif receiveOkForWorkersBuffer.isAbort():
self.logger.debug("CommSystem.run: After Recv. Abort")
abortFromMaster = True
break
else:
raise Exception("unknown msgtag from master. buffer=%r" % receiveOkForWorkersBuffer)
t0 = time.time()
if abortFromMaster:
self.dataIter.abortFromMaster()
else:
sendEventReadyBuffer.setEnd()
self.logger.debug("CommSystem.run: Before Send END")
self.comm.Send([sendEventReadyBuffer.getNumpyBuffer(),
sendEventReadyBuffer.getMPIType()],
dest=self.masterRank)
self.logger.debug("CommSystem.run: After Send END. Finished")
class RunMaster(object):
'''runs master message passing.
'''
def __init__(self, worldComm, masterRank, viewerRank, serverRanks,
masterWorkersComm, masterRankInMasterWorkersComm,
updateIntervalEvents, hostmsg, logger):
self.worldComm = worldComm
self.masterRank = masterRank
self.serverRanks = serverRanks
self.masterWorkersComm = masterWorkersComm
self.masterRankInMasterWorkersComm = masterRankInMasterWorkersComm
self.updateIntervalEvents = updateIntervalEvents
self.viewerRank = viewerRank
self.logger = logger
self.logger.info(hostmsg)
# initially all servers are not ready
self.notReadyServers = [r for r in serverRanks] # MPI Test on request is false
self.readyServers = [] # MPI Test on request is True
        self.finishedServers = []  # rank has returned END
self.sendOkForWorkersBuffer = SM_MsgBuffer()
self.bcastWorkersBuffer = MVW_MsgBuffer()
self.viewerBuffer = MVW_MsgBuffer()
self.lastUpdate = 0
self.numEvents = 0
self.eventIdToCounter = None
def getEarliest(self, serverDataList):
        '''Takes a list of server data buffers and identifies the oldest one.
        Args:
            serverDataList: list of SM_MsgBuffer instances received from the servers
        Returns:
            the buffer with the earliest event time (seconds, then nanoseconds)
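        Example:
            Illustrative: given buffers whose event ids have (sec, nsec) of
            (5, 0), (4, 9) and (4, 2), the buffer holding (4, 2) is returned.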
'''
idx = 0
sec, nsec, fiducials = serverDataList[idx].getEventId()
for curIdx in range(1,len(serverDataList)):
            curSec, curNsec, curFiducials = serverDataList[curIdx].getEventId()
if (curSec < sec) or ((curSec == sec) and (curNsec < nsec)):
sec = curSec
nsec = curNsec
fiducials = curFiducials
idx = curIdx
return serverDataList[idx]
def initRecvRequestsFromServers(self):
# create buffers for receiving, and the requests
serverReceiveData = dict()
serverRequests = dict()
self.logger.debug("CommSystem: before first Irecv from servers")
for serverRank in self.serverRanks:
serverReceiveBuffer = SM_MsgBuffer(rank=serverRank)
firstServerRequest = self.worldComm.Irecv([serverReceiveBuffer.getNumpyBuffer(),
serverReceiveBuffer.getMPIType()],
source=serverRank)
serverReceiveData[serverRank] = serverReceiveBuffer
serverRequests[serverRank] = firstServerRequest
self.logger.debug("CommSystem: after first Irecv from servers")
return serverReceiveData, serverRequests
@Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def informWorkersOfNewData(self, selectedServerRank, sec, nsec, fiducials, counter):
self.bcastWorkersBuffer.setEvt()
self.bcastWorkersBuffer.setRank(selectedServerRank)
self.bcastWorkersBuffer.setSeconds(sec)
        self.bcastWorkersBuffer.setNanoSeconds(nsec)
        self.bcastWorkersBuffer.setFiducials(fiducials)
self.bcastWorkersBuffer.setCounter(counter)
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("CommSystem: before Bcast -> workers EVT sec=%8.8d fid=0x%8.8X counter=%d" % (sec, fiducials, counter))
self.masterWorkersComm.Bcast([self.bcastWorkersBuffer.getNumpyBuffer(),
self.bcastWorkersBuffer.getMPIType()],
root=self.masterRankInMasterWorkersComm)
self.masterWorkersComm.Barrier()
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("CommSystem: after Bcast/Barrier -> workers EVT counter=%d" % counter)
@Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def informViewerOfUpdate(self, sec, nsec, fiducials, counter):
self.viewerBuffer.setUpdate()
self.viewerBuffer.setSeconds(sec)
self.viewerBuffer.setNanoSeconds(nsec)
self.viewerBuffer.setFiducials(fiducials)
self.viewerBuffer.setCounter(counter)
self.worldComm.Send([self.viewerBuffer.getNumpyBuffer(),
self.viewerBuffer.getMPIType()],
dest=self.viewerRank)
def sendEndToViewer(self):
self.viewerBuffer.setEnd()
self.worldComm.Send([self.viewerBuffer.getNumpyBuffer(),
self.viewerBuffer.getMPIType()],
dest=self.viewerRank)
    @Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def informWorkersToUpdateViewer(self):
self.logger.debug("CommSystem: before Bcast -> workers UPDATE")
self.bcastWorkersBuffer.setUpdate()
self.masterWorkersComm.Bcast([self.bcastWorkersBuffer.getNumpyBuffer(),
self.bcastWorkersBuffer.getMPIType()],
root=self.masterRankInMasterWorkersComm)
self.masterWorkersComm.Barrier()
self.logger.debug("CommSystem: after Bcast/Barrier -> workers UPDATE")
def sendEndToWorkers(self):
self.bcastWorkersBuffer.setEnd()
self.logger.debug("CommSystem: before Bcast -> workers END")
self.masterWorkersComm.Bcast([self.bcastWorkersBuffer.getNumpyBuffer(),
self.bcastWorkersBuffer.getMPIType()],
root=self.masterRankInMasterWorkersComm)
self.masterWorkersComm.Barrier()
self.logger.debug("CommSystem: after Bcast/Barrier -> workers END")
def run(self):
########## begin helper functions ########
def waitOnServer(self):
            '''Called during the communication loop when there are no ready servers.
            Waits for at least one outstanding request via Waitany, then tests all
            not-ready servers and identifies any finished servers among the done ones.
            At the end of this function,
            self.notReadyServers + self.finishedServers + self.readyServers
            will contain the same servers as on entry, and at least one of the
            notReadyServers will have moved into the finishedServers or
            readyServers group.
'''
assert len(self.notReadyServers)>0, "waitOnServer called, but no not-ready servers"
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("CommSystem: before waitany. notReadyServers=%s" % self.notReadyServers)
requestList = [serverRequests[rnk] for rnk in self.notReadyServers]
idx=MPI.Request.Waitany(requestList)
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("CommSystem: after waitany. server %d is now ready" % self.notReadyServers[idx])
newReadyServers = [server for server in self.notReadyServers \
if serverRequests[server].Test()]
newFinishedServers = [server for server in newReadyServers \
if serverReceiveData[server].isEnd()]
self.finishedServers.extend(newFinishedServers)
for server in newFinishedServers:
                # take finished servers out of the pool that we wait on for requests
self.notReadyServers.remove(server)
# take finished servers out of pool to get next event from
newReadyServers.remove(server)
for server in newReadyServers:
self.notReadyServers.remove(server)
self.readyServers.extend(newReadyServers)
############ end helper functions ######
serverReceiveData, serverRequests = self.initRecvRequestsFromServers()
numEventsAtLastDataRateMsg = 0
timeAtLastDataRateMsg = time.time()
startTime = time.time()
while True:
            # a server must be in exactly one of: ready, notReady, or finished
serversAccountedFor = len(self.readyServers) + len(self.finishedServers) + \
len(self.notReadyServers)
assert serversAccountedFor == len(self.serverRanks), \
"loop invariant broken? #servers=%d != #accountedfor=%d" % \
                (len(self.serverRanks), serversAccountedFor)
if len(self.finishedServers)==len(self.serverRanks):
break
if len(self.readyServers)==0:
waitOnServer(self)
if len(self.readyServers)==0:
# the server we waited on was finished.
continue
earlyServerData = self.getEarliest([serverReceiveData[server] for server in self.readyServers])
selectedServerRank = earlyServerData.getRank()
sec, nsec, fiducials = earlyServerData.getEventId()
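            # lazily seed the 120hz counter with the first event id seen; from then
            # on it maps (sec, fiducials) to a monotonically increasing event counter
            # relative to that first event (presumed behavior of Counter120hz)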
if self.eventIdToCounter is None:
self.eventIdToCounter = Counter120hz.Counter120hz(sec, nsec, fiducials)
counter = self.eventIdToCounter.getCounter(sec, fiducials)
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("CommSystem: next server rank=%d sec=%d nsec=%10d fiducials=0x%8.8X counter=%d" % \
(selectedServerRank, sec, nsec, fiducials, counter))
self.informWorkersOfNewData(selectedServerRank, sec, nsec, fiducials, counter)
# tell server n to scatter to workers
self.sendOkForWorkersBuffer.setSendToWorkers()
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("CommSystem: before SendOkForWorkers to server %d" % selectedServerRank)
self.worldComm.Send([self.sendOkForWorkersBuffer.getNumpyBuffer(),
self.sendOkForWorkersBuffer.getMPIType()],
dest=selectedServerRank)
            # post a new Irecv for server n
self.readyServers.remove(selectedServerRank)
self.notReadyServers.append(selectedServerRank)
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("CommSystem: after sendOk, before replacing request with Irecv from rank %d" % selectedServerRank)
serverReceiveBuffer = serverReceiveData[selectedServerRank]
serverRequests[selectedServerRank] = self.worldComm.Irecv([serverReceiveBuffer.getNumpyBuffer(),
serverReceiveBuffer.getMPIType()], \
source = selectedServerRank)
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("CommSystem: after Irecv from rank %d" % selectedServerRank)
# check to see if there should be an update for the viewer
self.numEvents += 1
if (self.updateIntervalEvents > 0) and (self.numEvents - self.lastUpdate > self.updateIntervalEvents):
self.lastUpdate = self.numEvents
self.logger.debug("CommSystem: Informing viewers and workers to update" )
self.informViewerOfUpdate(sec, nsec, fiducials, counter)
self.informWorkersToUpdateViewer()
# check to display message
eventsSinceLastDataRateMsg = self.numEvents - numEventsAtLastDataRateMsg
if eventsSinceLastDataRateMsg > 2400: # about 20 seconds of data at 120hz
curTime = time.time()
dataRateHz = eventsSinceLastDataRateMsg/(curTime-timeAtLastDataRateMsg)
self.logger.info("Current data rate is %.2f Hz. %d events processed" % (dataRateHz, self.numEvents))
timeAtLastDataRateMsg = curTime
numEventsAtLastDataRateMsg = self.numEvents
# one last datarate msg
dataRateHz = self.numEvents/(time.time()-startTime)
self.logger.info("Overall data rate is %.2f Hz. Number of events is %d" % (dataRateHz, self.numEvents))
# send one last update at the end
self.logger.debug("CommSystem: servers finished. sending one last update")
self.informViewerOfUpdate(sec, nsec, fiducials, counter)
self.informWorkersToUpdateViewer()
self.sendEndToWorkers()
self.sendEndToViewer()
class RunWorker(object):
def __init__(self, masterWorkersComm, masterRankInMasterWorkersComm,
wrapEventNumber, xCorrBase, logger):
self.masterWorkersComm = masterWorkersComm
self.masterRankInMasterWorkersComm = masterRankInMasterWorkersComm
self.wrapEventNumber = wrapEventNumber
self.xCorrBase = xCorrBase
self.logger = logger
self.msgBuffer = MVW_MsgBuffer()
self.evtNumber = 0
self.wrapped = False
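    # Each *Wrapped/*NotWrapped pair of methods below is functionally
    # identical; the duplication lets the Timing.timecall decorator keep
    # separate timing statistics for events before and after the event
    # number wraps (run() sets self.wrapped once evtNumber reaches
    # wrapEventNumber).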
@Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def workerWaitForMasterBcastWrapped(self):
self.workerWaitForMasterBcast()
@Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def workerWaitForMasterBcastNotWrapped(self):
self.workerWaitForMasterBcast()
@Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def workerWaitForMasterBcast(self):
self.masterWorkersComm.Bcast([self.msgBuffer.getNumpyBuffer(),
self.msgBuffer.getMPIType()],
root=self.masterRankInMasterWorkersComm)
self.masterWorkersComm.Barrier()
@Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def serverWorkersScatterWrapped(self, serverWorldRank):
self.xCorrBase.serverWorkersScatter(serverFullDataArray=None,
serverWorldRank = serverWorldRank)
@Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def serverWorkersScatterNotWrapped(self, serverWorldRank):
self.xCorrBase.serverWorkersScatter(serverFullDataArray=None,
serverWorldRank = serverWorldRank)
@Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def storeNewWorkerDataWrapped(self, counter):
self.xCorrBase.storeNewWorkerData(counter = counter)
@Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def storeNewWorkerDataNotWrapped(self, counter):
self.xCorrBase.storeNewWorkerData(counter = counter)
@Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def viewerWorkersUpdateWrapped(self, lastTime):
self.xCorrBase.viewerWorkersUpdate(lastTime = lastTime)
@Timing.timecall(CSPADEVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def viewerWorkersUpdateNotWrapped(self, lastTime):
self.xCorrBase.viewerWorkersUpdate(lastTime = lastTime)
def run(self):
lastTime = {'sec':0, 'nsec':0, 'fiducials':0, 'counter':0}
while True:
self.logger.debug("CommSystem.run: before Bcast from master")
if self.wrapped:
self.workerWaitForMasterBcastWrapped()
else:
self.workerWaitForMasterBcastNotWrapped()
if self.msgBuffer.isEvt():
serverWithData = self.msgBuffer.getRank()
lastTime = self.msgBuffer.getTime()
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug("CommSystem.run: after Bcast from master. EVT server=%2d counter=%d" % \
(serverWithData, lastTime['counter']))
if self.wrapped:
self.serverWorkersScatterWrapped(serverWorldRank = serverWithData)
self.storeNewWorkerDataWrapped(counter = lastTime['counter'])
else:
self.serverWorkersScatterNotWrapped(serverWorldRank = serverWithData)
self.storeNewWorkerDataNotWrapped(counter = lastTime['counter'])
elif self.msgBuffer.isUpdate():
self.logger.debug("CommSystem.run: after Bcast from master - UPDATE")
if self.wrapped:
self.viewerWorkersUpdateWrapped(lastTime = lastTime)
else:
self.viewerWorkersUpdateNotWrapped(lastTime = lastTime)
self.logger.debug("CommSystem.run: returned from viewer workers update")
elif self.msgBuffer.isEnd():
self.logger.debug("CommSystem.run: after Bcast from master - END. quiting")
break
else:
raise Exception("unknown msgtag")
self.evtNumber += 1
if self.evtNumber >= self.wrapEventNumber:
self.wrapped = True
class RunViewer(object):
def __init__(self, worldComm, masterRank, xCorrBase, logger):
self.worldComm = worldComm
self.masterRank = masterRank
self.logger = logger
self.xCorrBase = xCorrBase
self.msgbuffer = MVW_MsgBuffer()
@Timing.timecall(VIEWEREVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def waitForMasterMessage(self):
self.worldComm.Recv([self.msgbuffer.getNumpyBuffer(),
self.msgbuffer.getMPIType()],
source=self.masterRank)
@Timing.timecall(VIEWEREVT, timingDict=timingdict, timingDictInsertOrder=timingorder)
def viewerWorkersUpdate(self, lastTime):
self.xCorrBase.viewerWorkersUpdate(lastTime)
def run(self):
while True:
self.logger.debug('CommSystem.run: before Recv from master')
self.waitForMasterMessage()
if self.msgbuffer.isUpdate():
lastTime = self.msgbuffer.getTime()
if self.logger.isEnabledFor(logging.DEBUG):
                    self.logger.debug('CommSystem.run: after Recv from master. got UPDATE: counter=%d' % lastTime['counter'])
self.viewerWorkersUpdate(lastTime = lastTime)
elif self.msgbuffer.isEnd():
                self.logger.debug('CommSystem.run: after Recv from master. got END. quitting.')
break
else:
raise Exception("unknown msgtag")
self.xCorrBase.shutdown_viewer()
def runTestAlt(mp, xCorrBase):
xCorrBase.serverInit()
xCorrBase.workerInit()
xCorrBase.viewerInit()
eventIter = xCorrBase.makeEventIter()
eventIds = []
allData = []
mp.logInfo("Starting to read through data for test_alt")
for datum in eventIter.dataGenerator():
maskedData = datum.dataArray[mp.maskNdarrayCoords]
maskedData = maskedData.flatten().copy()
xCorrBase.userObj.workerAdjustData(maskedData)
eventIds.append((datum.sec, datum.nsec, datum.fiducials))
allData.append(maskedData)
mp.logInfo("read through data for test_alt")
sortedCounters, newDataOrder = PsanaUtil.getSortedCountersBasedOnSecNsecAtHertz(eventIds, 120)
if not np.all(newDataOrder==np.sort(newDataOrder)):
mp.logWarning("DAQ data did not come in sorted order.")
eventIdNumpyDtype = np.dtype([('sec',np.int32),
('nsec',np.int32),
('fiducials',np.int32),
('counter',np.int64)])
sortedData = np.zeros((len(allData),len(allData[0])), dtype=allData[0].dtype)
sortedEventIds = np.zeros(len(allData), dtype=eventIdNumpyDtype)
for idx,sortedPos in enumerate(newDataOrder):
sortedData[idx,:]=allData[sortedPos][:]
sortedEventIds[idx]['counter'] = sortedCounters[idx]
eventIdInSortOrder = eventIds[sortedPos]
sortedEventIds[idx]['sec'] = eventIdInSortOrder[0]
sortedEventIds[idx]['nsec'] = eventIdInSortOrder[1]
sortedEventIds[idx]['fiducials'] = eventIdInSortOrder[2]
testGroup = xCorrBase.h5file.create_group('test')
testGroup['detectorEventIds'] = sortedEventIds
testGroup['detectorData'] = sortedData
xCorrBase.userObj.calcAndPublishForTestAlt(sortedEventIds, sortedData, xCorrBase.h5GroupUser)
xCorrBase.shutdown_viewer()
def runCommSystem(mp, updateInterval, wrapEventNumber, xCorrBase, hostmsg, test_alt):
'''main driver for the system.
ARGS:
      mp - instance of MPI_Communicators describing this rank's role
      updateInterval (int): number of processed events between viewer updates
      wrapEventNumber (int): event number at which workers switch to the
        "wrapped" timing code path (see RunWorker)
      xCorrBase: XCorrBase instance providing the server/worker/viewer logic
      hostmsg: human readable message describing the server host assignment
      test_alt (bool): True if this is testing mode (runs runTestAlt instead)
'''
if test_alt:
runTestAlt(mp, xCorrBase)
return
logger = mp.logger
reportTiming = False
timingNode = ''
try:
if mp.isServer:
xCorrBase.serverInit()
eventIter = xCorrBase.makeEventIter()
runServer = RunServer(eventIter,
mp.comm, mp.rank, mp.masterRank, logger)
runServer.run()
reportTiming = True
timingNode = 'SERVER'
elif mp.isMaster:
runMaster = RunMaster(mp.comm, mp.masterRank, mp.viewerRank, mp.serverRanks,
mp.masterWorkersComm, mp.masterRankInMasterWorkersComm,
updateInterval, hostmsg, logger)
runMaster.run()
reportTiming = True
timingNode = 'MASTER'
elif mp.isViewer:
xCorrBase.viewerInit()
runViewer = RunViewer(mp.comm, mp.masterRank, xCorrBase, logger)
runViewer.run()
reportTiming = True
timingNode = 'VIEWER'
elif mp.isWorker:
xCorrBase.workerInit()
runWorker = RunWorker(mp.masterWorkersComm, mp.masterRankInMasterWorkersComm,
wrapEventNumber, xCorrBase, logger)
runWorker.run()
if mp.isFirstWorker:
reportTiming = True
timingNode = 'FIRST WORKER'
else:
raise Exception("rank is neither server/master/viewer or worker - internal error")
except Exception:
exceptBuffer = StringIO.StringIO()
traceback.print_exc(file=exceptBuffer)
logger.error('encountered exception: %s' % exceptBuffer.getvalue())
MPI.COMM_WORLD.Abort(1)
if reportTiming:
hdr = '--BEGIN %s TIMING--' % timingNode
footer = '--END %s TIMING--' % timingNode
Timing.reportOnTimingDict(logger,hdr, footer,
timingDict=timingdict, keyOrder=timingorder)
def isNoneOrListOfStrings(arg):
def isListOfStrings(arg):
if not isinstance(arg, list):
return False
def isStr(x): return isinstance(x,str)
return all(map(isStr,arg))
if arg is None:
return True
return isListOfStrings(arg)
class CommSystemFramework(object):
def __init__(self, system_params, user_params, test_alt=False):
numServers = int(system_params['numServers'])
dataset = system_params['dataset']
serverHosts = system_params['serverHosts']
        assert isNoneOrListOfStrings(serverHosts), "system_params['serverHosts'] is neither None nor a list of str"
self.test_alt = test_alt
if test_alt:
assert MPI.COMM_WORLD.size == 1, "In test_alt mode, do not run in MPI mode"
hostmsg = "test mode - no host assignent"
mp = getTestingMPIObject()
else:
serverRanks, hostmsg = identifyServerRanks(MPI.COMM_WORLD,
numServers,
serverHosts)
            # set MPI parameters for the framework
mp = identifyCommSubsystems(serverRanks=serverRanks, worldComm=MPI.COMM_WORLD)
self.hostmsg = hostmsg
verbosity = system_params['verbosity']
mp.setLogger(verbosity)
maskNdarrayCoords_Filename = system_params['maskNdarrayCoords']
assert os.path.exists(maskNdarrayCoords_Filename), "mask file %s not found" % maskNdarrayCoords_Filename
maskNdarrayCoords = np.load(maskNdarrayCoords_Filename)
mp.setMask(maskNdarrayCoords)
srcString = system_params['src']
numEvents = system_params['numEvents']
maxTimes = system_params['times']
assert isinstance(srcString,str), "system parameters src is not a string"
        assert isinstance(numEvents,int), "system parameters numEvents is not an int"
assert isinstance(maxTimes,int), "system parameters maxTimes is not an int"
xcorrBase = XCorrBase(mp,
dataset,
srcString,
numEvents,
maxTimes,
system_params,
user_params,
test_alt)
self.mp = mp
self.xcorrBase = xcorrBase
self.maxTimes = maxTimes
self.updateInterval = system_params['update']
def run(self):
runCommSystem(self.mp, self.updateInterval, self.maxTimes, self.xcorrBase, self.hostmsg, self.test_alt)
| [
"[email protected]@b967ad99-d558-0410-b138-e0f6c56caec7"
] | [email protected]@b967ad99-d558-0410-b138-e0f6c56caec7 |
b7ef3750a7d4fe8089d84c6855b84748143367c2 | 5fda498ef0bfc06962ad9b864d229193c45ccb4a | /Project1_Analyzing_the_NYC_Subway_Dataset/problem_sets2to5/problem_set2_wrangling_subway_data/2_9_get_hourly_exits.py | f4774640992d40f8fab48a745798f5a75ac894b5 | [] | no_license | prabhurgit/Data_Aanlyst_Nanodegree_projects | 7934869b63cae57cb2851e22a5023c6cbe3d18ba | a7a13d93c632cd1840ba3a00fff80a60a131b7f3 | refs/heads/master | 2021-05-31T18:47:48.669414 | 2016-03-30T04:08:39 | 2016-03-30T04:08:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,406 | py | import pandas
def get_hourly_exits(df):
'''
The data in the MTA Subway Turnstile data reports on the cumulative
number of entries and exits per row. Assume that you have a dataframe
called df that contains only the rows for a particular turnstile machine
(i.e., unique SCP, C/A, and UNIT). This function should change
these cumulative exit numbers to a count of exits since the last reading
(i.e., exits since the last row in the dataframe).
More specifically, you want to do two things:
1) Create a new column called EXITSn_hourly
2) Assign to the column the difference between EXITSn of the current row
and the previous row. If there is any NaN, fill/replace it with 0.
You may find the pandas functions shift() and fillna() to be helpful in this exercise.
Example dataframe below:
Unnamed: 0 C/A UNIT SCP DATEn TIMEn DESCn ENTRIESn EXITSn ENTRIESn_hourly EXITSn_hourly
0 0 A002 R051 02-00-00 05-01-11 00:00:00 REGULAR 3144312 1088151 0 0
1 1 A002 R051 02-00-00 05-01-11 04:00:00 REGULAR 3144335 1088159 23 8
2 2 A002 R051 02-00-00 05-01-11 08:00:00 REGULAR 3144353 1088177 18 18
3 3 A002 R051 02-00-00 05-01-11 12:00:00 REGULAR 3144424 1088231 71 54
4 4 A002 R051 02-00-00 05-01-11 16:00:00 REGULAR 3144594 1088275 170 44
5 5 A002 R051 02-00-00 05-01-11 20:00:00 REGULAR 3144808 1088317 214 42
6 6 A002 R051 02-00-00 05-02-11 00:00:00 REGULAR 3144895 1088328 87 11
7 7 A002 R051 02-00-00 05-02-11 04:00:00 REGULAR 3144905 1088331 10 3
8 8 A002 R051 02-00-00 05-02-11 08:00:00 REGULAR 3144941 1088420 36 89
9 9 A002 R051 02-00-00 05-02-11 12:00:00 REGULAR 3145094 1088753 153 333
'''
#your code here
df['EXITSn_hourly'] = (df['EXITSn'] - df.shift(1)['EXITSn']) #calculate hourly exits
df = df.fillna(0) #fill NA with 0
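    # Equivalent one-liner for the new column (illustrative, standard pandas):
    #     df['EXITSn_hourly'] = df['EXITSn'].diff().fillna(0)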
return df
| [
"[email protected]"
] | |
f9212d2fd1d107205017fa97f2f49b6f73cef9d8 | 545243267907eff3e2dcf79d0afcf14561d1c232 | /DTL/perforce/__init__.py | 2b94c3199d35d5c73f84de43c7e33163fb476dac | [
"MIT"
] | permissive | Python3pkg/DevToolsLib | 7f824825f090602ef9f962e1d8d0b72d50ab104f | d2c6971b9de44841bf607f5ce895c55463fe6c34 | refs/heads/master | 2021-01-21T17:29:36.640116 | 2017-05-21T12:50:48 | 2017-05-21T12:50:48 | 91,956,694 | 1 | 0 | null | 2017-05-21T12:50:41 | 2017-05-21T12:50:40 | null | UTF-8 | Python | false | false | 28 | py | from .client import P4Client | [
"[email protected]"
] | |
d0c266d3816a85e94982006addc47834bed26724 | beae392dcd51001613e5d3e226587d646d854c1f | /ML_Finance/NumpySumRowsColumns.py | 1aa0c502cc9ad2908e0d0a1bdc501c332b60bb86 | [] | no_license | ShubraChowdhury/ML | 2c7a0a51ca657dda47ceb8a19cecbcafa6cd16b0 | 1e631f2ab86bfd35e2c8790501a7effb4e0d106a | refs/heads/master | 2021-05-15T14:44:28.753164 | 2018-11-18T14:06:48 | 2018-11-18T14:06:48 | 106,418,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 26 08:39:38 2016
@author: DevAdmin
"""
import numpy as np
def test_run():
print("Random seed initializing the pseudo-random number generator.\n",
"Each time you run the random variable generator you will \n",
"get the same value untill session is closed")
np.random.seed(693)
a = np.random.randint(0,10, size=(5,4))
print("Array \n", a)
print("\n Sum of Columns use axis =0 \n", a.sum(axis=0))
print("\n Sum of Rows use axis =1 \n", a.sum(axis=1))
print("\n Mean of Columns use axis =0 \n", a.mean(axis=0))
print("\n Mean of Rows use axis =1 \n", a.mean(axis=1))
print("\n Total mean \n", a.mean())
if __name__ == "__main__":
test_run() | [
"[email protected]"
] | |
fa38ee2c202a55385c5053c08d36f4cf040a5090 | 7725cafb8259f94cd9b3e2240182eb90d0e5246f | /src/scripts/icu_transliterate.py | e83fb9ba9c8e6b66c954df8aa0eac69e56227635 | [] | no_license | rasoolims/zero-shot-mt | c95cee19b583b428941932cd93e45025a919f1d8 | 33830dc7e48fa4a06641590cfaa0f4eb52c2e314 | refs/heads/master | 2023-08-22T07:07:54.093714 | 2021-10-08T22:36:39 | 2021-10-08T22:36:39 | 364,627,830 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | import os
import sys
import icu
tl = icu.Transliterator.createInstance('Any-Latin; Latin-ASCII')
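# "Any-Latin; Latin-ASCII" is a compound ICU transform: first transliterate
# any script into Latin, then fold accents/diacritics down to plain ASCII.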
with open(os.path.abspath(sys.argv[1]), "r") as r, open(os.path.abspath(sys.argv[2]), "w") as w:
for i, line in enumerate(r):
transliteration = tl.transliterate(line.strip())
w.write(transliteration)
w.write("\n")
print(i, end="\r")
print("\n Finished!")
| [
"[email protected]"
] | |
ae148a59eca9f309030041953e95838ba788030e | 6b9b032a5516c8d7dbb26deeb1b189022f8f9411 | /LeetCode/arrary/easy/118.杨辉三角.py | 33fb9a2306f182e155161f1caca7c305cd6bc9c0 | [] | no_license | mrmenand/Py_transaction | 84db99a0010ae90f43fba6b737d7035e48af55fb | 7e82422c84ad699805cc12568b8d3d969f66a419 | refs/heads/master | 2021-07-13T21:15:03.714689 | 2020-06-21T11:49:31 | 2020-06-21T11:49:31 | 176,281,954 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | # 118.杨辉三角
class Solution:
def generate(self, numRows):
ret = [[1] * i for i in range(1,numRows+1)]
for i in range(2,numRows):
for j in range(1,i):
ret[i][j] = ret[i-1][j] + ret[i-1][j-1]
return ret
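# Example: Solution().generate(5)
# -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]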
# class Solution:
# def generate(self, numRows):
# # numRows: int) -> List[List[int]]:
# res = []
# for i in range(1,numRows+1):
# if i==1:
# res.append([1])
# else:
# temp = []
# for j in range(i):
#
# if j==0 or j==i-1:
# temp.append(1)
# else:
# temp.append(res[i-2][j-1]+res[i-2][j])
# res.append(temp)
#
# return res
# class Solution:
# def generate(self, numRows):
# """
# :type numRows: int
# :rtype: List[List[int]]
# """
# L = []
# if numRows == 0:
# return L
# for i in range(numRows):
# L.append([1])
# for j in range(1,i+1):
# if j==i:
# L[i].append(1)
# else:
# L[i].append(L[i-1][j]+L[i-1][j-1])
# return L | [
"[email protected]"
] | |
44c406f18354a8c9152bbfa3c76ca89805199ac9 | 11dbb589aa305a1f33525f6ead538330aa1ae677 | /modelling/devices/convertors.py | 309fd9568f5b7d1b962a7f030d7a8c798b74a0fd | [
"MIT"
] | permissive | riahtu/EnergyManagementSystem | e70f48d70dc59a3cf6089149ec17b4dac054439e | 2a48ba3b9bf7ff3003c197ee43ea9efbfbe42baa | refs/heads/master | 2021-09-09T14:27:20.170163 | 2018-03-17T01:51:03 | 2018-03-17T01:51:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | """
Convertor models for universal energy management system
The models include the following types of convertors.
1) AC 2 DC convertors
2) DC 2 DC convertors
"""
import configuration.configuration_convertors as default_parameters
BIC = \
{
"ID": default_parameters.BIC["AREA"], # Static information
"SMAX": default_parameters.BIC["SMAX"], # Static information
"EFF_AC2DC": default_parameters.BIC["EFF_AC2DC"], # Static information
"EFF_DC2AC": default_parameters.BIC["EFF_DC2AC"], # Static information
"STATUS": default_parameters.BIC["STATUS"], # Measurement information
"P_AC2DC":default_parameters.BIC["P_AC2DC"], # Measurement information
"P_DC2AC":default_parameters.BIC["P_DC2AC"],# Measurement information
"Q_AC":default_parameters.BIC["COMMAND_DC2AC"],# Measurement information
"TIME_GENERATED": default_parameters.BIC["TIME_GENERATED"], # Dynamic information
"TIME_APPLIED": default_parameters.BIC["TIME_APPLIED"], # Dynamic information
"TIME_COMMANDED": default_parameters.BIC["TIME_COMMANDED"], # Dynamic information
"COMMAND_AC2DC":default_parameters.BIC["COMMAND_AC2DC"], # Dynamic information
"COMMAND_DC2AC":default_parameters.BIC["COMMAND_DC2AC"], # Dynamic information
"COMMAND_Q":default_parameters.BIC["COMMAND_DC2AC"],# Dynamic information
}
| [
"[email protected]"
] | |
3f61d5add3da668acc3f4a002df63ca6c9826407 | 57cb9fef5efac78758f5d151b959ca2216c94083 | /edx/app/analytics_api/venvs/analytics_api/bin/cwutil | d16864d59606f61bb57c2e230c1973db1db7d249 | [] | no_license | JosiahKennedy/openedx-branded | 9751d5362088276a87b2e0edca0913568eeb1ac4 | d16a25b035b2e810b8ab2b0a2ac032b216562e26 | refs/heads/master | 2022-12-21T02:39:17.133147 | 2020-03-25T06:03:23 | 2020-03-25T06:03:23 | 249,895,218 | 0 | 1 | null | 2022-12-08T01:23:48 | 2020-03-25T05:33:05 | null | UTF-8 | Python | false | false | 5,083 | #!/edx/app/analytics_api/venvs/analytics_api/bin/python2.7
# Author: Chris Moyer <[email protected]>
# Description: CloudWatch Utility
# For listing stats, creating alarms, and managing
# other CloudWatch aspects
import boto
cw = boto.connect_cloudwatch()
from datetime import datetime, timedelta
def _parse_time(time_string):
    """Internal function to parse a time string
    (assumes an ISO-8601 style timestamp, e.g. 2013-02-01T12:00:00)"""
    return datetime.strptime(time_string, '%Y-%m-%dT%H:%M:%S')
def _parse_dict(d_string):
result = {}
if d_string:
for d in d_string.split(","):
d = d.split(":")
result[d[0]] = d[1]
return result
def ls(namespace=None):
"""
List metrics, optionally filtering by a specific namespace
namespace: Optional Namespace to filter on
"""
print "%-10s %-50s %s" % ("Namespace", "Metric Name", "Dimensions")
print "-"*80
for m in cw.list_metrics():
if namespace is None or namespace.upper() in m.namespace:
print "%-10s %-50s %s" % (m.namespace, m.name, m.dimensions)
def stats(namespace, metric_name, dimensions=None, statistics="Average", start_time=None, end_time=None, period=60, unit=None):
"""
Lists the statistics for a specific metric
namespace: The namespace to use, usually "AWS/EC2", "AWS/SQS", etc.
metric_name: The name of the metric to track, pulled from `ls`
dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
statistics: The statistics to measure, defaults to "Average"
'Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount'
start_time: Start time, default to now - 1 day
end_time: End time, default to now
period: Period/interval for counts, default to 60 minutes
unit: Unit to track, default depends on what metric is being tracked
"""
# Parse the dimensions
dimensions = _parse_dict(dimensions)
# Parse the times
if end_time:
end_time = _parse_time(end_time)
else:
end_time = datetime.utcnow()
if start_time:
start_time = _parse_time(start_time)
else:
start_time = datetime.utcnow() - timedelta(days=1)
print "%-30s %s" % ('Timestamp', statistics)
print "-"*50
data = {}
for m in cw.get_metric_statistics(int(period), start_time, end_time, metric_name, namespace, statistics, dimensions, unit):
data[m['Timestamp']] = m[statistics]
keys = data.keys()
keys.sort()
for k in keys:
print "%-30s %s" % (k, data[k])
def put(namespace, metric_name, dimensions=None, value=None, unit=None, statistics=None, timestamp=None):
"""
Publish custom metrics
namespace: The namespace to use; values starting with "AWS/" are reserved
metric_name: The name of the metric to update
dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
value: The value to store, mutually exclusive with `statistics`
statistics: The statistics to store, mutually exclusive with `value`
(must specify all of "Minimum", "Maximum", "Sum", "SampleCount")
timestamp: The timestamp of this measurement, default is current server time
unit: Unit to track, default depends on what metric is being tracked
"""
def simplify(lst):
return lst[0] if len(lst) == 1 else lst
print cw.put_metric_data(namespace, simplify(metric_name.split(';')),
dimensions = simplify(map(_parse_dict, dimensions.split(';'))) if dimensions else None,
value = simplify(value.split(';')) if value else None,
statistics = simplify(map(_parse_dict, statistics.split(';'))) if statistics else None,
timestamp = simplify(timestamp.split(';')) if timestamp else None,
unit = simplify(unit.split(';')) if unit else None)
def help(fnc=None):
"""
Print help message, optionally about a specific function
"""
import inspect
self = sys.modules['__main__']
if fnc:
try:
cmd = getattr(self, fnc)
except:
cmd = None
if not inspect.isfunction(cmd):
print "No function named: %s found" % fnc
sys.exit(2)
(args, varargs, varkw, defaults) = inspect.getargspec(cmd)
print cmd.__doc__
print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args]))
else:
print "Usage: cwutil [command]"
for cname in dir(self):
if not cname.startswith("_") and not cname == "cmd":
cmd = getattr(self, cname)
if inspect.isfunction(cmd):
doc = cmd.__doc__
print "\t%s - %s" % (cname, doc)
sys.exit(1)
if __name__ == "__main__":
import sys
self = sys.modules['__main__']
if len(sys.argv) >= 2:
try:
cmd = getattr(self, sys.argv[1])
except:
cmd = None
args = sys.argv[2:]
else:
cmd = help
args = []
if not cmd:
cmd = help
try:
cmd(*args)
except TypeError as e:
print e
help(cmd.__name__)
| [
"[email protected]"
] | ||
65f8f9c9fa8f5862c057810abb89c961205f5026 | 472baa2414822520f7cb8d491d4bf5608f765ad8 | /djsite/djsite/urls.py | e368ff4894992a51ad05df0f0a1872c894276425 | [] | no_license | Umi101108/django-projects | cdcf0c9bb8bd272e04a4b7a702f09adb16c28404 | 50edfdc3511e1de5b4a5a3e92fe9ddad932b5396 | refs/heads/master | 2021-01-12T08:20:48.113696 | 2017-06-11T14:45:20 | 2017-06-11T14:45:20 | 76,545,822 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | """djsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from django.contrib import admin
from learn import views as learn_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', learn_views.index),
]
| [
"[email protected]"
] | |
3365f9a3741b09624b9cb9f33dbbe0772f11f3f0 | c71d332dd845036c21c9fd8f4f571f9209bf2672 | /Remove K Digits.py | d0debe9c725fa1a85abe458b44127cde95a061cc | [] | no_license | diksha12p/DSA_Practice_Problems | 2884fd9e77094d9662cb8747744dd2ef563e25e4 | d56e3d07620d51871199f61ae82cff2bd75b4744 | refs/heads/master | 2023-01-20T15:31:37.824918 | 2020-11-29T21:37:12 | 2020-11-29T21:37:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | """
Given a non-negative integer num represented as a string, remove k digits from the number so that the new number is the
smallest possible.
Note:
The length of num is less than 10002 and will be ≥ k.
The given num does not contain any leading zero.
Example 1:
Input: num = "1432219", k = 3
Output: "1219"
Explanation: Remove the three digits 4, 3, and 2 to form the new number 1219 which is the smallest.
Example 2:
Input: num = "10200", k = 1
Output: "200"
Explanation: Remove the leading 1 and the number is 200. Note that the output must not contain leading zeroes.
Example 3:
Input: num = "10", k = 2
Output: "0"
Explanation: Remove all the digits from the number and it is left with nothing which is 0.
"""
class Solution:
def removeKdigits(self, num: str, k: int) -> str:
stack = []
# IDEA: Remove the element from L to R if it causes a dip i.e. greater than the next element
for char in num:
while k and stack and stack[-1] > char:
stack.pop()
k -= 1
stack.append(char)
        # If num was already in non-decreasing order the stack holds every
        # digit; drop the remaining k removals from the end (largest digits)
while k:
stack.pop()
k -= 1
        # Build the number from the stack entries; lstrip drops leading zeros
        # and `or '0'` ensures something is returned when the stack is empty
return ''.join(stack).lstrip('0') or '0'
sol = Solution()
print(sol.removeKdigits('10', 2))
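# expected output: "0" (k=2 removes both digits, leaving the empty number)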
| [
"[email protected]"
] | |
a3ea6474e50b140da2329e05fcf499abc667ef99 | ea1af1a564f96fb36974aa094192877598b0c6bf | /Chapter5/Exercises/ex5_6.py | 1f18d6252d311a11cea4128fa003c09bcec92874 | [] | no_license | GSantos23/Crash_Course | 63eecd13a60141e520b5ca4351341c21c4782801 | 4a5fc0cb9ce987948a728d43c4f266d34ba49a87 | refs/heads/master | 2020-03-20T23:20:43.201255 | 2018-08-21T01:13:06 | 2018-08-21T01:13:06 | 137,841,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | # Exercise 5.6
"""
Stages of Life: Write an if - elif - else chain that determines a person’s
stage of life. Set a value for the variable age , and then:
• If the person is less than 2 years old, print a message that the person
is a baby.
• If the person is at least 2 years old but less than 4, print a message
that the person is a toddler.
• If the person is at least 4 years old but less than 13, print a message
that the person is a kid.
• If the person is at least 13 years old but less than 20, print a message
that the person is a teenager.
• If the person is at least 20 years old but less than 65, print a message
that the person is an adult.
• If the person is age 65 or older, print a message that the person is an
elder.
"""
age = 25
if age < 2:
print("You're a baby")
elif age >= 2 and age < 4:
print("You're a toddler")
elif age >= 4 and age < 13:
print("You're a kid")
elif age >= 13 and age < 20:
print("You're a teenager")
elif age >= 20 and age < 65:
print("You're an adult")
else:
print("You're an elder")
| [
"[email protected]"
] | |
a4b3e54f985e9427b3a556587c36b4e7e9b1bb4b | 13e91d812e7e0133f45273945ccca5523b1eefe5 | /task 4/apinow/urls.py | 9b164e3d8053a11f04da28a1b60799267b94bb21 | [] | no_license | Harshvartak/Unicode | 30d7298253f1feba4c47b89bdb8403e88b1707a1 | 2903d445fa5435b835f1543b8a67fb417749e1c3 | refs/heads/master | 2020-07-10T15:29:48.115326 | 2020-01-20T18:34:42 | 2020-01-20T18:34:42 | 204,299,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | """apinow URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from SoaceX import views
urlpatterns = [
path('admin/', admin.site.urls),
    path('', views.spaceX, name='home'),
]
| [
"[email protected]"
] | |
9d84e3bc55c3151f848a83afe7ea6ccc17c78ccf | 1450bb467a73b80a3e3f649fd9423679482a235a | /test/printEgammaUserData.py | 5c558ea0fee2f17400d678aad9111dd962eecf89 | [] | no_license | cms-egamma/EgammaPostRecoTools | 430d57decd747f16904d06ccb8f61a0556ef2bb2 | 209673a77cd91b36f1fe3f09579b7f7fc4552089 | refs/heads/master | 2021-09-25T09:38:01.836119 | 2021-09-17T14:19:49 | 2021-09-17T14:19:49 | 223,762,349 | 0 | 4 | null | 2021-04-04T16:59:18 | 2019-11-24T15:03:11 | Python | UTF-8 | Python | false | false | 2,846 | py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DataFormats.FWLite import Events, Handle
import ROOT
import argparse
def convert_to_str(vec_str):
output = ""
for entry in vec_str:
if output != "": output+="\n "
output+=entry
return output
def convertpair_to_str(vec_str):
output = ""
for entry in vec_str:
if output != "": output+="\n "
output+=entry.first
return output
def print_ele_user_data(ele):
print("ele userfloats:")
print(" "+convert_to_str(ele.userFloatNames()))
print("ele userints:")
print(" "+convert_to_str(ele.userIntNames()))
print("ele IDs:")
print(" "+convertpair_to_str(ele.electronIDs()))
def print_pho_user_data(pho):
print("pho userfloats:")
print(" "+convert_to_str(pho.userFloatNames()))
print("pho userints:")
print(" "+convert_to_str(pho.userIntNames()))
print("pho IDs:")
print(" "+convertpair_to_str(pho.photonIDs()))
if __name__ == "__main__":
"""
prints electron and photon miniAOD user data
note: it assumes that all electrons and photons have exactly the same userdata so we can just print
    the first one. This is currently true except for low pt electrons and photons, hence we put a >20 GeV
cut on the ele/pho we print
"""
ROOT.gSystem.Load("libFWCoreFWLite.so");
ROOT.gSystem.Load("libDataFormatsFWLite.so");
ROOT.FWLiteEnabler.enable()
parser = argparse.ArgumentParser(description='prints E/gamma pat::Electrons/Photons user data')
parser.add_argument('filename',help='input filename')
args = parser.parse_args()
eles, ele_label = Handle("std::vector<pat::Electron>"), "slimmedElectrons"
phos, pho_label = Handle("std::vector<pat::Photon>"), "slimmedPhotons"
#we put a minimum et as low et electrons/photons may not have all the variables
min_pho_et = 20
min_ele_et = 20
done_ele = False
done_pho = False
events = Events(args.filename)
for event_nr,event in enumerate(events):
if done_ele and done_pho: break
if not done_pho:
event.getByLabel(pho_label,phos)
for pho_nr,pho in enumerate(phos.product()):
if pho.et()<min_pho_et:
continue
else:
print_pho_user_data(pho)
done_pho = True
break
if not done_ele:
event.getByLabel(ele_label,eles)
for ele_nr,ele in enumerate(eles.product()):
if ele.et()<min_ele_et:
continue
else:
print_ele_user_data(ele)
done_ele = True
break
| [
"[email protected]"
] | |
32d5735e30c382c4e7768e9ca3cfbc44ac537e6d | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/twilio/tests/integration/numbers/v2/regulatory_compliance/test_supporting_document.py | c6d7aab5bb0875c1e174a0bdd4fc75c4d84b9225 | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:b7f19e719e96de6cf10ec0f84fda59ffe9a97e89340d069975676ef4fca5b46d
size 8793
| [
"[email protected]"
] | |
531654703bc4607c33e930eb411f8c3da0423548 | 072f8bffbfef6e149ad1934ea9183a79864c1acd | /venv/Lib/site-packages/ironic_inspector_client/test/test_common_http.py | b7a15d5a131c23653b35f8a98bf9f8c3300b7dca | [] | no_license | numvc/LuxoftBot | 77d9bf8f5f63aee63350f1ec82f4b940afe203d2 | 29d7ca8868ab86bc076509d103f7596039333417 | refs/heads/master | 2020-09-21T21:37:12.527546 | 2019-12-04T23:24:35 | 2019-12-04T23:24:35 | 224,939,956 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,174 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from keystoneauth1 import exceptions
from keystoneauth1 import session
import mock
from ironic_inspector_client.common import http
class TestCheckVersion(unittest.TestCase):
@mock.patch.object(http.BaseClient, 'server_api_versions',
lambda *args, **kwargs: ((1, 0), (1, 99)))
def _check(self, version):
cli = http.BaseClient(1, inspector_url='http://127.0.0.1:5050')
return cli._check_api_version(version)
def test_tuple(self):
self.assertEqual((1, 0), self._check((1, 0)))
def test_small_tuple(self):
self.assertEqual((1, 0), self._check((1,)))
def test_int(self):
self.assertEqual((1, 0), self._check(1))
def test_str(self):
self.assertEqual((1, 0), self._check("1.0"))
def test_invalid_tuple(self):
self.assertRaises(TypeError, self._check, (1, "x"))
self.assertRaises(ValueError, self._check, (1, 2, 3))
def test_invalid_str(self):
self.assertRaises(ValueError, self._check, "a.b")
self.assertRaises(ValueError, self._check, "1.2.3")
self.assertRaises(ValueError, self._check, "foo")
def test_unsupported(self):
self.assertRaises(http.VersionNotSupported, self._check, (99, 42))
FAKE_HEADERS = {
http._MIN_VERSION_HEADER: '1.0',
http._MAX_VERSION_HEADER: '1.9'
}
@mock.patch.object(session.Session, 'get', autospec=True,
**{'return_value.status_code': 200,
'return_value.headers': FAKE_HEADERS})
class TestServerApiVersions(unittest.TestCase):
def _check(self, current=1):
return http.BaseClient(
api_version=current,
inspector_url='http://127.0.0.1:5050').server_api_versions()
def test_no_headers(self, mock_get):
mock_get.return_value.headers = {}
minv, maxv = self._check()
self.assertEqual((1, 0), minv)
self.assertEqual((1, 0), maxv)
def test_with_headers(self, mock_get):
mock_get.return_value.headers = {
'X-OpenStack-Ironic-Inspector-API-Minimum-Version': '1.1',
'X-OpenStack-Ironic-Inspector-API-Maximum-Version': '1.42',
}
minv, maxv = self._check(current=(1, 2))
self.assertEqual((1, 1), minv)
self.assertEqual((1, 42), maxv)
def test_with_404(self, mock_get):
mock_get.return_value.status_code = 404
mock_get.return_value.headers = {}
minv, maxv = self._check()
self.assertEqual((1, 0), minv)
self.assertEqual((1, 0), maxv)
def test_with_other_error(self, mock_get):
mock_get.return_value.status_code = 500
mock_get.return_value.headers = {}
self.assertRaises(http.ClientError, self._check)
class TestRequest(unittest.TestCase):
base_url = 'http://127.0.0.1:5050/v1'
def setUp(self):
super(TestRequest, self).setUp()
self.headers = {http._VERSION_HEADER: '1.0'}
self.session = mock.Mock(spec=session.Session)
self.session.get_endpoint.return_value = self.base_url
self.req = self.session.request
self.req.return_value.status_code = 200
@mock.patch.object(http.BaseClient, 'server_api_versions',
lambda self: ((1, 0), (1, 42)))
def get_client(self, version=1, inspector_url=None, use_session=True):
if use_session:
return http.BaseClient(version, session=self.session,
inspector_url=inspector_url)
else:
return http.BaseClient(version, inspector_url=inspector_url)
def test_ok(self):
res = self.get_client().request('get', '/foo/bar')
self.assertIs(self.req.return_value, res)
self.req.assert_called_once_with(self.base_url + '/foo/bar', 'get',
raise_exc=False, headers=self.headers)
self.session.get_endpoint.assert_called_once_with(
service_type='baremetal-introspection',
interface=None, region_name=None)
def test_no_endpoint(self):
self.session.get_endpoint.return_value = None
self.assertRaises(http.EndpointNotFound, self.get_client)
self.session.get_endpoint.assert_called_once_with(
service_type='baremetal-introspection',
interface=None, region_name=None)
def test_endpoint_not_found(self):
self.session.get_endpoint.side_effect = exceptions.EndpointNotFound()
self.assertRaises(http.EndpointNotFound, self.get_client)
self.session.get_endpoint.assert_called_once_with(
service_type='baremetal-introspection',
interface=None, region_name=None)
@mock.patch.object(session.Session, 'request', autospec=True,
**{'return_value.status_code': 200})
def test_ok_no_auth(self, mock_req):
res = self.get_client(
use_session=False,
inspector_url='http://some/host').request('get', '/foo/bar')
self.assertIs(mock_req.return_value, res)
mock_req.assert_called_once_with(mock.ANY,
'http://some/host/v1/foo/bar', 'get',
raise_exc=False, headers=self.headers)
def test_ok_with_session_and_url(self):
res = self.get_client(
use_session=True,
inspector_url='http://some/host').request('get', '/foo/bar')
self.assertIs(self.req.return_value, res)
self.req.assert_called_once_with('http://some/host/v1/foo/bar', 'get',
raise_exc=False, headers=self.headers)
def test_explicit_version(self):
res = self.get_client(version='1.2').request('get', '/foo/bar')
self.assertIs(self.req.return_value, res)
self.headers[http._VERSION_HEADER] = '1.2'
self.req.assert_called_once_with(self.base_url + '/foo/bar', 'get',
raise_exc=False, headers=self.headers)
def test_error(self):
self.req.return_value.status_code = 400
self.req.return_value.content = json.dumps(
{'error': {'message': 'boom'}}).encode('utf-8')
self.assertRaisesRegexp(http.ClientError, 'boom',
self.get_client().request, 'get', 'url')
def test_error_discoverd_style(self):
self.req.return_value.status_code = 400
self.req.return_value.content = b'boom'
self.assertRaisesRegexp(http.ClientError, 'boom',
self.get_client().request, 'get', 'url')
| [
"[email protected]"
] | |
4ad05ebbeca5160b6ac94c8e898cfc8f3c38295f | 22b348a0d10519cb1f1da5e886fdf2d3c167cf5a | /myweb/api/controllers/v1/schemas/user.py | 407d624aefe8d1983e695e6d585c34ae1be6d667 | [] | no_license | liuluyang/openstack_mogan_study | dab0a8f918ffd17e0a747715998e81304672b75b | 8624f765da7f5aa0c210f0fa945fc50cf8a67b9e | refs/heads/master | 2021-01-19T17:03:15.370323 | 2018-04-12T09:50:38 | 2018-04-12T09:50:38 | 101,040,396 | 1 | 1 | null | 2017-11-01T02:17:31 | 2017-08-22T08:30:22 | Python | UTF-8 | Python | false | false | 505 | py | from myweb.api.validation import parameter_types
metadata = {
'type': 'object',
'patternProperties': {
'^[a-zA-Z0-9-_:. ]{1,255}$': {
            'type': 'string', 'maxLength': 255, 'minLength': 1
}
},
'additionalProperties': False
}
user_add = {
'type': 'object',
'properties': {
        'name': parameter_types.name,
        'policy': {'enum': ['disk', 'one']},
        'metadata': metadata
},
'required': ['policy'],
'additionalProperties': False,
} | [
"[email protected]"
] | |
6aede4f0a10c3494cb90fe869bdc6fdb62075c3c | 8520c991dc543f5f4e1efe59ab401824173bb985 | /565-array-nesting/solution.py | 078594952952ef661f9c1760257fa4f6bfdf44c3 | [] | no_license | katryo/leetcode | d44f70f2853c4f5ea9a462d022feb0f5436c2236 | 0da45559271d3dba687858b8945b3e361ecc813c | refs/heads/master | 2020-03-24T12:04:53.859047 | 2020-02-18T04:27:55 | 2020-02-18T04:27:55 | 142,703,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | class Solution(object):
def arrayNesting(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
moves_to_end = [-1] * len(nums)
for i in range(len(nums)):
if moves_to_end[i] != -1:
continue
visited = set()
path = []
cur = i
while cur not in visited:
visited.add(cur)
path.append(cur)
cur = nums[cur]
# path: [a, b, c, ..., f] f is the nearest to the goal
for j, loc in enumerate(reversed(path)):
moves_to_end[loc] = j+1
ans = -1
for moves in moves_to_end:
ans = max(ans, moves)
return ans
# s = Solution()
# print(s.arrayNesting([5,4,0,3,1,6,2]))
# print(s.arrayNesting([0]))
# print(s.arrayNesting([1, 0]))
# print(s.arrayNesting([0, 1]))
| [
"[email protected]"
] | |
e3319eb3a032dedfd3f3b2d69472c2756060c227 | 26d128c7343ca370f646001f2813ad2131ad2d17 | /mlp.py | 369a8714e6d738226636b1f42ffa1fdd85f632a8 | [] | no_license | daureg/deep-time | 0c336c4ce8c0341c1e10168c6ffa1e1eaafe51c6 | 113ab5bc79913f756a2a5c5872aa0883a62a91d7 | refs/heads/master | 2021-01-01T15:45:04.236302 | 2013-12-03T13:27:59 | 2013-12-03T13:27:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,874 | py | """
This tutorial introduces the multilayer perceptron using Theano.
A multilayer perceptron is a logistic regressor where
instead of feeding the input to the logistic regression you insert an
intermediate layer, called the hidden layer, that has a nonlinear
activation function (usually tanh or sigmoid). One can use many such
hidden layers making the architecture deep. The tutorial will also tackle
the problem of MNIST digit classification.
.. math::
f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 5
"""
__docformat__ = 'restructuredtext en'
import os
import time
import numpy
import theano
import theano.tensor as T
from logistic_sgd import LogisticRegression, load_data
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
        # `W` is initialized with `W_values` which is uniformly sampled
        # from -sqrt(6./(n_in+n_out)) to sqrt(6./(n_in+n_out))
# for tanh activation function
# the output of uniform if converted using asarray to dtype
# theano.config.floatX so that the code is runable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
if W is None:
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (lin_output if activation is None
else activation(lin_output))
# parameters of the model
self.params = [self.W, self.b]
class MLP(object):
"""Multi-Layer Perceptron Class
    A multilayer perceptron is a feedforward artificial neural network model
    that has one or more layers of hidden units and nonlinear activations.
    Intermediate layers usually use tanh or the sigmoid function as their
    activation (defined here by the ``HiddenLayer`` class) while the
    top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# Since we are dealing with a one hidden layer MLP, this will
# translate into a TanhLayer connected to the LogisticRegression
# layer; this can be replaced by a SigmoidalLayer, or a layer
# implementing any other nonlinearity
self.hiddenLayer = HiddenLayer(rng=rng, input=input,
n_in=n_in, n_out=n_hidden,
activation=T.tanh)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out)
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = abs(self.hiddenLayer.W).sum() \
+ abs(self.logRegressionLayer.W).sum()
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \
+ (self.logRegressionLayer.W ** 2).sum()
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
        # the parameters of the model are the parameters of the two layers it
        # is made out of
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
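        # Minimal usage sketch (mirrors test_mlp below; names illustrative):
        #   x, y = T.matrix('x'), T.ivector('y')
        #   rng = numpy.random.RandomState(1234)
        #   clf = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=500, n_out=10)
        #   cost = clf.negative_log_likelihood(y) + 0.0001 * clf.L2_sqr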
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
dataset='../data/mnist.pkl.gz', batch_size=20, n_hidden=500):
"""
Demonstrate stochastic gradient descent optimization for a multilayer
perceptron
This is demonstrated on MNIST.
:type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
rng = numpy.random.RandomState(1234)
# construct the MLP class
classifier = MLP(rng=rng, input=x, n_in=28 * 28,
n_hidden=n_hidden, n_out=10)
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically
cost = classifier.negative_log_likelihood(y) \
+ L1_reg * classifier.L1 \
+ L2_reg * classifier.L2_sqr
# compiling a Theano function that computes the mistakes that are made
# by the model on a minibatch
test_model = theano.function(inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]})
validate_model = theano.function(inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]})
    # compute the gradient of cost with respect to theta (stored in params)
# the resulting gradients will be stored in a list gparams
gparams = []
for param in classifier.params:
gparam = T.grad(cost, param)
gparams.append(gparam)
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs
updates = []
# given two list the zip A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of
# same length, zip generates a list C of same size, where each element
# is a pair formed from the two lists :
# C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
for param, gparam in zip(classifier.params, gparams):
updates.append((param, param - learning_rate * gparam))
# compiling a Theano function `train_model` that returns the cost, but
# in the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(inputs=[index], outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size:(index + 1) * batch_size],
y: train_set_y[index * batch_size:(index + 1) * batch_size]})
###############
# TRAIN MODEL #
###############
print('... training')
# early-stopping parameters
    patience = 10000 # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
                                  # minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_params = None
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = time.clock()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print(('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%') %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print('The code for file ' + os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if __name__ == '__main__':
test_mlp()
| [
"[email protected]"
] | |
6ef83d142494ed6bb3e2514a5a63d600fac7ecdf | c4afc78e2e8ffbcc430b8799e3e1073dac8e972d | /src/themester/views.py | b862f20737f533596cc82d36a9688be279b8cedd | [
"MIT"
] | permissive | pauleveritt/themester-pre-hypermodern | 5c04197c7d4589b29709c736b3bcd02312e40a61 | 219595956f8ca1886d0c30b33efd86234aaf57ec | refs/heads/master | 2023-08-27T18:53:52.693340 | 2020-11-22T15:04:25 | 2020-11-22T15:04:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,482 | py | """
Like a component, but with for_=View only.
"""
from typing import Callable, Optional, Type
from venusian import Scanner, attach
from wired import ServiceContainer, ServiceRegistry
from wired_injector.injector import Injector
from themester.protocols import View
def register_view(
registry: ServiceRegistry,
target: Callable = None,
context: Optional[Type] = None,
name: Optional[str] = None,
):
""" Imperative form of the view decorator """
def view_factory(container: ServiceContainer):
injector = Injector(container)
view_instance = injector(target)
return view_instance
if name is None:
registry.register_factory(
view_factory, View, context=context
)
else:
registry.register_factory(
view_factory, View, context=context, name=name
)
class view:
def __init__(
self,
context: Optional[Type] = None,
name: Optional[str] = None
):
self.context = context
self.name = name
def __call__(self, wrapped):
def callback(scanner: Scanner, name: str, cls):
registry: ServiceRegistry = getattr(scanner, 'registry')
register_view(
registry,
target=cls,
context=self.context,
name=self.name,
)
attach(wrapped, callback, category='viewdom_wired')
return wrapped
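# Illustrative usage (names below are hypothetical, not part of this package):
#
#   @view(context=SomeResource, name='detail')
#   class SomeView:
#       ...
#
# After venusian scanning, SomeView is registered as a View factory for
# SomeResource and is constructed through wired_injector's Injector.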
| [
"[email protected]"
] | |
81b3a5ff4378c320344f0c7d26a210698bcf51ad | 3a3533b16b54d42d6889c490224345ca985bef74 | /deliver_status_loewie/__init__.py | a5172e4b596e74fbe03f47d22c233cefb470d9df | [
"Apache-2.0"
] | permissive | lester-lees/extra_addons_hk | 52916ac6858d4b4484bd137b55268c7d5de177d0 | edd2c2595146bc9c99b75a2d0831a93f940fa55c | refs/heads/master | 2021-01-06T20:43:33.448307 | 2019-03-28T06:46:17 | 2019-03-28T06:46:17 | 99,546,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | # -*- coding: utf-8 -*-
import sale
import purchase
import generate_purchaseorder
import generate_salesorder | [
"[email protected]"
] | |
222d709d64de1813eca0d9d49af5111d7c124b88 | 0e91030c47071029d978dbfb9e7a30ae6826afe5 | /venv/Scripts/easy_install-script.py | a0923dd8d08599e350611459fd8c7d659ba84dc3 | [] | no_license | liqi629/python_lemon | 095983fadda3639b058043b399180d19f899284b | bc5e6e6c92561ba9cec2798b7735505b377e9cd6 | refs/heads/master | 2023-02-04T00:57:09.447008 | 2020-12-27T14:46:31 | 2020-12-27T14:46:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | #!C:\Users\lipan\PycharmProjects\lemon_python_learning\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
c0d46bae7bb41fb5836e0cddab066566832ec3b3 | c6759b857e55991fea3ef0b465dbcee53fa38714 | /tools/nntool/nntool/quantization/verify_quantization.py | 70b3ea3146bfa1c5422a82a807a5c2ebef945dbe | [
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] | permissive | GreenWaves-Technologies/gap_sdk | 1b343bba97b7a5ce62a24162bd72eef5cc67e269 | 3fea306d52ee33f923f2423c5a75d9eb1c07e904 | refs/heads/master | 2023-09-01T14:38:34.270427 | 2023-08-10T09:04:44 | 2023-08-10T09:04:44 | 133,324,605 | 145 | 96 | Apache-2.0 | 2023-08-27T19:03:52 | 2018-05-14T07:50:29 | C | UTF-8 | Python | false | false | 3,015 | py | # Copyright (C) 2020, 2021 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
def walk_graph(G, qrecs, node, visited):
    problems = []
    if node in visited:
        return problems
    visited.add(node)
    qrec = qrecs.get(node.name)
    if qrec is None:
        problems.append(f"node {node.name} has no quantization set")
    elif qrec.out_qs is None:
        problems.append(f"node {node.name} has no output quantization set")
    elif qrec.in_qs is None:
        problems.append(f"node {node.name} has no input quantization set")
    else:
        for idx, edge_group in enumerate(G.indexed_out_edges(node.name)):
            if len(qrec.out_qs) <= idx:
                problems.append(
                    f"node {node.name} has no output quantization set on output {idx}")
                continue
            qtype = qrec.out_qs[idx]
            if qtype is None and edge_group:
                problems.append(
                    f"node {node.name} quantization on output {idx} is None")
                continue
            for edge in edge_group:
                to_qrec = qrecs.get(edge.to_node.name)
                if to_qrec is None or to_qrec.in_qs is None:
                    # error will be reported when node is visited
                    continue
                if len(to_qrec.in_qs) <= edge.to_idx:
                    problems.append(
                        f"node {edge.to_node.name} has no input quantization set on input {edge.to_idx}")
                    # guard: indexing in_qs below would raise IndexError
                    continue
                if to_qrec.in_qs[edge.to_idx] is None:
                    problems.append(
                        f"node {edge.to_node.name} quantization set on input {edge.to_idx} is None")
                    # guard: quantization_equal below cannot compare against None
                    continue
                if not qtype.quantization_equal(to_qrec.in_qs[edge.to_idx]):
                    problems.append(f"node {edge.to_node.name} quantization set on input {edge.to_idx} "
                                    f"does not match node {node.name} output {idx} {qtype} -> {to_qrec.in_qs[edge.to_idx]}")
    for edge in G.out_edges(node.name):
        problems.extend(walk_graph(G, qrecs, edge.to_node, visited))
    return problems


def verify_quantization(G):
    if G.quantization is None:
        return ["quantization is not set"]
    qrecs = G.quantization
    visited = set()
    problems = []
    for node in G.inputs():
        problems.extend(walk_graph(G, qrecs, node, visited))
    return problems
| [
"[email protected]"
] | |
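
A short sketch of how the checker might be driven; `G` stands for an nntool graph object with its quantization records attached (how the graph is loaded and quantized is not shown here):

problems = verify_quantization(G)
if problems:
    for problem in problems:
        print(problem)
else:
    print("quantization records are consistent")
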
1706a5f900e84feda55426b1a048153a06e8fd8c | 6d05f11c55ea277a08fc375b1c5af5ecc076000b | /python/paddle/fluid/tests/unittests/test_fleet_nocvm_1.py | 2959043482c9a587feffe35476ba2e6f4102938e | [
"Apache-2.0"
] | permissive | sfraczek/Paddle | 8602df1b11937400f93ac5861a366226208a6f05 | d1e2c61b22b9675adc3c4a52227d2220babaa001 | refs/heads/develop | 2023-04-04T22:52:42.629243 | 2023-03-16T12:06:10 | 2023-03-16T12:06:10 | 140,574,617 | 0 | 0 | Apache-2.0 | 2019-03-26T15:54:00 | 2018-07-11T12:51:20 | C++ | UTF-8 | Python | false | false | 4,102 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test fleet."""
import os
import unittest
import paddle
class TestFleet1(unittest.TestCase):
    """
    Test cases for fleet minimize.
    """

    def setUp(self):
        """Set up, set envs."""
        os.environ["PADDLE_TRAINERS_NUM"] = "2"
        os.environ[
            "PADDLE_PSERVERS_IP_PORT_LIST"
        ] = "127.0.0.1:36001,127.0.0.2:36001"

    def test_pslib_1(self):
        """Test cases for pslib."""
        import paddle.fluid as fluid
        from paddle.incubate.distributed.fleet.parameter_server.pslib import (
            fleet,
        )
        from paddle.incubate.distributed.fleet.role_maker import (
            GeneralRoleMaker,
        )

        os.environ["POD_IP"] = "127.0.0.1"
        os.environ["PADDLE_PORT"] = "36001"
        os.environ["TRAINING_ROLE"] = "TRAINER"
        os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001"
        os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36002"
        os.environ["PADDLE_TRAINER_ID"] = "0"
        role_maker = GeneralRoleMaker()
        # role_maker.generate_role()
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        # fleet.init(role_maker)
        train_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(train_program, startup_program):
            show = paddle.static.data(
                name="show",
                shape=[-1, 1],
                dtype="int64",
                lod_level=1,
            )
            emb = fluid.layers.embedding(
                input=show,
                size=[1, 1],
                is_sparse=True,
                is_distributed=True,
                param_attr=fluid.ParamAttr(name="embedding"),
            )
            fc = paddle.static.nn.fc(x=emb, size=1, activation=None)
            label = paddle.static.data(
                name="click",
                shape=[-1, 1],
                dtype="int64",
                lod_level=1,
            )
            label_cast = paddle.cast(label, dtype='float32')
            cost = paddle.nn.functional.log_loss(fc, label_cast)
            try:
                adam = fluid.optimizer.Adam(learning_rate=0.000005)
                adam = fleet.distributed_optimizer(
                    adam,
                    strategy={
                        "embedding": {
                            "sparse_accessor_class": "DownpourCtrAccessor"
                        }
                    },
                )
                adam.minimize([cost], [scope])
                fleet.run_server()
            except:
                print("do not support pslib test, skip")
                return
            try:
                # worker should call these methods instead of server
                # the following is only for test when with_pslib=off
                def test_func():
                    """
                    it is only a test function
                    """
                    return True

                fleet._role_maker.is_first_worker = test_func
                fleet._role_maker._barrier_worker = test_func
                fleet.save_model("./model_000")
                fleet.save_one_table(0, "./model_001")
                fleet.save_one_table(0, "./model_002", prefix="hahaha")
                fleet.load_model("./model_0003")
                fleet.load_one_table(0, "./model_004")
                fleet.confirm()
                fleet.revert()
            except:
                print("do not support pslib test, skip")
                return


if __name__ == "__main__":
    unittest.main()
| [
"[email protected]"
] | |
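
The module runs as a plain unittest file. An equivalent programmatic invocation (the dotted name assumes the file is importable as test_fleet_nocvm_1):

import unittest

suite = unittest.defaultTestLoader.loadTestsFromName(
    'test_fleet_nocvm_1.TestFleet1.test_pslib_1'
)
unittest.TextTestRunner(verbosity=2).run(suite)
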
418990d7fc131fafa72d7cf2a4e781b2938d3a2f | 71327347c4ffe832b656edd40bdcdaf13f123e16 | /pywolf/migrations/0018_auto_20180722_2259.py | 8633d5ea8314a6bddcc80c7ee880e3b9c12ae265 | [
"BSD-3-Clause"
] | permissive | tevawolf/pywolf | 8b58570deac4a33643c323c1ff9754e0ce2b33ed | 94e3c26d8c3b279990624f23658e22ab00eead46 | refs/heads/master | 2020-04-02T12:17:12.680330 | 2018-11-19T07:34:42 | 2018-11-19T07:34:42 | 154,426,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | # Generated by Django 2.0.6 on 2018-07-22 13:59
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('pywolf', '0017_villageparticipantvoice_voice_order'),
    ]

    operations = [
        migrations.AddField(
            model_name='mvoicesetting',
            name='max_str_length',
            field=models.SmallIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='mvoicesetting',
            name='max_voice_point',
            field=models.SmallIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='villagevoicesetting',
            name='max_str_length',
            field=models.SmallIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='villagevoicesetting',
            name='max_voice_point',
            field=models.SmallIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='villagevoicesetting',
            name='epilogue_limit_off_flg',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='villagevoicesetting',
            name='prologue_limit_off_flg',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='villagevoicesetting',
            name='tomb_limit_off_flg',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='villagevoicesetting',
            name='voice_number',
            field=models.SmallIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='villagevoicesetting',
            name='voice_point',
            field=models.SmallIntegerField(default=0),
        ),
    ]
| [
"[email protected]"
] |
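
To preview and apply this migration programmatically (standard Django management commands; assumes a configured Django settings module for the pywolf project):

from django.core.management import call_command

call_command('sqlmigrate', 'pywolf', '0018')                  # print the SQL it would run
call_command('migrate', 'pywolf', '0018_auto_20180722_2259')  # apply up to this migration
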
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.