blob_id stringlengths 40..40 | directory_id stringlengths 40..40 | path stringlengths 3..616 | content_id stringlengths 40..40 | detected_licenses listlengths 0..112 | license_type stringclasses 2 values | repo_name stringlengths 5..115 | snapshot_id stringlengths 40..40 | revision_id stringlengths 40..40 | branch_name stringclasses 777 values | visit_date timestamp[us] 2015-08-06 10:31:46..2023-09-06 10:44:38 | revision_date timestamp[us] 1970-01-01 02:38:32..2037-05-03 13:00:00 | committer_date timestamp[us] 1970-01-01 02:38:32..2023-09-06 01:08:06 | github_id int64 4.92k..681M ⌀ | star_events_count int64 0..209k | fork_events_count int64 0..110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] 2012-06-04 01:52:49..2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us] 2008-05-22 07:58:19..2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3..10.2M | extension stringclasses 188 values | content stringlengths 3..10.2M | authors listlengths 1..1 | author_id stringlengths 1..132 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33088de85d1d21fb85db5ede234527249596c566 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Sklearn_arm/source/scipy/special/tests/test_bdtr.py | 57694becc49b2028f17eac819b80a225ac010795 | [
"MIT",
"GPL-3.0-or-later",
"BSD-3-Clause",
"GPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"BSD-2-Clause",
"GCC-exception-3.1",
"Python-2.0",
"Qhull",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 3,231 | py | import numpy as np
import scipy.special as sc
import pytest
from numpy.testing import assert_allclose, assert_array_equal, suppress_warnings
class TestBdtr:
def test(self):
val = sc.bdtr(0, 1, 0.5)
assert_allclose(val, 0.5)
def test_sum_is_one(self):
val = sc.bdtr([0, 1, 2], 2, 0.5)
assert_array_equal(val, [0.25, 0.75, 1.0])
def test_rounding(self):
double_val = sc.bdtr([0.1, 1.1, 2.1], 2, 0.5)
int_val = sc.bdtr([0, 1, 2], 2, 0.5)
assert_array_equal(double_val, int_val)
@pytest.mark.parametrize('k, n, p', [
(np.inf, 2, 0.5),
(1.0, np.inf, 0.5),
(1.0, 2, np.inf)
])
def test_inf(self, k, n, p):
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
val = sc.bdtr(k, n, p)
assert np.isnan(val)
def test_domain(self):
val = sc.bdtr(-1.1, 1, 0.5)
assert np.isnan(val)
class TestBdtrc:
def test_value(self):
val = sc.bdtrc(0, 1, 0.5)
assert_allclose(val, 0.5)
def test_sum_is_one(self):
val = sc.bdtrc([0, 1, 2], 2, 0.5)
assert_array_equal(val, [0.75, 0.25, 0.0])
def test_rounding(self):
double_val = sc.bdtrc([0.1, 1.1, 2.1], 2, 0.5)
int_val = sc.bdtrc([0, 1, 2], 2, 0.5)
assert_array_equal(double_val, int_val)
@pytest.mark.parametrize('k, n, p', [
(np.inf, 2, 0.5),
(1.0, np.inf, 0.5),
(1.0, 2, np.inf)
])
def test_inf(self, k, n, p):
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
val = sc.bdtrc(k, n, p)
assert np.isnan(val)
def test_domain(self):
val = sc.bdtrc(-1.1, 1, 0.5)
val2 = sc.bdtrc(2.1, 1, 0.5)
assert np.isnan(val2)
assert_allclose(val, 1.0)
def test_bdtr_bdtrc_sum_to_one(self):
bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5)
bdtrc_vals = sc.bdtrc([0, 1, 2], 2, 0.5)
vals = bdtr_vals + bdtrc_vals
assert_allclose(vals, [1.0, 1.0, 1.0])
class TestBdtri:
def test_value(self):
val = sc.bdtri(0, 1, 0.5)
assert_allclose(val, 0.5)
def test_sum_is_one(self):
val = sc.bdtri([0, 1], 2, 0.5)
actual = np.asarray([1 - 1/np.sqrt(2), 1/np.sqrt(2)])
assert_allclose(val, actual)
def test_rounding(self):
double_val = sc.bdtri([0.1, 1.1], 2, 0.5)
int_val = sc.bdtri([0, 1], 2, 0.5)
assert_allclose(double_val, int_val)
@pytest.mark.parametrize('k, n, p', [
(np.inf, 2, 0.5),
(1.0, np.inf, 0.5),
(1.0, 2, np.inf)
])
def test_inf(self, k, n, p):
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
val = sc.bdtri(k, n, p)
assert np.isnan(val)
@pytest.mark.parametrize('k, n, p', [
(-1.1, 1, 0.5),
(2.1, 1, 0.5)
])
def test_domain(self, k, n, p):
val = sc.bdtri(k, n, p)
assert np.isnan(val)
def test_bdtr_bdtri_roundtrip(self):
bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5)
roundtrip_vals = sc.bdtri([0, 1, 2], 2, bdtr_vals)
assert_allclose(roundtrip_vals, [0.5, 0.5, np.nan])
| [
"[email protected]"
]
| |
418b625b9dc9be8261cdeeedf0f8fb6c7ec8adb3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/13/usersdata/104/5684/submittedfiles/flipper.py | 525dfd44a7e141261a476cc49e226822c90d857c | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
#INPUT
p=input('Enter the position of p:')
r=input('Enter the position of r:')
#PROCESSING
if p==0:
    print('C')
else:
    if r==0:
        print('B')
    else:
        print('A') | [
"[email protected]"
]
| |
0bdea361b10a4f3475f4dc9966169daced84f42c | 0b767d1516ff77f62431f7464fb11b4e747b4a5a | /src/okok.py | c20649bc9a51ee921ebbfcdfd0c5062ea101c110 | [
"BSD-2-Clause"
]
| permissive | se4ai/code | 1429f6c2e649cad1b42323cb1cf0deded5cf23a0 | e2ac87c48863a471459d6aabc67ebdc1c96f440e | refs/heads/master | 2020-05-23T17:45:14.567820 | 2019-08-06T13:56:27 | 2019-08-06T13:56:27 | 186,873,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | from ok import ok
@ok
def ok1():
"This will always fail."
assert 2==1, "oops"
@ok
def ok2():
"This will always pass."
n = sum([1,2,3,4])
assert n==10, "should not fail"
if __name__ == "__main__": ok()
| [
"[email protected]"
]
| |
4c30510bd6ce2bb79440bcadd772954fbe1cd46a | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/allergies/56c0c9b37bf84ea598db6cbc74fd8ebe.py | 1ef0988f5a8722518177c195c0c49f2735807e69 | []
| no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 465 | py | class Allergies(object):
allergies = [
"eggs",
"peanuts",
"shellfish",
"strawberries",
"tomatoes",
"chocolate",
"pollen",
"cats"
]
def __init__(self, score):
score = score & 0xff
self.list = [
self.allergies[b]
for b in xrange(8)
if score & (1 << b)
]
def is_allergic_to(self, allergy):
return allergy in self.list
| [
"[email protected]"
]
| |
348d9b7b8309079a4c69ee619bc7bf6d819d36c4 | eb3683f9127befb9ef96d8eb801206cf7b84d6a7 | /stypy/sgmc/sgmc_cache/testing/test_programs/numpy/basic_numpy/functions/numpy_abs.py | eb6a5f64cfb4c351a781f911bb2dd4dd546d5b68 | []
| no_license | ComputationalReflection/stypy | 61ec27333a12f76ac055d13f8969d3e0de172f88 | be66ae846c82ac40ba7b48f9880d6e3990681a5b | refs/heads/master | 2021-05-13T18:24:29.005894 | 2018-06-14T15:42:50 | 2018-06-14T15:42:50 | 116,855,812 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,858 | py |
# -*- coding: utf-8 -*-
"""
ORIGINAL PROGRAM SOURCE CODE:
1: # http://www.labri.fr/perso/nrougier/teaching/numpy.100/
2:
3: import numpy as np
4:
5: Z = np.arange(100)
6: v = np.random.uniform(0,100)
7: index = (np.abs(Z-v)).argmin()
8: e = Z[index]
9:
10: # l = globals().copy()
11: # for v in l:
12: # print ("'" + v + "'" + ": instance_of_class_name(\"" + type(l[v]).__name__ + "\"),")
13:
"""
# Import the stypy library necessary elements
from stypy.type_inference_programs.type_inference_programs_imports import *
# Create the module type store
module_type_store = Context(None, __file__)
# ################# Begin of the type inference program ##################
stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 3, 0))
# 'import numpy' statement (line 3)
update_path_to_current_file_folder('C:/Users/redon/PycharmProjects/stypyV2/testing//test_programs/numpy/basic_numpy/functions/')
import_1 = generate_type_inference_code_for_module(stypy.reporting.localization.Localization(__file__, 3, 0), 'numpy')
if (type(import_1) is not StypyTypeError):
if (import_1 != 'pyd_module'):
__import__(import_1)
sys_modules_2 = sys.modules[import_1]
import_module(stypy.reporting.localization.Localization(__file__, 3, 0), 'np', sys_modules_2.module_type_store, module_type_store)
else:
import numpy as np
import_module(stypy.reporting.localization.Localization(__file__, 3, 0), 'np', numpy, module_type_store)
else:
# Assigning a type to the variable 'numpy' (line 3)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 3, 0), 'numpy', import_1)
remove_current_file_folder_from_path('C:/Users/redon/PycharmProjects/stypyV2/testing//test_programs/numpy/basic_numpy/functions/')
# Assigning a Call to a Name (line 5):
# Call to arange(...): (line 5)
# Processing the call arguments (line 5)
int_5 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 5, 14), 'int')
# Processing the call keyword arguments (line 5)
kwargs_6 = {}
# Getting the type of 'np' (line 5)
np_3 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 5, 4), 'np', False)
# Obtaining the member 'arange' of a type (line 5)
arange_4 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 5, 4), np_3, 'arange')
# Calling arange(args, kwargs) (line 5)
arange_call_result_7 = invoke(stypy.reporting.localization.Localization(__file__, 5, 4), arange_4, *[int_5], **kwargs_6)
# Assigning a type to the variable 'Z' (line 5)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 5, 0), 'Z', arange_call_result_7)
# Assigning a Call to a Name (line 6):
# Call to uniform(...): (line 6)
# Processing the call arguments (line 6)
int_11 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 6, 22), 'int')
int_12 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 6, 24), 'int')
# Processing the call keyword arguments (line 6)
kwargs_13 = {}
# Getting the type of 'np' (line 6)
np_8 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 6, 4), 'np', False)
# Obtaining the member 'random' of a type (line 6)
random_9 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 6, 4), np_8, 'random')
# Obtaining the member 'uniform' of a type (line 6)
uniform_10 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 6, 4), random_9, 'uniform')
# Calling uniform(args, kwargs) (line 6)
uniform_call_result_14 = invoke(stypy.reporting.localization.Localization(__file__, 6, 4), uniform_10, *[int_11, int_12], **kwargs_13)
# Assigning a type to the variable 'v' (line 6)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 6, 0), 'v', uniform_call_result_14)
# Assigning a Call to a Name (line 7):
# Call to argmin(...): (line 7)
# Processing the call keyword arguments (line 7)
kwargs_23 = {}
# Call to abs(...): (line 7)
# Processing the call arguments (line 7)
# Getting the type of 'Z' (line 7)
Z_17 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 7, 16), 'Z', False)
# Getting the type of 'v' (line 7)
v_18 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 7, 18), 'v', False)
# Applying the binary operator '-' (line 7)
result_sub_19 = python_operator(stypy.reporting.localization.Localization(__file__, 7, 16), '-', Z_17, v_18)
# Processing the call keyword arguments (line 7)
kwargs_20 = {}
# Getting the type of 'np' (line 7)
np_15 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 7, 9), 'np', False)
# Obtaining the member 'abs' of a type (line 7)
abs_16 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 7, 9), np_15, 'abs')
# Calling abs(args, kwargs) (line 7)
abs_call_result_21 = invoke(stypy.reporting.localization.Localization(__file__, 7, 9), abs_16, *[result_sub_19], **kwargs_20)
# Obtaining the member 'argmin' of a type (line 7)
argmin_22 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 7, 9), abs_call_result_21, 'argmin')
# Calling argmin(args, kwargs) (line 7)
argmin_call_result_24 = invoke(stypy.reporting.localization.Localization(__file__, 7, 9), argmin_22, *[], **kwargs_23)
# Assigning a type to the variable 'index' (line 7)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 7, 0), 'index', argmin_call_result_24)
# Assigning a Subscript to a Name (line 8):
# Obtaining the type of the subscript
# Getting the type of 'index' (line 8)
index_25 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 8, 6), 'index')
# Getting the type of 'Z' (line 8)
Z_26 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 8, 4), 'Z')
# Obtaining the member '__getitem__' of a type (line 8)
getitem___27 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 8, 4), Z_26, '__getitem__')
# Calling the subscript (__getitem__) to obtain the elements type (line 8)
subscript_call_result_28 = invoke(stypy.reporting.localization.Localization(__file__, 8, 4), getitem___27, index_25)
# Assigning a type to the variable 'e' (line 8)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 8, 0), 'e', subscript_call_result_28)
# ################# End of the type inference program ##################
module_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()
module_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()
| [
"[email protected]"
]
| |
b81f349b219ecfb366970a6ddf21bfdcdcad34a5 | 71894f980d1209017837d7d02bc38ffb5dbcb22f | /audio/AlexaWithRaspioProHat/AlexaPi/main.py | 9d5d327ba169eb237d036b14e4e26a54db885dad | [
"MIT"
]
| permissive | masomel/py-iot-apps | 0f2418f8d9327a068e5db2cdaac487c321476f97 | 6c22ff2f574a37ba40a02625d6ed68d7bc7058a9 | refs/heads/master | 2021-03-22T04:47:59.930338 | 2019-05-16T06:48:32 | 2019-05-16T06:48:32 | 112,631,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,211 | py | # msm: source - http://www.instructables.com/id/Build-an-Alexa-With-Raspio-Pro-Hat-and-Raspberry-P/?ALLSTEPS edits original
#! /usr/bin/env python
import os
import random
import time
import RPi.GPIO as GPIO
import alsaaudio
import wave
import random
from creds import *
import requests
import json
import re
from memcache import Client
#Settings
button = 18 #GPIO Pin with button connected
lights = [24, 25, 27] # GPIO Pins with LED's conneted
device = "plughw:1" # Name of your microphone/soundcard in arecord -L
#Setup
recorded = False
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
path = os.path.realpath(__file__).rstrip(os.path.basename(__file__))
def internet_on():
print("Checking Internet Connection")
try:
r =requests.get('https://api.amazon.com/auth/o2/token')
print("Connection OK")
return True
except:
print("Connection Failed")
return False
def gettoken():
token = mc.get("access_token")
refresh = refresh_token
if token:
return token
elif refresh:
payload = {"client_id" : Client_ID, "client_secret" : Client_Secret, "refresh_token" : refresh, "grant_type" : "refresh_token", }
url = "https://api.amazon.com/auth/o2/token"
r = requests.post(url, data = payload)
resp = json.loads(r.text)
mc.set("access_token", resp['access_token'], 3570)
return resp['access_token']
else:
return False
def alexa():
GPIO.output(27, GPIO.LOW) #blue light out
GPIO.output(24, GPIO.HIGH)
url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize'
headers = {'Authorization' : 'Bearer %s' % gettoken()}
d = {
"messageHeader": {
"deviceContext": [
{
"name": "playbackState",
"namespace": "AudioPlayer",
"payload": {
"streamId": "",
"offsetInMilliseconds": "0",
"playerActivity": "IDLE"
}
}
]
},
"messageBody": {
"profile": "alexa-close-talk",
"locale": "en-us",
"format": "audio/L16; rate=16000; channels=1"
}
}
with open(path+'recording.wav') as inf:
files = [
('file', ('request', json.dumps(d), 'application/json; charset=UTF-8')),
('file', ('audio', inf, 'audio/L16; rate=16000; channels=1'))
]
r = requests.post(url, headers=headers, files=files)
if r.status_code == 200:
for v in r.headers['content-type'].split(";"):
if re.match('.*boundary.*', v):
boundary = v.split("=")[1]
data = r.content.split(boundary)
for d in data:
if (len(d) >= 1024):
audio = d.split('\r\n\r\n')[1].rstrip('--')
with open(path+"response.mp3", 'wb') as f:
f.write(audio)
GPIO.output(25, GPIO.LOW)
os.system('mpg123 -q {}1sec.mp3 {}response.mp3'.format(path, path))
GPIO.output(24, GPIO.LOW)
else:
GPIO.output(lights, GPIO.LOW)
for x in range(0, 3):
time.sleep(.2)
GPIO.output(25, GPIO.HIGH)
time.sleep(.2)
GPIO.output(lights, GPIO.LOW)
GPIO.output(27, GPIO.HIGH) #blue light on
def start():
GPIO.output(27, GPIO.HIGH) #blue light
last = GPIO.input(button)
while True:
val = GPIO.input(button)
if val != last:
GPIO.output(27, GPIO.LOW) #blue light out
last = val
if val == 1 and recorded == True:
rf = open(path+'recording.wav', 'w')
rf.write(audio)
rf.close()
inp = None
alexa()
elif val == 0:
GPIO.output(25, GPIO.HIGH)
inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, device)
inp.setchannels(1)
inp.setrate(16000)
inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
inp.setperiodsize(500)
audio = ""
l, data = inp.read()
if l:
audio += data
recorded = True
elif val == 0:
l, data = inp.read()
if l:
audio += data
if __name__ == "__main__":
try:
GPIO.setwarnings(False)
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(button, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(lights, GPIO.OUT)
GPIO.output(lights, GPIO.LOW)
while internet_on() == False:
print(".")
token = gettoken()
os.system('mpg123 -q {}1sec.mp3 {}hello.mp3'.format(path, path))
for x in range(0, 3):
time.sleep(.1)
GPIO.output(24, GPIO.HIGH)
time.sleep(.1)
GPIO.output(24, GPIO.LOW)
start()
except KeyboardInterrupt:
GPIO.cleanup()
print(" clean program exit.")
pass
| [
"[email protected]"
]
| |
ca4a09119aeb8e0bf90846f2387285fcd2f58815 | 008ea0c503829f33840495373ad3d60794575af3 | /source/sublime/oop/o12.py | 95dae399b2627636d23c46aad39907c93034e366 | []
| no_license | JyHu/PYStudy | 6515bea47ca6f80e336f3b6a7a14b1159fde872f | ec0855c414237bdd7d0cb28f79a81c02ccd52d45 | refs/heads/master | 2016-08-12T19:44:06.723361 | 2016-04-11T10:38:59 | 2016-04-11T10:38:59 | 45,384,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,193 | py | #
# coding:utf-8
#
'''
Besides creating classes dynamically with type(), a metaclass can also be used to control how classes are created.
'''
__author__ = 'JyHu'
class ListMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['add'] = lambda self, value : self.append(value)
return type.__new__(cls, name, bases, attrs)
class MyList(list, metaclass = ListMetaclass):
pass
L = MyList()
L.add(1)
print(L)
class Field(object):
def __init__(self, name, column_type):
self.name = name
self.column_type = column_type
def __str__(self):
return '<%s:%s>' % (self.__class__.__name__, self.name)
class StringField(Field):
def __init__(self, name):
super(StringField, self).__init__(name, 'varchar(100)')
class IntegerField(Field):
def __init__(self, name):
super(IntegerField, self).__init__(name, 'bigint')
class ModelMetaclass(type):
def __new__(cls, name, bases, attrs):
if name == 'Model':
return type.__new__(cls, name, bases, attrs)
print('Found model: %s' % name)
mappings = dict()
for k, v in attrs.items():
if isinstance(v, Field):
print('Found mapping : %s ==> %s' % (k, v))
mappings[k] = v
for k in mappings.keys():
attrs.pop(k)
attrs['__mappings__'] = mappings
attrs['__table__'] = name
return type.__new__(cls, name, bases, attrs)
class Model(dict, metaclass = ModelMetaclass):
def __init__(self, **kw):
super(Model, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Model' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
def save(self):
fields = []
params = []
args = []
for k, v in self.__mappings__.items():
fields.append(v.name)
params.append('?')
args.append(getattr(self, k, None))
sql = 'insert into %s (%s) values (%s)' % (self.__table__, ','.join(fields), ','.join(params))
print('SQL: %s' % sql)
print('ARGS: %s' % str(args))
class User(Model):
id = IntegerField('id')
name = StringField('username')
email = StringField('email')
password = StringField('password')
u = User(id = 12345, name = 'Michael', email = '[email protected]', password = 'my-pwd')
u.save()
| [
"[email protected]"
]
| |
3807d388af745242e706f2bb498ca4887e7d8ad5 | ecd4b06d5d5368b71fd72a1c2191510a03b728fd | /6 - introduction to databases in python/count of Records by State.py | 1a718a86aaba2d5c28da2d05dd2855263e57b0c8 | [
"MIT"
]
| permissive | Baidaly/datacamp-samples | 86055db5e326b59bfdce732729c80d76bf44629e | 37b4f78a967a429e0abca4a568da0eb9d58e4dff | refs/heads/master | 2022-07-27T01:18:00.700386 | 2022-07-18T19:27:23 | 2022-07-18T19:27:23 | 123,827,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | '''
Often, we want to get a count for each record with a particular value in another column. The .group_by() method helps answer this type of query. You can pass a column to the .group_by() method and use in an aggregate function like sum() or count(). Much like the .order_by() method, .group_by() can take multiple columns as arguments.
'''
# Import func
from sqlalchemy import func
# Build a query to select the state and count of ages by state: stmt
stmt = select([census.columns.state, func.count(census.columns.age)])
# Group stmt by state
stmt = stmt.group_by(census.columns.state)
# Execute the statement and store all the records: results
results = connection.execute(stmt).fetchall()
# Print results
print(results)
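# Sketch, not part of the original exercise: the note above also says .group_by()
# can take several columns and be paired with other aggregates such as sum().
# Reusing only columns already referenced in this script (state, age):
stmt_multi = select([census.columns.state, census.columns.age,
                     func.count(census.columns.age)])
stmt_multi = stmt_multi.group_by(census.columns.state, census.columns.age)
# func.sum(...) could replace func.count(...) to sum a numeric column instead.
print(connection.execute(stmt_multi).fetchall()[:5])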
# Print the keys/column names of the results returned
print(results[0].keys()) | [
"[email protected]"
]
| |
71a1987f65749e123abe8d4ab519826b34bf172a | bec8f235b1392542560166dd02c2f0d88c949a24 | /examples/twisted/wamp1/rpc/simple/example2/server.py | 2e21dcdeffdb7ac18f40f3c1c3790e7731539144 | [
"Apache-2.0"
]
| permissive | gourneau/AutobahnPython | f740f69b9ecbc305a97a5412ba3bb136a4bdec69 | 5193e799179c2bfc3b3f8dda86ccba69646c7ee3 | refs/heads/master | 2021-01-15T22:02:32.459491 | 2014-07-02T13:34:57 | 2014-07-02T13:34:57 | 21,437,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,564 | py | ###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys, math
from twisted.python import log
from twisted.internet import reactor, defer
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import listenWS
from autobahn.wamp1.protocol import exportRpc, \
WampServerFactory, \
WampServerProtocol
class Calc:
"""
A simple calc service we will export for Remote Procedure Calls (RPC).
All you need to do is use the @exportRpc decorator on methods
you want to provide for RPC and register a class instance in the
server factory (see below).
The method will be exported under the Python method name, or
under the (optional) name you can provide as an argument to the
decorator (see asyncSum()).
"""
@exportRpc
def add(self, x, y):
return x + y
@exportRpc
def sub(self, x, y):
return x - y
@exportRpc
def square(self, x):
MAX = 1000
if x > MAX:
## raise a custom exception
raise Exception("http://example.com/error#number_too_big",
"%d too big for me, max is %d" % (x, MAX),
MAX)
return x * x
@exportRpc
def sum(self, list):
return reduce(lambda x, y: x + y, list)
@exportRpc
def pickySum(self, list):
errs = []
for i in list:
if i % 3 == 0:
errs.append(i)
if len(errs) > 0:
raise Exception("http://example.com/error#invalid_numbers",
"one or more numbers are multiples of 3",
errs)
return reduce(lambda x, y: x + y, list)
@exportRpc
def sqrt(self, x):
return math.sqrt(x)
@exportRpc("asum")
def asyncSum(self, list):
## Simulate a slow function.
d = defer.Deferred()
reactor.callLater(3, d.callback, self.sum(list))
return d
class SimpleServerProtocol(WampServerProtocol):
"""
Demonstrates creating a simple server with Autobahn WebSockets that
responds to RPC calls.
"""
def onSessionOpen(self):
# when connection is established, we create our
# service instances ...
self.calc = Calc()
# .. and register them for RPC. that's it.
self.registerForRpc(self.calc, "http://example.com/simple/calc#")
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory = WampServerFactory("ws://localhost:9000", debugWamp = debug)
factory.protocol = SimpleServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run()
| [
"[email protected]"
]
| |
62aa187e75d0d640cc2d69a224a102a0cafca5fc | 0179a8c11f51d89cc962c7d9249203ff0e67e405 | /shell/shell_contract.py | 1238844b436bb74f76efe8dda6e11fca6a4f0c77 | [
"MIT"
]
| permissive | paulo-romano/orcamentos | 7033637065c39c457a59b53eab215234f7d5b85a | dc87fd2736e9f8262ed775bf9160d1e21eb1684a | refs/heads/master | 2021-01-15T22:14:41.595934 | 2016-02-16T23:02:59 | 2016-02-16T23:02:59 | 51,551,215 | 1 | 0 | null | 2016-02-11T22:16:43 | 2016-02-11T22:16:43 | null | UTF-8 | Python | false | false | 523 | py | from random import choice
from django.db import IntegrityError
from orcamentos.core.models import Contract, Proposal, Customer
REPEAT = Proposal.objects.filter(status='a')
for i in REPEAT:
proposal = Proposal.objects.get(pk=i.pk)
contractor = Customer.objects.get(pk=proposal.work.customer.pk)
try:
Contract.objects.create(
proposal=proposal,
contractor=contractor,
is_canceled=choice((True, False)))
except IntegrityError:
        print('Existing record.')
| [
"[email protected]"
]
| |
5739e561cd3360ae20088b5f64ed45c14b854723 | cfb44550355ea3c36e610d3f1eb75d8dcbdc8ebe | /strawberry/setup.py | 62732a79141f77a94d9dc5da70b6e018a0858ff8 | [
"Apache-2.0"
]
| permissive | KiritoDv/blueberry | bd0dc38bfe16622693efd8ff4a31368b4dbeb4ac | a47feeb3e944d44b9f2af4661f6c409f51fbabd6 | refs/heads/master | 2023-01-18T19:06:00.074071 | 2020-11-24T23:04:09 | 2020-11-24T23:04:09 | 314,765,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from distutils.core import setup, Extension
module1 = Extension('strawberry', sources = ['straw.c'])
setup (name = 'strawberry',
version = '1.0',
description = 'Strawberry miniaudio wrapper',
ext_modules = [module1]) | [
"[email protected]"
]
| |
cf8df78c19fed7972b683782a743137388fcee12 | 6b518cf14ea3f59fd59136dbd2a7ac70234bb96e | /pspipe.py | 4523f7e32db887641957d2c80753873e9e831bcc | []
| no_license | simula67/advanced-python-course-material | 8064a1adddff45b0980d4bd1948fdeb2f88aec89 | 98870da337cbc001bcf4215ce44f82f0430fd3ce | refs/heads/master | 2016-09-06T12:29:37.397321 | 2015-06-29T05:10:19 | 2015-06-29T05:10:19 | 38,228,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | __author__ = 'antonjoj'
import subprocess
cat = subprocess.Popen('type datafiles\\passwd', shell=True, stdout=subprocess.PIPE)
find = subprocess.Popen('find \"root\"', stdout=subprocess.PIPE, shell=True, stdin=cat.stdout)
for line in find.communicate():
if line:
print line | [
"[email protected]"
]
| |
9cf98b7b4745bf18117c0e68108e370d4226cd25 | 24e21c68bc2c4f1c3f58b96ae13512968a919024 | /memoryAndMulti/threadDemo.py | 557b1eb374bb59d12ee08ff31de2c68f27abdcf2 | []
| no_license | maketubu7/spiderDemo | 0308e88815c2035fa33acd1c4ca85329d2435034 | 9c5e78fdafba37a08e51c2e988c54957feed5b0f | refs/heads/master | 2021-02-09T18:43:32.493539 | 2020-11-11T09:13:21 | 2020-11-11T09:13:21 | 244,314,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | # -*- coding: utf-8 -*-
# @Time : 2020/2/27 0:10
# @Author : Deng Wenxing
# @Email : [email protected]
# @File : threadDemo.py
# @Software: PyCharm
from threading import Thread
import threading,time
from typing import Optional
def loop():
print(threading.current_thread().name)
n = 0
while n < 5:
print(n)
n += 1
def use_thread():
print(threading.current_thread().name)
t = Thread(target=loop,name='loop_thread')
    ## start the thread
t.start()
    ## wait for the thread to finish
t.join()
class my_thread(Thread):
def __init__(self):
super(my_thread,self).__init__()
self.n = 0
def run(self):
while self.n < 5:
print(self.n)
print(threading.current_thread().name)
time.sleep(1)
self.n += 1
if __name__ == "__main__":
# use_thread()
t = my_thread()
t.start()
t.join() | [
"[email protected]"
]
| |
fda1f90a4be88c7944f2879764d5c153faed9cb0 | c57439f0c98af370ace65f9d55ef5a457bedc531 | /ydk/models/ipv6/Cisco_IOS_XR_ipv6_ma_subscriber_cfg.py | a66d84f0a2924a7e9df63458243f00228eb1dd1d | [
"Apache-2.0"
]
| permissive | myahmao/ydk-py | c932fbd8245e554227cce0fd723d9a22887b0c40 | 2f367d93f2088d4abdc2f2bb10ca4864952b458a | refs/heads/master | 2021-01-14T11:32:29.064494 | 2016-03-15T22:44:05 | 2016-03-15T22:44:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | """ Cisco_IOS_XR_ipv6_ma_subscriber_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv6\-ma\-subscriber package configuration.
This YANG module augments the
Cisco\-IOS\-XR\-subscriber\-infra\-tmplmgr\-cfg
module with configuration data.
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYDataValidationError
class Ipv6ReachableVia_Enum(Enum):
"""
Ipv6ReachableVia_Enum
Ipv6 reachable via
"""
"""
Source is reachable via interface on which
packet was received
"""
RECEIVED = 1
@staticmethod
def _meta_info():
from ydk.models.ipv6._meta import _Cisco_IOS_XR_ipv6_ma_subscriber_cfg as meta
return meta._meta_table['Ipv6ReachableVia_Enum']
| [
"[email protected]"
]
| |
a55f91c3b4e428b323ddb4834febff18bff53cb7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02818/s319321320.py | ec787c89f517dd3576a0c30e3d24e3bf48cf1b60 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | # ABC149
# B Greedy Takahashi
# Takahashi has A cookies, Aoki has B cookies; Takahashi acts K times
a, b, k = map(int, input().split())
if k > a:
if k - a > b:
print(0,0)
else:
print(0,b - (k - a))
else:
print(a-k,b)
| [
"[email protected]"
]
| |
4312c5132af6818ca35ed0f704d81bfac2ddb825 | 5963c12367490ffc01c9905c028d1d5480078dec | /tests/components/wallbox/test_config_flow.py | 6b5a05a3486830b64b8d0d53f7b409dfb288bb79 | [
"Apache-2.0"
]
| permissive | BenWoodford/home-assistant | eb03f73165d11935e8d6a9756272014267d7d66a | 2fee32fce03bc49e86cf2e7b741a15621a97cce5 | refs/heads/dev | 2023-03-05T06:13:30.354545 | 2021-07-18T09:51:53 | 2021-07-18T09:51:53 | 117,122,037 | 11 | 6 | Apache-2.0 | 2023-02-22T06:16:51 | 2018-01-11T16:10:19 | Python | UTF-8 | Python | false | false | 5,028 | py | """Test the Wallbox config flow."""
import json
from unittest.mock import patch
import requests_mock
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.wallbox import InvalidAuth, config_flow
from homeassistant.components.wallbox.const import DOMAIN
from homeassistant.core import HomeAssistant
test_response = json.loads(
'{"charging_power": 0,"max_available_power": 25,"charging_speed": 0,"added_range": 372,"added_energy": 44.697}'
)
async def test_show_set_form(hass: HomeAssistant) -> None:
"""Test that the setup form is served."""
flow = config_flow.ConfigFlow()
flow.hass = hass
result = await flow.async_step_user(user_input=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.wallbox.config_flow.WallboxHub.async_authenticate",
side_effect=InvalidAuth,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"station": "12345",
"username": "test-username",
"password": "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_authenticate(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with requests_mock.Mocker() as mock_request:
mock_request.get(
"https://api.wall-box.com/auth/token/user",
text='{"jwt":"fakekeyhere","user_id":12345,"ttl":145656758,"error":false,"status":200}',
status_code=403,
)
mock_request.get(
"https://api.wall-box.com/chargers/status/12345",
text='{"Temperature": 100, "Location": "Toronto", "Datetime": "2020-07-23", "Units": "Celsius"}',
status_code=403,
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"station": "12345",
"username": "test-username",
"password": "test-password",
},
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"station": "12345",
"username": "test-username",
"password": "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with requests_mock.Mocker() as mock_request:
mock_request.get(
"https://api.wall-box.com/auth/token/user",
text='{"jwt":"fakekeyhere","user_id":12345,"ttl":145656758,"error":false,"status":200}',
status_code=200,
)
mock_request.get(
"https://api.wall-box.com/chargers/status/12345",
text='{"Temperature": 100, "Location": "Toronto", "Datetime": "2020-07-23", "Units": "Celsius"}',
status_code=404,
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"station": "12345",
"username": "test-username",
"password": "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_validate_input(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with requests_mock.Mocker() as mock_request:
mock_request.get(
"https://api.wall-box.com/auth/token/user",
text='{"jwt":"fakekeyhere","user_id":12345,"ttl":145656758,"error":false,"status":200}',
status_code=200,
)
mock_request.get(
"https://api.wall-box.com/chargers/status/12345",
text='{"Temperature": 100, "Location": "Toronto", "Datetime": "2020-07-23", "Units": "Celsius"}',
status_code=200,
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"station": "12345",
"username": "test-username",
"password": "test-password",
},
)
assert result2["title"] == "Wallbox Portal"
assert result2["data"]["station"] == "12345"
| [
"[email protected]"
]
| |
27cd1801d257361237f2eacb2dbcb8e287f6685b | 3f7d5999bb7e5a75454c8df2c5a8adcd1a8341ff | /tests/unit/modules/network/fortios/test_fortios_log_eventfilter.py | 32a1e9c532163bad832b3009b0d154dc776ce8a7 | []
| no_license | ansible-collection-migration/ansible.fortios | f7b1a7a0d4b69c832403bee9eb00d99f3be65e74 | edad6448f7ff4da05a6c856b0e7e3becd0460f31 | refs/heads/master | 2020-12-18T13:08:46.739473 | 2020-02-03T22:10:49 | 2020-02-03T22:10:49 | 235,393,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,342 | py | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible_collections.ansible.fortios.plugins.modules import fortios_log_eventfilter
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.modules.fortios_log_eventfilter.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_log_eventfilter_creation(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_eventfilter': {
'compliance_check': 'enable',
'endpoint': 'enable',
'event': 'enable',
'ha': 'enable',
'router': 'enable',
'security_rating': 'enable',
'system': 'enable',
'user': 'enable',
'vpn': 'enable',
'wan_opt': 'enable',
'wireless_activity': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_eventfilter.fortios_log(input_data, fos_instance)
expected_data = {
'compliance-check': 'enable',
'endpoint': 'enable',
'event': 'enable',
'ha': 'enable',
'router': 'enable',
'security-rating': 'enable',
'system': 'enable',
'user': 'enable',
'vpn': 'enable',
'wan-opt': 'enable',
'wireless-activity': 'enable'
}
set_method_mock.assert_called_with('log', 'eventfilter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_log_eventfilter_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_eventfilter': {
'compliance_check': 'enable',
'endpoint': 'enable',
'event': 'enable',
'ha': 'enable',
'router': 'enable',
'security_rating': 'enable',
'system': 'enable',
'user': 'enable',
'vpn': 'enable',
'wan_opt': 'enable',
'wireless_activity': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_eventfilter.fortios_log(input_data, fos_instance)
expected_data = {
'compliance-check': 'enable',
'endpoint': 'enable',
'event': 'enable',
'ha': 'enable',
'router': 'enable',
'security-rating': 'enable',
'system': 'enable',
'user': 'enable',
'vpn': 'enable',
'wan-opt': 'enable',
'wireless-activity': 'enable'
}
set_method_mock.assert_called_with('log', 'eventfilter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_log_eventfilter_idempotent(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_eventfilter': {
'compliance_check': 'enable',
'endpoint': 'enable',
'event': 'enable',
'ha': 'enable',
'router': 'enable',
'security_rating': 'enable',
'system': 'enable',
'user': 'enable',
'vpn': 'enable',
'wan_opt': 'enable',
'wireless_activity': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_eventfilter.fortios_log(input_data, fos_instance)
expected_data = {
'compliance-check': 'enable',
'endpoint': 'enable',
'event': 'enable',
'ha': 'enable',
'router': 'enable',
'security-rating': 'enable',
'system': 'enable',
'user': 'enable',
'vpn': 'enable',
'wan-opt': 'enable',
'wireless-activity': 'enable'
}
set_method_mock.assert_called_with('log', 'eventfilter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_log_eventfilter_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.ansible.fortios.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_eventfilter': {
'random_attribute_not_valid': 'tag',
'compliance_check': 'enable',
'endpoint': 'enable',
'event': 'enable',
'ha': 'enable',
'router': 'enable',
'security_rating': 'enable',
'system': 'enable',
'user': 'enable',
'vpn': 'enable',
'wan_opt': 'enable',
'wireless_activity': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_eventfilter.fortios_log(input_data, fos_instance)
expected_data = {
'compliance-check': 'enable',
'endpoint': 'enable',
'event': 'enable',
'ha': 'enable',
'router': 'enable',
'security-rating': 'enable',
'system': 'enable',
'user': 'enable',
'vpn': 'enable',
'wan-opt': 'enable',
'wireless-activity': 'enable'
}
set_method_mock.assert_called_with('log', 'eventfilter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| [
"[email protected]"
]
| |
b7b8ce02d0aba506b2683b3c8862f61ba4fd4293 | 9095c1a0da8c6ffe914ee6dd9c4708062fd95c9a | /vtpl_api/models/source_type.py | 99b3143d277011d407f04a5955fab602b32550ca | [
"MIT"
]
| permissive | vtpl1/vtpl_api_py | 2e5338bd08677f12fc7304fb6ac7a32f32af1c93 | d289c92254deb040de925205c583de69802a1c6b | refs/heads/master | 2020-09-10T23:34:21.828350 | 2019-11-15T07:26:53 | 2019-11-15T07:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | # coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
The version of the OpenAPI document: 1.0.4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class SourceType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
NONE = "none"
RTSP = "rtsp"
HTTP = "http"
FILE = "file"
FTP = "ftp"
VMS = "vms"
MQTT = "mqtt"
AMQP = "amqp"
S3 = "S3"
VS3 = "VS3"
BASEURL = "BaseUrl"
RELATIVEURL = "RelativeUrl"
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""SourceType - a model defined in OpenAPI""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SourceType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
29cd5aa3c4e1875cf4d2d691c2218d861a2d333c | 7e4460c85790fae2d470182732289bcd1b8777b2 | /Process/process_meshes.py | 1ea42ad249869c9afd8713ee9ab0cb63fbd9752a | []
| no_license | khamukkamu/swconquest-msys | 5b23654c8dd2e8b2f25bc7914252eedc05a5cc1e | 71337a4ae9c507b9440e84cf49d31fc67a781978 | refs/heads/master | 2021-04-29T19:00:10.389224 | 2019-05-01T15:11:11 | 2019-05-01T15:11:11 | 121,704,753 | 1 | 1 | null | 2018-02-16T01:40:58 | 2018-02-16T01:40:58 | null | UTF-8 | Python | false | false | 1,015 | py | import string
from header_common import *
from module_info import *
from module_meshes import *
from process_common import *
from process__swyhelper import *
def save_meshes():
ofile = open(export_dir + "meshes.txt","w")
ofile.write("%d\n"%len(meshes))
for i_mesh in xrange(len(meshes)):
mesh = meshes[i_mesh]
ofile.write("mesh_%s %d %s %s %s %s %s %s %s %s %s %s\n"%(mesh[0],mesh[1],replace_spaces(mesh[2]),swytrailzro(mesh[3]),swytrailzro(mesh[4]),swytrailzro(mesh[5]),swytrailzro(mesh[6]),swytrailzro(mesh[7]),swytrailzro(mesh[8]),swytrailzro(mesh[9]),swytrailzro(mesh[10]),swytrailzro(mesh[11])))
ofile.close()
def save_python_header():
if (wb_compile_switch):
ofile = open("./IDs/ID_meshes_wb.py","w")
else:
ofile = open("./IDs/ID_meshes_mb.py","w")
for i_mesh in xrange(len(meshes)):
ofile.write("mesh_%s = %d\n"%(meshes[i_mesh][0],i_mesh))
ofile.write("\n\n")
ofile.close()
print "Exporting meshes..."
save_python_header()
save_meshes()
| [
"[email protected]"
]
| |
88f88a537c87284e71ef254d24a05d22fc3a9233 | 6a928130337dafece1a6158badd00d1d46571003 | /reportForm/wsgi.py | 28a489cea41932132be6da890e260ca78c6ee72b | []
| no_license | Yanl05/reportForm | bb5a36cff3fac3aca76b5bc50c92fe54282250a8 | 45a915b29102c1f49035df93217782ea563cdb9f | refs/heads/master | 2023-04-18T00:40:19.355040 | 2021-04-29T14:37:59 | 2021-04-29T14:37:59 | 362,485,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for untitled project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reportForm.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
6a7bd840b05232033b4479a414b2dba8cac470bb | d2fae2d0ff36fde8d8402bdac1de5b6760f050b7 | /app/tests/Test_passwordchecker.py | 031f23e09f40532aa833df7d554126e8cd5b2beb | []
| no_license | DennisMufasa/mongodb-flask_app | 8701d817d757a5144b9a98ba4293a948c537b6c5 | 53c3447850d16d630428a020fe28949ff84c4a03 | refs/heads/master | 2022-12-09T11:31:59.085865 | 2020-08-31T02:32:57 | 2020-08-31T02:32:57 | 260,714,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # third-party import
import unittest
# local import
from ..api.v1.models.utils import password_checker
class Test_Password_checker(unittest.TestCase):
def test_password_len(self):
password_check1 = password_checker('boo')
password_check2 = password_checker('lysergicaciddyethylammide')
self.assertEqual(password_check1, 'password too short')
self.assertEqual(password_check2, 'password too long')
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
374f86075d5c187fad6bfde503fbdb0362a57e76 | 4985143dce9379c939d562d277350f0d8224f06a | /venv/bin/django-admin.py | e788bb50f4c0599e5161f6209905f79392df6d1e | []
| no_license | jkinathan/Task_todo | a74ae010dc703ba0ed4654a569b57a5ce7634857 | e19da9ab9dede272b6c148b686e6e77e3da1687a | refs/heads/master | 2023-03-23T13:51:41.816050 | 2021-03-20T09:32:32 | 2021-03-20T09:32:32 | 274,080,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | #!/home/jo-kinathany/Desktop/Task_todo/venv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
]
| |
faa2e47e01b26f98eb24501a23c59d2dd2f3081a | 70bc77336e4544031ad7d7d29a2e964ef2626076 | /base/models.py | bf4ba34fec4fc78262b81397124b4041d26e64fd | []
| no_license | DronMDF/vanadis | 9af7a8c9281bf0eb17df593f5c9fc9345e474612 | de692207bbd127c5a9952e3144653492a0ba969f | refs/heads/master | 2020-04-17T08:11:18.411429 | 2016-12-21T20:50:05 | 2016-12-21T20:50:05 | 66,539,179 | 1 | 0 | null | 2016-12-21T20:50:06 | 2016-08-25T08:20:03 | Python | UTF-8 | Python | false | false | 654 | py | from django.db import models
class Project(models.Model):
name = models.CharField(max_length=100, db_index=True)
repo_url = models.CharField(max_length=256, null=True)
class Object(models.Model):
project = models.ForeignKey(Project, on_delete=models.CASCADE, db_index=True)
oid = models.BigIntegerField(db_index=True)
issues_count = models.IntegerField()
class Issue(models.Model):
project = models.ForeignKey(Project, on_delete=models.CASCADE, db_index=True)
object = models.ForeignKey(Object, on_delete=models.CASCADE, db_index=True)
line = models.IntegerField()
position = models.IntegerField()
text = models.CharField(max_length=256)
| [
"[email protected]"
]
| |
8b09a98c3ac1acf69e5c84f6bbeeb54671c20bc6 | 11ce41733d6f31153fe14f800c9dd0be18615862 | /news/admin.py | 50285420a545e93e7a3d322e73e11bb5a4d627f4 | [
"MIT"
]
| permissive | techacademypython/django_image_crop_views | 6ff6731944f5d09721452a71b0745089d1b035ef | 2f9c51ae80705dc23607e157baa4f5767957a2f1 | refs/heads/master | 2023-05-05T13:12:23.642970 | 2019-09-03T16:38:24 | 2019-09-03T16:38:24 | 206,105,932 | 0 | 0 | MIT | 2022-11-22T04:13:41 | 2019-09-03T15:07:05 | Python | UTF-8 | Python | false | false | 375 | py | from django.contrib import admin
from image_cropping import ImageCroppingMixin
# Register your models here.
from news.models import NewsModel
class NewsModelAdmin(ImageCroppingMixin, admin.ModelAdmin):
readonly_fields = ["preview_count"]
fields = [
"image", "name", "text", "cropping", "preview_count"
]
admin.site.register(NewsModel, NewsModelAdmin)
| [
"[email protected]"
]
| |
de2b6b74989a2467127597423d029e5b5810eb06 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/compute/azure-mgmt-vmwarecloudsimple/generated_samples/list_customization_policies.py | 7deb911d492ae31425baf1a5011cba20636db4d2 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,604 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.vmwarecloudsimple import VMwareCloudSimple
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-vmwarecloudsimple
# USAGE
python list_customization_policies.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = VMwareCloudSimple(
credential=DefaultAzureCredential(),
subscription_id="{subscription-id}",
)
response = client.customization_policies.list(
region_id="myResourceGroup",
pc_name="myPrivateCloud",
)
for item in response:
print(item)
# x-ms-original-file: specification/vmwarecloudsimple/resource-manager/Microsoft.VMwareCloudSimple/stable/2019-04-01/examples/ListCustomizationPolicies.json
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
2eebba53d96810f87e30cf377556f367d5ae17b1 | cc91403e4302d70127562591ab3fda7a212e6312 | /asqcenv/lib/python3.9/site-packages/asqc/asqc.py | ade74103aca22e8cf2e1c774024a023bed7f5a03 | []
| no_license | gklyne/asqc | fdfb59e243f92968e2147ef28ce6c85394d9cab0 | 1fcd4d80727b8385e8707be12d1e45fe26d2229a | refs/heads/master | 2022-07-28T09:05:07.923525 | 2022-07-14T19:44:52 | 2022-07-14T19:44:52 | 3,959,374 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26,669 | py | #!/usr/bin/env python
"""
ASQC - A SPARQL query client
"""
import sys
import os
import os.path
import urllib.parse
import urllib
import urllib.request
# import io.StringIO
import io
import json
import re
import optparse
import logging
import traceback
from .SparqlHttpClient import SparqlHttpClient
from .SparqlXmlResults import writeResultsXML
from .StdoutContext import SwitchStdout
from .StdinContext import SwitchStdin
import rdflib
# Set up to use SPARQL
# rdflib.plugin.register(
# 'sparql', rdflib.query.Processor,
# 'rdfextras.sparql.processor', 'Processor')
# rdflib.plugin.register(
# 'sparql', rdflib.query.Result,
# 'rdfextras.sparql.query', 'SPARQLQueryResult')
# Register serializers (needed?)
#rdflib.plugin.register('n3', Serializer,
# 'rdflib.plugins.serializers.n3','N3Serializer')
#rdflib.plugin.register('turtle', Serializer,
# 'rdflib.plugins.serializers.turtle', 'TurtleSerializer')
#rdflib.plugin.register('nt', Serializer,
# 'rdflib.plugins.serializers.nt', 'NTSerializer')
#rdflib.plugin.register('xml', Serializer,
# 'rdflib.plugins.serializers.rdfxml', 'XMLSerializer')
#rdflib.plugin.register('pretty-xml', Serializer,
# 'rdflib.plugins.serializers.rdfxml', 'PrettyXMLSerializer')
#rdflib.plugin.register('json-ld', Serializer,
# 'rdflib.plugins.serializers.rdfxml', 'XMLSerializer')
#plugin.register('json-ld', Serializer,
# 'rdfextras.serializers.jsonld', 'JsonLDSerializer')
# Type codes and mapping for RDF and query variable parsing and serializing
RDFTYP = ["RDFXML","N3","TURTLE","NT","JSONLD","RDFA","HTML5"]
VARTYP = ["JSON","CSV","XML"]
RDFTYPPARSERMAP = (
{ "RDFXML": "xml"
, "N3": "n3"
, "TURTLE": "n3"
, "NT": "nt"
, "JSONLD": "jsonld"
, "RDFA": "rdfa"
, "HTML5": "rdfa+html"
})
RDFTYPSERIALIZERMAP = (
{ "RDFXML": "pretty-xml"
, "N3": "n3"
, "TURTLE": "turtle"
, "NT": "nt"
, "JSONLD": "jsonld"
})
# Logging object
log = logging.getLogger(__name__)
from . import __init__
class asqc_settings(object):
VERSION = "1.0.9" # __init__.__version__ @@@@
# Helper function for templated SPARQL results formatting and parsing
def formatBindings(template, bindings):
"""
Return bindings formatted with supplied template
"""
formatdict = {}
    for (var, val) in bindings.items():
formatdict[var] = val["value"]
if val["type"] == "bnode":
vf = "_:%(value)s"
elif val["type"] == "uri":
vf = "<%(value)s>"
elif val["type"] == "literal":
vf = '"%(value)s"'
elif val["type"] == "typed-literal":
vf = '"%(value)s"^^<%(datatype)s>'
formatdict[var+"_repr"] = vf%val
    # Python 3 equivalent of the old 'string_escape' decode: interpret backslash escapes in the template
    return template.encode('utf-8').decode('unicode_escape') % formatdict
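# Illustrative only (hypothetical binding, not taken from a real query):
#   formatBindings(r"%(s_repr)s\n", {'s': {'type': 'uri', 'value': 'http://example.org/a'}})
# yields "<http://example.org/a>" followed by a newline, since the \n in the
# template is unescaped by the decode step before formatting.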
# Helper function for CSV formatting query result from JSON
def char_escape(c):
if c == '"': return '""'
if ord(c) >= 128: return r"\u" + "%04x"%ord(c)
return c
def termToCSV(result):
if result == None:
return None
resval = result['value']
restyp = result['type']
if restyp == "uri":
return "<" + resval + ">"
if restyp == "bnode":
return "_:" + resval
# strval = '"' + resval.replace('"', '""') + '"'
strval = '"' + "".join([char_escape(c) for c in resval]) + '"'
strlang = result.get('xml:lang', None)
if restyp == "literal":
if strlang:
return strval + '@' + strlang
else:
return strval
if restyp == "typed-literal":
return strval + '^^' + result['datatype']
    raise rdflib.query.ResultException('Unknown term type: %s (%s)' % (result, type(result)))
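# Sketch of the CSV encodings produced above (inputs are hypothetical):
#   termToCSV({'type': 'uri', 'value': 'http://example.org/a'})            -> '<http://example.org/a>'
#   termToCSV({'type': 'bnode', 'value': 'b0'})                            -> '_:b0'
#   termToCSV({'type': 'literal', 'value': 'hi', 'xml:lang': 'en'})        -> '"hi"@en'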
# Helper functions for JSON formatting and parsing
# Mostly copied from rdflib SPARQL code (rdfextras/sparql/results/jsonresults)
def termToJSON(term):
if isinstance(term, rdflib.URIRef):
return { 'type': 'uri', 'value': str(term) }
elif isinstance(term, rdflib.Literal):
if term.datatype!=None:
return { 'type': 'typed-literal',
'value': str(term),
'datatype': str(term.datatype) }
else:
r={'type': 'literal',
'value': str(term) }
if term.language!=None:
r['xml:lang']=term.language
return r
elif isinstance(term, rdflib.BNode):
return { 'type': 'bnode', 'value': str(term) }
elif term==None:
return None
else:
raise rdflib.query.ResultException('Unknown term type: %s (%s)'%(term, type(term)))
def bindingToJSON(binding):
res={}
for var in binding:
t = termToJSON(binding[var])
if t != None: res[str(var)] = t
return res
def parseJsonTerm(d):
"""rdflib object (Literal, URIRef, BNode) for the given json-format dict.
input is like:
{ 'type': 'uri', 'value': 'http://famegame.com/2006/01/username' }
{ 'type': 'bnode', 'value': '123abc456' }
{ 'type': 'literal', 'value': 'drewp' }
{ 'type': 'literal', 'value': 'drewp', xml:lang="en" }
{ 'type': 'typed-literal', 'value': '123', datatype="http://(xsd)#int" }
"""
t = d['type']
if t == 'uri':
return rdflib.URIRef(d['value'])
elif t == 'literal':
if 'xml:lang' in d:
return rdflib.Literal(d['value'], lang=d['xml:lang'])
return rdflib.Literal(d['value'])
elif t == 'typed-literal':
return rdflib.Literal(d['value'], datatype=rdflib.URIRef(d['datatype']))
elif t == 'bnode':
return rdflib.BNode(d['value'])
else:
raise NotImplementedError("json term type %r" % t)
def parseJsonBindings(bindings):
newbindings = []
for row in bindings:
outRow = {}
for k, v in row.items():
outRow[k] = parseJsonTerm(v)
newbindings.append(outRow)
return newbindings
# Helper functions to form join of mutiple binding sets
def joinBinding(result_binding, constraint_binding):
for k in result_binding:
if k in constraint_binding:
if result_binding[k] != constraint_binding[k]:
return None
joined_binding = result_binding.copy()
joined_binding.update(constraint_binding)
return joined_binding
def joinBindings(result_bindings, constraint_bindings):
return [ bj
for bj in [ joinBinding(b1, b2) for b1 in result_bindings for b2 in constraint_bindings ]
if bj ]
def joinBindingsToJSON(result_bindings, constraint_bindings):
return [ bindingToJSON(bj)
for bj in [ joinBinding(b1, b2) for b1 in result_bindings for b2 in constraint_bindings ]
if bj ]
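# Worked example (hypothetical bindings): joining
#   [{'x': 1, 'y': 2}] with [{'y': 2, 'z': 3}, {'y': 9}]
# via joinBindings keeps only pairs that agree on shared variables,
# giving [{'x': 1, 'y': 2, 'z': 3}].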
# Helper functions for accessing data at URI reference, which may be a path relative to current directory
def resolveUri(uriref, base, path=""):
"""
Resolve a URI reference against a supplied base URI and path.
(The path is a local file system path, and may need converting to use URI conventions)
"""
upath = urllib.request.pathname2url(path)
if os.path.isdir(path) and not upath.endswith('/'):
upath = upath + '/'
return urllib.parse.urljoin(urllib.parse.urljoin(base, upath), uriref)
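# For example (assuming /home/user/work is an existing directory):
#   resolveUri("data.rdf", "file://", "/home/user/work")
# yields "file:///home/user/work/data.rdf", while an absolute uriref such as
# "http://example.org/data.rdf" is returned unchanged by the urljoin calls.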
def retrieveUri(uriref):
uri = resolveUri(uriref, "file://", os.getcwd())
log.debug("retrievUri: %s"%(uri))
request = urllib.request.Request(uri)
try:
response = io.TextIOWrapper(urllib.request.urlopen(request), encoding="utf-8")
result = response.read()
except:
result = None
return result
# Helper function for determining type of query
def queryType(query):
"""
Returns "ASK", "SELECT", "CONSTRUCT", "DESCRIBE" or None
"""
iriregex = "<[^>]*>"
baseregex = ".*base.*"+iriregex
prefixregex = ".*prefix.*"+iriregex
queryregex = "^("+baseregex+")?("+prefixregex+")*.*(ask|select|construct|describe).*$"
match = re.match(queryregex, query, flags=re.IGNORECASE|re.DOTALL)
if match:
return match.group(3).upper()
return None
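# Examples of the classification above (queries are illustrative):
#   queryType("SELECT ?s WHERE { ?s ?p ?o }")                                      -> "SELECT"
#   queryType("PREFIX foaf: <http://xmlns.com/foaf/0.1/> ASK { ?s a foaf:Person }") -> "ASK"
#   queryType("not a query")                                                        -> None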
# Main program functions
def getQuery(options, args):
"""
Get query string from command line option or argument.
"""
if options.query:
return retrieveUri(options.query)
elif len(args) >= 2:
return args[1]
return None
def getPrefixes(options):
"""
Get prefix string from command line option.
"""
defaultPrefixes = """
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX dcterms: <http://purl.org/dc/terms/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
"""
# PREFIX xml: <http://www.w3.org/XML/1998/namespace>
configbase = os.path.expanduser("~")
prefixUri = options.prefix or resolveUri(
".asqc-prefixes", "file://", configbase)
if prefixUri.startswith("~"):
prefixUri = configbase+prefixUri[1:]
log.debug("Prefix URI %s"%(prefixUri))
prefixes = retrieveUri(prefixUri)
return prefixes or defaultPrefixes
def getBindings(options):
bndtext = None
bindings = (
{ "head": { "vars": [] }
, "results": { "bindings": [{}] }
})
if options.bindings and options.bindings != "-":
bndtext = retrieveUri(options.bindings)
elif options.bindings == "-":
if options.rdf_data or options.endpoint:
bndtext = sys.stdin.read()
else:
# Can't read bindings from stdin if trying to read RDF from stdin
return None
else:
bndtext = None
if bndtext:
try:
bindings = json.loads(bndtext)
bindings['results']['bindings'] = parseJsonBindings(bindings['results']['bindings'])
except Exception as e:
bindings = None
return bindings
def getRdfData(options):
"""
Reads RDF data from files specified using -r or from stdin
"""
if not options.rdf_data:
options.rdf_data = ['-']
rdfgraph = rdflib.Graph()
for r in options.rdf_data:
base = ""
if r == "-":
rdftext = sys.stdin.read()
else:
log.debug("Reading RDF from %s"%(r))
rdftext = retrieveUri(r)
base = r
rdfformatdefault = RDFTYPPARSERMAP[RDFTYP[0]]
rdfformatselect = RDFTYPPARSERMAP.get(options.format_rdf_in, rdfformatdefault)
try:
log.debug("Parsing RDF format %s"%(rdfformatselect))
if rdfformatselect == "rdfa+html":
rdfgraph.parse(data=rdftext, format="rdfa", media_type="text/html", publicID=base)
else:
rdfgraph.parse(data=rdftext, format=rdfformatselect, publicID=base)
except Exception as e:
log.debug("RDF Parse failed: %s"%(repr(e)))
log.debug("traceback: %s"%(traceback.format_exc()))
return None
return rdfgraph
def queryRdfData(progname, options, prefixes, query, bindings):
"""
Submit query against RDF data.
Result is tuple of status and dictionary/list structure suitable for JSON encoding,
or an rdflib.graph value.
"""
rdfgraph = getRdfData(options)
if not rdfgraph:
print( "%s: Could not read RDF data, or syntax error in input"%progname )
print( " Use -r <file> or supply RDF on stdin; specify input format if not RDF/XML" )
return (2, None)
query = prefixes + query
log.debug("queryRdfData query:\n%s\n"%(query))
try:
resps = [rdfgraph.query(query, initBindings=b) for b in bindings['results']['bindings']]
except AssertionError as e:
print( "Query failed (query syntax problem?)" )
print( "Submitted query:" )
print( query )
return (2, None)
res = { "head": {} }
if resps[0].type == 'ASK':
res["boolean"] = any([ r.askAnswer for r in resps ])
return (0 if res["boolean"] else 1, res)
elif resps[0].type == 'SELECT':
res["head"]["vars"] = resps[0].vars
res["results"] = {}
res["results"]["bindings"] = [ bindingToJSON(b) for r in resps for b in r.bindings ]
return (0 if len(res["results"]["bindings"]) > 0 else 1, res)
elif resps[0].type == 'CONSTRUCT':
res = rdflib.graph.ReadOnlyGraphAggregate( [r.graph for r in resps] )
return (0 if len(res) > 0 else 1, res)
else:
assert False, "Unexpected query response type %s"%resp.type
return (2, None)
def querySparqlEndpoint(progname, options, prefixes, query, bindings):
"""
Issue SPARQL query to SPARQL HTTP endpoint.
Requests either JSON or RDF/XML depending on query type.
Returns JSON-like dictionary/list structure or RDF graph, depending on query type.
These are used as basis for result formatting by outputResult function
"""
query = prefixes + query
resulttype = "application/RDF+XML"
resultjson = False
querytype = queryType(query)
if querytype in ["ASK", "SELECT"]:
# NOTE application/json doesn't work with Fuseki
# See: http://gearon.blogspot.co.uk/2011/09/sparql-json-after-commenting-other-day.html
resulttype = "application/sparql-results+json"
resultjson = True
if options.verbose:
print( "== Query to endpoint ==" )
print( query )
print( "== resulttype: "+resulttype )
print( "== resultjson: "+str(resultjson) )
sc = SparqlHttpClient(endpointuri=options.endpoint)
((status, reason), result) = sc.doQueryPOST(query, accept=resulttype, JSON=False)
if status != 200:
assert False, "Error from SPARQL query request: %i %s"%(status, reason)
if options.verbose:
print( "== Query response ==" )
print( result )
if resultjson:
result = json.loads(result)
status = 1
if querytype == "SELECT":
result['results']['bindings'] = parseJsonBindings(result['results']['bindings'])
result['results']['bindings'] = joinBindingsToJSON(
result['results']['bindings'],
bindings['results']['bindings'])
if result['results']['bindings']: status = 0
elif bindings:
assert False, "Can't use supplied bindings with endpoint query other than SELECT"
elif querytype == "ASK":
# Just return JSON from Sparql query
if result['boolean']: status = 0
else:
# return RDF
rdfgraph = rdflib.Graph()
try:
            # Note: declaring xml prefix in SPARQL query can result in invalid XML from Fuseki (v2.1)
# See: https://issues.apache.org/jira/browse/JENA-24
rdfgraph.parse(data=result)
result = rdfgraph # Return parsed RDF graph
if len(result) > 0: status = 0
except Exception as e:
assert False, "Error parsing RDF from SPARQL endpoint query: "+str(e)
return (status, result)
def outputResult(progname, options, result):
outstr = sys.stdout
if options.output and options.output != "-":
print( "Output to other than stdout not implemented" )
if isinstance(result, rdflib.Graph):
rdfformatdefault = RDFTYPSERIALIZERMAP[RDFTYP[0]]
rdfformatselect = RDFTYPSERIALIZERMAP.get(options.format_rdf_out, rdfformatdefault)
result.serialize(destination=outstr, format=rdfformatselect, base=None)
elif isinstance(result, str):
outstr.write(result)
else:
if options.format_var_out == "JSON" or options.format_var_out == None:
outstr.write(json.dumps(result))
outstr.write("\n")
elif options.format_var_out == "XML":
writeResultsXML(outstr, result)
elif options.format_var_out == "CSV":
qvars = result["head"]["vars"]
outstr.write(", ".join(qvars))
outstr.write("\n")
for bindings in result["results"]["bindings"]:
### print("---- bindings: "+repr(bindings))
vals = [ termToCSV(bindings.get(str(v),{'type': 'literal', 'value': ''})) for v in qvars ]
outstr.write(", ".join(vals))
outstr.write("\n")
else:
for bindings in result["results"]["bindings"]:
#log.debug("options.format_var_out '%s'"%(repr(options.format_var_out)))
formattedrow = formatBindings(options.format_var_out, bindings)
#log.debug("formattedrow '%s'"%(repr(formattedrow)))
outstr.write(formattedrow)
return
def run(configbase, options, args):
status = 0
if options.examples:
print( "%s/examples"%(os.path.dirname(os.path.abspath(__file__))) )
return 0
progname = os.path.basename(args[0])
query = getQuery(options, args)
if not query:
print( "%s: Could not determine query string (need query argument or -q option)"%progname )
print( "Run '%s --help' for more information"%progname )
return 2
prefixes = getPrefixes(options)
if not prefixes:
print( "%s: Could not determine query prefixes"%progname )
print( "Run '%s --help' for more information"%progname )
return 2
## log.debug("Prefixes:\n%s\n"%(prefixes))
bindings = getBindings(options)
if not bindings:
print( "%s: Could not determine incoming variable bindings"%progname )
print( "Run '%s --help' for more information"%progname )
return 2
if options.verbose:
print( "== Options ==" )
print( repr(options) )
print( "== Prefixes ==" )
print( prefixes )
print( "== Query ==" )
print( query )
print( "== Initial bindings ==" )
print( bindings )
if options.endpoint:
(status,result) = querySparqlEndpoint(progname, options, prefixes, query, bindings)
else:
(status,result) = queryRdfData(progname, options, prefixes, query, bindings)
if result:
outputResult(progname, options, result)
return status
def parseCommandArgs(argv):
"""
Parse command line arguments
argv -- argument list from command line
Returns a pair consisting of options specified as returned by
OptionParser, and any remaining unparsed arguments.
"""
# create a parser for the command line options
parser = optparse.OptionParser(
usage=("\n"+
" %prog [options] [query]\n"+
" %prog --help for an options summary\n"+
" %prog --examples to display the path containing example queries"),
description="A sparql query client, designed to be used as a filter in a command pipeline. "+
"Pipelined data can be RDF or query variable binding sets, depending on the options used.",
version="%prog "+asqc_settings.VERSION)
parser.add_option("--examples",
action="store_true",
dest="examples",
default=False,
help="display path of examples directory and exit")
parser.add_option("-b", "--bindings",
dest="bindings",
default=None,
help="URI or filename of resource containing incoming query variable bindings "+
"(default none). "+
"Specify '-' to use stdin. "+
"This option works for SELECT queries only when accessing a SPARQL endpoint.")
parser.add_option("--debug",
action="store_true",
dest="debug",
default=False,
help="run with full debug output enabled")
parser.add_option("-e", "--endpoint",
dest="endpoint",
default=None,
help="URI of SPARQL endpoint to query.")
parser.add_option("-f", "--format",
dest="format",
default=None,
help="Format for input and/or output: "+
"RDFXML, N3, NT, TURTLE, JSONLD, RDFA, HTML5, JSON, CSV or template. "+
"XML, N3, NT, TURTLE, JSONLD, RDFA, HTML5 apply to RDF data, "+
"others apply to query variable bindings. "+
"Multiple comma-separated values may be specified; "+
"they are applied to RDF or variable bindings as appropriate. "+
"'template' is a python formatting template with '%(var)s' for query variable 'var'. "+
"If two values are given for RDF or variable binding data, "+
"they are applied to input and output respectively. "+
"Thus: RDFXML,JSON = RDF/XML and JSON result bindings; "+
"RDFXML,N3 = RDF/XML input and Turtle output; etc.")
parser.add_option("-o", "--output",
dest="output",
default='-',
help="URI or filename of RDF resource for output "+
"(default stdout)."+
"Specify '-'to use stdout.")
parser.add_option("-p", "--prefix",
dest="prefix",
default="~/.asqc-prefixes",
help="URI or filename of resource containing query prefixes "+
"(default %default)")
parser.add_option("-q", "--query",
dest="query",
help="URI or filename of resource containing query to execute. "+
"If not present, query must be supplied as command line argument.")
parser.add_option("-r", "--rdf-input",
action="append",
dest="rdf_data",
default=None,
help="URI or filename of RDF resource to query "+
"(default stdin or none). "+
"May be repeated to merge multiple input resources. "+
"Specify '-' to use stdin.")
parser.add_option("-v", "--verbose",
action="store_true",
dest="verbose",
default=False,
help="display verbose output")
parser.add_option("--query-type",
dest="query_type",
default=None,
help="Type of query output: SELECT (variable bindings, CONSTRUCT (RDF) or ASK (status). "+
"May be used when system cannot tell the kind of result by analyzing the query itself. "+
"(Currently not used)")
parser.add_option("--format-rdf-in",
dest="format_rdf_in",
default=None,
help="Format for RDF input data: RDFXML, N3, NT, TURTLE, JSONLD, RDFA or HTML5. "+
"RDFA indicates RDFa embedded in XML (or XHTML); "+
"HTML5 indicates RDFa embedded in HTML5.")
parser.add_option("--format-rdf-out",
dest="format_rdf_out",
default=None,
help="Format for RDF output data: RDFXML, N3, NT, TURTLE or JSONLD.")
parser.add_option("--format-var-in",
dest="format_var_in",
default=None,
help="Format for query variable binding input data: JSON or CSV.")
parser.add_option("--format-var-out",
dest="format_var_out",
default=None,
help="Format for query variable binding output data: JSON, CSV or template. "+
"The template option is a Python format string applied to a dictionary of query result variables.")
# parse command line now
(options, args) = parser.parse_args(argv)
if len(args) < 1: parser.error("No command present")
if len(args) > 2: parser.error("Too many arguments present: "+repr(args))
def pick_next_format_option(s,kws):
t = s
for k in kws:
if s.upper().startswith(k):
s = s[len(k):]
if s == "": return (k, "")
if s.startswith(','): return (k, s[1:])
break
return (t, "")
if options.format:
fs = options.format
while fs:
fn,fs = pick_next_format_option(fs, RDFTYP+VARTYP)
if fn in RDFTYP:
if not options.format_rdf_in:
options.format_rdf_in = fn
if fn in RDFTYPSERIALIZERMAP:
options.format_rdf_out = fn
else:
if not options.format_var_in and fn in VARTYP:
options.format_var_in = fn
options.format_var_out = fn
if options.verbose:
print( "RDF graph input format: "+repr(options.format_rdf_in) )
print( "RDF graph output format: "+repr(options.format_rdf_out) )
print( "Var binding input format: "+repr(options.format_var_in) )
print( "Var binding output format: "+repr(options.format_var_out) )
return (options, args)
def runCommand(configbase, argv):
"""
    Run the program with the supplied configuration base directory (used to
    resolve user-relative resources such as the prefix file) and command-line
    arguments.
    This is called by the main function (below), and also by test suite routines.
Returns exit status.
"""
log.debug("runCommand: configbase %s, argv %s"%(configbase, repr(argv)))
(options, args) = parseCommandArgs(argv)
if not options or options.debug:
logging.basicConfig(level=logging.DEBUG)
status = 2
if options:
status = run(configbase, options, args)
return status
def runMain():
"""
Main program transfer function for setup.py console script
"""
configbase = os.path.expanduser("~")
return runCommand(configbase, sys.argv)
if __name__ == "__main__":
"""
Program invoked from the command line.
"""
# main program
status = runMain()
sys.exit(status)
#--------+---------+---------+---------+---------+---------+---------+---------+
| [
"[email protected]"
]
| |
610057694e00c3e4fac05320e103e3137f135d00 | 76192480d7469e3d7f6ac8d8bbc3334445e5fddc | /app.py | 07f63fbd87e728aa0ad6e9cd795f03b20816c8e7 | []
| no_license | forgeworks/splendor | b7d383a154bf72701a00d005f9aafbd3e90a6b30 | f99d66b76971f318637944a8ce5921367ee4aa21 | refs/heads/master | 2023-05-12T03:07:17.860147 | 2020-04-03T17:38:55 | 2020-04-03T17:38:55 | 155,748,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from flask import Flask
from api_example import GreetingV1
app = Flask(__name__)
app.register_blueprint(GreetingV1())
| [
"[email protected]"
]
| |
67dccdaf388e326388afec57b7acdf38c78908a9 | eba0e40667d6082b5eeefdbaf2862e3f02fd774c | /mr_utils/sim/ssfp/quantitative_field_mapping.py | 44a85af73a56bb265904c32bd1da3b6aaf216bbc | []
| no_license | zongjg/mr_utils | a0ec98ed2d03a6d52d81be8ef108993f92baeee1 | 08cb43dcf53fd6fddd3304e3514a608842310a34 | refs/heads/master | 2022-01-04T16:25:41.065177 | 2019-05-11T20:20:22 | 2019-05-11T20:20:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,355 | py | '''Quantitative field mapping for bSSFP.
Collect quantitative MR maps (T1, T2, flip angle), then, assuming that these
won't change during the duration of the scan, we can use these to take a single
bSSFP scan each time point and solve for the off-resonance. Thus we get a
field map at time point.
'''
import numpy as np
from mr_utils.utils import find_nearest
from mr_utils.sim.ssfp import ssfp
# from mr_utils import view
def get_df_responses(T1, T2, PD, TR, alpha, phase_cyc, dfs):
'''Simulate bSSFP response across all possible off-resonances.
Parameters
==========
T1 : float
scalar T1 longitudinal recovery value in seconds.
T2 : float
scalar T2 transverse decay value in seconds.
PD : float
        scalar proton density value scaled the same as the acquisition.
TR : float
Repetition time in seconds.
alpha : float
Flip angle in radians.
phase_cyc : float
RF phase cycling in radians.
    dfs : array_like
Off-resonance values to simulate over.
Returns
=======
resp : array_like
Frequency response of SSFP signal across entire spectrum.
'''
# Feed ssfp sim an array of parameters to be used with all the df values
T1s = np.ones(dfs.shape)*T1
T2s = np.ones(dfs.shape)*T2
PDs = np.ones(dfs.shape)*PD
resp = ssfp(T1s, T2s, TR, alpha, dfs, phase_cyc=phase_cyc, M0=PDs)
# Returns a vector of simulated Mxy with index corresponding to dfs
return resp
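# Illustrative call (parameter values are made up, not from a real protocol):
#   dfs = np.linspace(-1/(2*.006), 1/(2*.006), 100)
#   profile = get_df_responses(1.2, .035, 1.0, .006, np.deg2rad(30), 0, dfs)
# giving the complex bSSFP profile sampled at each off-resonance value in dfs.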
def quantitative_fm_scalar(Mxy, dfs, T1, T2, PD, TR, alpha, phase_cyc):
'''For scalar T1, T2, PD.
Parameters
==========
Mxy : float
Complex transverse signal we measure.
dfs : array_like
Off-resonance values to simulate over.
T1 : float
scalar T1 longitudinal recovery value in seconds.
T2 : float
scalar T2 transverse decay value in seconds.
PD : float
        scalar proton density value scaled the same as the acquisition.
TR : float
Repetition time in seconds.
alpha : float
Flip angle in radians.
phase_cyc : float
RF phase cycling in radians.
Returns
=======
float
        Off-resonance value that most closely matches the measured Mxy.
'''
# Simulate over the total range of off-resonance values
resp = get_df_responses(T1, T2, PD, TR, alpha, phase_cyc, dfs)
# Find the response that matches Mxy most closely
idx, _val = find_nearest(resp, Mxy)
# Return the df's value, because that's really what the caller wanted
return dfs[idx]
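# Usage sketch (hypothetical values): given one measured complex sample Mxy and a
# candidate off-resonance grid dfs (e.g. np.linspace(-83, 83, 100)),
#   df_est = quantitative_fm_scalar(Mxy, dfs, 1.2, .035, 1.0, .006, np.deg2rad(30), 0)
# returns the grid value whose simulated bSSFP signal is closest to Mxy.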
def quantitative_fm(Mxys, dfs, T1s, T2s, PDs, TR, alpha, phase_cyc, mask=None):
'''Find field map given quantitative maps.
Parameters
==========
Mxys : array_like
Complex transverse signal we measure.
dfs : array_like
Off-resonance values to simulate over.
    T1s : array_like
        T1 longitudinal recovery values in seconds.
    T2s : array_like
        T2 transverse decay values in seconds.
    PDs : array_like
        Proton density values scaled the same as the acquisition.
TR : float
Repetition time in seconds.
alpha : float
Flip angle in radians.
phase_cyc : float
RF phase cycling in radians.
mask : array_like
Boolean mask to tell which pixels we should compute df for.
Returns
=======
fm : array_like
Field map.
'''
resps = {}
orig_size = np.asarray(T1s).shape
if mask is None:
mask = np.ones(Mxys.shape)
Mxys = np.asarray(Mxys).flatten()
T1s = np.asarray(T1s).flatten()
T2s = np.asarray(T2s).flatten()
PDs = np.asarray(PDs).flatten()
mask = np.asarray(mask).flatten()
fm = np.zeros(Mxys.size)
for ii in range(Mxys.size):
if mask[ii]:
# Cache results for later in case we come across the same T1,T2,PD
if (PDs[ii], T1s[ii], T2s[ii]) not in resps:
resps[(PDs[ii], T1s[ii], T2s[ii])] = get_df_responses(
T1s[ii], T2s[ii], PDs[ii], TR, alpha, phase_cyc, dfs)
# Find the appropriate off-resonance value for this T1,T2,PD,Mxy
idx, _val = find_nearest(
resps[(PDs[ii], T1s[ii], T2s[ii])], Mxys[ii])
fm[ii] = dfs[idx]
else:
fm[ii] = 0
return fm.reshape(orig_size)
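# End-to-end sketch (the maps below are assumed to exist already; shapes must match):
#   fm = quantitative_fm(Mxy_frame, dfs, T1_map, T2_map, PD_map, TR=.006,
#                        alpha=np.deg2rad(30), phase_cyc=0, mask=roi_mask)
# One such call per time frame yields a field map at every time point, reusing the
# same quantitative maps throughout the scan.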
| [
"[email protected]"
]
| |
a4e0c192f3c8f4463eae05876b00114d00ab91c7 | 8ce23f191870868c86c7616882e6043b1102cb0d | /tools/text_processing/join_files_on_column_fuzzy/join_files_on_column_fuzzy.py | 1e19f1dcfe4a4d3ab0743078894f5c196b0b2559 | []
| no_license | StevenVerbruggen/galaxytools | 56f99d0d629cb6d9e3db290c64f30b920de04f26 | 7d7365197e2cba2eb048121c9f0ee5546f06c520 | refs/heads/master | 2021-01-16T17:51:39.721403 | 2020-12-01T08:35:51 | 2020-12-01T08:35:51 | 100,017,016 | 0 | 0 | null | 2017-08-11T09:42:20 | 2017-08-11T09:42:20 | null | UTF-8 | Python | false | false | 4,755 | py | #!/usr/bin/env python
import os
import argparse
import sys
def main(args):
if args.header:
h1 = True
h2 = True
else:
h1 = False
h2 = False
cache = list()
out = open(args.outfile, 'w+')
write_buffer = list()
def _readline(header = False):
with open(args.f2) as handle2:
for line in handle2:
line = line.strip()
if header:
header = False
yield line
continue
if not line:
continue
columns = line.split(args.sep)
value2 = columns[args.c2-1]
yield columns, float(value2)
def fill_cache():
try:
cache.append(next(it))
except StopIteration:
pass
it = _readline(header = h2)
with open(args.f1) as handle1:
for line in handle1:
line = line.strip()
if h1:
h1 = False
                second_header = next(it)
                if args.add_distance:
                    out.write('%s\t%s\t%s\n' % (line, second_header, args.unit))
                else:
                    out.write('%s\t%s\n' % (line, second_header))
continue
if not line:
continue
columns = line.split(args.sep)
value1 = float(columns[args.c1-1])
_cache = list()
fill_cache()
while cache:
_c, value2 = cache.pop(0)
upper_bound = value1 + args.distance
if args.unit == 'absolute':
if value2 <= upper_bound and value2 >= (value1 - args.distance):
line_template = '%s\n'
abs_dist = abs(value1 - value2)
if args.add_distance:
line_template = '%s\t' + str(abs_dist) + '\n'
write_buffer.append([abs_dist, line_template % '\t'.join( columns + _c )])
_cache.append([_c, value2])
fill_cache()
elif value2 > upper_bound:
                        # if the value from list 2 is bigger than the current value, it is carried over to the next round
_cache.append([_c, value2])
elif value2 < upper_bound:
                        # if the value from list 2 is smaller than the current value, check the next one from list 2
fill_cache()
elif args.unit == 'ppm':
ppm_dist = abs((value1 - value2) / value1 * 1000000)
if ppm_dist <= args.distance:
line_template = '%s\n'
if args.add_distance:
line_template = '%s\t' + str(ppm_dist) + '\n'
write_buffer.append([ppm_dist, line_template % '\t'.join( columns + _c )])
_cache.append([_c, value2])
fill_cache()
elif ppm_dist > args.distance:
_cache.append([_c, value2])
elif ppm_dist < args.distance:
fill_cache()
if args.closest and write_buffer:
write_buffer.sort(key=lambda x: x[0])
out.write(write_buffer[0][1])
else:
for _dist, line in write_buffer:
out.write(line)
write_buffer = list()
cache = _cache
out.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Merge two files on a common column the fuzzy way.')
parser.add_argument('--f1', required=True)
parser.add_argument('--f2', required=True)
parser.add_argument('--c1', type=int, required=True, help="Column in file 1 to be merged on.")
parser.add_argument('--c2', type=int, required=True, help="Column in file 2 to be merged on.")
parser.add_argument('--outfile', required=True)
parser.add_argument('--header', action='store_true', help="The files have a header line at the beginning.")
parser.add_argument('--closest', action='store_true', help="Only report the closest match.")
    parser.add_argument('--add_distance', action='store_true', help="Add additional column with the distance between the two values.")
parser.add_argument('--sep', type=str, default="\t", help="Files are separated by this separator.")
parser.add_argument('--distance', type=float, default="0.2", help="Maximal allowed distance.")
parser.add_argument('--unit', choices=['ppm', 'absolute'], default='absolute')
args = parser.parse_args()
main(args)
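# Example invocation (file names and column numbers are illustrative):
#   python join_files_on_column_fuzzy.py --f1 peaks_a.tsv --f2 peaks_b.tsv \
#       --c1 1 --c2 1 --distance 10 --unit ppm --closest --header \
#       --outfile joined.tsv
# joins rows whose column-1 values differ by at most 10 ppm, keeping only the
# closest match for each row of --f1.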
| [
"[email protected]"
]
| |
b897b084b288350d1a287661007953393d395943 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/EightTeV/BprimeBprime/BprimeBprimeToBHBHinc_M_800_TuneZ2star_8TeV_madgraph_cff.py | 352c4947ebcf1ce31ccf35f0dd2e24c3165cb26a | []
| no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 4,231 | py | import FWCore.ParameterSet.Config as cms
#from Configuration.Generator.PythiaUEZ2Settings_cfi import *
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'PMAS(25,1)=125.00D0 !mass of Higgs',
'MSTP(1) = 4',
'MSEL=7 ! User defined processes',
'MWID(7)=2',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off',
'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
'PMAS(7,1) = 800.0D0 ! bprime quarks mass',
'PMAS(7,2) = 8.000D0 ! bprime quark width',
'PMAS(7,3) = 80.00D0 ! Max value above which the BW shape is truncated',
'VCKM(1,1) = 0.97414000D0',
'VCKM(1,2) = 0.22450000D0',
'VCKM(1,3) = 0.00420000D0',
'VCKM(1,4) = 0.02500000D0',
'VCKM(2,1) = 0.22560000D0',
'VCKM(2,2) = 0.97170000D0',
'VCKM(2,3) = 0.04109000D0',
'VCKM(2,4) = 0.05700000D0',
'VCKM(3,1) = 0.00100000D0',
'VCKM(3,2) = 0.06200000D0',
'VCKM(3,3) = 0.91000000D0',
'VCKM(3,4) = 0.41000000D0',
'VCKM(4,1) = 0.01300000D0',
'VCKM(4,2) = 0.04000000D0',
'VCKM(4,3) = 0.41000000D0',
'VCKM(4,4) = 0.91000000D0',
'MDME(56,1)=0 ! g b4',
'MDME(57,1)=0 ! gamma b4',
'MDME(58,1)=0 ! Z0 b',
'MDME(59,1)=0 ! W u',
'MDME(60,1)=0 ! W c',
'MDME(61,1)=0 ! W t',
'MDME(62,1)=0 ! W t4',
'KFDP(63,2)=5 ! defines H0 b',
'MDME(63,1)=1 ! h0 b4',
'MDME(64,1)=-1 ! H- c',
'MDME(65,1)=-1 ! H- t',
'BRAT(56) = 0.0D0',
'BRAT(57) = 0.0D0',
'BRAT(58) = 0.0D0',
'BRAT(59) = 0.0D0',
'BRAT(60) = 0.0D0',
'BRAT(61) = 0.0D0',
'BRAT(62) = 0.0D0',
'BRAT(63) = 1.0D0',
'BRAT(64) = 0.0D0',
'BRAT(65) = 0.0D0',
'MDME(210,1)=1 !Higgs decay into dd',
'MDME(211,1)=1 !Higgs decay into uu',
'MDME(212,1)=1 !Higgs decay into ss',
'MDME(213,1)=1 !Higgs decay into cc',
'MDME(214,1)=1 !Higgs decay into bb',
'MDME(215,1)=1 !Higgs decay into tt',
'MDME(216,1)=1 !Higgs decay into',
'MDME(217,1)=1 !Higgs decay into Higgs decay',
'MDME(218,1)=1 !Higgs decay into e nu e',
'MDME(219,1)=1 !Higgs decay into mu nu mu',
'MDME(220,1)=1 !Higgs decay into tau nu tau',
'MDME(221,1)=1 !Higgs decay into Higgs decay',
'MDME(222,1)=1 !Higgs decay into g g',
'MDME(223,1)=1 !Higgs decay into gam gam',
'MDME(224,1)=1 !Higgs decay into gam Z',
'MDME(225,1)=1 !Higgs decay into Z Z',
'MDME(226,1)=1 !Higgs decay into W W',
),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
),
jetMatching = cms.untracked.PSet(
scheme = cms.string("Madgraph"),
mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
MEMAIN_etaclmax = cms.double(5.0),
MEMAIN_qcut = cms.double(-1),
MEMAIN_nqmatch = cms.int32(-1),
MEMAIN_minjets = cms.int32(-1),
MEMAIN_maxjets = cms.int32(-1),
MEMAIN_showerkt = cms.double(0),
MEMAIN_excres = cms.string(''),
outTree_flag = cms.int32(0)
)
)
ProductionFilterSequence = cms.Sequence(generator)
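# Summary of the switches above: LHE events from Madgraph are hadronized with Pythia6;
# the b' quark (PDG id 7, mass 800 GeV) is forced to decay to H0 + b (channel 63, BRAT=1,
# all other b' channels closed), the 125 GeV Higgs decays inclusively (channels 210-226
# open), and jet matching runs through the Madgraph interface in "auto" mode.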
| [
"[email protected]"
]
| |
d330066fb6ba0e836748a43a60059fe223936d8f | 7e53ed2d6074a025fe960f72c21672cc23dcab14 | /vt/tests/test_vt.py | c1d05f734a3b1993f80ddc0c57020fbbb90a49cb | [
"MIT"
]
| permissive | kyokley/vittlify-cli | 154410638b3a33640c01ab915dbf24d4e6afe13f | e3be7f3c7b0c00d59defe73af9aed0ec792800cc | refs/heads/master | 2023-02-17T21:08:29.452548 | 2021-12-05T15:54:06 | 2021-12-05T15:54:06 | 58,974,128 | 0 | 0 | MIT | 2023-02-08T02:27:24 | 2016-05-16T23:09:51 | Python | UTF-8 | Python | false | false | 41,993 | py | import shlex
import unittest
import mock
import pytest
import requests
from vt.utils import VittlifyError
from vt.vt import (
Status,
add,
categories,
complete,
display_all_shopping_lists,
display_item,
display_shopping_list,
display_shopping_list_categories,
help,
modify,
move,
run,
show,
term,
)
class TestDisplayShoppingList(unittest.TestCase):
def setUp(self):
self.get_shopping_list_info_patcher = mock.patch('vt.vt.get_shopping_list_info')
self.mock_get_shopping_list_info = self.get_shopping_list_info_patcher.start()
self.get_shopping_list_items_patcher = mock.patch(
'vt.vt.get_shopping_list_items'
)
self.mock_get_shopping_list_items = self.get_shopping_list_items_patcher.start()
self.get_completed_patcher = mock.patch('vt.vt.get_completed')
self.mock_get_completed = self.get_completed_patcher.start()
self.get_all_shopping_list_items_patcher = mock.patch(
'vt.vt.get_all_shopping_list_items'
)
self.mock_get_all_shopping_list_items = (
self.get_all_shopping_list_items_patcher.start()
)
self.format_row_patcher = mock.patch('vt.vt.format_row')
self.mock_format_row = self.format_row_patcher.start()
self.print_table_patcher = mock.patch('vt.vt.print_table')
self.mock_print_table = self.print_table_patcher.start()
test_shopping_list = {'name': 'test_list'}
self.mock_get_shopping_list_info.return_value = test_shopping_list
test_items = [
{'name': 'item1'},
{'name': 'item2'},
{'name': 'item3'},
]
self.mock_get_shopping_list_items.return_value = test_items
self.mock_get_all_shopping_list_items.return_value = test_items
self.mock_get_completed.return_value = test_items
self.mock_format_row.side_effect = [
'formatted_row_1',
'formatted_row_2',
'formatted_row_3',
]
def tearDown(self):
self.get_shopping_list_info_patcher.stop()
self.get_shopping_list_items_patcher.stop()
self.get_completed_patcher.stop()
self.get_all_shopping_list_items_patcher.stop()
self.format_row_patcher.stop()
self.print_table_patcher.stop()
def test_not_completed(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.NOT_COMPLETED)
self.mock_get_shopping_list_info.assert_called_once_with(guid)
self.mock_get_shopping_list_items.assert_called_once_with(guid)
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='test_list',
quiet=False,
)
def test_all(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.ALL)
self.mock_get_shopping_list_info.assert_called_once_with(guid)
self.mock_get_all_shopping_list_items.assert_called_once_with(guid)
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
{'name': 'test_list'},
include_category=False,
include_comments=False,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='test_list',
quiet=False,
)
def test_completed(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.COMPLETED)
self.assertFalse(self.mock_get_shopping_list_info.called)
self.mock_get_completed.assert_called_once_with()
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
None,
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
None,
include_category=False,
include_comments=False,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
None,
include_category=False,
include_comments=False,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='Recently Completed',
quiet=False,
)
def test_not_completed_extended(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.NOT_COMPLETED, extended=True)
self.mock_get_shopping_list_info.assert_called_once_with(guid)
self.mock_get_shopping_list_items.assert_called_once_with(guid)
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='test_list',
quiet=False,
)
def test_all_extended(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.ALL, extended=True)
self.mock_get_shopping_list_info.assert_called_once_with(guid)
self.mock_get_all_shopping_list_items.assert_called_once_with(guid)
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
{'name': 'test_list'},
include_category=False,
include_comments=True,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='test_list',
quiet=False,
)
def test_completed_extended(self):
guid = 'test_guid'
display_shopping_list(guid=guid, mode=Status.COMPLETED, extended=True)
self.assertFalse(self.mock_get_shopping_list_info.called)
self.mock_get_completed.assert_called_once_with()
self.mock_format_row.assert_has_calls(
[
mock.call(
{'name': 'item1'},
None,
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item2'},
None,
include_category=False,
include_comments=True,
no_wrap=False,
),
mock.call(
{'name': 'item3'},
None,
include_category=False,
include_comments=True,
no_wrap=False,
),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'],
title='Recently Completed',
quiet=False,
)
class TestDisplayItem(unittest.TestCase):
def setUp(self):
self.get_item_patcher = mock.patch('vt.vt.get_item')
self.mock_get_item = self.get_item_patcher.start()
self.format_row_patcher = mock.patch('vt.vt.format_row')
self.mock_format_row = self.format_row_patcher.start()
self.print_table_patcher = mock.patch('vt.vt.print_table')
self.mock_print_table = self.print_table_patcher.start()
self.test_guid = 'test_guid'
def tearDown(self):
self.get_item_patcher.stop()
self.format_row_patcher.stop()
self.print_table_patcher.stop()
def test_(self):
display_item(self.test_guid)
self.mock_get_item.assert_called_once_with(self.test_guid)
self.mock_format_row.assert_called_once_with(
self.mock_get_item.return_value, None, include_comments=True, no_wrap=False
)
self.mock_print_table.assert_called_once_with(
[self.mock_format_row.return_value]
)
class TestDisplayAllShoppingLists(unittest.TestCase):
def setUp(self):
self.get_all_shopping_lists_patcher = mock.patch('vt.vt.get_all_shopping_lists')
self.mock_get_all_shopping_lists = self.get_all_shopping_lists_patcher.start()
self.format_row_patcher = mock.patch('vt.vt.format_row')
self.mock_format_row = self.format_row_patcher.start()
self.print_table_patcher = mock.patch('vt.vt.print_table')
self.mock_print_table = self.print_table_patcher.start()
self.mock_get_all_shopping_lists.return_value = [
{'name': 'list1'},
{'name': 'list2'},
{'name': 'list3'},
]
self.mock_format_row.side_effect = [
'formatted_row_1',
'formatted_row_2',
'formatted_row_3',
]
def tearDown(self):
self.get_all_shopping_lists_patcher.stop()
self.format_row_patcher.stop()
def test_(self):
display_all_shopping_lists()
self.mock_get_all_shopping_lists.assert_called_once_with()
self.mock_format_row.assert_has_calls(
[
mock.call({'name': 'list1'}, None, no_wrap=False),
mock.call({'name': 'list2'}, None, no_wrap=False),
mock.call({'name': 'list3'}, None, no_wrap=False),
]
)
self.mock_print_table.assert_called_once_with(
['formatted_row_1', 'formatted_row_2', 'formatted_row_3'], title='All Lists'
)
class TestShowNoDefaultList(unittest.TestCase):
def setUp(self):
self.DEFAULT_LIST_patcher = mock.patch('vt.vt.DEFAULT_LIST', '')
self.DEFAULT_LIST_patcher.start()
self.display_shopping_list_patcher = mock.patch('vt.vt.display_shopping_list')
self.mock_display_shopping_list = self.display_shopping_list_patcher.start()
self.display_all_shopping_lists_patcher = mock.patch(
'vt.vt.display_all_shopping_lists'
)
self.mock_display_all_shopping_lists = (
self.display_all_shopping_lists_patcher.start()
)
self.display_item_patcher = mock.patch('vt.vt.display_item')
self.mock_display_item = self.display_item_patcher.start()
def tearDown(self):
self.DEFAULT_LIST_patcher.stop()
self.display_shopping_list_patcher.stop()
self.display_all_shopping_lists_patcher.stop()
self.display_item_patcher.stop()
def test_list_empty_guid(self):
args = shlex.split("list ''")
self.assertRaises(IndexError, show, args)
def test_list_no_guid(self):
args = shlex.split("list")
self.assertRaises(IndexError, show, args)
def test_list_empty_guid_extended(self):
args = shlex.split("list '' -e")
self.assertRaises(IndexError, show, args)
def test_list_no_guid_extended(self):
args = shlex.split("list -e")
self.assertRaises(IndexError, show, args)
def test_list_no_extended(self):
args = shlex.split("list test_guid")
show(args)
self.mock_display_shopping_list.assert_called_once_with(guid='test_guid')
def test_list_extended(self):
args = shlex.split("list test_guid -e")
show(args)
self.mock_display_shopping_list.assert_called_once_with(
guid='test_guid',
extended=True,
)
def test_lists(self):
args = shlex.split("lists")
show(args)
self.mock_display_all_shopping_lists.assert_called_once_with()
def test_item_no_guid(self):
args = shlex.split("item")
self.assertRaises(IndexError, show, args)
def test_item_empty_guid(self):
args = shlex.split("item ''")
self.assertRaises(IndexError, show, args)
def test_item(self):
args = shlex.split("item test_guid")
show(args)
self.mock_display_item.assert_called_once_with('test_guid')
class TestShowDefaultList:
@pytest.fixture(autouse=True)
def setUp(self, mocker):
self.DEFAULT_LIST_patcher = mock.patch('vt.vt.DEFAULT_LIST', 'default_list')
self.DEFAULT_LIST_patcher.start()
self.parse_options_patcher = mock.patch('vt.vt.parse_options')
self.mock_parse_options = self.parse_options_patcher.start()
self.display_shopping_list_patcher = mock.patch('vt.vt.display_shopping_list')
self.mock_display_shopping_list = self.display_shopping_list_patcher.start()
self.display_all_shopping_lists_patcher = mock.patch(
'vt.vt.display_all_shopping_lists'
)
self.mock_display_all_shopping_lists = (
self.display_all_shopping_lists_patcher.start()
)
self.display_shopping_list_categories_patcher = mock.patch(
'vt.vt.display_shopping_list_categories'
)
self.mock_display_shopping_list_categories = (
self.display_shopping_list_categories_patcher.start()
)
mocker.patch.object(term, 'red', autospec=True)
self.display_item_patcher = mock.patch('vt.vt.display_item')
self.mock_display_item = self.display_item_patcher.start()
self.mock_parse_options.return_value = {}
yield
self.DEFAULT_LIST_patcher.stop()
self.parse_options_patcher.stop()
self.display_shopping_list_patcher.stop()
self.display_all_shopping_lists_patcher.stop()
self.display_item_patcher.stop()
self.display_shopping_list_categories_patcher.stop()
def test_list_empty_guid(self):
args = shlex.split("list ''")
show(args)
self.mock_display_shopping_list.assert_called_once_with(guid='default_list')
def test_list_no_guid(self):
args = shlex.split("list")
show(args)
self.mock_display_shopping_list.assert_called_once_with(guid='default_list')
def test_list_empty_guid_extended(self):
self.mock_parse_options.return_value = {'extended': True}
args = shlex.split("list '' -e")
show(args)
self.mock_display_shopping_list.assert_called_once_with(
guid='default_list', extended=True
)
def test_list_no_guid_extended(self):
self.mock_parse_options.return_value = {'extended': True}
args = shlex.split("list -e")
show(args)
self.mock_display_shopping_list.assert_called_once_with(
guid='default_list', extended=True
)
def test_list_no_extended(self):
args = shlex.split("list test_guid")
show(args)
self.mock_display_shopping_list.assert_called_once_with(guid='test_guid')
def test_list_extended(self):
self.mock_parse_options.return_value = {'extended': True}
args = shlex.split("list test_guid -e")
show(args)
self.mock_display_shopping_list.assert_called_once_with(
guid='test_guid',
extended=True,
)
def test_lists(self):
args = shlex.split("lists")
show(args)
self.mock_display_all_shopping_lists.assert_called_once_with()
def test_item_no_guid(self):
args = shlex.split("item")
with pytest.raises(IndexError):
show(args)
def test_item_empty_guid(self):
args = shlex.split("item ''")
with pytest.raises(IndexError):
show(args)
def test_item(self):
args = shlex.split("item test_guid")
show(args)
self.mock_display_item.assert_called_once_with('test_guid')
def test_display_list_categories(self):
self.mock_parse_options.return_value = {
'categories': [{'name': 'type A'}, {'name': 'type B'}]
}
args = shlex.split("test_guid")
categories(args)
self.mock_display_shopping_list_categories.assert_called_once_with('test_guid')
def test_display_list_categories_raises(self):
self.mock_parse_options.return_value = {
'categories': [{'name': 'type A'}, {'name': 'type B'}]
}
self.mock_display_shopping_list_categories.side_effect = VittlifyError(
'Got an error'
)
args = shlex.split("test_guid")
categories(args)
term.red.assert_called_once_with('Got an error')
self.mock_display_shopping_list_categories.assert_called_once_with('test_guid')
def test_display_shopping_list_raises(self):
self.mock_display_shopping_list.side_effect = VittlifyError('Got an error')
args = shlex.split("list test_guid")
show(args)
term.red.assert_called_once_with('Got an error')
self.mock_display_shopping_list.assert_called_once_with(guid='test_guid')
def test_display_item_raises(self):
self.mock_display_item.side_effect = VittlifyError('Got an error')
args = shlex.split("show test_guid")
show(args)
term.red.assert_called_once_with('Got an error')
def test_display_all_shopping_lists_raises(self):
self.mock_display_all_shopping_lists.side_effect = VittlifyError('Got an error')
args = shlex.split("lists")
show(args)
self.mock_display_all_shopping_lists.assert_called_once_with()
term.red.assert_called_once_with('Got an error')
class TestComplete:
@pytest.fixture(autouse=True)
def setUp(self, mocker):
self.complete_item_patcher = mock.patch('vt.vt.complete_item')
self.mock_complete_item = self.complete_item_patcher.start()
self.mock_print = mocker.patch('builtins.print')
self.display_shopping_list_patcher = mock.patch('vt.vt.display_shopping_list')
self.mock_display_shopping_list = self.display_shopping_list_patcher.start()
self.apply_strikethrough_patcher = mock.patch('vt.vt.apply_strikethrough')
self.mock_apply_strikethrough = self.apply_strikethrough_patcher.start()
self.mock_complete_item.return_value = {'name': 'test_name'}
self.mock_apply_strikethrough.return_value = 'struck_through'
yield
self.complete_item_patcher.stop()
self.apply_strikethrough_patcher.stop()
def test_complete(self):
args = shlex.split("test_guid")
complete(args)
self.mock_complete_item.assert_called_once_with('test_guid', uncomplete=False)
self.mock_apply_strikethrough.assert_called_once_with('test_name')
self.mock_print.assert_called_once_with(
f'Marked {term.magenta}struck_through{term.normal} as done.'
)
def test_uncomplete(self):
args = shlex.split("test_guid")
complete(args, uncomplete=True)
self.mock_complete_item.assert_called_once_with('test_guid', uncomplete=True)
self.mock_print.assert_called_once_with(
f'Marked {term.magenta}test_name{term.normal} undone.'
)
def test_done_extended(self):
args = shlex.split("-e")
complete(args)
self.mock_display_shopping_list.assert_called_once_with(
extended=True, mode=Status.COMPLETED
)
def test_completed_no_extended(self):
args = shlex.split("")
complete(args)
self.mock_display_shopping_list.assert_called_once_with(mode=Status.COMPLETED)
def test_completed_extended(self):
args = shlex.split("--extended")
complete(args)
self.mock_display_shopping_list.assert_called_once_with(
extended=True, mode=Status.COMPLETED
)
class TestModify(unittest.TestCase):
def setUp(self):
self.modify_item_patcher = mock.patch('vt.vt.modify_item')
self.mock_modify_item = self.modify_item_patcher.start()
self.display_item_patcher = mock.patch('vt.vt.display_item')
self.mock_display_item = self.display_item_patcher.start()
def tearDown(self):
self.modify_item_patcher.stop()
self.display_item_patcher.stop()
def test_no_options(self):
args = shlex.split("test_guid this is a comment")
modify(args)
self.mock_modify_item.assert_called_once_with('test_guid', 'this is a comment')
self.mock_display_item.assert_called_once_with('test_guid')
def test_with_short_options(self):
args = shlex.split("test_guid -a this is a comment")
modify(args)
self.mock_modify_item.assert_called_once_with(
'test_guid', 'this is a comment', append=True
)
self.mock_display_item.assert_called_once_with('test_guid')
def test_with_options(self):
args = shlex.split("test_guid --append this is a comment")
modify(args)
self.mock_modify_item.assert_called_once_with(
'test_guid', 'this is a comment', append=True
)
self.mock_display_item.assert_called_once_with('test_guid')
class TestAddDefaultList(unittest.TestCase):
def setUp(self):
self.DEFAULT_LIST_patcher = mock.patch('vt.vt.DEFAULT_LIST', 'default_list')
self.DEFAULT_LIST_patcher.start()
self.add_item_patcher = mock.patch('vt.vt.add_item')
self.mock_add_item = self.add_item_patcher.start()
self.format_row_patcher = mock.patch('vt.vt.format_row')
self.mock_format_row = self.format_row_patcher.start()
self.print_table_patcher = mock.patch('vt.vt.print_table')
self.mock_print_table = self.print_table_patcher.start()
def tearDown(self):
self.add_item_patcher.stop()
self.DEFAULT_LIST_patcher.stop()
self.format_row_patcher.stop()
self.print_table_patcher.stop()
def test_no_guid(self):
args = shlex.split("'this is a new item'")
add(args)
self.mock_add_item.assert_called_once_with('default_list', 'this is a new item')
self.mock_format_row.assert_called_once_with(
self.mock_add_item.return_value, no_wrap=False
)
self.mock_print_table.assert_called_once_with(
[self.mock_format_row.return_value]
)
def test_with_guid(self):
args = shlex.split("test_guid 'this is a new item'")
add(args)
self.mock_add_item.assert_called_once_with('test_guid', 'this is a new item')
self.mock_format_row.assert_called_once_with(
self.mock_add_item.return_value, no_wrap=False
)
self.mock_print_table.assert_called_once_with(
[self.mock_format_row.return_value]
)
class TestAddNoDefaultList(unittest.TestCase):
def setUp(self):
self.DEFAULT_LIST_patcher = mock.patch('vt.vt.DEFAULT_LIST', None)
self.DEFAULT_LIST_patcher.start()
self.add_item_patcher = mock.patch('vt.vt.add_item')
self.mock_add_item = self.add_item_patcher.start()
self.format_row_patcher = mock.patch('vt.vt.format_row')
self.mock_format_row = self.format_row_patcher.start()
self.print_table_patcher = mock.patch('vt.vt.print_table')
self.mock_print_table = self.print_table_patcher.start()
def tearDown(self):
self.add_item_patcher.stop()
self.DEFAULT_LIST_patcher.stop()
self.format_row_patcher.stop()
self.print_table_patcher.stop()
def test_no_guid(self):
args = shlex.split("'this is a new item'")
self.assertRaises(IndexError, add, args)
def test_with_guid(self):
args = shlex.split("test_guid 'this is a new item'")
add(args)
self.mock_add_item.assert_called_once_with('test_guid', 'this is a new item')
self.mock_format_row.assert_called_once_with(
self.mock_add_item.return_value, no_wrap=False
)
self.mock_print_table.assert_called_once_with(
[self.mock_format_row.return_value]
)
class TestMove:
@pytest.fixture(autouse=True)
def setUp(self, mocker):
self.move_item_patcher = mock.patch('vt.vt.move_item')
self.mock_move_item = self.move_item_patcher.start()
self.mock_print = mocker.patch('builtins.print')
yield
self.move_item_patcher.stop()
def test_(self):
args = shlex.split('test_guid to_list_guid')
move(args)
self.mock_move_item.assert_called_once_with('test_guid', 'to_list_guid')
self.mock_print.assert_called_once_with(
f'Moved item {term.blue}test_guid{term.normal} to list {term.blue}to_list_guid{term.normal}'
)
class TestRun:
@pytest.fixture(autouse=True)
def setUp(self, mocker):
self.show_patcher = mock.patch('vt.vt.show')
self.mock_show = self.show_patcher.start()
self.complete_patcher = mock.patch('vt.vt.complete')
self.mock_complete = self.complete_patcher.start()
self.modify_patcher = mock.patch('vt.vt.modify')
self.mock_modify = self.modify_patcher.start()
self.add_patcher = mock.patch('vt.vt.add')
self.mock_add = self.add_patcher.start()
self.move_patcher = mock.patch('vt.vt.move')
self.mock_move = self.move_patcher.start()
mocker.patch.object(term, 'red', autospec=True)
self.SHOW_TRACEBACK_patcher = mock.patch('vt.vt.SHOW_TRACEBACK', False)
self.SHOW_TRACEBACK_patcher.start()
self.PROXY_patcher = mock.patch('vt.vt.PROXY', False)
self.PROXY_patcher.start()
self.VITTLIFY_URL_patcher = mock.patch('vt.vt.VITTLIFY_URL', 'vittlify_url')
self.VITTLIFY_URL_patcher.start()
self.help_patcher = mock.patch('vt.vt.help')
self.mock_help = self.help_patcher.start()
yield
self.show_patcher.stop()
self.complete_patcher.stop()
self.modify_patcher.stop()
self.add_patcher.stop()
self.move_patcher.stop()
self.SHOW_TRACEBACK_patcher.stop()
self.PROXY_patcher.stop()
self.VITTLIFY_URL_patcher.stop()
self.help_patcher.stop()
def test_list(self):
test_args = shlex.split('list test_guid')
run(test_args)
self.mock_show.assert_called_once_with(test_args)
assert not self.mock_complete.called
assert not self.mock_modify.called
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_lists(self):
test_args = shlex.split('lists')
run(test_args)
self.mock_show.assert_called_once_with(test_args)
assert not self.mock_complete.called
assert not self.mock_modify.called
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_item(self):
test_args = shlex.split('item test_guid')
run(test_args)
self.mock_show.assert_called_once_with(test_args)
assert not self.mock_complete.called
assert not self.mock_modify.called
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_show(self):
test_args = shlex.split('show test_guid')
run(test_args)
self.mock_show.assert_called_once_with(test_args)
assert not self.mock_complete.called
assert not self.mock_modify.called
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_done(self):
test_args = shlex.split('done test_guid')
expected = ['test_guid']
run(test_args)
assert not self.mock_show.called
self.mock_complete.assert_called_once_with(expected)
assert not self.mock_modify.called
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_complete(self):
test_args = shlex.split('complete test_guid')
expected = ['test_guid']
run(test_args)
assert not self.mock_show.called
self.mock_complete.assert_called_once_with(expected)
assert not self.mock_modify.called
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_undone(self):
test_args = shlex.split('undone test_guid')
expected = ['test_guid']
run(test_args)
assert not self.mock_show.called
self.mock_complete.assert_called_once_with(expected, uncomplete=True)
assert not self.mock_modify.called
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_uncomplete(self):
test_args = shlex.split('uncomplete test_guid')
expected = ['test_guid']
run(test_args)
assert not self.mock_show.called
self.mock_complete.assert_called_once_with(expected, uncomplete=True)
assert not self.mock_modify.called
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_modify(self):
test_args = shlex.split("modify test_guid 'these are comments'")
expected = ['test_guid', 'these are comments']
run(test_args)
assert not self.mock_show.called
assert not self.mock_complete.called
self.mock_modify.assert_called_once_with(expected)
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_edit(self):
test_args = shlex.split("edit test_guid 'these are comments'")
expected = ['test_guid', 'these are comments']
run(test_args)
assert not self.mock_show.called
assert not self.mock_complete.called
self.mock_modify.assert_called_once_with(expected)
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_comment(self):
test_args = shlex.split("comment test_guid 'these are comments'")
expected = ['test_guid', 'these are comments']
run(test_args)
assert not self.mock_show.called
assert not self.mock_complete.called
self.mock_modify.assert_called_once_with(expected)
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_comments(self):
test_args = shlex.split("comments test_guid 'these are comments'")
expected = ['test_guid', 'these are comments']
run(test_args)
assert not self.mock_show.called
assert not self.mock_complete.called
self.mock_modify.assert_called_once_with(expected)
assert not self.mock_add.called
assert not self.mock_move.called
assert not self.mock_help.called
def test_add(self):
test_args = shlex.split("add 'this is a new item'")
expected = ['this is a new item']
run(test_args)
assert not self.mock_show.called
assert not self.mock_complete.called
assert not self.mock_modify.called
self.mock_add.assert_called_once_with(expected)
assert not self.mock_move.called
assert not self.mock_help.called
def test_move(self):
test_args = shlex.split("move old_guid new_guid")
expected = ['old_guid', 'new_guid']
run(test_args)
assert not self.mock_show.called
assert not self.mock_complete.called
assert not self.mock_modify.called
assert not self.mock_add.called
assert not self.mock_help.called
self.mock_move.assert_called_once_with(expected)
def test_mv(self):
test_args = shlex.split("mv old_guid new_guid")
expected = ['old_guid', 'new_guid']
run(test_args)
assert not self.mock_show.called
assert not self.mock_complete.called
assert not self.mock_modify.called
assert not self.mock_add.called
assert not self.mock_help.called
self.mock_move.assert_called_once_with(expected)
def test_index_error(self):
self.mock_add.side_effect = IndexError()
test_args = shlex.split("add 'this is a new item'")
with pytest.raises(SystemExit):
run(test_args)
term.red.assert_called_once_with('Incorrect number of arguments provided')
def test_connection_error(self):
self.mock_add.side_effect = requests.exceptions.ConnectionError()
test_args = shlex.split("add 'this is a new item'")
with pytest.raises(SystemExit):
run(test_args)
term.red.assert_called_once_with(
'Unable to connect to Vittlify instance at vittlify_url'
)
def test_http_error(self):
self.mock_add.side_effect = requests.exceptions.HTTPError('500 Message')
test_args = shlex.split("add 'this is a new item'")
with pytest.raises(SystemExit):
run(test_args)
term.red.assert_called_once_with('Server responded with 500 Message')
def test_help(self):
test_args = shlex.split("help command")
expected = ['command']
run(test_args)
assert not self.mock_show.called
assert not self.mock_complete.called
assert not self.mock_modify.called
assert not self.mock_add.called
assert not self.mock_move.called
self.mock_help.assert_called_once_with(expected)
class TestDisplayShoppingListCategories:
@pytest.fixture(autouse=True)
def setUp(self, mocker):
self.get_shopping_list_info_patcher = mock.patch('vt.vt.get_shopping_list_info')
self.mock_get_shopping_list_info = self.get_shopping_list_info_patcher.start()
self.print_table_patcher = mock.patch('vt.vt.print_table')
self.mock_print_table = self.print_table_patcher.start()
mocker.patch.object(term, 'red', autospec=True)
self.mock_get_shopping_list_info.return_value = {'name': 'test_list'}
yield
self.get_shopping_list_info_patcher.stop()
self.print_table_patcher.stop()
def test_no_categories(self):
display_shopping_list_categories('test_guid')
self.mock_get_shopping_list_info.assert_called_once_with('test_guid')
term.red.assert_called_once_with("No categories found for test_list.")
def test_has_categories(self):
self.mock_get_shopping_list_info.return_value = {
'name': 'test_list',
'categories': [
{'name': 'type A'},
{'name': 'type B'},
],
}
display_shopping_list_categories('test_guid')
self.mock_print_table.assert_called_once_with(
[['type A'], ['type B']], title='test_list'
)
class TestHelp(unittest.TestCase):
def setUp(self):
self.general_help_patcher = mock.patch('vt.vt.GENERAL_HELP')
self.mock_general_help = self.general_help_patcher.start()
self.lists_help_patcher = mock.patch('vt.vt.LISTS_HELP')
self.mock_lists_help = self.lists_help_patcher.start()
self.list_help_patcher = mock.patch('vt.vt.LIST_HELP')
self.mock_list_help = self.list_help_patcher.start()
self.done_help_patcher = mock.patch('vt.vt.DONE_HELP')
self.mock_done_help = self.done_help_patcher.start()
self.undone_help_patcher = mock.patch('vt.vt.UNDONE_HELP')
self.mock_undone_help = self.undone_help_patcher.start()
self.comment_help_patcher = mock.patch('vt.vt.COMMENT_HELP')
self.mock_comment_help = self.comment_help_patcher.start()
self.move_help_patcher = mock.patch('vt.vt.MOVE_HELP')
self.mock_move_help = self.move_help_patcher.start()
self.categories_help_patcher = mock.patch('vt.vt.CATEGORIES_HELP')
self.mock_categories_help = self.categories_help_patcher.start()
self.categorize_help_patcher = mock.patch('vt.vt.CATEGORIZE_HELP')
self.mock_categorize_help = self.categorize_help_patcher.start()
def tearDown(self):
self.general_help_patcher.stop()
self.lists_help_patcher.stop()
self.list_help_patcher.stop()
self.done_help_patcher.stop()
self.undone_help_patcher.stop()
self.comment_help_patcher.stop()
self.move_help_patcher.stop()
self.categories_help_patcher.stop()
self.categorize_help_patcher.stop()
def test_no_args(self):
expected = self.mock_general_help
actual = help([])
self.assertEqual(expected, actual)
def test_unknown_command(self):
expected = self.mock_general_help
actual = help(['unknown command'])
self.assertEqual(expected, actual)
def test_lists(self):
expected = self.mock_lists_help
actual = help(['lists'])
self.assertEqual(expected, actual)
def test_list(self):
expected = self.mock_list_help
actual = help(['list'])
self.assertEqual(expected, actual)
def test_done(self):
expected = self.mock_done_help
actual = help(['done'])
self.assertEqual(expected, actual)
def test_complete(self):
expected = self.mock_done_help
actual = help(['complete'])
self.assertEqual(expected, actual)
def test_undone(self):
expected = self.mock_undone_help
actual = help(['undone'])
self.assertEqual(expected, actual)
def test_uncomplete(self):
expected = self.mock_undone_help
actual = help(['uncomplete'])
self.assertEqual(expected, actual)
def test_comment(self):
expected = self.mock_comment_help
actual = help(['comment'])
self.assertEqual(expected, actual)
def test_modify(self):
expected = self.mock_comment_help
actual = help(['modify'])
self.assertEqual(expected, actual)
def test_comments(self):
expected = self.mock_comment_help
actual = help(['comments'])
self.assertEqual(expected, actual)
def test_edit(self):
expected = self.mock_comment_help
actual = help(['edit'])
self.assertEqual(expected, actual)
def test_move(self):
expected = self.mock_move_help
actual = help(['move'])
self.assertEqual(expected, actual)
def test_mv(self):
expected = self.mock_move_help
actual = help(['mv'])
self.assertEqual(expected, actual)
def test_categories(self):
expected = self.mock_categories_help
actual = help(['categories'])
self.assertEqual(expected, actual)
def test_categorize(self):
expected = self.mock_categorize_help
actual = help(['categorize'])
self.assertEqual(expected, actual)
def test_label(self):
expected = self.mock_categorize_help
actual = help(['label'])
self.assertEqual(expected, actual)
| [
"[email protected]"
]
| |
41ef33c1c1af378a664ea82f485c5a12ebeedd1c | a0fb29f99a852089193e4cc9a11e7263dc3f8b5f | /mayan/apps/metadata/literals.py | aba1309e370f89d0f6259a24ca393df9dc3e1f1c | [
"Apache-2.0"
]
| permissive | ikang9712/Mayan-EDMS | 0e22a944d63657cea59c78023b604a01a622b52a | d6e57e27a89805329fe0c5582caa8e17882d94e6 | refs/heads/master | 2023-07-28T19:41:55.269513 | 2021-09-07T14:16:14 | 2021-09-07T14:16:14 | 402,884,683 | 1 | 0 | NOASSERTION | 2021-09-03T20:00:09 | 2021-09-03T20:00:09 | null | UTF-8 | Python | false | false | 227 | py | from .parsers import MetadataParser
from .validators import MetadataValidator
DEFAULT_METADATA_AVAILABLE_VALIDATORS = MetadataValidator.get_import_paths()
DEFAULT_METADATA_AVAILABLE_PARSERS = MetadataParser.get_import_paths()
| [
"[email protected]"
]
| |
c93ba3313bf6c3ee32e36cad9c787f55c5d4548b | 8395ffb48750359d1bd51a201a41c7fe124998bc | /apc2015/perception/single_utils/src/generate_naive_cloud.py | 4195bb9783faaf79d4485ed09ada91429266c3d6 | []
| no_license | duke-iml/ece490-s2016 | ab6c3d3fb159a28a9c38487cdb1ad3993008b854 | f9cc992fbaadedc8a69678ba39f0c9d108e6910d | refs/heads/master | 2020-04-12T09:03:56.601000 | 2016-11-29T21:36:48 | 2016-11-29T21:36:48 | 49,226,568 | 2 | 6 | null | 2016-11-29T21:36:49 | 2016-01-07T19:42:34 | Python | UTF-8 | Python | false | false | 2,659 | py | #!/usr/bin/env python
from __future__ import division
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import subprocess
import time
import psutil
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
from common_utils import *
from math import pi, sin, cos, tan, atan, sqrt
pid = None
file_name = None
rgb_mat = None
depth_mat = None
bridge = CvBridge()
w = 320
h = 240
diag_ang = 74/180*pi
diag = sqrt(w**2+h**2)
lift = diag/2 / tan(diag_ang/2)
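# Pinhole back-projection constants: `lift` plays the role of a focal length in
# pixels, derived from the 74-degree diagonal field of view of the 320x240 depth
# stream, so a pixel offset from the image center maps to a ray as offset / lift.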
def receive_rgb(data):
global rgb_mat
rgb_mat = bridge.imgmsg_to_cv2(data, "bgr8")
if depth_mat is not None:
process()
def receive_depth(data):
global depth_mat
depth_mat = bridge.imgmsg_to_cv2(data, "mono16")
depth_mat = depth_mat[:,:,0]
if rgb_mat is not None:
process()
def process():
psutil.Process(pid).kill()
cv2.imwrite(file_name+".bmp", rgb_mat)
cv2.imwrite(file_name+".depth.bmp", depth_mat)
assert depth_mat.shape == (h, w)
point_cloud = []
for i in range(h):
for j in range(w):
depth = depth_mat[i, j]
b1, g1, r1 = list(rgb_mat[i*2, j*2, :].flatten())
b2, g2, r2 = list(rgb_mat[i*2+1, j*2, :].flatten())
b3, g3, r3 = list(rgb_mat[i*2, j*2+1, :].flatten())
b4, g4, r4 = list(rgb_mat[i*2+1, j*2+1, :].flatten())
b1 = int(b1)
b2 = int(b2)
b3 = int(b3)
b4 = int(b4)
g1 = int(g1)
g2 = int(g2)
g3 = int(g3)
g4 = int(g4)
r1 = int(r1)
r2 = int(r2)
r3 = int(r3)
r4 = int(r4)
r = int((r1+r2+r3+r4)/4)
g = int((g1+g2+g3+g4)/4)
b = int((b1+b2+b3+b4)/4)
            rgb = rgb_to_pcl_float(r, g, b)
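            # A depth value of 32001 appears to be the emitter's sentinel for "no
            # measurement" (valid readings are asserted to stay below 20000 mm),
            # so such pixels are skipped instead of being back-projected.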
if depth==32001:
continue
assert depth<20000
coord = (j+0.5-w/2, i+0.5-h/2)
real_x = coord[0]/lift*depth
real_y = coord[1]/lift*depth
point_cloud.append([real_x/1000, real_y/1000, depth/1000, rgb])
write_pcd_file(point_cloud, file_name)
rospy.signal_shutdown("Point cloud made, shutting down...\n")
def main():
global file_name
if len(sys.argv)>=2:
file_name = sys.argv[1]
else:
file_name = 'point_cloud.pcd'
global pid
process = subprocess.Popen('hardware_layer/RealSense_ROS_Emitter', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pid = process.pid
time.sleep(3)
rospy.init_node('naive_point_cloud', disable_signals=True)
rgb_sub = rospy.Subscriber("/realsense/rgb", Image, receive_rgb, queue_size=1)
depth_sub = rospy.Subscriber("/realsense/depth", Image, receive_depth, queue_size=1)
rospy.spin()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
1ffc8b3649921a8cf943112df31655726ca74210 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/143/usersdata/210/62277/submittedfiles/av2_p3_m2.py | 26e0d4120d107718e01aaba735ca255a96ae8f9d | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # -*- coding: utf-8 -*-
def degrais(a):
soma=0
degrau=0
for i in range(0,len(a)-1,1):
soma=(a[i]-a[i+1])
if soma<0:
soma=soma*(-1)
if soma>degrau:
degrau=soma
return degrau
h=int(input('digite o valor de h:'))
j=[]
for i in range(0,h,1):
numero=int(input('digite o numero:'))
j.append(numero)
x=degrais(j)
print(x)
| [
"[email protected]"
]
| |
7e70251ae9261b6cc83c7ebf3233459f5515f267 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/oslogin/v1beta/oslogin-v1beta-py/google/cloud/oslogin_v1beta/services/os_login_service/transports/grpc_asyncio.py | 1c6fbed331f6c7d8d3fb9b348ed3be8e16af48ff | [
"Apache-2.0"
]
| permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,112 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.oslogin.common import common_pb2 # type: ignore
from google.cloud.oslogin_v1beta.types import oslogin
from google.protobuf import empty_pb2 # type: ignore
from .base import OsLoginServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import OsLoginServiceGrpcTransport
class OsLoginServiceGrpcAsyncIOTransport(OsLoginServiceTransport):
"""gRPC AsyncIO backend transport for OsLoginService.
Cloud OS Login API
The Cloud OS Login API allows you to manage users and their
associated SSH public keys for logging into virtual machines on
Google Cloud Platform.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'oslogin.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'oslogin.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def delete_posix_account(self) -> Callable[
[oslogin.DeletePosixAccountRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete posix account method over gRPC.
Deletes a POSIX account.
Returns:
Callable[[~.DeletePosixAccountRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_posix_account' not in self._stubs:
self._stubs['delete_posix_account'] = self.grpc_channel.unary_unary(
'/google.cloud.oslogin.v1beta.OsLoginService/DeletePosixAccount',
request_serializer=oslogin.DeletePosixAccountRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_posix_account']
@property
def delete_ssh_public_key(self) -> Callable[
[oslogin.DeleteSshPublicKeyRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete ssh public key method over gRPC.
Deletes an SSH public key.
Returns:
Callable[[~.DeleteSshPublicKeyRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_ssh_public_key' not in self._stubs:
self._stubs['delete_ssh_public_key'] = self.grpc_channel.unary_unary(
'/google.cloud.oslogin.v1beta.OsLoginService/DeleteSshPublicKey',
request_serializer=oslogin.DeleteSshPublicKeyRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_ssh_public_key']
@property
def get_login_profile(self) -> Callable[
[oslogin.GetLoginProfileRequest],
Awaitable[oslogin.LoginProfile]]:
r"""Return a callable for the get login profile method over gRPC.
Retrieves the profile information used for logging in
to a virtual machine on Google Compute Engine.
Returns:
Callable[[~.GetLoginProfileRequest],
Awaitable[~.LoginProfile]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_login_profile' not in self._stubs:
self._stubs['get_login_profile'] = self.grpc_channel.unary_unary(
'/google.cloud.oslogin.v1beta.OsLoginService/GetLoginProfile',
request_serializer=oslogin.GetLoginProfileRequest.serialize,
response_deserializer=oslogin.LoginProfile.deserialize,
)
return self._stubs['get_login_profile']
@property
def get_ssh_public_key(self) -> Callable[
[oslogin.GetSshPublicKeyRequest],
Awaitable[common_pb2.SshPublicKey]]:
r"""Return a callable for the get ssh public key method over gRPC.
Retrieves an SSH public key.
Returns:
Callable[[~.GetSshPublicKeyRequest],
Awaitable[~.SshPublicKey]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_ssh_public_key' not in self._stubs:
self._stubs['get_ssh_public_key'] = self.grpc_channel.unary_unary(
'/google.cloud.oslogin.v1beta.OsLoginService/GetSshPublicKey',
request_serializer=oslogin.GetSshPublicKeyRequest.serialize,
response_deserializer=common_pb2.SshPublicKey.FromString,
)
return self._stubs['get_ssh_public_key']
@property
def import_ssh_public_key(self) -> Callable[
[oslogin.ImportSshPublicKeyRequest],
Awaitable[oslogin.ImportSshPublicKeyResponse]]:
r"""Return a callable for the import ssh public key method over gRPC.
Adds an SSH public key and returns the profile
information. Default POSIX account information is set
when no username and UID exist as part of the login
profile.
Returns:
Callable[[~.ImportSshPublicKeyRequest],
Awaitable[~.ImportSshPublicKeyResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'import_ssh_public_key' not in self._stubs:
self._stubs['import_ssh_public_key'] = self.grpc_channel.unary_unary(
'/google.cloud.oslogin.v1beta.OsLoginService/ImportSshPublicKey',
request_serializer=oslogin.ImportSshPublicKeyRequest.serialize,
response_deserializer=oslogin.ImportSshPublicKeyResponse.deserialize,
)
return self._stubs['import_ssh_public_key']
@property
def update_ssh_public_key(self) -> Callable[
[oslogin.UpdateSshPublicKeyRequest],
Awaitable[common_pb2.SshPublicKey]]:
r"""Return a callable for the update ssh public key method over gRPC.
Updates an SSH public key and returns the profile
information. This method supports patch semantics.
Returns:
Callable[[~.UpdateSshPublicKeyRequest],
Awaitable[~.SshPublicKey]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_ssh_public_key' not in self._stubs:
self._stubs['update_ssh_public_key'] = self.grpc_channel.unary_unary(
'/google.cloud.oslogin.v1beta.OsLoginService/UpdateSshPublicKey',
request_serializer=oslogin.UpdateSshPublicKeyRequest.serialize,
response_deserializer=common_pb2.SshPublicKey.FromString,
)
return self._stubs['update_ssh_public_key']
__all__ = (
'OsLoginServiceGrpcAsyncIOTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
68062176f60959a3d7bd5ed6cfbb91d826843649 | ca5b5c217e0053645c2664d777699e9a5050715e | /python/primitive.py | 1352c69b984be863d2167845164d2ffaa39c93e0 | [
"MIT"
]
| permissive | rodluger/starrynight | 1405ffdb5a0dd0fefc0ae34e7cdaf7eab4735356 | d3f015e466621189cb271d4d18b538430b14a557 | refs/heads/master | 2021-10-26T03:32:15.220725 | 2021-10-22T15:16:48 | 2021-10-22T15:16:48 | 236,542,672 | 7 | 1 | MIT | 2020-06-03T19:51:10 | 2020-01-27T16:58:05 | Jupyter Notebook | UTF-8 | Python | false | false | 15,076 | py | from special import hyp2f1, J, ellip
from utils import *
from vieta import Vieta
from linear import dP2
import matplotlib.pyplot as plt
import numpy as np
__all__ = ["compute_P", "compute_Q", "compute_T"]
def compute_U(vmax, s1):
"""
Given s1 = sin(0.5 * kappa), compute the integral of
cos(x) sin^v(x)
from 0.5 * kappa1 to 0.5 * kappa2 recursively and return an array
containing the values of this function from v = 0 to v = vmax.
"""
U = np.empty(vmax + 1)
U[0] = pairdiff(s1)
term = s1 ** 2
for v in range(1, vmax + 1):
U[v] = pairdiff(term) / (v + 1)
term *= s1
return U
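# Note on compute_U: since d/dx [sin^(v+1)(x) / (v + 1)] = cos(x) sin^v(x), the
# loop above is just the closed form U[v] = pairdiff(s1**(v + 1)) / (v + 1)
# evaluated term by term (U[0] = pairdiff(s1) is the v = 0 case of the same rule).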
def compute_I(nmax, kappa, s1, c1):
# Lower boundary
I = np.empty(nmax + 1)
I[0] = 0.5 * pairdiff(kappa)
# Recurse upward
s2 = s1 ** 2
term = s1 * c1
for v in range(1, nmax + 1):
I[v] = (1.0 / (2 * v)) * ((2 * v - 1) * I[v - 1] - pairdiff(term))
term *= s2
return I
def _compute_W_indef(nmax, s2, q2, q3):
"""
Compute the expression
s^(2n + 2) (3 / (n + 1) * 2F1(-1/2, n + 1, n + 2, 1 - q^2) + 2q^3) / (2n + 5)
evaluated at n = [0 .. nmax], where
s = sin(1/2 kappa)
q = (1 - s^2 / k^2)^1/2
by either upward recursion (stable for |1 - q^2| > 1/2) or downward
recursion (always stable).
"""
W = np.empty(nmax + 1)
if np.abs(1 - q2) < 0.5:
# Setup
invs2 = 1 / s2
z = (1 - q2) * invs2
s2nmax = s2 ** nmax
x = q2 * q3 * s2nmax
# Upper boundary condition
W[nmax] = (
s2
* s2nmax
* (3 / (nmax + 1) * hyp2f1(-0.5, nmax + 1, nmax + 2, 1 - q2) + 2 * q3)
/ (2 * nmax + 5)
)
# Recurse down
for b in range(nmax - 1, -1, -1):
f = 1 / (b + 1)
A = z * (1 + 2.5 * f)
B = x * f
W[b] = A * W[b + 1] + B
x *= invs2
else:
# Setup
z = s2 / (1 - q2)
x = -2 * q3 * (z - s2) * s2
# Lower boundary condition
W[0] = (2 / 5) * (z * (1 - q3) + s2 * q3)
# Recurse up
for b in range(1, nmax + 1):
f = 1 / (2 * b + 5)
A = z * (2 * b) * f
B = x * f
W[b] = A * W[b - 1] + B
x *= s2
return W
def compute_W(nmax, s2, q2, q3):
return pairdiff(
np.array([_compute_W_indef(nmax, s2[i], q2[i], q3[i]) for i in range(len(s2))])
)
def compute_J(nmax, k2, km2, kappa, s1, s2, c1, q2, dF, dE):
"""
Return the array J[0 .. nmax], computed recursively using
a tridiagonal solver and a lower boundary condition
(analytic in terms of elliptic integrals) and an upper
boundary condition (computed numerically).
"""
# Boundary conditions
z = s1 * c1 * np.sqrt(q2)
resid = km2 * pairdiff(z)
f0 = (1 / 3) * (2 * (2 - km2) * dE + (km2 - 1) * dF + resid)
fN = J(nmax, k2, kappa)
# Set up the tridiagonal problem
a = np.empty(nmax - 1)
b = np.empty(nmax - 1)
c = np.empty(nmax - 1)
term = k2 * z * q2 ** 2
for i, v in enumerate(range(2, nmax + 1)):
amp = 1.0 / (2 * v + 3)
a[i] = -2 * (v + (v - 1) * k2 + 1) * amp
b[i] = (2 * v - 3) * k2 * amp
c[i] = pairdiff(term) * amp
term *= s2
# Add the boundary conditions
c[0] -= b[0] * f0
c[-1] -= fN
# Construct the tridiagonal matrix
A = np.diag(a, 0) + np.diag(b[1:], -1) + np.diag(np.ones(nmax - 2), 1)
# Solve
soln = np.linalg.solve(A, c)
return np.concatenate(([f0], soln, [fN]))
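# Note on compute_J (a sketch inferred from the coefficients above): each interior
# row of the tridiagonal system encodes the three-term recurrence
#   (2v + 3) J[v] = 2 (v + (v - 1) k^2 + 1) J[v - 1] - (2v - 3) k^2 J[v - 2] + pairdiff(term_v),
# solved as a two-point boundary-value problem (J[0] analytic in elliptic
# integrals, J[nmax] evaluated numerically), presumably for numerical stability.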
def K(I, delta, u, v):
"""Return the integral K, evaluated as a sum over I."""
return sum([Vieta(i, u, v, delta) * I[i + u] for i in range(u + v + 1)])
def L(J, k, delta, u, v, t):
"""Return the integral L, evaluated as a sum over J."""
return k ** 3 * sum(
[Vieta(i, u, v, delta) * J[i + u + t] for i in range(u + v + 1)]
)
def compute_H(uvmax, xi, gradient=False):
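    """Return the matrix of helper integrals H[u, v] over the limit pairs in ``xi``.

    (Sketch, inferred from the recursions below: H[u, v] accumulates the integral
    of cos(xi)**u * sin(xi)**v between consecutive (lower, upper) pairs of ``xi``;
    with ``gradient=True`` the integrand cos(xi)**u * sin(xi)**v evaluated at each
    limit is returned as well, for derivatives with respect to the limits.)
    """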
c = np.cos(xi)
s = np.sin(xi)
cs = c * s
cc = c ** 2
ss = s ** 2
H = np.empty((uvmax + 1, uvmax + 1))
dH = np.empty((uvmax + 1, uvmax + 1, len(xi)))
H[0, 0] = pairdiff(xi)
dH[0, 0] = 1
H[1, 0] = pairdiff(s)
dH[1, 0] = c
H[0, 1] = -pairdiff(c)
dH[0, 1] = s
H[1, 1] = -0.5 * pairdiff(cc)
dH[1, 1] = cs
for u in range(2):
for v in range(2, uvmax + 1 - u):
H[u, v] = (-pairdiff(dH[u, v - 2] * cs) + (v - 1) * H[u, v - 2]) / (u + v)
dH[u, v] = dH[u, v - 2] * ss
for u in range(2, uvmax + 1):
for v in range(uvmax + 1 - u):
H[u, v] = (pairdiff(dH[u - 2, v] * cs) + (u - 1) * H[u - 2, v]) / (u + v)
dH[u, v] = dH[u - 2, v] * cc
if gradient:
return H, dH
else:
return H
def _compute_T2_indef(b, xi):
"""
Note: requires b >= 0.
"""
s = np.sin(xi)
c = np.cos(xi)
t = s / c
sgn = np.sign(s)
bc = np.sqrt(1 - b ** 2)
bbc = b * bc
# Special cases
if xi == 0:
return -(np.arctan((2 * b ** 2 - 1) / (2 * bbc)) + bbc) / 3
elif xi == 0.5 * np.pi:
return (0.5 * np.pi - np.arctan(b / bc)) / 3
elif xi == np.pi:
return (0.5 * np.pi + bbc) / 3
elif xi == 1.5 * np.pi:
return (0.5 * np.pi + np.arctan(b / bc) + 2 * bbc) / 3
# Figure out the offset
if xi < 0.5 * np.pi:
delta = 0
elif xi < np.pi:
delta = np.pi
elif xi < 1.5 * np.pi:
delta = 2 * bbc
else:
delta = np.pi + 2 * bbc
# We're done
return (
np.arctan(b * t)
- sgn * (np.arctan(((s / (1 + c)) ** 2 + 2 * b ** 2 - 1) / (2 * bbc)) + bbc * c)
+ delta
) / 3
def compute_P(ydeg, bo, ro, kappa):
"""Compute the P integral."""
# Basic variables
delta = (bo - ro) / (2 * ro)
k2 = (1 - ro ** 2 - bo ** 2 + 2 * bo * ro) / (4 * bo * ro)
k = np.sqrt(k2)
km2 = 1.0 / k2
fourbr15 = (4 * bo * ro) ** 1.5
k3fourbr15 = k ** 3 * fourbr15
tworo = np.empty(ydeg + 4)
tworo[0] = 1.0
for i in range(1, ydeg + 4):
tworo[i] = tworo[i - 1] * 2 * ro
# Pre-compute the helper integrals
x = 0.5 * kappa
s1 = np.sin(x)
s2 = s1 ** 2
c1 = np.cos(x)
q2 = 1 - np.minimum(1.0, s2 / k2)
q3 = q2 ** 1.5
U = compute_U(2 * ydeg + 5, s1)
I = compute_I(ydeg + 3, kappa, s1, c1)
W = compute_W(ydeg, s2, q2, q3)
# Compute the elliptic integrals
F, E, PIprime = ellip(bo, ro, kappa)
J = compute_J(ydeg + 1, k2, km2, kappa, s1, s2, c1, q2, F, E)
# Now populate the P array
P = np.zeros((ydeg + 1) ** 2)
n = 0
for l in range(ydeg + 1):
for m in range(-l, l + 1):
mu = l - m
nu = l + m
if (mu / 2) % 2 == 0:
# Same as in starry
P[n] = 2 * tworo[l + 2] * K(I, delta, (mu + 4) // 4, nu // 2)
elif mu == 1:
if l == 1:
# Same as in starry, but using expression from Pal (2012)
P[2] = dP2(bo, ro, k2, kappa, s1, s2, c1, F, E, PIprime)
elif l % 2 == 0:
# Same as in starry
P[n] = (
tworo[l - 1]
* fourbr15
* (
L(J, k, delta, (l - 2) // 2, 0, 0)
- 2 * L(J, k, delta, (l - 2) // 2, 0, 1)
)
)
else:
# Same as in starry
P[n] = (
tworo[l - 1]
* fourbr15
* (
L(J, k, delta, (l - 3) // 2, 1, 0)
- 2 * L(J, k, delta, (l - 3) // 2, 1, 1)
)
)
elif (mu - 1) / 2 % 2 == 0:
# Same as in starry
P[n] = (
2
* tworo[l - 1]
* fourbr15
* L(J, k, delta, (mu - 1) // 4, (nu - 1) // 2, 0)
)
else:
"""
A note about these cases. In the original starry code, these integrals
are always zero because the integrand is antisymmetric about the
midpoint. Now, however, the integration limits are different, so
there's no cancellation in general.
The cases below are just the first and fourth cases in equation (D25)
of the starry paper. We can re-write them as the first and fourth cases
in (D32) and (D35), respectively, but note that we pick up a factor
of `sgn(cos(phi))`, since the power of the cosine term in the integrand
is odd.
The other thing to note is that `u` in the call to `K(u, v)` is now
a half-integer, so our Vieta trick (D36, D37) doesn't work out of the box.
"""
if nu % 2 == 0:
res = 0
u = int((mu + 4.0) // 4)
v = int(nu / 2)
for i in range(u + v + 1):
res += Vieta(i, u, v, delta) * U[2 * (u + i) + 1]
P[n] = 2 * tworo[l + 2] * res
else:
res = 0
u = (mu - 1) // 4
v = (nu - 1) // 2
for i in range(u + v + 1):
res += Vieta(i, u, v, delta) * W[i + u]
P[n] = tworo[l - 1] * k3fourbr15 * res
n += 1
return P
def compute_Q(ydeg, lam, gradient=False):
# Pre-compute H
if gradient:
H, dH = compute_H(ydeg + 2, lam, gradient=True)
else:
H = compute_H(ydeg + 2, lam)
# Allocate
Q = np.zeros((ydeg + 1) ** 2)
dQdlam = np.zeros(((ydeg + 1) ** 2, len(lam)))
# Note that the linear term is special
Q[2] = pairdiff(lam) / 3
dQdlam[2] = np.ones_like(lam) / 3
# Easy!
n = 0
for l in range(ydeg + 1):
for m in range(-l, l + 1):
mu = l - m
nu = l + m
if nu % 2 == 0:
Q[n] = H[(mu + 4) // 2, nu // 2]
if gradient:
dQdlam[n] = dH[(mu + 4) // 2, nu // 2]
n += 1
# Enforce alternating signs for (lower, upper) limits
dQdlam *= np.repeat([-1, 1], len(lam) // 2).reshape(1, -1)
if gradient:
return Q, dQdlam
else:
return Q
def compute_T(ydeg, b, theta, xi):
# Pre-compute H
H = compute_H(ydeg + 2, xi)
# Vars
ct = np.cos(theta)
st = np.sin(theta)
ttinvb = st / (b * ct)
invbtt = ct / (b * st)
b32 = (1 - b ** 2) ** 1.5
bct = b * ct
bst = b * st
# Recurse
T = np.zeros((ydeg + 1) ** 2)
# Case 2 (special)
T[2] = pairdiff([np.sign(b) * _compute_T2_indef(np.abs(b), x) for x in xi])
# Special limit: sin(theta) = 0
if np.abs(st) < STARRY_T_TOL:
sgnct = np.sign(ct)
n = 0
for l in range(ydeg + 1):
for m in range(-l, l + 1):
mu = l - m
nu = l + m
if nu % 2 == 0:
T[n] = sgnct ** l * b ** (1 + nu // 2) * H[(mu + 4) // 2, nu // 2]
else:
if mu == 1:
if (l % 2) == 0:
T[n] = -sgnct * b32 * H[l - 2, 4]
elif l > 1:
T[n] = -b * b32 * H[l - 3, 5]
else:
T[n] = sgnct ** (l - 1) * (
b32 * b ** ((nu + 1) // 2) * H[(mu - 1) // 2, (nu + 5) // 2]
)
n += 1
return T
# Special limit: cos(theta) = 0
elif np.abs(ct) < STARRY_T_TOL:
sgnst = np.sign(st)
n = 0
for l in range(ydeg + 1):
for m in range(-l, l + 1):
mu = l - m
nu = l + m
if nu % 2 == 0:
T[n] = b ** ((mu + 2) // 2) * H[nu // 2, (mu + 4) // 2]
if sgnst == 1:
T[n] *= (-1) ** (mu // 2)
else:
T[n] *= (-1) ** (nu // 2)
else:
if mu == 1:
if (l % 2) == 0:
T[n] = (
(-sgnst) ** (l - 1) * b ** (l - 1) * b32 * H[1, l + 1]
)
elif l > 1:
T[n] = b ** (l - 2) * b32 * H[2, l]
if sgnst == 1:
T[n] *= (-1) ** l
else:
T[n] *= -1
else:
T[n] = (
b32 * b ** ((mu - 3) // 2) * H[(nu - 1) // 2, (mu + 5) // 2]
)
if sgnst == 1:
T[n] *= (-1) ** ((mu - 1) // 2)
else:
T[n] *= (-1) ** ((nu - 1) // 2)
n += 1
return T
# Cases 1 and 5
jmax = 0
Z0 = 1
for nu in range(0, 2 * ydeg + 1, 2):
kmax = 0
Z1 = Z0
for mu in range(0, 2 * ydeg - nu + 1, 2):
l = (mu + nu) // 2
n1 = l ** 2 + nu
n5 = (l + 2) ** 2 + nu + 1
Z2 = Z1
for j in range(jmax + 1):
Z_1 = -bst * Z2
Z_5 = b32 * Z2
for k in range(kmax + 1):
p = j + k
q = l + 1 - (j + k)
fac = -invbtt / (k + 1)
T[n1] += Z_1 * (bct * H[p + 1, q] - st * H[p, q + 1])
Z_1 *= (kmax + 1 - k) * fac
if n5 < (ydeg + 1) ** 2:
T[n5] += Z_5 * (bct * H[p + 1, q + 2] - st * H[p, q + 3])
Z_5 *= (kmax - k) * fac
T[n1] += Z_1 * (bct * H[p + 2, q - 1] - st * H[p + 1, q])
Z2 *= (jmax - j) / (j + 1) * ttinvb
kmax += 1
Z1 *= -bst
jmax += 1
Z0 *= bct
# Cases 3 and 4
Z0 = b32
kmax = 0
for l in range(2, ydeg + 1, 2):
n3 = l ** 2 + 2 * l - 1
n4 = (l + 1) ** 2 + 2 * l + 1
Z = Z0
for k in range(kmax + 1):
p = k
q = l + 1 - k
T[n3] -= Z * (bst * H[p + 1, q] + ct * H[p, q + 1])
if l < ydeg:
T[n4] -= Z * (
bst * st * H[p + 2, q]
+ bct * ct * H[p, q + 2]
+ (1 + b ** 2) * st * ct * H[p + 1, q + 1]
)
Z *= -(kmax - k) / (k + 1) * invbtt
kmax += 2
Z0 *= bst ** 2
return T
| [
"[email protected]"
]
| |
1ba87cd411f46c264b9fd8759ef716c3d9e27938 | c06efd90533c51c2b29b7e92cd13723388de25ee | /actions/patchStorageV1beta1StorageClass.py | a57bbce258efa5ad9e6ef149ec1d897e8648932f | []
| no_license | ajohnstone/stackstorm-kubernetes | 490e4a73daad3713d7c5b5b639d5f30ff1ab3e58 | 99ffad27f5947583a2ab1b56e80c06003d014c47 | refs/heads/master | 2021-01-11T23:29:49.642435 | 2016-12-07T13:20:34 | 2016-12-07T13:20:34 | 78,588,572 | 0 | 0 | null | 2017-01-11T00:48:59 | 2017-01-11T00:48:59 | null | UTF-8 | Python | false | false | 746 | py | from lib import k8s
from st2actions.runners.pythonrunner import Action
class patchStorageV1beta1StorageClass(Action):
def run(self,body,name,config_override=None,pretty=None):
myk8s = k8s.K8sClient(self.config)
args = {}
if body is not None:
args['body'] = body
else:
return (False, "body is a required parameter")
if name is not None:
args['name'] = name
else:
return (False, "name is a required parameter")
if config_override is not None:
args['config_override'] = config_override
if pretty is not None:
args['pretty'] = pretty
return (True, myk8s.runAction('patchStorageV1beta1StorageClass', **args))
| [
"[email protected]"
]
| |
003433cb893cff17a7ae9e5807ff49deed068997 | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2/dircache.pyi | 523b850bc3e93f867de75c9bef0100e3b6d22c54 | []
| no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | pyi | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-jedi-0.17.0-zugnvpgjfmuk5x4rfhhxlsknl2g226yt/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2/dircache.pyi | [
"[email protected]"
]
| |
27e3a773e1f3b1c7193ce9a831b0b54a38653ad7 | cf5f24e5a32f8cafe90d4253d727b1c0457da6a4 | /algorithm/BOJ_1629.py | 11a30af639ff558eb56b49660735d2acd32acf3e | []
| no_license | seoljeongwoo/learn | 537659ca942875f6846646c2e21e1e9f2e5b811e | 5b423e475c8f2bc47cb6dee09b8961d83ab08568 | refs/heads/main | 2023-05-04T18:07:27.592058 | 2021-05-05T17:32:50 | 2021-05-05T17:32:50 | 324,725,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | # import sys
# input = sys.stdin.readline
# A,B,C=map(int,input().split())
# def solve(a,b):
# if b==1: return a
# ret = solve(a,b//2)%C
# ret = (ret*ret)%C
# if b%2==1: ret = (ret*a)%C
# return ret
# print(solve(A,B)%C)
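# The one-liner below is equivalent to the commented-out recursion: Python's
# built-in three-argument pow(a, b, c) performs fast modular exponentiation,
# i.e. a**b % c in O(log b) multiplications.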
print(pow(*map(int,input().split()))) | [
"[email protected]"
]
| |
f23530b0fcab203fccb0a43b9d3560015edbb1df | 07504838d12c6328da093dce3726e8ed096cecdb | /pylon/resources/properties/safExtCnfg.py | f32c8cf1cee106166c91aab4c960446be0295d8e | []
| no_license | lcoppa/fiat-lux | 9caaa7f3105e692a149fdd384ec590676f06bf00 | 7c166bcc08768da67c241078b397570de159e240 | refs/heads/master | 2020-04-04T02:47:19.917668 | 2013-10-10T10:22:51 | 2013-10-10T10:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | """safExtCnfg standard property type, originally defined in resource file set
standard 00:00:00:00:00:00:00:00-0."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:14.
import pylon.resources.base
from pylon.resources.standard import standard
class safExtCnfg(pylon.resources.base.Inheriting):
"""safExtCnfg standard property type. Safety mode. Mode that a device
has to be brought to when a safety external request state is pending."""
def __init__(self):
super().__init__(
)
self._original_name = 'SCPTsafExtCnfg'
self._property_scope, self._property_key = 0, 257
self._definition = standard.add(self)
if __name__ == '__main__':
# unit test code.
item = safExtCnfg()
pass
| [
"[email protected]"
]
| |
df760f3fb2bae9441d342cf168781c8ce3d3cf92 | 261fa6004234ccae2b1a4ff455ae54aefecbb172 | /ui_extensions/content_export/views.py | cc9e021e8399ec531eb798666ee498596ae79847 | [
"Apache-2.0"
]
| permissive | svang001/cloudbolt-forge | 671575eecd54e1207b7dde144db2fdb6c43c9ddf | 3796900115876f8a9ee333b75f45e3d60d7705d7 | refs/heads/master | 2023-02-23T23:03:33.225739 | 2021-01-19T20:09:21 | 2021-01-19T20:09:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,587 | py | import requests
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.utils.html import mark_safe
from cbhooks.models import (
HookPointAction, RecurringActionJob, ServerAction, ResourceAction, TriggerPoint
)
from extensions.models import UIExtension, XUIIndexer
from extensions.views import admin_extension
from servicecatalog.models import ServiceBlueprint
from utilities.decorators import dialog_view
from utilities.permissions import cbadmin_required
from xui.content_export.forms import ExportContentForm
@admin_extension(title='Exportable Contents', description='All Exportable CloudBolt Contents')
@cbadmin_required
def export_content_list(request):
"""
View for listing metadata for all exportable contents.
"""
proto = request.META['wsgi.url_scheme']
host = request.META['HTTP_HOST']
resp = requests.get('{}://{}/api/v2/exportable-content/?version=dev'.format(proto, host), verify=False)
exportable_contents = []
response = resp.json()
from api.v2.serializers import keys_hyphens_to_underscores
if 'server-actions' in response:
for sa in response['server-actions']:
sa['id'] = sa['package-url'].split('/')[-2]
sa['collections'] = 'server-actions'
exportable_contents.append(keys_hyphens_to_underscores(sa))
if 'orchestration-actions' in response:
for oa in response['orchestration-actions']:
oa['id'] = oa['package-url'].split('/')[-2]
oa['collections'] = 'orchestration-actions'
exportable_contents.append(keys_hyphens_to_underscores(oa))
if 'ui-extension-packages' in response:
XUIIndexer().index()
for ui in response['ui-extension-packages']:
id = ui['package-url'].split('/')[-1]
ui['id'] = UIExtension.objects.get(name=id).id
ui['collections'] = 'ui-extension-packages'
exportable_contents.append(keys_hyphens_to_underscores(ui))
if 'blueprints' in response:
for bp in response['blueprints']:
bp['id'] = bp['package-url'].split('/')[-2]
bp['collections'] = 'blueprints'
exportable_contents.append(keys_hyphens_to_underscores(bp))
if 'recurring-jobs' in response:
for job in response['recurring-jobs']:
job['id'] = job['package-url'].split('/')[-2]
job['collections'] = 'recurring-jobs'
exportable_contents.append(keys_hyphens_to_underscores(job))
if 'resource-actions' in response:
for ra in response['resource-actions']:
ra['id'] = ra['package-url'].split('/')[-2]
ra['collections'] = 'resource-actions'
exportable_contents.append(keys_hyphens_to_underscores(ra))
list_context = {
'exportable_contents': exportable_contents,
'pagetitle': 'Exportable Contents',
}
return render(request, 'content_export/templates/list.html', list_context)
@dialog_view
@cbadmin_required
def export_content_edit(request, id=None, collections=''):
"""
Edit exportable contents
"""
if collections == 'blueprints':
instance = ServiceBlueprint.objects.get(id=id)
elif collections == 'resource-actions':
instance = ResourceAction.objects.get(id=id)
elif collections == 'server-actions':
instance = ServerAction.objects.get(id=id)
elif collections == 'recurring-jobs':
instance = RecurringActionJob.objects.get(id=id)
elif collections == 'orchestration-actions':
instance = HookPointAction.objects.get(id=id)
elif collections == 'ui-extension-packages':
instance = UIExtension.objects.get(id=id)
if request.method == 'POST':
form = ExportContentForm(request.POST, request.FILES, instance=instance)
if form.is_valid():
instance = form.save()
msg = "Metadata details for {} have been saved.".format(instance)
messages.success(request, msg)
return HttpResponseRedirect(request.META['HTTP_REFERER'])
else:
form = ExportContentForm(instance=instance)
return {
'title': 'Edit Exportable Metadata',
'form': form,
'action_url': reverse('export_content_edit', args=[id, collections]),
'use_ajax': True,
'submit': 'Save',
'extra_onready_js': mark_safe("$('.render_as_datepicker').datepicker({dateFormat: 'yy-mm-dd'});")
}
| [
"[email protected]"
]
| |
490fcdfb16141de4f142150d27b614173af087da | 2f0cb310e2ec8fb176ee240aa964a7eef5ed23b4 | /giico/quality_control_and_material_testing/doctype/bulk_density_of_masonary/bulk_density_of_masonary.py | 682281f533740a8c16ef57cb3acb6c2e523d8ca2 | [
"MIT"
]
| permissive | thispl/giico | b96cf6b707f361275f8723d15f8ea1f95f908c9c | 14c5631639ab56a586a7962be9871d722c20e205 | refs/heads/master | 2021-06-18T03:56:02.928303 | 2021-04-27T06:42:59 | 2021-04-27T06:42:59 | 200,183,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class BulkDensityOfmasonary(Document):
pass
| [
"[email protected]"
]
| |
56ab994254b3b1de4c46198dd4067152d1c0b8b9 | 47703c8cfd6b6cbbec7ceb2509da1bc049dd621f | /udoy_013.py | de28dafdc46a3bbd08f2137b5bbcbf693cf22f3f | []
| no_license | udoy382/PyCode | 0638a646bd4cac4095a58135aea97ba4ccfd5535 | 69efde580f019cd41061501554b6193688a0a06f | refs/heads/main | 2023-03-26T17:45:15.943887 | 2021-03-25T14:22:42 | 2021-03-25T14:22:42 | 324,485,735 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | # Short Hand If Else Notation In Python #22
a = int(input("enter a\n"))
b = int(input("enter b\n"))
# 1st
# if a>b: print("A B se bada hai bhai")
# 2nd
# print("B A se bada hai bhai") if a<b else print("A B se bada hai bhai") | [
"[email protected]"
]
| |
acdf56c82a6bb37ed814ba0a5223a77421137d5c | ef78bd58d61002f45778a40da7759ed0b1998cd3 | /code/transforms/univariategaussianization.py | 85eb0ed34aec6c919cee82f5578985a62cf4bd41 | [
"MIT"
]
| permissive | afcarl/isa | 61e85c0c790c7cc357e0c29fc5bda948e9c77ce4 | f0497c0cc7bd72e0de7f4f9a8da40e214c22abe9 | refs/heads/master | 2020-03-19T21:36:06.716167 | 2013-01-28T18:32:30 | 2013-01-28T18:32:30 | 136,944,562 | 1 | 0 | null | 2018-06-11T15:20:45 | 2018-06-11T15:20:44 | null | UTF-8 | Python | false | false | 1,634 | py | __license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <[email protected]>'
__docformat__ = 'epytext'
from scipy.special import erf, erfinv
from scipy.stats import norm
from scipy.optimize import bisect
from numpy import mean, sqrt, asarray, max, min, any
from transforms import Transform
import pdb
class UnivariateGaussianization(Transform):
def __init__(self, mog):
self.mog = mog
def apply(self, data):
# make sure data has right shape
data = asarray(data).reshape(1, -1)
# apply model CDF
data = self.mog.cdf(data)
# apply inverse Gaussian CDF
result = erfinv(data * 2. - 1.)
result[result > 6.] = 6.
result[result < -6.] = -6.
return result * sqrt(2.)
def inverse(self, data, max_iter=100):
# make sure data has right shape
data = asarray(data).reshape(1, -1)
# apply Gaussian CDF
data = norm.cdf(data)
# apply inverse model CDF
val_max = mean(self.mog.means) + 1.
val_min = mean(self.mog.means) - 1.
for t in range(data.shape[1]):
# make sure root lies between val_min and val_max
while float(self.mog.cdf(val_min)) > data[0, t]:
val_min -= 1.
while float(self.mog.cdf(val_max)) < data[0, t]:
val_max += 1.
# find root numerically
data[0, t] = bisect(
f=lambda x: float(self.mog.cdf(x)) - data[0, t],
a=val_min,
b=val_max,
maxiter=max_iter,
disp=False)
return data
def logjacobian(self, data):
# make sure data has right shape
data = asarray(data).reshape(1, -1)
data_ug = self.apply(data)
return self.mog.loglikelihood(data) - norm.logpdf(data_ug)
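# Illustrative usage sketch (not part of the original module; `mog` stands for any
# fitted mixture-of-Gaussians model exposing the cdf/loglikelihood interface this
# class assumes):
#
#   ug = UnivariateGaussianization(mog)
#   z = ug.apply(data)        # data has shape (1, N); z is approximately N(0, 1)
#   x = ug.inverse(z)         # numerically inverts the model CDF via bisection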
| [
"[email protected]"
]
| |
e4261450da05009ae1e965dc60840264ffe2a1e9 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/stmt_for_list_nested-32.py | 8e5e853af19d4b97d79508438d9d61dc0c5fdca2 | []
| no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | x:int = 0
y:int = 0
z:[int] = None
$ID = [1, 2, 3]
for x in z:
for y in z:
print(x * y)
| [
"[email protected]"
]
| |
b6415094da921188a6c07160bf88440442a8f16d | 049e2fab5e9e8f248e537cbada15d60d60536990 | /environment/env_multi.py | be5637dedf9dd6ef8320973bbc255ebc9740da5c | [
"MIT"
]
| permissive | RubenPants/RobotSimulator2D | adfd8c16ec48b34419cae096d16e5e6714410407 | 334d7b9cab0edb22d4670cfaf39fbed76c351758 | refs/heads/master | 2023-05-14T20:09:44.604695 | 2020-07-11T14:16:58 | 2020-07-11T14:16:58 | 223,198,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,070 | py | """
env_multi.py
Environment where a single genome gets evaluated over multiple games. This environment will be called in a process.
"""
import sys
from config import Config
from environment.entities.game import get_game
from population.population import Population
from utils.dictionary import D_DONE, D_SENSOR_LIST
class MultiEnvironment:
""" This class provides an environment to evaluate a single genome on multiple games. """
__slots__ = {
"batch_size", "game_config", "games", "make_net", "max_steps", "pop_config", "query_net",
}
def __init__(self,
make_net,
query_net,
game_config: Config,
pop_config: Config,
):
"""
Create an environment in which the genomes get evaluated across different games.
:param make_net: Method to create a network based on the given genome
:param query_net: Method to evaluate the network given the current state
:param game_config: Config file for game-creation
:param pop_config: Config file specifying how genome's network will be made
"""
self.batch_size = 0
self.games = []
self.make_net = make_net
self.max_steps = game_config.game.duration * game_config.game.fps
self.query_net = query_net
self.game_config = game_config
self.pop_config = pop_config
def eval_genome(self,
genome,
return_dict=None,
):
"""
Evaluate a single genome in a pre-defined game-environment.
:param genome: Tuple (genome_id, genome_class)
:param return_dict: Dictionary used to return observations corresponding the genome
"""
# Split up genome by id and genome itself
genome_id, genome = genome
used_connections = set(genome.get_used_connections().keys())
# Initialize the games on which the genome is tested
games = [get_game(g, cfg=self.game_config) for g in self.games]
for g in games: g.player.set_active_sensors(used_connections) # Set active-sensors
# Ask for each of the games the starting-state
states = [g.reset()[D_SENSOR_LIST] for g in games]
# Finished-state for each of the games is set to false
finished = [False] * self.batch_size
# Create the network used to query on, initialize it with the first-game's readings (good approximation)
net = self.make_net(genome=genome,
genome_config=self.pop_config.genome,
game_config=self.game_config,
bs=self.batch_size,
initial_read=states[0],
)
# Start iterating the environments
step_num = 0
while True:
# Check if maximum iterations is reached
if step_num == self.max_steps: break
# Determine the actions made by the agent for each of the states
actions = self.query_net(net, states)
# Check if each game received an action
assert len(actions) == len(games)
for i, (g, a, f) in enumerate(zip(games, actions, finished)):
# Ignore if game has finished
if not f:
# Proceed the game with one step, based on the predicted action
obs = g.step(l=a[0], r=a[1])
finished[i] = obs[D_DONE]
# Update the candidate's current state
states[i] = obs[D_SENSOR_LIST]
# Stop if agent reached target in all the games
if all(finished): break
step_num += 1
# Return the final observations
if return_dict is not None: return_dict[genome_id] = [g.close() for g in games]
def trace_genome(self,
genome,
return_dict=None,
):
"""
Get the trace of a single genome for a pre-defined game-environment.
:param genome: Tuple (genome_id, genome_class)
:param return_dict: Dictionary used to return the traces corresponding the genome-game combination
"""
# Split up genome by id and genome itself
genome_id, genome = genome
used_connections = set(genome.get_used_connections().keys())
# Initialize the games on which the genome is tested
games = [get_game(g, cfg=self.game_config) for g in self.games]
for g in games: g.player.set_active_sensors(used_connections) # Set active-sensors
# Ask for each of the games the starting-state
states = [g.reset()[D_SENSOR_LIST] for g in games]
# Initialize the traces
traces = [[g.player.pos.get_tuple()] for g in games]
# Finished-state for each of the games is set to false
finished = [False] * self.batch_size
# Create the network used to query on, initialize it with the first-game's readings (good approximation)
net = self.make_net(genome=genome,
genome_config=self.pop_config.genome,
game_config=self.game_config,
bs=self.batch_size,
initial_read=states[0],
)
# Start iterating the environments
step_num = 0
while True:
# Check if maximum iterations is reached
if step_num == self.max_steps: break
# Determine the actions made by the agent for each of the states
actions = self.query_net(net, states)
# Check if each game received an action
assert len(actions) == len(games)
for i, (g, a, f) in enumerate(zip(games, actions, finished)):
# Do not advance the player if target is reached
if f:
                    traces[i].append(g.player.pos.get_tuple())  # append to this game's own trace so trace lengths stay aligned
continue
# Proceed the game with one step, based on the predicted action
obs = g.step(l=a[0], r=a[1])
finished[i] = obs[D_DONE]
# Update the candidate's current state
states[i] = obs[D_SENSOR_LIST]
# Update the trace
traces[i].append(g.player.pos.get_tuple())
# Next step
step_num += 1
# Return the final observations
if return_dict is not None: return_dict[genome_id] = traces
# -----------------------------------------------> HELPER METHODS <----------------------------------------------- #
def set_games(self, games):
"""
Set the games-set with new games.
:param games: List of Game-IDs
"""
self.games = games
self.batch_size = len(games)
def get_game_params(self):
"""Return list of all game-parameters currently in self.games."""
return [get_game(i, cfg=self.game_config).game_params() for i in self.games]
def get_multi_env(pop: Population, game_config: Config):
"""Create a multi-environment used to evaluate a population on."""
if sys.platform == 'linux':
from environment.cy.env_multi_cy import MultiEnvironmentCy
return MultiEnvironmentCy(
make_net=pop.make_net,
query_net=pop.query_net,
game_config=game_config,
pop_config=pop.config,
)
else:
return MultiEnvironment(
make_net=pop.make_net,
query_net=pop.query_net,
game_config=game_config,
pop_config=pop.config,
)
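# A minimal, hypothetical usage sketch: wiring this environment to a population and
# evaluating one genome on a few games. The population object, genome tuple and game IDs
# below are placeholders, not real project objects.
#
#   pop = Population(...)                          # assumed to expose make_net / query_net / config
#   env = get_multi_env(pop=pop, game_config=game_config)
#   env.set_games([1, 2, 3])                       # placeholder game IDs
#   return_dict = dict()
#   env.eval_genome(genome=(genome_id, genome), return_dict=return_dict)
#   final_observations = return_dict[genome_id]    # one closing observation per game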
| [
"[email protected]"
]
| |
45b817d4a75f46e4e626eb9c9fb88a7376806c4e | 0805420ce1890c36aa9e0cc1a782945464433ef6 | /client/videoplayer/__init__.py | b55a1624352086133c05f65c066095386a59df16 | []
| no_license | cnrat/dec-eve-serenity | 4ebc3b2ab8faa6e6714dbb72b7ebcf92c4b2d75c | 37519e66a5fbb0d7c417d5cf9778636991efbed8 | refs/heads/master | 2021-01-21T03:39:48.969227 | 2016-08-10T05:25:07 | 2016-08-10T05:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\videoplayer\__init__.py
from _videoplayer import * | [
"[email protected]"
]
| |
a88f046366e59b9019ba58620dd77522a9c42a0a | 616cc6c05f525dd2cb67916601f6ecd2c8242f24 | /homework/hw01/problems/client/cli/ok.py | 66ac72252d323b4bd4a142ddda60f52f67f70359 | []
| no_license | cookieli/cs61a_li | 6f1d51aad7cd32fb27f64c855b3803bd2f8d9aad | 6ee0df9c64842bde9e30a0484e661abf04212358 | refs/heads/master | 2020-04-07T14:32:38.337554 | 2018-03-07T10:18:03 | 2018-03-07T10:18:03 | 124,218,933 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,952 | py | """ok is an autograder that you can use to run tests, back up your work, and
submit assignments.
You can run all tests with
python3 ok
There are several "options" you can give ok to modify its behavior. These
options generally have both a short form (preceded by a single dash, like -q)
or a long form (preceded by two dashes, like --question). This is similar to how
many other command line applications accept options. These options can be mixed
and matched in any order. The options are listed in full below, but we'll
describe some of the more common ones here.
To test a specific question, use the -q (or --question) option with the name of
the question:
python3 ok -q foo
python3 ok -q 12
By default, only tests that fail will appear. If you want to see the results
from all tests, you can use the -v (or --verbose) option:
python3 ok -q foo -v
To start an interactive interpreter after a failed test for debugging, use the
-i (or --interactive) option:
python3 ok -q foo -i
By default, after each test run ok will attempt to back up your work to the
server. To run the tests without any network access, use the --local option:
python3 ok -q foo --local
To submit the assignment after you're done, use the --submit option:
python3 ok --submit
Finally, to log out and log in under a different email, use --authenticate:
python3 ok --authenticate
Visit https://okpy.org to view your backups and submissions.
"""
from client import exceptions as ex
from client.api import assignment
from client.cli.common import messages
from client.utils import auth
from client.utils import output
from client.utils import software_update
from datetime import datetime
import argparse
import client
import logging
import os
import sys
import struct
LOGGING_FORMAT = '%(levelname)s | %(filename)s:%(lineno)d | %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger('client') # Get top-level logger
CLIENT_ROOT = os.path.dirname(client.__file__)
##########################
# Command-line Interface #
##########################
def parse_input(command_input=None):
"""Parses command line input."""
parser = argparse.ArgumentParser(
prog='python3 ok',
description=__doc__,
usage='%(prog)s [--help] [options]',
formatter_class=argparse.RawDescriptionHelpFormatter)
testing = parser.add_argument_group('running tests')
testing.add_argument('-q', '--question', type=str, action='append',
help="run tests for a specific question")
testing.add_argument('--suite', type=int, default=None,
help="run cases from a specific suite")
testing.add_argument('--case', type=int, action='append',
help="run specific cases")
testing.add_argument('-u', '--unlock', action='store_true',
help="unlock tests interactively")
testing.add_argument('-i', '--interactive', action='store_true',
help="start the Python interpreter after a failed test")
testing.add_argument('-v', '--verbose', action='store_true',
help="show all tests, not just passing tests")
testing.add_argument('--all', action='store_true',
help="run tests for all questions in config file")
testing.add_argument('--submit', action='store_true',
help="submit the assignment")
testing.add_argument('--backup', action='store_true',
help="attempt to reliably backup your work")
testing.add_argument('--revise', action='store_true',
help="submit composition revision")
testing.add_argument('--restore', action='store_true',
help="restore assignment from an earlier backup")
testing.add_argument('--timeout', type=int, default=10,
help="set the timeout duration (in seconds) for running tests")
# Experiments
experiment = parser.add_argument_group('experiment options')
experiment.add_argument('--no-hints', action='store_true',
help="do not give hints")
experiment.add_argument('--hint', action='store_true',
help="give a hint (if available)")
experiment.add_argument('--style', action='store_true',
help="run AutoStyle feedback system")
experiment.add_argument('--collab', action='store_true',
help="launch collaborative programming environment")
# Debug information
debug = parser.add_argument_group('debugging options')
debug.add_argument('--version', action='store_true',
help="print the version number and exit")
debug.add_argument('--tests', action='store_true',
help="display a list of all available tests")
debug.add_argument('--debug', action='store_true',
help="show debugging output")
# Grading
grading = parser.add_argument_group('grading options')
grading.add_argument('--lock', action='store_true',
help="lock the tests in a directory")
grading.add_argument('--score', action='store_true',
help="score the assignment")
grading.add_argument('--score-out', type=argparse.FileType('w'),
default=sys.stdout, help="write scores to a file")
grading.add_argument('--config', type=str,
help="use a specific configuration file")
# Server parameters
server = parser.add_argument_group('server options')
server.add_argument('--local', action='store_true',
help="disable any network activity")
server.add_argument('--server', type=str,
default='okpy.org',
help="set the server address")
server.add_argument('--authenticate', action='store_true',
help="authenticate, ignoring previous authentication")
server.add_argument('--get-token', action='store_true',
help="get ok access token")
server.add_argument('--insecure', action='store_true',
help="use http instead of https")
server.add_argument('--no-update', action='store_true',
help="do not check for ok updates")
server.add_argument('--update', action='store_true',
help="update ok and exit")
return parser.parse_args(command_input)
def main():
"""Run all relevant aspects of ok.py."""
args = parse_input()
log.setLevel(logging.DEBUG if args.debug else logging.ERROR)
log.debug(args)
# Checking user's Python bit version
bit_v = (8 * struct.calcsize("P"))
log.debug("Python {}bit".format(bit_v))
if args.version:
print("okpy=={}".format(client.__version__))
exit(0)
elif args.update:
print("Current version: {}".format(client.__version__))
did_update = software_update.check_version(
args.server, client.__version__, client.FILE_NAME, timeout=10)
exit(not did_update) # exit with error if ok failed to update
if args.get_token:
access_token = auth.authenticate(True)
print("Token: {}".format(access_token))
exit(not access_token) # exit with error if no access_token
assign = None
try:
if args.authenticate:
# Authenticate and check for success
if not auth.authenticate(True):
exit(1)
# Instantiating assignment
assign = assignment.load_assignment(args.config, args)
if args.tests:
print('Available tests:')
for name in assign.test_map:
print(' ' + name)
exit(0)
msgs = messages.Messages()
for name, proto in assign.protocol_map.items():
log.info('Execute {}.run()'.format(name))
proto.run(msgs)
msgs['timestamp'] = str(datetime.now())
except ex.LoadingException as e:
log.warning('Assignment could not load', exc_info=True)
print('Error loading assignment: ' + str(e))
except ex.AuthenticationException as e:
log.warning('Authentication exception occurred', exc_info=True)
print('Authentication error: {0}'.format(e))
except ex.OkException as e:
log.warning('General OK exception occurred', exc_info=True)
print('Error: ' + str(e))
except KeyboardInterrupt:
log.info('KeyboardInterrupt received.')
finally:
if not args.no_update:
try:
software_update.check_version(args.server, client.__version__,
client.FILE_NAME)
except KeyboardInterrupt:
pass
if assign:
assign.dump_tests()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
d62b71cee786178eddaf065c8d8850790282de38 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinci_v36r1p3/InstallArea/x86_64-slc6-gcc48-opt/python/StrippingArchive/Stripping20r0p2/Beauty2Charm_Lb2XBuilder.py | c4d358fa0c53e8008b7e6ea45f0cfda3390b17fc | []
| no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,053 | py | #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
from copy import deepcopy
from Gaudi.Configuration import *
from GaudiConfUtils.ConfigurableGenerators import CombineParticles
from PhysSelPython.Wrappers import Selection
from Beauty2Charm_LoKiCuts import LoKiCuts
from Beauty2Charm_Utils import *
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
class LcBuilder(object):
'''Produces all Lambda_c baryons for the Beauty2Charm module.'''
def __init__(self,pions,kaons,protons,config,config_pid):
self.pions = pions
self.kaons = kaons
self.protons = protons
self.config = config
self.pkpi = [self._makeLc2pKpi()]
self.pkpi_pid = [filterPID('Lc2pKPiPID',self.pkpi,config_pid)]
self.xic_pkpi = [self._makeXic2pKpi()]
def _makeLc2pKpi(self):
'''Makes Lc -> p K pi + cc'''
dm,units = LoKiCuts.cutValue(self.config['MASS_WINDOW'])
comboCuts = [LoKiCuts(['ASUMPT'],self.config).code(),
"(ADAMASS('Lambda_c+') < %s*%s) " % (dm+10,units),
hasTopoChild()]
comboCuts.append(LoKiCuts(['AMAXDOCA'],self.config).code())
comboCuts = LoKiCuts.combine(comboCuts)
momCuts = ["(ADMASS('Lambda_c+') < %s*%s) " % (dm,units),
LoKiCuts(['VCHI2DOF','BPVVDCHI2','BPVDIRA'],
self.config).code()]
momCuts = LoKiCuts.combine(momCuts)
cp = CombineParticles(CombinationCut=comboCuts,MotherCut=momCuts,
DecayDescriptors=["[Lambda_c+ -> p+ K- pi+]cc"])
return Selection('Lc2PKPiBeauty2Charm',Algorithm=cp,
RequiredSelections=[self.pions,self.kaons,
self.protons])
def _makeXic2pKpi(self):
'''Makes Xic -> p K pi + cc'''
dm,units = LoKiCuts.cutValue(self.config['MASS_WINDOW'])
comboCuts = [LoKiCuts(['ASUMPT'],self.config).code(),
"(ADAMASS('Xi_c+') < %s*%s) " % (dm+10,units),
hasTopoChild()]
comboCuts.append(LoKiCuts(['AMAXDOCA'],self.config).code())
comboCuts = LoKiCuts.combine(comboCuts)
momCuts = ["(ADMASS('Xi_c+') < %s*%s) " % (dm,units),
LoKiCuts(['VCHI2DOF','BPVVDCHI2','BPVDIRA'],
self.config).code()]
momCuts = LoKiCuts.combine(momCuts)
cp = CombineParticles(CombinationCut=comboCuts,MotherCut=momCuts,
DecayDescriptors=["[Xi_c+ -> p+ K- pi+]cc"])
return Selection('Xic2PKPiBeauty2Charm',Algorithm=cp,
RequiredSelections=[self.pions,self.kaons,
self.protons])
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
class Lb2XBuilder(object):
'''Makes all Lambda_b -> X lines.'''
def __init__(self,lc,d,hh,topoPions,topoKaons,protons,hhh,dst,lambda0,config):
self.lc = lc.pkpi
self.lc_pid = lc.pkpi_pid
self.xic = lc.xic_pkpi
self.d = d
self.d0 = d.hh
self.hh = hh
self.hhh = hhh
self.dst = dst
self.lambda0 = lambda0
self.topoPions = [topoPions]
self.topoKaons = [topoKaons]
self.protons = [protons]
self.config = deepcopy(config)
self.config['AM_MIN'] = '5200*MeV'
self.lines = []
# Lb -> Lc+- H-+ (+WS)
self._makeLb2LcH()
# Lb -> Xic+- H-+ (+WS)
self._makeLb2XicH()
# Sb+- -> D0(HH) p+-
self._makeSb2D0P()
# Sb -> D-+(HHH) p+-
self._makeSb02DP()
# Lb -> D0(HH) p+- H-+
self._makeLb2D0PH()
# Lb -> Lc+- 3Pi, KPiPi, ppbarPi, ppbarK (+WS)
self._makeLb2LcHHH()
# Lb -> Lc D (+WS)
self._makeLb2LcD()
# Lb -> Lc D* (+WS)
self._makeLb2LcDst()
# X -> Lc Lc (+WS)
self._makeX2LcLc()
# Lb -> Lc 5pi
self._makeLb2Lc5Pi()
# Lb -> D0 Lambda0
self._makeLb2D0Lambda0()
def _makeLb2LcH(self):
'''Make RS and WS Lb -> Lc H (H=pi,K) + cc.'''
pions = self.topoPions
kaons = self.topoKaons
decays = {'Lb2LcPi': ["[Lambda_b0 -> Lambda_c+ pi-]cc"],
'Lb2LcK' : ["[Lambda_b0 -> Lambda_c+ K-]cc"]}
inputs = {'Lb2LcPi': self.lc+pions, 'Lb2LcK': self.lc+kaons}
rs = makeB2XSels(decays,'Lc2PKPi',inputs,self.config)
decays = {'Lb2LcPiWS': ["[Lambda_b0 -> Lambda_c+ pi+]cc"],
'Lb2LcKWS' : ["[Lambda_b0 -> Lambda_c+ K+]cc"]}
inputs = {'Lb2LcPiWS':self.lc+pions, 'Lb2LcKWS':self.lc+kaons}
ws = makeB2XSels(decays,'Lc2PKPi',inputs,self.config)
decays = {'Lb2LcPiNoIP': ["[Lambda_b0 -> Lambda_c+ pi-]cc"]}
inputs = {'Lb2LcPiNoIP': self.lc_pid+pions}
noip = makeB2XSels(decays,'Lc2PKPi',inputs,self.config,False)
decays = {'Lb2LcPiNoIPWS': ["[Lambda_b0 -> Lambda_c+ pi+]cc"]}
inputs = {'Lb2LcPiNoIPWS': self.lc_pid+pions}
noip_ws = makeB2XSels(decays,'Lc2PKPi',inputs,self.config,False)
self.lines.append(ProtoLine(rs,1.0))
self.lines.append(ProtoLine(ws,0.1))
self.lines.append(ProtoLine(noip,1.0))
self.lines.append(ProtoLine(noip_ws,0.1))
def _makeLb2XicH(self):
'''Make RS and WS Lb -> Xi_c H (H=pi,K) + cc.'''
pions = self.topoPions
kaons = self.topoKaons
decays = {'Lb2XicPi': ["[Lambda_b0 -> Xi_c+ pi-]cc"],
'Lb2XicK' : ["[Lambda_b0 -> Xi_c+ K-]cc"]}
inputs = {'Lb2XicPi': self.xic+pions, 'Lb2XicK': self.xic+kaons}
rs = makeB2XSels(decays,'Xic2PKPi',inputs,self.config)
decays = {'Lb2XicPiWS': ["[Lambda_b0 -> Xi_c+ pi+]cc"],
'Lb2XicKWS' : ["[Lambda_b0 -> Xi_c+ K+]cc"]}
inputs = {'Lb2XicPiWS':self.xic+pions, 'Lb2XicKWS':self.xic+kaons}
ws = makeB2XSels(decays,'Xic2PKPi',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
self.lines.append(ProtoLine(ws,0.1))
def _makeLb2LcHHH(self):
'''Make RS and WS Lb -> Lc HHH (H=pi,K) + cc.'''
pipipi = self.hhh.pipipi
kpipi = self.hhh.kpipi
kkpi = self.hhh.kkpi
ppbarpi = self.hhh.ppbarpi
ppbark = self.hhh.ppbark
decays = {'Lb2LcPiPiPi' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)-]cc"],
'Lb2LcKPiPi' : ["[Lambda_b0 -> Lambda_c+ K_1(1270)-]cc"],
'Lb2LcppbarPi' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)-]cc"],
'Lb2LcppbarK' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)-]cc"],
'Lb2LcKKPi' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)-]cc"]}
inputs = {'Lb2LcPiPiPi' : self.lc_pid+pipipi,
'Lb2LcKPiPi' : self.lc_pid+kpipi,
'Lb2LcppbarPi' : self.lc_pid+ppbarpi,
'Lb2LcppbarK' : self.lc_pid+ppbark,
'Lb2LcKKPi' : self.lc_pid+kkpi}
rs = makeB2XSels(decays,'Lc2PKPi',inputs,self.config)
decays = {'Lb2LcPiPiPiWS' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)+]cc"],
'Lb2LcKPiPiWS' : ["[Lambda_b0 -> Lambda_c+ K_1(1270)+]cc"],
'Lb2LcppbarPiWS' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)+]cc"],
'Lb2LcppbarKWS' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)+]cc"],
'Lb2LcKKPiWS' : ["[Lambda_b0 -> Lambda_c+ a_1(1260)+]cc"]}
inputs = {'Lb2LcPiPiPiWS' : self.lc_pid+pipipi,
'Lb2LcKPiPiWS' : self.lc_pid+kpipi,
'Lb2LcppbarPiWS' : self.lc_pid+ppbarpi,
'Lb2LcppbarKWS' : self.lc_pid+ppbark,
'Lb2LcKKPiWS' : self.lc_pid+kkpi}
ws = makeB2XSels(decays,'Lc2PKPi',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
self.lines.append(ProtoLine(ws,0.1))
def _makeLb2D0PH(self):
'''Makes RS Lb -> D0(HH) p+- H-+ + c.c. and WS lines'''
decs = ["Lambda_b0 -> D0 Lambda0","Lambda_b0 -> D0 Lambda~0"]
decays = {'Lb2D0PH': decs}
inputs = {'Lb2D0PH': self.d0+self.hh.ph_pid}
rs = makeB2XSels(decays,'D02HH',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
decays = {'Lb2D0PHWS': decs}
inputs = {'Lb2D0PHWS': self.d0+self.hh.ph_ws}
ws = makeB2XSels(decays,'D02HH',inputs,self.config)
self.lines.append(ProtoLine(ws,0.1))
def _makeLb2D0Lambda0(self):
'''Makes RS Lb -> D0(HH) Lambda0 + c.c.'''
decs = ["Lambda_b0 -> D0 Lambda0","Lambda_b0 -> D0 Lambda~0"]
decays = {'Lb2D0Lambda0DD': decs}
inputs = {'Lb2D0Lambda0DD': self.d0 + self.lambda0["DD"]}
lb_dd = makeB2XSels(decays,'D02HH',inputs,self.config)
self.lines.append(ProtoLine(lb_dd,1.0))
decays = {'Lb2D0Lambda0LL': decs}
inputs = {'Lb2D0Lambda0LL': self.d0 + self.lambda0["LL"]}
lb_ll = makeB2XSels(decays,'D02HH',inputs,self.config)
self.lines.append(ProtoLine(lb_ll,1.0))
def _makeSb02DP(self):
'''Make RS and WS Sb0 -> D+- p-+ + cc.'''
protons = self.protons
decays = {'Sb02DP': ["[Sigma_b0 -> D- p+]cc"]}
inputs = {'Sb02DP': self.d.hhh_pid+protons}
rs = makeB2XSels(decays,'D2HHHPID',inputs,self.config)
decays = {'Sb02DPWS': ["[Sigma_b0 -> D+ p+]cc"]}
inputs = {'Sb02DPWS': self.d.hhh_pid+protons}
ws = makeB2XSels(decays,'D2HHHPID',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
self.lines.append(ProtoLine(ws,0.1))
def _makeSb2D0P(self):
'''Make Sb+- -> D0 p+- + cc.'''
protons = self.protons
decays = {'Sb2D0P': ["Sigma_b+ -> D0 p+","Sigma_b- -> D0 p~-"]}
inputs = {'Sb2D0P': self.d.hh_pid+protons}
rs = makeB2XSels(decays,'D2HHPID',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
def _makeLb2LcD(self):
'''Makes RS + WS Lb -> Lc D + c.c.'''
decays = {'Lb2LcD': ["[Lambda_b0 -> Lambda_c+ D-]cc"]}
inputs = {'Lb2LcD': self.d.hhh_pid+self.lc_pid}
rs = makeB2XSels(decays,'D2HHHPID',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
decays = {'Lb2LcDWS': ["[Lambda_b0 -> Lambda_c+ D+]cc"]}
inputs = {'Lb2LcDWS': self.d.hhh_pid+self.lc_pid}
ws = makeB2XSels(decays,'D2HHHPID',inputs,self.config)
self.lines.append(ProtoLine(ws,0.1))
def _makeLb2LcDst(self):
'''Makes RS + WS Lb -> Lc D* + c.c.'''
decays = {'Lb2LcDst': ["[Lambda_b0 -> Lambda_c+ D*(2010)-]cc"]}
inputs = {'Lb2LcDst': self.dst.d0pi_pid+self.lc_pid}
rs = makeB2XSels(decays,'Dstar2D0PiPID',inputs,self.config)
self.lines.append(ProtoLine(rs,1.0))
decays = {'Lb2LcDstWS': ["[Lambda_b0 -> Lambda_c+ D*(2010)+]cc"]}
inputs = {'Lb2LcDstWS': self.dst.d0pi_pid+self.lc_pid}
ws = makeB2XSels(decays,'Dstar2D0PiPID',inputs,self.config)
self.lines.append(ProtoLine(ws,0.1))
def _makeX2LcLc(self):
config = deepcopy(self.config)
config['AM_MIN' ] = '4800*MeV'
decays = {'X2LcLc': ["[B0 -> Lambda_c+ Lambda_c~-]cc"]}
inputs = {'X2LcLc': self.lc_pid}
rs = makeB2XSels(decays,'',inputs,config)
self.lines.append(ProtoLine(rs,1.0))
decays = {'X2LcLcWS': ["[B0 -> Lambda_c+ Lambda_c+]cc"]}
inputs = {'X2LcLcWS': self.lc_pid}
ws = makeB2XSels(decays,'',inputs,config)
self.lines.append(ProtoLine(ws,0.1))
def _makeLb2Lc5Pi(self):
decays = {'Lb2Lc5Pi':
["[Lambda_b0 -> Lambda_c+ a_1(1260)- rho(770)0]cc"]}
inputs = {'Lb2Lc5Pi': self.lc_pid + self.hhh.pipipi + self.hh.pipi_pid}
lb2lc5pi = makeB2XSels(decays,'Lc2PKPiPID',inputs,self.config)
self.lines.append(ProtoLine(lb2lc5pi,1.0))
#\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\#
| [
"[email protected]"
]
| |
46bf955e07557ee8530320380cf68eb939581578 | 227539d0906cdfbb7cd19f16599c35d5bd09abfd | /Stepik_Adaptive_Python/adaptive-python-en-master/Step 070 Riddle.py | 5762e4cba5be9ed1142cc7c9eba781abb385451a | []
| no_license | solomonli/PycharmProjects | cceb92a11ec1f9e7fef25bca552d8264c75228a0 | 31673627487db1370424f5b0aeee3e20bb23b47a | refs/heads/master | 2021-06-24T11:59:36.365496 | 2019-07-08T09:53:18 | 2019-07-08T09:53:18 | 148,558,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | riddle = '''
{0} and {1} sat in the tree.
{0} had fallen, {1} was stolen.
What's remaining in the tree?
'''
print(riddle.format(input(), input()))
| [
"[email protected]"
]
| |
4a13ba1319edbfe715b0595a65cffb4119942d5b | b84c89d0ade21bf8c2df9d0cf8f94d7a27c2824b | /test/integration/test_cursor.py | fc9dc209577a61eeb75a497eb6aa8552833b627a | [
"Apache-2.0"
]
| permissive | srlabUsask/py2neo | 931b06678561201d56a36ec10da7ad4614ab6c87 | 80d3cf1ab0b4cfb03b7824fd7a407b33c95a1e8f | refs/heads/master | 2022-11-16T21:17:42.319698 | 2020-07-12T23:00:29 | 2020-07-12T23:00:29 | 279,281,481 | 0 | 0 | Apache-2.0 | 2020-07-13T11:17:53 | 2020-07-13T11:17:50 | null | UTF-8 | Python | false | false | 5,453 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2020, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytest import raises
from py2neo import Record, Subgraph
def test_cannot_move_beyond_end(graph):
cursor = graph.run("RETURN 1")
assert cursor.forward()
assert not cursor.forward()
def test_can_only_move_until_end(graph):
cursor = graph.run("RETURN 1")
assert cursor.forward(2) == 1
def test_moving_by_zero_keeps_same_position(graph):
cursor = graph.run("RETURN 1")
assert cursor.forward(0) == 0
def test_keys_are_populated_before_moving(graph):
cursor = graph.run("RETURN 1 AS n")
assert list(cursor.keys()) == ["n"]
def test_keys_are_populated_after_moving(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n")
n = 0
while cursor.forward():
n += 1
assert list(cursor.keys()) == ["n"]
def test_keys_are_populated_before_moving_within_a_transaction(graph):
with graph.begin() as tx:
cursor = tx.run("RETURN 1 AS n")
assert list(cursor.keys()) == ["n"]
def test_stats_available(graph):
cursor = graph.run("CREATE (a:Banana)")
stats = cursor.stats()
assert stats["nodes_created"] == 1
assert stats["labels_added"] == 1
assert stats["contained_updates"] == 1
def test_current_is_none_at_start(graph):
cursor = graph.run("RETURN 1")
assert cursor.current is None
def test_current_updates_after_move(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n")
n = 0
while cursor.forward():
n += 1
assert cursor.current == Record(zip(["n"], [n]))
def test_select_picks_next(graph):
cursor = graph.run("RETURN 1")
record = next(cursor)
assert record == Record(zip(["1"], [1]))
def test_cannot_select_past_end(graph):
cursor = graph.run("RETURN 1")
cursor.forward()
with raises(StopIteration):
_ = next(cursor)
def test_selection_triggers_move(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n, n * n as n_sq")
for i in range(1, 11):
n, n_sq = next(cursor)
assert n == i
assert n_sq == i * i
def test_can_use_next_function(graph):
cursor = graph.run("RETURN 1")
record = next(cursor)
assert record == Record(zip(["1"], [1]))
def test_raises_stop_iteration(graph):
cursor = graph.run("RETURN 1")
cursor.forward()
with raises(StopIteration):
_ = next(cursor)
def test_can_get_data(graph):
cursor = graph.run("UNWIND range(1, 3) AS n RETURN n, n * n AS n_sq")
data = cursor.data()
assert data == [{"n": 1, "n_sq": 1}, {"n": 2, "n_sq": 4}, {"n": 3, "n_sq": 9}]
def test_stream_yields_all(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n, n * n as n_sq")
record_list = list(cursor)
assert record_list == [Record(zip(["n", "n_sq"], [1, 1])),
Record(zip(["n", "n_sq"], [2, 4])),
Record(zip(["n", "n_sq"], [3, 9])),
Record(zip(["n", "n_sq"], [4, 16])),
Record(zip(["n", "n_sq"], [5, 25])),
Record(zip(["n", "n_sq"], [6, 36])),
Record(zip(["n", "n_sq"], [7, 49])),
Record(zip(["n", "n_sq"], [8, 64])),
Record(zip(["n", "n_sq"], [9, 81])),
Record(zip(["n", "n_sq"], [10, 100]))]
def test_stream_yields_remainder(graph):
cursor = graph.run("UNWIND range(1, 10) AS n RETURN n, n * n as n_sq")
cursor.forward(5)
record_list = list(cursor)
assert record_list == [Record(zip(["n", "n_sq"], [6, 36])),
Record(zip(["n", "n_sq"], [7, 49])),
Record(zip(["n", "n_sq"], [8, 64])),
Record(zip(["n", "n_sq"], [9, 81])),
Record(zip(["n", "n_sq"], [10, 100]))]
def test_can_evaluate_single_value(graph):
cursor = graph.run("RETURN 1")
value = cursor.evaluate()
assert value == 1
def test_can_evaluate_value_by_index(graph):
cursor = graph.run("RETURN 1, 2")
value = cursor.evaluate(1)
assert value == 2
def test_can_evaluate_value_by_key(graph):
cursor = graph.run("RETURN 1 AS first, 2 AS second")
value = cursor.evaluate("second")
assert value == 2
def test_evaluate_with_no_records_is_none(graph):
cursor = graph.run("RETURN 1")
cursor.forward()
value = cursor.evaluate()
assert value is None
def test_evaluate_on_non_existent_column_is_none(graph):
cursor = graph.run("RETURN 1")
value = cursor.evaluate(1)
assert value is None
def test_to_subgraph(graph):
s = graph.run("CREATE p=(:Person {name:'Alice'})-[:KNOWS]->(:Person {name:'Bob'}) RETURN p").to_subgraph()
assert isinstance(s, Subgraph)
assert len(s.nodes) == 2
assert len(s.relationships) == 1
| [
"[email protected]"
]
| |
bf6422eb78f6c700211eaab310ce54a6a70d1a4b | 22c56d6cb744a0b7a5879376bed0f8e12abbf357 | /14_xi/04_ParallelogramVOn4Lines.py | 3137178f0acbda5f06e2778f3972f981a83f2fb7 | [
"MIT"
]
| permissive | mirefek/py_euclidea | 8854bd648e4e5cbadaca9d48fffb6f31d5a3447e | 8e400cbf36e3c8919fcc0032b7a95ce55012416e | refs/heads/master | 2023-08-30T14:12:28.195003 | 2021-11-16T21:02:20 | 2021-11-16T21:02:20 | 215,083,101 | 7 | 3 | null | 2021-10-05T15:56:38 | 2019-10-14T15:45:21 | Python | UTF-8 | Python | false | false | 1,105 | py | from constructions import *
def init(env):
A = env.add_free(263.0, 116.0, hidden = True)
B = env.add_free(488.5, 335.0, hidden = True)
C = env.add_free(140.0, 335.0, hidden = True)
X = env.add_free(280.0, 181.5, hidden = True)
l1 = env.add_line(A,B)
l2 = env.add_line(A,C)
l3 = env.add_line(B,C)
l4 = env.add_constr(parallel_tool, (l3,X), Line)
M = env.add_free(296.5, 235.5)
env.set_tools(
"move", "point", "line", "circle",
"perp_bisector", "angle_bisector",
"perpendicular", "parallel",
"compass", "intersection",
)
env.goal_params(l1,l2,l3,l4,M)
def construct_goals(l1,l2,l3_in,l4_in,M):
result = []
for (l3,l4) in (l3_in,l4_in), (l4_in,l3_in):
A = intersection_tool(l1, reflect_by_point(l3, M))
B = intersection_tool(l2, reflect_by_point(l4, M))
C = reflect_by_point(A, M)
D = reflect_by_point(B, M)
result.append((
segment_tool(A,B),
segment_tool(B,C),
segment_tool(C,D),
segment_tool(D,A),
))
return result
| [
"[email protected]"
]
| |
2fff3390b23f34ecccaa20ba3b41671bdfaebfa5 | e3cd9de7d7e68e5995680a297fa25652487b0d02 | /tests/sum_squares_test.py | b2ef648f012073ee2f9ded722f3ce60b17d76950 | [
"Apache-2.0"
]
| permissive | bsaghafi/erdos | 2293993bb336d0a9466a17cc15236390c379d8f8 | ac27a9607f2550bbac999a0c5fb36c84c2860d2e | refs/heads/master | 2020-08-21T02:11:06.982785 | 2019-06-26T23:55:44 | 2019-06-26T23:55:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,222 | py | from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
from erdos.data_stream import DataStream
from erdos.message import Message
import erdos.graph
from erdos.op import Op
from erdos.timestamp import Timestamp
from erdos.utils import frequency
FLAGS = flags.FLAGS
flags.DEFINE_string('framework', 'ros',
'Execution framework to use: ros | ray.')
class IntegerOp(Op):
"""Operator which publishes an integer every second"""
def __init__(self, name, number):
super(IntegerOp, self).__init__(name)
self.number = np.int64(number)
@staticmethod
def setup_streams(input_streams):
return [DataStream(name="integer_out")]
@frequency(1)
def publish_random_number(self):
output_msg = Message(self.number, Timestamp(coordinates=[0]))
self.get_output_stream("integer_out").send(output_msg)
print("%s sent %d" % (self.name, self.number))
def execute(self):
self.publish_random_number()
self.spin()
class SquareOp(Op):
"""Operator which publishes the square of its input"""
def __init__(self, name):
super(SquareOp, self).__init__(name)
@staticmethod
def setup_streams(input_streams):
input_streams.add_callback(SquareOp.on_next)
return [DataStream(name="square_output")]
def on_next(self, msg):
value = msg.data
result = value**2
self.get_output_stream("square_output").send(
Message(result, msg.timestamp))
print("%s received: %d ^ 2 = %d" % (self.name, value, result))
def execute(self):
self.spin()
class SumOp(Op):
"""Operator which sums the most recently published values for each input.
Sum operation occurs once every second.
"""
def __init__(self, name):
super(SumOp, self).__init__(name)
self.sum = 0
@staticmethod
def setup_streams(input_streams):
input_streams.add_callback(SumOp.add)
return [DataStream(name="sum_output")]
@frequency(1)
def publish_sum(self):
result = self.sum
output_msg = Message(result, Timestamp(coordinates=[0]))
self.get_output_stream("sum_output").send(output_msg)
def add(self, msg):
value = msg.data
original = self.sum
self.sum += msg.data
print("%s: %d (original) + %d (received) = %d (result)"
% (self.name, original, value, self.sum))
def execute(self):
self.publish_sum()
self.spin()
def main(argv):
"""Sums the squares of 2 numbers. """
# Set up graph
graph = erdos.graph.get_current_graph()
# Add operators
int1 = graph.add(IntegerOp, name='int1', init_args={'number': 1})
int2 = graph.add(IntegerOp, name='int2', init_args={'number': 2})
square1 = graph.add(SquareOp, name='square')
square2 = graph.add(SquareOp, name='square2')
sum = graph.add(SumOp, name='sum')
# Connect operators
graph.connect([int1], [square1])
graph.connect([int2], [square2])
graph.connect([square1, square2], [sum])
# Execute graph
graph.execute(FLAGS.framework)
if __name__ == "__main__":
app.run(main)
| [
"[email protected]"
]
| |
69093d96a03fc2ddc7f4fd1fb870114f283018ca | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03574/s625136603.py | 8c1ddbc3ce968e40601728c96995e7838eb37d66 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | h,w=map(int, input().split())
w1=['.'*(w+2)]  # sentinel row of '.' used to pad the grid above and below
s=w1+['.'+input()+'.' for _ in range(h)]+w1  # pad each input row with '.' on the left and right
for i in range(1,h+1):
    for j in range(1,w+1):
        if s[i][j]=='.':
            # count '#' cells in the 3x3 neighbourhood and write that count into the row
            t=s[i-1][j-1:j+2]+s[i][j-1:j+2]+s[i+1][j-1:j+2]
            s[i]=s[i][:j]+str(t.count('#'))+s[i][j+1:]
print(s[i][1:-1]) | [
"[email protected]"
]
| |
c2df6d6c9916fde341abb0d235790b8577ee05b0 | 2c838d3ffee6e357014dd0cd543ef841503d6647 | /src/Watcher/transforms/client2manufact.py | 96618403f6e2beb9ec4a0730b0ab2fe62594ced8 | []
| no_license | catalyst256/Watcher | 079bb0ffead77c46a814e01e851cf1b6a33b2678 | 14123f501643475fc97b64093284c1b509897550 | refs/heads/master | 2021-01-25T10:29:18.110796 | 2015-01-16T07:43:44 | 2015-01-16T07:43:44 | 14,232,782 | 21 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | #!/usr/bin/env python
import sqlite3 as lite
from common.entities import WirelessClient, Vendor
from canari.maltego.message import UIMessage
from canari.framework import configure #, superuser
__author__ = 'catalyst256'
__copyright__ = 'Copyright 2013, Watcher Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'catalyst256'
__email__ = '[email protected]'
__status__ = 'Development'
__all__ = [
'dotransform'
]
#@superuser
@configure(
label='Watcher - MAC Address Lookup',
description='Tries to work out the vendor from the MAC address',
uuids=[ 'Watcher.v2.client_2_manufacturer' ],
inputs=[ ( 'Watcher', WirelessClient ) ],
debug=True
)
def dotransform(request, response):
mac_addr = request.value[:-9].upper()
mac_addr = mac_addr.replace(':', '')
mac_db = 'Watcher/resources/databases/macaddr.db'
mac_vendor = []
con = lite.connect(mac_db)
with con:
cur = con.cursor()
cur.execute('SELECT * FROM macaddr WHERE mac like ' + "\"" + mac_addr + "\"")
while True:
row = cur.fetchone()
            if row is None:
break
if row[1] not in mac_vendor:
mac_vendor.append(row[1])
for x in mac_vendor:
e = Vendor(x)
response += e
return response | [
"[email protected]"
]
| |
f3fcbba0237d608e49a75d1fa5647d4f603bfbd2 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/KoubeiCateringPosPaymodeModifyRequest.py | 61eb98611f3cf9308bdfa7fdf4eec6bb8fc78aa6 | [
"Apache-2.0"
]
| permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,979 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiCateringPosPaymodeModifyModel import KoubeiCateringPosPaymodeModifyModel
class KoubeiCateringPosPaymodeModifyRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, KoubeiCateringPosPaymodeModifyModel):
self._biz_content = value
else:
self._biz_content = KoubeiCateringPosPaymodeModifyModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'koubei.catering.pos.paymode.modify'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
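# A minimal, hypothetical usage sketch: building this request and inspecting the parameters
# that would be signed and sent. The biz_content keys are placeholders; the real field names
# are defined on KoubeiCateringPosPaymodeModifyModel.
#
#   request = KoubeiCateringPosPaymodeModifyRequest()
#   request.biz_content = {"shop_id": "2088xxx"}        # a plain dict is converted via from_alipay_dict()
#   request.notify_url = "https://example.com/notify"   # placeholder callback URL
#   params = request.get_params()                        # includes 'koubei.catering.pos.paymode.modify'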
| [
"[email protected]"
]
| |
e29899992d7b9d372aed601eae6f1f6896db9247 | a83dc7ccce7962addbb7a7d3f45eea1dac000a21 | /10day/2.py | 8d49febd380f81aa32131265dce0dbbe43835e22 | []
| no_license | liruixiong/1808 | 879bb90587db0a7073e1a9b5b6c98e7d754feaf9 | 45f67f0ea8b25a7a68efd07272f6f361eae625c3 | refs/heads/master | 2020-03-25T19:34:37.676624 | 2018-08-22T01:49:04 | 2018-08-22T01:49:04 | 144,089,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py |
at = float(input(" "))
y = float(input(" "))
mo = input("+-*/")
if mo == "+":
print(at+y)
elif mo == "-":
print(at-y)
elif mo == "*":
print(at*y)
elif mo == "/":
print(at/y)
| [
"[email protected]"
]
| |
5f95567bceaf7b570e56328ed86f10ff0b772f05 | 940d7b93fb27e8eead9b6e52bc5c7444666744dd | /python/src/Demo/cgi/cgi2.py | d956f6538c63219fc0c7486a6b8aec4cd0f38de9 | [
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-python-cwi",
"Apache-2.0"
]
| permissive | pilotx45/sl4a | d446531d310cc17d93f24aab7271a0813e8f628d | 150e3e46b5103a9b9a391034ef3fbc5bd5160d0f | refs/heads/master | 2022-03-24T19:48:30.340479 | 2022-03-08T16:23:58 | 2022-03-08T16:23:58 | 277,016,574 | 1 | 0 | Apache-2.0 | 2022-03-08T16:23:59 | 2020-07-04T01:25:36 | null | UTF-8 | Python | false | false | 472 | py | #!/usr/local/bin/python
"""CGI test 2 - basic use of cgi module."""
import cgitb; cgitb.enable()
import cgi
def main():
form = cgi.FieldStorage()
print "Content-type: text/html"
print
if not form:
print "<h1>No Form Keys</h1>"
else:
print "<h1>Form Keys</h1>"
for key in form.keys():
value = form[key].value
print "<p>", cgi.escape(key), ":", cgi.escape(value)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
b6bde677aac4f26f15c0fe037c8ece62d778b970 | f4de413ad77ffaa9b2e7d65e1579a8d2696c0c42 | /classifier/rnn.py | 93be5b6baf981193a36d1dee3fc2ddf89ffa91f5 | []
| no_license | BinbinBian/Parable | b4d93d4fef2bb02f19cb3571501c8a8162045ff1 | f2ceb0b9a5749db7578c95edcbd2a26adb7249cf | refs/heads/master | 2021-01-17T18:44:06.129814 | 2016-05-07T06:13:35 | 2016-05-07T06:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | from rnn_layers import *
import theano
import numpy as np
class RNNEncoderDecoder(object):
"""
A RNN Encoder-Decoder Framework
"""
class StochasticRNN(object):
"""
RNN that can encode arbitrarily long sequence
(thousands of time steps)
(best for QA, Paragraph chunking tasks)
""" | [
"[email protected]"
]
| |
ee977d4256e3ec68006d3288301f797322b991c0 | 5308f19fa60215f2d44aa4530230075c245b3dad | /odoo/openerp/addons/base/res/res_config.py | 9f1963148f337367bb8ba5a626a6e991f8a33de9 | []
| no_license | ihyf/raspberry_pi | c5c5fe791f021de4356a442717450c815f858a81 | d8a531ae9ade5f3e1f49c7d1b21583fbe1b8c09e | refs/heads/master | 2020-06-11T07:57:19.140772 | 2017-01-04T12:00:59 | 2017-01-04T12:00:59 | 75,728,400 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,652 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
import re
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp import exceptions
from lxml import etree
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
def _install_modules(self, cr, uid, modules, context):
"""Install the requested modules.
return the next action to execute
modules is a list of tuples
(mod_name, browse_record | None)
"""
ir_module = self.pool.get('ir.module.module')
to_install_ids = []
to_install_missing_names = []
for name, module in modules:
if not module:
to_install_missing_names.append(name)
elif module.state == 'uninstalled':
to_install_ids.append(module.id)
result = None
if to_install_ids:
result = ir_module.button_immediate_install(cr, uid, to_install_ids, context=context)
#FIXME: if result is not none, the corresponding todo will be skipped because it was just marked done
if to_install_missing_names:
return {
'type': 'ir.actions.client',
'tag': 'apps',
'params': {'modules': to_install_missing_names},
}
return result
class res_config_configurable(osv.osv_memory):
    ''' Base class for new-style configuration items.
Configuration items should inherit from this class, implement
the execute method (and optionally the cancel one) and have
their view inherit from the related res_config_view_base view.
'''
_name = 'res.config'
def _next_action(self, cr, uid, context=None):
Todos = self.pool['ir.actions.todo']
_logger.info('getting next %s', Todos)
active_todos = Todos.browse(cr, uid,
Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
context=context)
user_groups = set(map(
lambda g: g.id,
self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
valid_todos_for_user = [
todo for todo in active_todos
if not todo.groups_id or bool(user_groups.intersection((
group.id for group in todo.groups_id)))
]
if valid_todos_for_user:
return valid_todos_for_user[0]
return None
def _next(self, cr, uid, context=None):
_logger.info('getting next operation')
next = self._next_action(cr, uid, context=context)
_logger.info('next action is %s', next)
if next:
res = next.action_launch(context=context)
res['nodestroy'] = False
return res
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def start(self, cr, uid, ids, context=None):
return self.next(cr, uid, ids, context)
def next(self, cr, uid, ids, context=None):
""" Returns the next todo action to execute (using the default
sort order)
"""
return self._next(cr, uid, context=context)
def execute(self, cr, uid, ids, context=None):
""" Method called when the user clicks on the ``Next`` button.
Execute *must* be overloaded unless ``action_next`` is overloaded
(which is something you generally don't need to do).
If ``execute`` returns an action dictionary, that action is executed
rather than just going to the next configuration item.
"""
raise NotImplementedError(
'Configuration items need to implement execute')
    def cancel(self, cr, uid, ids, context=None):
        """ Method called when the user clicks on the ``Skip`` button.
        ``cancel`` should be overloaded instead of ``action_skip``. As with
        ``execute``, if it returns an action dictionary, that action is
        executed instead of the default (going to the next configuration item).
The default implementation is a NOOP.
``cancel`` is also called by the default implementation of
``action_cancel``.
"""
pass
def action_next(self, cr, uid, ids, context=None):
""" Action handler for the ``next`` event.
Sets the status of the todo the event was sent from to
``done``, calls ``execute`` and -- unless ``execute`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.execute(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_skip(self, cr, uid, ids, context=None):
""" Action handler for the ``skip`` event.
Sets the status of the todo the event was sent from to
``skip``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Action handler for the ``cancel`` event. That event isn't
        generated by the res.config.view.base inheritable view; the
inherited view has to overload one of the buttons (or add one
more).
Sets the status of the todo the event was sent from to
``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
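# A minimal, hypothetical sketch of a configuration item built on res.config, following the
# class docstring above. The model name and field are placeholders; a real wizard would also
# need a form view inheriting from res_config_view_base.
#
#   class my_config_item(osv.osv_memory):
#       _name = 'my.config.item'
#       _inherit = 'res.config'
#       _columns = {
#           'confirm': fields.boolean('Confirm setup'),
#       }
#       def execute(self, cr, uid, ids, context=None):
#           # apply the configuration here; returning nothing moves on to the next todo
#           pass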
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
""" New-style configuration base specialized for addons selection
and installation.
Basic usage
-----------
Subclasses can simply define a number of _columns as
fields.boolean objects. The keys (column names) should be the
names of the addons to install (when selected). Upon action
execution, selected boolean fields (and those only) will be
interpreted as addons to install, and batch-installed.
Additional addons
-----------------
It is also possible to require the installation of an additional
addon set when a specific preset of addons has been marked for
installation (in the basic usage only, additionals can't depend on
one another).
These additionals are defined through the ``_install_if``
property. This property is a mapping of a collection of addons (by
name) to a collection of addons (by name) [#]_, and if all the *key*
addons are selected for installation, then the *value* ones will
be selected as well. For example::
_install_if = {
('sale','crm'): ['sale_crm'],
}
This will install the ``sale_crm`` addon if and only if both the
``sale`` and ``crm`` addons are selected for installation.
You can define as many additionals as you wish, and additionals
can overlap in key and value. For instance::
_install_if = {
('sale','crm'): ['sale_crm'],
('sale','project'): ['sale_service'],
}
will install both ``sale_crm`` and ``sale_service`` if all of
``sale``, ``crm`` and ``project`` are selected for installation.
Hook methods
------------
Subclasses might also need to express dependencies more complex
than that provided by additionals. In this case, it's possible to
define methods of the form ``_if_%(name)s`` where ``name`` is the
name of a boolean field. If the field is selected, then the
corresponding module will be marked for installation *and* the
hook method will be executed.
Hook methods take the usual set of parameters (cr, uid, ids,
context) and can return a collection of additional addons to
install (if they return anything, otherwise they should not return
anything, though returning any "falsy" value such as None or an
empty collection will have the same effect).
Complete control
----------------
The last hook is to simply overload the ``modules_to_install``
method, which implements all the mechanisms above. This method
takes the usual set of parameters (cr, uid, ids, context) and
returns a ``set`` of addons to install (addons selected by the
above methods minus addons from the *basic* set which are already
installed) [#]_ so an overloader can simply manipulate the ``set``
returned by ``res_config_installer.modules_to_install`` to add or
remove addons.
Skipping the installer
----------------------
Unless it is removed from the view, installers have a *skip*
button which invokes ``action_skip`` (and the ``cancel`` hook from
``res.config``). Hooks and additionals *are not run* when skipping
installation, even for already installed addons.
Again, setup your hooks accordingly.
.. [#] note that since a mapping key needs to be hashable, it's
possible to use a tuple or a frozenset, but not a list or a
regular set
.. [#] because the already-installed modules are only pruned at
the very end of ``modules_to_install``, additionals and
hooks depending on them *are guaranteed to execute*. Setup
your hooks accordingly.
"""
_name = 'res.config.installer'
_inherit = 'res.config'
_install_if = {}
def already_installed(self, cr, uid, context=None):
""" For each module, check if it's already installed and if it
is return its name
:returns: a list of the already installed modules in this
installer
:rtype: [str]
"""
return map(attrgetter('name'),
self._already_installed(cr, uid, context=context))
def _already_installed(self, cr, uid, context=None):
""" For each module (boolean fields in a res.config.installer),
check if it's already installed (either 'to install', 'to upgrade'
or 'installed') and if it is return the module's record
:returns: a list of all installed modules in this installer
:rtype: recordset (collection of Record)
"""
modules = self.pool['ir.module.module']
selectable = [field for field in self._columns
if type(self._columns[field]) is fields.boolean]
return modules.browse(
cr, uid,
modules.search(cr, uid,
[('name','in',selectable),
('state','in',['to install', 'installed', 'to upgrade'])],
context=context),
context=context)
def modules_to_install(self, cr, uid, ids, context=None):
""" selects all modules to install:
* checked boolean fields
* return values of hook methods. Hook methods are of the form
``_if_%(addon_name)s``, and are called if the corresponding
addon is marked for installation. They take the arguments
cr, uid, ids and context, and return an iterable of addon
names
* additionals, additionals are setup through the ``_install_if``
class variable. ``_install_if`` is a dict of {iterable:iterable}
where key and value are iterables of addon names.
If all the addons in the key are selected for installation
(warning: addons added through hooks don't count), then the
addons in the value are added to the set of modules to install
* not already installed
"""
base = set(module_name
for installer in self.read(cr, uid, ids, context=context)
for module_name, to_install in installer.iteritems()
if module_name != 'id'
if type(self._columns.get(module_name)) is fields.boolean
if to_install)
hooks_results = set()
for module in base:
hook = getattr(self, '_if_%s'% module, None)
if hook:
hooks_results.update(hook(cr, uid, ids, context=None) or set())
additionals = set(
module for requirements, consequences \
in self._install_if.iteritems()
if base.issuperset(requirements)
for module in consequences)
return (base | hooks_results | additionals).difference(
self.already_installed(cr, uid, context))
def default_get(self, cr, uid, fields_list, context=None):
''' If an addon is already installed, check it by default
'''
defaults = super(res_config_installer, self).default_get(
cr, uid, fields_list, context=context)
return dict(defaults,
**dict.fromkeys(
self.already_installed(cr, uid, context=context),
True))
def fields_get(self, cr, uid, fields=None, context=None, write_access=True, attributes=None):
""" If an addon is already installed, set it to readonly as
res.config.installer doesn't handle uninstallations of already
installed addons
"""
fields = super(res_config_installer, self).fields_get(
cr, uid, fields, context, write_access, attributes)
for name in self.already_installed(cr, uid, context=context):
if name not in fields:
continue
fields[name].update(
readonly=True,
help= ustr(fields[name].get('help', '')) +
_('\n\nThis addon is already installed on your system'))
return fields
def execute(self, cr, uid, ids, context=None):
to_install = list(self.modules_to_install(
cr, uid, ids, context=context))
_logger.info('Selecting addons %s to install', to_install)
ir_module = self.pool.get('ir.module.module')
modules = []
for name in to_install:
mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
return self._install_modules(cr, uid, modules, context=context)
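# A minimal, hypothetical installer wizard following the conventions documented on
# res_config_installer above. The module names are placeholders; each boolean column must
# match the technical name of an installable addon.
#
#   class my_installer(osv.osv_memory):
#       _name = 'my.installer'
#       _inherit = 'res.config.installer'
#       _columns = {
#           'sale': fields.boolean('Sales Management'),
#           'crm': fields.boolean('CRM'),
#       }
#       _install_if = {
#           ('sale', 'crm'): ['sale_crm'],
#       }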
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
""" Base configuration wizard for application settings. It provides support for setting
default values, assigning groups to employee users, and installing modules.
To make such a 'settings' wizard, define a model like::
class my_config_wizard(osv.osv_memory):
_name = 'my.settings'
_inherit = 'res.config.settings'
_columns = {
'default_foo': fields.type(..., default_model='my.model'),
'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
'module_baz': fields.boolean(...),
'other_field': fields.type(...),
}
The method ``execute`` provides some support based on a naming convention:
* For a field like 'default_XXX', ``execute`` sets the (global) default value of
the field 'XXX' in the model named by ``default_model`` to the field's value.
* For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
to/from the implied groups of 'group', depending on the field's value.
By default 'group' is the group Employee. Groups are given by their xml id.
The attribute 'group' may contain several xml ids, separated by commas.
* For a boolean field like 'module_XXX', ``execute`` triggers the immediate
installation of the module named 'XXX' if the field has value ``True``.
* For the other fields, the method ``execute`` invokes all methods with a name
that starts with 'set_'; such methods can be defined to implement the effect
of those fields.
The method ``default_get`` retrieves values that reflect the current status of the
fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
with a name that starts with 'get_default_'; such methods can be defined to provide
current values for other fields.
"""
_name = 'res.config.settings'
def copy(self, cr, uid, id, values, context=None):
raise osv.except_osv(_("Cannot duplicate configuration!"), "")
def fields_view_get(self, cr, user, view_id=None, view_type='form',
context=None, toolbar=False, submenu=False):
ret_val = super(res_config_settings, self).fields_view_get(
cr, user, view_id=view_id, view_type=view_type, context=context,
toolbar=toolbar, submenu=submenu)
doc = etree.XML(ret_val['arch'])
for field in ret_val['fields']:
if not field.startswith("module_"):
continue
for node in doc.xpath("//field[@name='%s']" % field):
if 'on_change' not in node.attrib:
node.set("on_change",
"onchange_module(%s, '%s')" % (field, field))
ret_val['arch'] = etree.tostring(doc)
return ret_val
    def onchange_module(self, cr, uid, ids, field_value, module_name, context=None):
module_pool = self.pool.get('ir.module.module')
module_ids = module_pool.search(
cr, uid, [('name', '=', module_name.replace("module_", '')),
('state','in', ['to install', 'installed', 'to upgrade'])],
context=context)
if module_ids and not field_value:
dep_ids = module_pool.downstream_dependencies(cr, uid, module_ids, context=context)
dep_name = [x.shortdesc for x in module_pool.browse(
cr, uid, dep_ids + module_ids, context=context)]
message = '\n'.join(dep_name)
return {
'warning': {
'title': _('Warning!'),
'message': _('Disabling this option will also uninstall the following modules \n%s') % message,
}
}
return {}
def _get_classified_fields(self, cr, uid, context=None):
""" return a dictionary with the fields classified by category::
{ 'default': [('default_foo', 'model', 'foo'), ...],
'group': [('group_bar', [browse_group], browse_implied_group), ...],
'module': [('module_baz', browse_module), ...],
'other': ['other_field', ...],
}
"""
ir_model_data = self.pool['ir.model.data']
ir_module = self.pool['ir.module.module']
def ref(xml_id):
mod, xml = xml_id.split('.', 1)
return ir_model_data.get_object(cr, uid, mod, xml, context=context)
defaults, groups, modules, others = [], [], [], []
for name, field in self._columns.items():
if name.startswith('default_') and hasattr(field, 'default_model'):
defaults.append((name, field.default_model, name[8:]))
elif name.startswith('group_') and isinstance(field, fields.boolean) and hasattr(field, 'implied_group'):
field_groups = getattr(field, 'group', 'base.group_user').split(',')
groups.append((name, map(ref, field_groups), ref(field.implied_group)))
elif name.startswith('module_') and isinstance(field, fields.boolean):
mod_ids = ir_module.search(cr, uid, [('name', '=', name[7:])])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
else:
others.append(name)
return {'default': defaults, 'group': groups, 'module': modules, 'other': others}
def default_get(self, cr, uid, fields, context=None):
ir_values = self.pool['ir.values']
classified = self._get_classified_fields(cr, uid, context)
res = super(res_config_settings, self).default_get(cr, uid, fields, context)
# defaults: take the corresponding default value they set
for name, model, field in classified['default']:
value = ir_values.get_default(cr, uid, model, field)
if value is not None:
res[name] = value
# groups: which groups are implied by the group Employee
for name, groups, implied_group in classified['group']:
res[name] = all(implied_group in group.implied_ids for group in groups)
# modules: which modules are installed/to install
for name, module in classified['module']:
res[name] = module and module.state in ('installed', 'to install', 'to upgrade')
# other fields: call all methods that start with 'get_default_'
for method in dir(self):
if method.startswith('get_default_'):
res.update(getattr(self, method)(cr, uid, fields, context))
return res
def execute(self, cr, uid, ids, context=None):
if context is None:
context = {}
context = dict(context, active_test=False)
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool['ir.values']
ir_module = self.pool['ir.module.module']
res_groups = self.pool['res.groups']
classified = self._get_classified_fields(cr, uid, context=context)
config = self.browse(cr, uid, ids[0], context)
# default values fields
for name, model, field in classified['default']:
ir_values.set_default(cr, SUPERUSER_ID, model, field, config[name])
# group fields: modify group / implied groups
for name, groups, implied_group in classified['group']:
gids = map(int, groups)
if config[name]:
res_groups.write(cr, uid, gids, {'implied_ids': [(4, implied_group.id)]}, context=context)
else:
res_groups.write(cr, uid, gids, {'implied_ids': [(3, implied_group.id)]}, context=context)
uids = set()
for group in groups:
uids.update(map(int, group.users))
implied_group.write({'users': [(3, u) for u in uids]})
# other fields: execute all methods that start with 'set_'
for method in dir(self):
if method.startswith('set_'):
getattr(self, method)(cr, uid, ids, context)
# module fields: install/uninstall the selected modules
to_install = []
to_uninstall_ids = []
lm = len('module_')
for name, module in classified['module']:
if config[name]:
to_install.append((name[lm:], module))
else:
if module and module.state in ('installed', 'to upgrade'):
to_uninstall_ids.append(module.id)
if to_uninstall_ids:
ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)
action = self._install_modules(cr, uid, to_install, context=context)
if action:
return action
# After the uninstall/install calls, the self.pool is no longer valid.
# So we reach into the RegistryManager directly.
res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
config = res_config.next(cr, uid, [], context=context) or {}
if config.get('type') not in ('ir.actions.act_window_close',):
return config
# force client-side reload (update user menu and current view)
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def cancel(self, cr, uid, ids, context=None):
# ignore the current record, and send the action to reopen the view
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)])
if action_ids:
return act_window.read(cr, uid, action_ids[0], [], context=context)
return {}
def name_get(self, cr, uid, ids, context=None):
""" Override name_get method to return an appropriate configuration wizard
name, and not the generated name."""
if not ids:
return []
# name_get may receive int id instead of an id list
if isinstance(ids, (int, long)):
ids = [ids]
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
name = self._name
if action_ids:
name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
        return [(record.id, name) for record in self.browse(cr, uid, ids, context=context)]
def get_option_path(self, cr, uid, menu_xml_id, context=None):
"""
Fetch the path to a specified configuration view and the action id to access it.
:param string menu_xml_id: the xml id of the menuitem where the view is located,
structured as follows: module_name.menuitem_xml_id (e.g.: "base.menu_sale_config")
:return tuple:
- t[0]: string: full path to the menuitem (e.g.: "Settings/Configuration/Sales")
- t[1]: int or long: id of the menuitem's action
"""
module_name, menu_xml_id = menu_xml_id.split('.')
dummy, menu_id = self.pool['ir.model.data'].get_object_reference(cr, uid, module_name, menu_xml_id)
ir_ui_menu = self.pool['ir.ui.menu'].browse(cr, uid, menu_id, context=context)
return (ir_ui_menu.complete_name, ir_ui_menu.action.id)
def get_option_name(self, cr, uid, full_field_name, context=None):
"""
Fetch the human readable name of a specified configuration option.
:param string full_field_name: the full name of the field, structured as follows:
model_name.field_name (e.g.: "sale.config.settings.fetchmail_lead")
:return string: human readable name of the field (e.g.: "Create leads from incoming mails")
"""
model_name, field_name = full_field_name.rsplit('.', 1)
return self.pool[model_name].fields_get(cr, uid, allfields=[field_name], context=context)[field_name]['string']
def get_config_warning(self, cr, msg, context=None):
"""
Helper: return a Warning exception with the given message where the %(field:xxx)s
and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
full path.
Usage:
------
Just include in your error message %(field:model_name.field_name)s to obtain the human
readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
full path.
Example of use:
---------------
        from openerp.addons.base.res.res_config import get_config_warning
        raise get_config_warning(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s."), context=context)
This will return an exception containing the following message:
Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.
What if there is another substitution in the message already?
-------------------------------------------------------------
You could have a situation where the error message you want to upgrade already contains a substitution. Example:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
        What you want to do here is simply to replace the path by %(menu:account.menu_account_config)s, and leave the rest alone.
In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
"""
res_config_obj = openerp.registry(cr.dbname)['res.config.settings']
regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'
# Process the message
# 1/ find the menu and/or field references, put them in a list
references = re.findall(regex_path, msg, flags=re.I)
# 2/ fetch the menu and/or field replacement values (full path and
# human readable field's name) and the action_id if any
values = {}
action_id = None
for item in references:
ref_type, ref = item.split(':')
if ref_type == 'menu':
values[item], action_id = res_config_obj.get_option_path(cr, SUPERUSER_ID, ref, context=context)
elif ref_type == 'field':
values[item] = res_config_obj.get_option_name(cr, SUPERUSER_ID, ref, context=context)
# 3/ substitute and return the result
        if action_id:
return exceptions.RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
return exceptions.Warning(msg % values)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
]
| |
53e60a6387d3a899ed311a33fdaded25fdf5e460 | c725fc58d217f6730687a565fbf85fcf174e8009 | /code_SDSS/sql_bright_star.py | 549fb863ca8b726b5c8de71ac5c9955cb27620e0 | []
| no_license | Kein-Cary/Intracluster-Light | 6faca2bd0413244765474beeffd53cfaa401eef2 | ffcb2d6ea10be45422c7e73408fc6ff6cadf3a85 | refs/heads/master | 2023-03-18T04:51:06.539453 | 2023-03-12T02:48:01 | 2023-03-12T02:48:01 | 160,816,520 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,347 | py | import h5py
import numpy as np
import astropy.io.fits as fits
import mechanize
import pandas as pd
from io import StringIO
import astropy.units as U
import astropy.constants as C
from astropy import cosmology as apcy
#url = 'http://skyserver.sdss.org/dr12/en/tools/search/sql.aspx'
url = 'http://cas.sdss.org/dr7/en/tools/search/sql.asp'
load = '/media/xkchen/My Passport/data/SDSS/'
with h5py.File(load + 'mpi_h5/sample_catalog.h5', 'r') as f:
catalogue = np.array(f['a'])
z = catalogue[0]
ra = catalogue[1]
dec = catalogue[2]
#r_select = 0.16676 # centered at BCG, radius = 10 arcmin (1515.15 pixel)
r_select = 0.42 ## 1.5 * diagonal line length
N_tot = len(z)
sub_N = N_tot * 1
no_match = []
for kk in range( N_tot ):
ra_g = ra[kk]
dec_g = dec[kk]
z_g = z[kk]
c_ra0 = str(ra_g - r_select)
c_dec0 = str(dec_g - r_select)
c_ra1 = str(ra_g + r_select)
c_dec1 = str(dec_g + r_select)
# query stars and saturated sources (may not be stars)
data_set = """
SELECT ALL
p.ra, p.dec, p.u, p.g, p.r, p.i, p.z, p.type,
p.isoA_u, p.isoA_g, p.isoA_r, p.isoA_i, p.isoA_z,
p.isoB_u, p.isoB_g, p.isoB_r, p.isoB_i, p.isoB_z,
p.isoPhi_u, p.isoPhi_g, p.isoPhi_r, p.isoPhi_i, p.isoPhi_z,
p.flags, dbo.fPhotoFlagsN(p.flags)
FROM PhotoObj AS p
WHERE
p.ra BETWEEN %s AND %s AND p.dec BETWEEN %s AND %s
AND (p.type = 6 OR (p.flags & dbo.fPhotoFlags('SATURATED')) > 0)
ORDER by p.r
""" % (c_ra0, c_ra1, c_dec0, c_dec1)
br = mechanize.Browser()
resp = br.open(url)
resp.info()
br.select_form(name = "sql")
br['cmd'] = data_set
br['format'] = ['csv']
response = br.submit()
s = str(response.get_data(), encoding = 'utf-8')
doc = open('/home/xkchen/mywork/ICL/data/star_dr7/source_SQL_Z%.3f_ra%.3f_dec%.3f.txt' % (z_g, ra_g, dec_g), 'w')
print(s, file = doc)
doc.close()
try:
cat = pd.read_csv('/home/xkchen/mywork/ICL/data/star_dr7/source_SQL_Z%.3f_ra%.3f_dec%.3f.txt' % (z_g, ra_g, dec_g),)
try_ra = np.array(cat.ra)
except:
no_match.append('%d, %.3f,%.3f,%.3f' % (kk, ra_g, dec_g, z_g) )
sub_N -= 1
doc = open('No_source_match_sample.txt', 'w')
for ll in range(len(no_match)):
subx = no_match[ll]
print(subx, file = doc)
doc.close()
print(sub_N)
| [
"[email protected]"
]
| |
b023906757f0266c579b3042d843bdd4da38d017 | 8126291334a4288f51b1116ea31e953debf07039 | /SRC/engine/IO/propertyoutput.spy | 11311550633bb57671c61075db7d567d2fda3223 | [
"LicenseRef-scancode-warranty-disclaimer"
]
| no_license | jumpingyu/OOF2 | 846a7dd506f029535153834607b698ce32dc155d | 31a25398b046c1963859dd96785329d2a9af8681 | refs/heads/master | 2020-05-21T09:12:07.013560 | 2019-04-02T21:05:49 | 2019-04-02T21:05:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,216 | spy | # -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# [email protected].
from ooflib.SWIG.engine import outputval
from ooflib.SWIG.engine import symmmatrix
from ooflib.common import debug
from ooflib.common import utils
from ooflib.engine.IO import output
from ooflib.engine.IO import outputClones
import types, sys
# The PropertyOutputRegistration subclasses create an Output object
# for each registered PropertyOutput. This bridges the gap between
# the C++ PropertyOutputs and the more general Python Outputs.
class PORegBase(PropertyOutputRegistration):
## Callback for all PropertyOutputs. Outputs that need to return
## something other than a list of OutputVal instances should
## override the convert method.
def opfunc(self, mesh, elements, coords, **params):
po = self.instantiate(params)
mesh.precompute_all_subproblems()
initializer = self.initializer()
results = []
for element, ecoords, in zip(elements, coords):
mat = element.material()
mesh.begin_all_subproblems(element)
results.extend(po.evaluate(mesh, element, initializer, ecoords))
mesh.end_all_subproblems(element)
return self.convert(results)
def convert(self, results):
return results
##### Scalar outputs
class ScalarPropertyOutputRegistration(PORegBase):
def __init__(self, name, initializer=None, parameters=[], ordering=0,
srepr=None, tip=None, discussion=None):
PropertyOutputRegistration.__init__(
self, name, initializer or ScalarPropertyOutputInit())
op = output.Output(name=name,
callback=self.opfunc,
otype=outputval.ScalarOutputValPtr,
instancefn=outputClones.scalar_instancefn,
column_names=outputClones.single_column_name,
params=parameters,
srepr=srepr, tip=tip, discussion=discussion)
output.defineScalarOutput(name, op, ordering=ordering)
output.defineAggregateOutput(name, op, ordering=ordering)
# def convert(self, results): # convert from ScalarOutputVal to Float
# return [r.value() for r in results]
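# Hypothetical registration sketch (the output name, ordering and tip below are
# illustrative only and do not correspond to an actual OOF2 output):
#
#   ScalarPropertyOutputRegistration(
#       'Energy Density',
#       parameters=[],
#       ordering=1,
#       tip='energy density computed by a material property')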
##### SymmMatrix3 outputs.
def _symmmatrix3_instancefn(self):
return symmmatrix.SymmMatrix3(0.,0.,0.,0.,0.,0.)
def _symmmatrix3_column_names(self):
sr = self.shortrepr()
names = []
it = self.outputInstance().getIterator()
while not it.end():
names.append("%s[%s]" % (sr, it.shortstring()))
it.next()
return names
class SymmMatrix3PropertyOutputRegistration(PORegBase):
def __init__(self, name, initializer=None, parameters=[], ordering=0,
srepr=None, tip=None, discussion=None):
PropertyOutputRegistration.__init__(
self, name, initializer or SymmMatrix3PropertyOutputInit())
op = output.Output(name=name,
callback=self.opfunc,
otype=outputval.OutputValPtr,
instancefn=_symmmatrix3_instancefn,
srepr=srepr,
column_names=_symmmatrix3_column_names,
params=parameters,
tip=tip, discussion=discussion)
output.defineAggregateOutput(name+":Value", op, ordering=ordering)
def comprepr(s):
comp = s.resolveAlias("component").value
# We have to pass s to op.shortrepr so that the shortrepr
# will be computed for the actual Output, not the Output
# defined above. The actual output will be a clone of the
# one defined there.
return "%s[%s]" % (op.shortrepr(s), comp)
compout = outputClones.ComponentOutput.clone(
name=name+" Component",
tip='Compute components of %s' % name,
srepr=comprepr,
discussion=
"""
<para>Compute the specified component of %s on a &mesh;.</para>
"""
% name)
compout.connect('field', op)
for param in parameters:
compout.aliasParam('field:' + param.name, param.name)
output.defineScalarOutput(name+":Component", compout, ordering=ordering)
def invariantrepr(s):
invariant = s.resolveAlias("invariant").value.shortrepr()
# See comment above about op.shortrepr(s)
return "%s(%s)" % (invariant, op.shortrepr(s))
invout = outputClones.InvariantOutput.clone(
name=name+" Invariant",
srepr=invariantrepr,
tip='Compute invariants of %s' % name,
discussion="""
<para>Compute the specified invariant of %s on a &mesh;.</para>
"""
% name)
invout.connect('field', op)
for param in parameters:
invout.aliasParam('field:' + param.name, param.name)
output.defineScalarOutput(name+":Invariant", invout, ordering=ordering)
output.defineAggregateOutput(name+":Invariant", invout,
ordering=ordering)
# ThreeVector outputs
## TODO 3D: These should add themselves as "Value" outputs, and there
## should be an "Invariant" output, also, since 3-vectors have a
## magnitude. srepr's and column_name's need to be adjusted/provided.
## None of this is implemented yet because there are no
## ThreeVectorPropertyOutputs to test it on.
class ThreeVectorPropertyOutputRegistration(PORegBase):
def __init__(self, name, initializer=None, parameters=[], ordering=0,
srepr=None, tip=None, discussion=None):
PropertyOutputRegistration.__init__(
self, name, initializer or ThreeVectorPropertyOutputInit())
op = output.Output(name=name,
callback=self.opfunc,
otype=outputval.OutputValPtr,
instancefn=outputClones.vector_instancefn,
params=parameters,
srepr=srepr, tip=tip,
discussion=discussion)
output.defineAggregateOutput(name, op, ordering=ordering)
compout = outputClones.ComponentOutput.clone(
name=name+" Component",
tip='Compute components of %s' % name,
discussion=
"""
<para>Compute the specified component of <link
linkend='Output-%s'>%s</link> on a &mesh;.</para>
"""
% (name, name))
compout.connect('field', op)
for param in parameters:
compout.aliasParam('field:'+param.name, param.name)
output.defineScalarOutput(name+":Component", compout, ordering=ordering)
| [
"[email protected]"
]
| |
c8ce9fe2ffe6f9aad8ba442ef8c5905d1888f424 | c97d3c8848e4f03edb6c64b6abff530a6e74d616 | /apps/models_sklearn_spark/Matrix_factorization/handler.py | 1b6060f59557d47ea3890cf8f7f98d14845086ee | [
"Apache-2.0"
]
| permissive | simhaonline/Django_web | eeb80d8f32a460258fceb30ecececd7410949f72 | f7df1a7b101d41835a334b78cddf3570968799e4 | refs/heads/master | 2023-04-24T23:33:51.535515 | 2021-04-02T15:20:29 | 2021-04-02T15:20:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,937 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# __author__ : stray_camel
# __description__ : matrix factorization
# __date__: 2020/09/09 09
try:
from apps.data.handler import get_ml_1m_ratings_df
except ImportError:
pass
from math import sqrt
from sklearn.metrics import mean_squared_error
from scipy.sparse.linalg import svds
from sklearn.model_selection import cross_validate, train_test_split
import sys
import os
import numpy as np
import pandas as pd
from functools import lru_cache
# sys.path.append(os.path.dirname(os.path.abspath('./')))
# from apps.models_sklearn_spark.Matrix_factorization.handler import ratings_df
# apps_floder = os.path.dirname(os.path.dirname(__file__))
# ratings_file = os.path.join(apps_floder, 'data\\ml-1m\\ratings.csv')
# ratings_df = pd.read_csv(ratings_file, sep=',', engine='python')
def data_split(
        ratings_df: 'ratings DataFrame',
        ratio: 'fraction of rows used for the training split' = 1/4) -> (pd.DataFrame, pd.DataFrame):
    """
    Split the data into training and test sets according to `ratio`.
    """
train_data = ratings_df.head(round(len(ratings_df)*ratio))
test_data = ratings_df.tail(round(len(ratings_df)*(1-ratio)))
return train_data, test_data
def get_data_sparsity(ratings_df, n_users, n_movies) -> float:
"""
    Compute the sparsity of the ratings dataset.
"""
sparsity = round(ratings_df.size/float(n_users*n_movies), 3)
print('The sparsity level of MovieLens is ' + str(sparsity))
return sparsity
def create_uesr_item(ratings_df, n_users, n_movies) -> (np.ndarray, np.ndarray):
"""
    Create the user-item matrices: one for training and one for testing, each with n_users rows and n_movies columns.
"""
train_data, test_data = data_split(ratings_df)
train_data_matrix = np.zeros((n_users, n_movies))
for line in train_data.itertuples():
train_data_matrix[line[1] - 1, line[2] - 1] = line[3]
test_data_matrix = np.zeros((n_users, n_movies))
for line in test_data.itertuples():
test_data_matrix[line[1] - 1, line[2] - 1] = line[3]
return train_data_matrix, test_data_matrix
def rmse(prediction, ground_truth) -> float:
prediction = prediction[ground_truth.nonzero()].flatten()
ground_truth = ground_truth[ground_truth.nonzero()].flatten()
res = sqrt(mean_squared_error(prediction, ground_truth))
return res
@lru_cache(None)
def mf_svds(k) -> (float, np.ndarray):
ratings_df = get_ml_1m_ratings_df()
n_users = max(ratings_df.UserID.unique())
n_movies = max(ratings_df.MovieID.unique())
print('Number of users = ' + str(n_users) +
' | Number of movies = ' + str(n_movies))
train_data_matrix, test_data_matrix = create_uesr_item(
ratings_df, n_users, n_movies)
    u, s, vt = svds(train_data_matrix, k=k)
u.shape, s.shape, vt.shape
s_diag_matrix = np.diag(s)
X_pred = np.dot(np.dot(u, s_diag_matrix), vt)
_rmse = rmse(X_pred, test_data_matrix)
    print('SVD matrix factorization RMSE: ' + str(_rmse))
return _rmse, X_pred
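if __name__ == "__main__":
    # Minimal usage sketch: compute the held-out RMSE of a rank-20 SVD model.
    # This assumes the MovieLens 1M ratings can be loaded through
    # apps.data.handler.get_ml_1m_ratings_df (imported at the top of this file);
    # the choice of k is arbitrary.
    rmse_value, predicted_ratings = mf_svds(20)
    print('RMSE:', rmse_value)
    print('Predicted matrix shape:', predicted_ratings.shape)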
| [
"[email protected]"
]
| |
810bf355ace92ebc37e0d77e7bf8b58519ee67c4 | be0388dfda0602ae49eb6bd4efe24703f7288cf2 | /google/cloud/datalabeling/__init__.py | dbfce79fa39bb5622d98a9a16c979c2ba602ff41 | [
"Apache-2.0"
]
| permissive | renovate-bot/python-datalabeling | 3211e3f71bba67a8272d48492abdbaa75def3a54 | f2d2282ae971ac946de166c6449e923bc94432cb | refs/heads/master | 2023-06-08T06:38:30.633651 | 2021-08-13T15:31:29 | 2021-08-13T15:31:29 | 237,521,154 | 0 | 0 | Apache-2.0 | 2020-01-31T21:43:01 | 2020-01-31T21:43:00 | null | UTF-8 | Python | false | false | 17,802 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.datalabeling_v1beta1.services.data_labeling_service.client import (
DataLabelingServiceClient,
)
from google.cloud.datalabeling_v1beta1.services.data_labeling_service.async_client import (
DataLabelingServiceAsyncClient,
)
from google.cloud.datalabeling_v1beta1.types.annotation import Annotation
from google.cloud.datalabeling_v1beta1.types.annotation import AnnotationMetadata
from google.cloud.datalabeling_v1beta1.types.annotation import AnnotationValue
from google.cloud.datalabeling_v1beta1.types.annotation import BoundingPoly
from google.cloud.datalabeling_v1beta1.types.annotation import (
ImageBoundingPolyAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import (
ImageClassificationAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import ImagePolylineAnnotation
from google.cloud.datalabeling_v1beta1.types.annotation import (
ImageSegmentationAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import NormalizedBoundingPoly
from google.cloud.datalabeling_v1beta1.types.annotation import NormalizedPolyline
from google.cloud.datalabeling_v1beta1.types.annotation import NormalizedVertex
from google.cloud.datalabeling_v1beta1.types.annotation import ObjectTrackingFrame
from google.cloud.datalabeling_v1beta1.types.annotation import OperatorMetadata
from google.cloud.datalabeling_v1beta1.types.annotation import Polyline
from google.cloud.datalabeling_v1beta1.types.annotation import SequentialSegment
from google.cloud.datalabeling_v1beta1.types.annotation import (
TextClassificationAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import (
TextEntityExtractionAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import TimeSegment
from google.cloud.datalabeling_v1beta1.types.annotation import Vertex
from google.cloud.datalabeling_v1beta1.types.annotation import (
VideoClassificationAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import VideoEventAnnotation
from google.cloud.datalabeling_v1beta1.types.annotation import (
VideoObjectTrackingAnnotation,
)
from google.cloud.datalabeling_v1beta1.types.annotation import AnnotationSentiment
from google.cloud.datalabeling_v1beta1.types.annotation import AnnotationSource
from google.cloud.datalabeling_v1beta1.types.annotation import AnnotationType
from google.cloud.datalabeling_v1beta1.types.annotation_spec_set import AnnotationSpec
from google.cloud.datalabeling_v1beta1.types.annotation_spec_set import (
AnnotationSpecSet,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
CreateAnnotationSpecSetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
CreateDatasetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
CreateEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
CreateInstructionRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
DeleteAnnotatedDatasetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
DeleteAnnotationSpecSetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
DeleteDatasetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
DeleteEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
DeleteInstructionRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ExportDataRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetAnnotatedDatasetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetAnnotationSpecSetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetDataItemRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetDatasetRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetEvaluationRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetExampleRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
GetInstructionRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ImportDataRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
LabelImageRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
LabelTextRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
LabelVideoRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListAnnotatedDatasetsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListAnnotatedDatasetsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListAnnotationSpecSetsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListAnnotationSpecSetsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListDataItemsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListDataItemsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListDatasetsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListDatasetsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListEvaluationJobsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListEvaluationJobsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListExamplesRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListExamplesResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListInstructionsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ListInstructionsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
PauseEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
ResumeEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
SearchEvaluationsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
SearchEvaluationsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
SearchExampleComparisonsRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
SearchExampleComparisonsResponse,
)
from google.cloud.datalabeling_v1beta1.types.data_labeling_service import (
UpdateEvaluationJobRequest,
)
from google.cloud.datalabeling_v1beta1.types.data_payloads import ImagePayload
from google.cloud.datalabeling_v1beta1.types.data_payloads import TextPayload
from google.cloud.datalabeling_v1beta1.types.data_payloads import VideoPayload
from google.cloud.datalabeling_v1beta1.types.data_payloads import VideoThumbnail
from google.cloud.datalabeling_v1beta1.types.dataset import AnnotatedDataset
from google.cloud.datalabeling_v1beta1.types.dataset import AnnotatedDatasetMetadata
from google.cloud.datalabeling_v1beta1.types.dataset import BigQuerySource
from google.cloud.datalabeling_v1beta1.types.dataset import ClassificationMetadata
from google.cloud.datalabeling_v1beta1.types.dataset import DataItem
from google.cloud.datalabeling_v1beta1.types.dataset import Dataset
from google.cloud.datalabeling_v1beta1.types.dataset import Example
from google.cloud.datalabeling_v1beta1.types.dataset import GcsDestination
from google.cloud.datalabeling_v1beta1.types.dataset import GcsFolderDestination
from google.cloud.datalabeling_v1beta1.types.dataset import GcsSource
from google.cloud.datalabeling_v1beta1.types.dataset import InputConfig
from google.cloud.datalabeling_v1beta1.types.dataset import LabelStats
from google.cloud.datalabeling_v1beta1.types.dataset import OutputConfig
from google.cloud.datalabeling_v1beta1.types.dataset import TextMetadata
from google.cloud.datalabeling_v1beta1.types.dataset import DataType
from google.cloud.datalabeling_v1beta1.types.evaluation import (
BoundingBoxEvaluationOptions,
)
from google.cloud.datalabeling_v1beta1.types.evaluation import ClassificationMetrics
from google.cloud.datalabeling_v1beta1.types.evaluation import ConfusionMatrix
from google.cloud.datalabeling_v1beta1.types.evaluation import Evaluation
from google.cloud.datalabeling_v1beta1.types.evaluation import EvaluationConfig
from google.cloud.datalabeling_v1beta1.types.evaluation import EvaluationMetrics
from google.cloud.datalabeling_v1beta1.types.evaluation import ObjectDetectionMetrics
from google.cloud.datalabeling_v1beta1.types.evaluation import PrCurve
from google.cloud.datalabeling_v1beta1.types.evaluation_job import Attempt
from google.cloud.datalabeling_v1beta1.types.evaluation_job import EvaluationJob
from google.cloud.datalabeling_v1beta1.types.evaluation_job import (
EvaluationJobAlertConfig,
)
from google.cloud.datalabeling_v1beta1.types.evaluation_job import EvaluationJobConfig
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
BoundingPolyConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import EventConfig
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
HumanAnnotationConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
ImageClassificationConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
ObjectDetectionConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
ObjectTrackingConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
PolylineConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
SegmentationConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
SentimentConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
TextClassificationConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
TextEntityExtractionConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
VideoClassificationConfig,
)
from google.cloud.datalabeling_v1beta1.types.human_annotation_config import (
StringAggregationType,
)
from google.cloud.datalabeling_v1beta1.types.instruction import CsvInstruction
from google.cloud.datalabeling_v1beta1.types.instruction import Instruction
from google.cloud.datalabeling_v1beta1.types.instruction import PdfInstruction
from google.cloud.datalabeling_v1beta1.types.operations import CreateInstructionMetadata
from google.cloud.datalabeling_v1beta1.types.operations import (
ExportDataOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
ExportDataOperationResponse,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
ImportDataOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
ImportDataOperationResponse,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImageBoundingBoxOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImageBoundingPolyOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImageClassificationOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImageOrientedBoundingBoxOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImagePolylineOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelImageSegmentationOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import LabelOperationMetadata
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelTextClassificationOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelTextEntityExtractionOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelVideoClassificationOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelVideoEventOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelVideoObjectDetectionOperationMetadata,
)
from google.cloud.datalabeling_v1beta1.types.operations import (
LabelVideoObjectTrackingOperationMetadata,
)
__all__ = (
"DataLabelingServiceClient",
"DataLabelingServiceAsyncClient",
"Annotation",
"AnnotationMetadata",
"AnnotationValue",
"BoundingPoly",
"ImageBoundingPolyAnnotation",
"ImageClassificationAnnotation",
"ImagePolylineAnnotation",
"ImageSegmentationAnnotation",
"NormalizedBoundingPoly",
"NormalizedPolyline",
"NormalizedVertex",
"ObjectTrackingFrame",
"OperatorMetadata",
"Polyline",
"SequentialSegment",
"TextClassificationAnnotation",
"TextEntityExtractionAnnotation",
"TimeSegment",
"Vertex",
"VideoClassificationAnnotation",
"VideoEventAnnotation",
"VideoObjectTrackingAnnotation",
"AnnotationSentiment",
"AnnotationSource",
"AnnotationType",
"AnnotationSpec",
"AnnotationSpecSet",
"CreateAnnotationSpecSetRequest",
"CreateDatasetRequest",
"CreateEvaluationJobRequest",
"CreateInstructionRequest",
"DeleteAnnotatedDatasetRequest",
"DeleteAnnotationSpecSetRequest",
"DeleteDatasetRequest",
"DeleteEvaluationJobRequest",
"DeleteInstructionRequest",
"ExportDataRequest",
"GetAnnotatedDatasetRequest",
"GetAnnotationSpecSetRequest",
"GetDataItemRequest",
"GetDatasetRequest",
"GetEvaluationJobRequest",
"GetEvaluationRequest",
"GetExampleRequest",
"GetInstructionRequest",
"ImportDataRequest",
"LabelImageRequest",
"LabelTextRequest",
"LabelVideoRequest",
"ListAnnotatedDatasetsRequest",
"ListAnnotatedDatasetsResponse",
"ListAnnotationSpecSetsRequest",
"ListAnnotationSpecSetsResponse",
"ListDataItemsRequest",
"ListDataItemsResponse",
"ListDatasetsRequest",
"ListDatasetsResponse",
"ListEvaluationJobsRequest",
"ListEvaluationJobsResponse",
"ListExamplesRequest",
"ListExamplesResponse",
"ListInstructionsRequest",
"ListInstructionsResponse",
"PauseEvaluationJobRequest",
"ResumeEvaluationJobRequest",
"SearchEvaluationsRequest",
"SearchEvaluationsResponse",
"SearchExampleComparisonsRequest",
"SearchExampleComparisonsResponse",
"UpdateEvaluationJobRequest",
"ImagePayload",
"TextPayload",
"VideoPayload",
"VideoThumbnail",
"AnnotatedDataset",
"AnnotatedDatasetMetadata",
"BigQuerySource",
"ClassificationMetadata",
"DataItem",
"Dataset",
"Example",
"GcsDestination",
"GcsFolderDestination",
"GcsSource",
"InputConfig",
"LabelStats",
"OutputConfig",
"TextMetadata",
"DataType",
"BoundingBoxEvaluationOptions",
"ClassificationMetrics",
"ConfusionMatrix",
"Evaluation",
"EvaluationConfig",
"EvaluationMetrics",
"ObjectDetectionMetrics",
"PrCurve",
"Attempt",
"EvaluationJob",
"EvaluationJobAlertConfig",
"EvaluationJobConfig",
"BoundingPolyConfig",
"EventConfig",
"HumanAnnotationConfig",
"ImageClassificationConfig",
"ObjectDetectionConfig",
"ObjectTrackingConfig",
"PolylineConfig",
"SegmentationConfig",
"SentimentConfig",
"TextClassificationConfig",
"TextEntityExtractionConfig",
"VideoClassificationConfig",
"StringAggregationType",
"CsvInstruction",
"Instruction",
"PdfInstruction",
"CreateInstructionMetadata",
"ExportDataOperationMetadata",
"ExportDataOperationResponse",
"ImportDataOperationMetadata",
"ImportDataOperationResponse",
"LabelImageBoundingBoxOperationMetadata",
"LabelImageBoundingPolyOperationMetadata",
"LabelImageClassificationOperationMetadata",
"LabelImageOrientedBoundingBoxOperationMetadata",
"LabelImagePolylineOperationMetadata",
"LabelImageSegmentationOperationMetadata",
"LabelOperationMetadata",
"LabelTextClassificationOperationMetadata",
"LabelTextEntityExtractionOperationMetadata",
"LabelVideoClassificationOperationMetadata",
"LabelVideoEventOperationMetadata",
"LabelVideoObjectDetectionOperationMetadata",
"LabelVideoObjectTrackingOperationMetadata",
)
| [
"[email protected]"
]
| |
ee4a8bd968583926c1ed2877ab805846d1966635 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/16144140.py | a395cacae7ac1620e027f02c873102b4b6342cf3 | []
| no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/16144140.py generated: Wed, 25 Jan 2017 15:25:29
#
# Event Type: 16144140
#
# ASCII decay Descriptor: [Xi_b0 -> (Lambda0 -> p+ pi-) (J/psi(1S) -> mu+ mu-)]cc
#
from Configurables import Generation
Generation().EventType = 16144140
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Xib0_JpsiLambda,mm=phsp,DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 5232,-5232 ]
| [
"[email protected]"
]
| |
b768b58cf3721bb2f6b3a2fc866798aa78ca6847 | a990bd26d3a69d1ea6699c85efa2cea99452c3df | /problems/leetcode/rottingOranges994.py | 5388c929d4cee4a0f12199681fa2844bb927234b | []
| no_license | abecus/DS-and-Algorithms | 5f1a948a085465ae165090ec957a9d5307ce729d | 3259e8183382265a27cf8c91e37d0086175a5703 | refs/heads/master | 2022-05-05T07:07:08.194243 | 2022-04-05T16:23:39 | 2022-04-05T16:23:39 | 193,111,610 | 11 | 6 | null | 2020-11-18T16:19:18 | 2019-06-21T14:27:25 | Python | UTF-8 | Python | false | false | 1,960 | py | """
_________________________994. Rotting Oranges_________________________
Difficulty: Medium Likes: 1259 Dislikes: 170 Solution: Available
Total Accepted: 77.3K Total Submission: 164.3K Acceptance Rate: 47.0%
Tags: Breadth-first Search
In a given grid, each cell can have one of three values: the value 0
representing an empty cell; the value 1 representing a fresh orange;
the value 2 representing a rotten orange. Every minute, any fresh
orange that is adjacent (4-directionally) to a rotten orange becomes
rotten. Return the minimum number of minutes that must elapse until no
cell has a fresh orange. If this is impossible, return -1 instead.
Example 1:
Input: [[2,1,1],[1,1,0],[0,1,1]]
Output: 4
Example 2:
Input: [[2,1,1],[0,1,1],[1,0,1]]
Output: -1
Example 3:
Input: [[0,2]]
Output: 0
Note:
1 <= grid.length <= 10; 1 <= grid[0].length <= 10; grid[i][j] is only 0, 1, or 2.
"""
def orangesRotting(grid):
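    # Approach (multi-source BFS): every initially rotten orange is a BFS source.
    # Each pass of the while-loop below rots the fresh oranges adjacent to the
    # current frontier, so the number of passes corresponds to the elapsed minutes.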
r=len(grid)
c=len(grid[0])
def get_adj(i,j):
for x,y in zip([1,-1,0,0],[0,0,-1,1]):
if 0<=i+x<r and 0<=j+y<c:
yield (i+x,j+y)
q=[(i,j) for i in range(r) for j in range(c) if grid[i][j]==2]
res=0
while q:
temp = []
for i,j in q:
for x,y in get_adj(i,j):
if grid[x][y]==1:
grid[x][y]=2
temp.append((x,y))
res+=1
q = temp.copy()
for i in range(r):
for j in range(c):
if grid[i][j]==1:
return -1
return res-1 if res else res
if __name__ == "__main__":
grid = [[2,1,1],[1,1,0],[0,1,1]]
# grid = [[2,1,1],
# [0,1,1],
# [1,0,1]]
# grid = [[0,1]]
print(orangesRotting(grid,))
"""
similarQuestions::
Walls and Gates: Medium
"""
| [
"[email protected]"
]
| |
b3d2499cc45fea03a267a459dd73d738e8962baa | 601362aea0d323309bea046d93ef3f2abe090718 | /flog/libs/wikipedia.py | b8f67543bdc12c2b8f795d5ecf414fb4fbf6e2b9 | []
| no_license | ErikBurdett/flog | cca1d780835351b7017b993e4047d43a437c6504 | 633bd3ff95b62766fcf40d76513d27b8785870a0 | refs/heads/master | 2022-10-30T17:01:31.538700 | 2020-06-16T04:40:14 | 2020-06-16T04:40:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | import requests
def random_article():
    url = 'https://en.wikipedia.org/api/rest_v1/page/random/title'
return requests.get(url, timeout=2.0).json()
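# Minimal usage sketch (illustrative only; the structure of the returned JSON is
# defined by the Wikipedia REST API, not by this module):
#
#     data = random_article()
#     print(data)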
| [
"[email protected]"
]
| |
2bab2de433e731e2c1376160a0148c2a824ea777 | 46083e01408b6bb2e05f78a38f69fd5a6881a4c3 | /autotesting/test2/testcase/testbase.py | 1611bc6797907d0c433ece866a2067286b297c17 | []
| no_license | caixinshu/api | 7601ce97ed6666cbc5995ecd1e32165605a7da7e | b75bf1bdbf4ee14f0485d552ff2f382c7991821e | refs/heads/master | 2021-01-22T05:54:14.651098 | 2019-12-17T09:09:23 | 2019-12-17T09:19:26 | 81,718,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | # -*- coding: utf-8 -*
import requests
from suds.client import Client
from config import url
from tools import readexceldata
# initialize the service URL and test data source
class test:
def __init__(self,url,file,colnameindex,by_name):
self.url=url
self.file=file
self.colnameindex=colnameindex
self.by_name=by_name
    def getclient(self):  # create the SOAP client
client=Client(self.url)
return client
    def getdata(self):  # read the test data from the Excel sheet
data=readexceldata.excel_table_byname(self.file,self.colnameindex,self.by_name)
return data
def main():
test1=test(url.loginurl,"E:\\workspacepython\\apiteststudy\\data\\study.xls",0,"login")
print "111"
print test1.getdata()
if __name__=="__main__":
main()
| [
"[email protected]"
]
| |
3a04a77da2ee5df5107a7f1f4186b15aaa3400bd | ca08100b33a78c01bf49f097f4e80ed10e4ee9ad | /intrepidboats/apps/boats/migrations/0025_auto_20170518_1334.py | 4d0337d5918da1292d1f741df70a316bbba6feec | []
| no_license | elite0401/intrepidpowerboats | 347eae14b584d1be9a61ca14c014135ab0d14ad0 | d2a475b60d17aa078bf0feb5e0298c927e7362e7 | refs/heads/master | 2021-09-11T01:51:47.615117 | 2018-04-06T02:20:02 | 2018-04-06T02:20:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-18 17:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('boats', '0024_video_video_external_url'),
]
operations = [
migrations.AlterField(
model_name='abouttheboatimage',
name='kind',
field=models.CharField(choices=[('EXTERIOR', 'exterior'), ('INTERIOR', 'interior'), ('CABIN', 'cabin')], max_length=25, verbose_name='kind'),
),
]
| [
"[email protected]"
]
| |
5493043be3c35aaaa1701498e246f4f8555ae5d7 | 8b2aeac35b73d03587251311fcd171e72a8fc854 | /photos/migrations/0002_auto_20180128_1207.py | 4b21b603c9fe41632188a38fc2948d97f3dcf7af | []
| no_license | mansonul/wedding | 78e273cf68b5897136c0b8ef18c664c3cfa505e2 | 3168faa79f1c223eb078e0e1941a2ddfeab903c4 | refs/heads/master | 2021-05-10T18:16:13.795886 | 2018-01-29T18:13:41 | 2018-01-29T18:13:41 | 118,626,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-28 12:07
from __future__ import unicode_literals
from django.db import migrations
import imagekit.models.fields
import photos.models
class Migration(migrations.Migration):
dependencies = [
('photos', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='photoupload',
name='image',
field=imagekit.models.fields.ProcessedImageField(blank=True, null=True, upload_to=photos.models.PhotoUpload.path_and_rename),
),
]
| [
"[email protected]"
]
| |
7b485e6c81c4efd3aac47646b1b61652249aa27d | f9b5a01d8cfeddc0c52fcbfc593fa0f31c4df1bf | /ex06/ex06.py | 6367fad5aff079d421fae75ad052baafeb043335 | []
| no_license | wogurdlek2/16PFA-2013211032 | 358154af14f65b7fd635dd9a682dd9ea22d7539e | 478616b3a090c596afba1b62f01152d468e0f014 | refs/heads/master | 2021-01-21T12:59:29.942224 | 2016-05-25T11:31:16 | 2016-05-25T11:31:16 | 53,999,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | x = "There are %d types of prople." % 10
binary = "binary"
do_not = "don't"
y = "Those who know %s and those who %s." % (binary, do_not)
print x
print y
print "I sais: %r." % x
print "I also said: '%s'." % y
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
print joke_evaluation % hilarious
w = "This is the left side of..."
e = "a string with a right side."
print w + e
| [
"CAD Client"
]
| CAD Client |
591f5c567067bbf1a4785cce4f3aeadf302ac753 | 46279163a543cd8820bdc38133404d79e787c5d2 | /torch/fx/experimental/accelerator_partitioner.py | 43ec348d45e6d857feec35e24007b65c58eb1108 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | erwincoumans/pytorch | 31738b65e7b998bfdc28d0e8afa7dadeeda81a08 | ae9f39eb580c4d92157236d64548b055f71cf14b | refs/heads/master | 2023-01-23T10:27:33.628897 | 2020-12-06T01:22:00 | 2020-12-06T01:23:40 | 318,930,000 | 5 | 1 | NOASSERTION | 2020-12-06T01:58:57 | 2020-12-06T01:58:56 | null | UTF-8 | Python | false | false | 41,390 | py | from torch.fx.graph_module import GraphModule
from torch.fx.node import Node, map_arg
from typing import Dict, List, Set, NamedTuple, Tuple
import torch
from torch.fx.experimental.subgraph_creation_example import split_module
import operator
from torch.fx.experimental.partitioner_utils import Partition, \
Device, PartitionerConfig, get_partition_to_latency_mapping,\
get_latency_of_partitioned_graph, NodeLatency, get_extra_size_of, \
PartitionMode
class DAGNode():
"""
DAGNode class maintains useful information for a partition (submodule).
inputs(submodule node) and outputs(submodule node).
"""
def __init__(
self,
submodule_node: Node,
input_nodes: List[Node],
output_nodes: List[Node],
logical_device_ids: List[int],
size_bytes: int
) -> None:
self.submodule_node: Node = submodule_node
self.input_nodes: List[Node] = input_nodes
self.output_nodes: List[Node] = output_nodes
self.logical_device_ids: List[int] = logical_device_ids
self.size_bytes = size_bytes
def __str__(self) -> str:
return str(self.submodule_node)
class DAG:
"""DAG class contains all the DAG nodes"""
def __init__(self) -> None:
self.nodes: List[DAGNode] = []
def create_node(
self,
submodule_node: Node,
input_nodes: List[Node],
output_nodes: List[Node],
logical_devices: List[int],
size_bytes: int
) -> None:
node = DAGNode(submodule_node, input_nodes, output_nodes, logical_devices, size_bytes)
self.nodes.append(node)
class PartitionResult(NamedTuple):
"""NameTuple used for returning DAG and a new graph module
"""
dag: DAG
module_with_submodules: GraphModule
"""Followings are some helper functions for partition manipulation"""
def reset_partition_device(partitions):
for partition in partitions:
partition.logical_device_ids = []
def combine_two_partitions(
partition_0: Partition,
partition_1: Partition,
partitions: List[Partition]
) -> None:
"""Given a list of partitions and its two partitions,
combine these two partitions into a new one appending to the partitions
and remove the previous two partitions from the list of partitions
"""
partition = Partition(len(partitions))
partition.nodes = partition_0.nodes.union(partition_1.nodes)
partition.recalculate_mem_size()
partitions.append(partition)
partitions.remove(partition_0)
partitions.remove(partition_1)
# Reorganize partitions
reorganize_partitions(partitions)
return
def set_parents_and_children(partitions: List[Partition]) -> None:
"""Given a list of partitions, mark parents and children for each partition
"""
# Go through all nodes in a partition.
# If a node's user is in other partition,
# then the other partition is this partition's children.
# This partition is the other partition's parent
for partition in partitions:
partition.children = set()
partition.parents = set()
for partition in partitions:
for node in partition.nodes:
# For each node in the current partition, find its users
users = node.users
for n in users:
# Find which the partition the user belongs to.
# Note that if the node itself is also belongs to that partition,
# that partition is not the child of the current partition
for p in partitions:
if p != partition and n in p.nodes and node not in p.nodes:
partition.children.add(p)
p.parents.add(partition)
return
def reorganize_partitions(partitions: List[Partition]) -> None:
"""Given a list of partitions, reorganzie partiton id,
its parents and its children for each partition
"""
# Rearrange partition ids
for i, partition in enumerate(partitions):
partition.partition_id = i
set_parents_and_children(partitions)
return
def get_bfs_level_partition(partitions: List[Partition]) -> None:
"""Given a list of partitions,
mark the bfs level for each partition
"""
current_level: Set[Partition] = set()
visited: Set[Partition] = set()
for partition in partitions:
# If a partition has no parent, it should be in root level
if len(partition.parents) == 0:
current_level.add(partition)
next_level: Set[Partition] = set()
level = 0
# Start bfs
while current_level:
partition = current_level.pop()
partition.bfs_level = level
visited.add(partition)
children = partition.children
for child in children:
if child not in next_level:
next_level.add(child)
if not current_level:
current_level = next_level.copy()
next_level = set()
level += 1
return
def get_node_to_partition_mapping(partitions: List[Partition]) -> Dict[Node, int]:
"""Given a list of partitions,return node to partition mapping
"""
node_to_partition: Dict[Node, int] = {}
for partition in partitions:
for node in partition.nodes:
node_to_partition[node] = partition.partition_id
return node_to_partition
def get_device_to_partitions_mapping(partitions: List[Partition], devices: List[Device]):
"""Given a list of partitions and a list of devices,
map each partition into a device.
"""
def calculate_extra_mem_bytes_needed_for(partition: Partition, partitions: List[Partition]):
all_nodes: Set[Node] = set()
for p in partitions:
all_nodes = all_nodes.union(p.nodes)
if len(all_nodes) == 0:
return partition.used_mem_bytes
all_nodes = all_nodes.union(partition.nodes)
extra_size_needed = 0
for node in partition.nodes:
extra_size_needed += get_extra_size_of(node, all_nodes)
return extra_size_needed
def find_device_for(partition: Partition):
"""Given a partition, find a logical device for the partition
The algorithm is that:
#1. sort all the devices based on left mem size
            #2. put the partition on the device that has just enough memory
for that partition
"""
for d in device_to_left_mem_bytes:
extra_size_needed = calculate_extra_mem_bytes_needed_for(partition, device_to_partitions[d])
if extra_size_needed < device_to_left_mem_bytes[d]:
device_to_partitions[d].append(partition)
partition.logical_device_ids.append(d.logical_id)
device_to_left_mem_bytes[d] -= extra_size_needed
return True
return False
# logical id to device
logical_id_to_device: Dict[int, Device] = {}
# Track partitions on device
device_to_partitions: Dict[Device, List[Partition]] = {}
# Track device's left mem size
device_to_left_mem_bytes: Dict[Device, int] = {}
for d in devices:
logical_id_to_device[d.logical_id] = d
device_to_partitions[d] = []
device_to_left_mem_bytes[d] = d.available_mem_bytes
# Deal with the partitions that have a device
# Find all no device partitions
no_device_partitions = []
for partition in partitions:
if partition.logical_device_ids != []:
logical_id = partition.logical_device_ids[0]
device = logical_id_to_device[logical_id]
device_to_partitions[device] = [partition]
            device_to_left_mem_bytes[device] = device.available_mem_bytes - partition.used_mem_bytes
else:
no_device_partitions.append(partition)
# Find device for each no device partition
found_device = True
for partition in no_device_partitions:
device_to_left_mem_bytes = {
d: left_mem_bytes for d, left_mem_bytes
in sorted(device_to_left_mem_bytes.items(), key=lambda item: item[1])
}
found_device = find_device_for(partition)
if not found_device:
break
return found_device
def check_dependency(partition):
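    # BFS over the descendants of `partition`: returns True if `partition` can
    # reach itself through the current parent/child edges, i.e. the partition
    # participates in a circular dependency.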
visited: Set[Partition] = set([partition])
queue: List[Partition] = [partition]
while queue:
p = queue.pop(0)
for child in p.children:
if child == partition:
return True
else:
if child not in visited:
visited.add(child)
queue.append(child)
return False
class Partitioner:
"""A graph module may not fit into one device.
Partitioner class helps cut one graph into subgraphs (partitions),
so that each partition could fit into a different device.
The main function of this class is self.partition_graph.
    It will partition the graph based on the scheme specified in partitioner_config
A DAG structure is returned
along with a new graph module with partitions as submodule nodes.
"""
def __init__(self) -> None:
self.partitions: List[Partition] = []
self.node_to_partition: Dict[Node, int] = {}
self.devices: List[Device] = []
def partition_graph(
self,
fx_module: GraphModule,
torch_module: torch.nn.Module,
partitioner_config: PartitionerConfig
) -> PartitionResult:
"""
Given the fx module, torch module and partitioner_config,
find the partitions, do the partitions,
and then return a DAG and a new fx module with submodule nodes (partitions)
"""
self.graph_module = fx_module
self.torch_module = torch_module
self.devices = partitioner_config.devices
if len(self.devices) == 0:
raise RuntimeError('No devices')
# Check if there are op nodes in the graph
nodes = self.graph_module.graph.nodes
if all(node.op in {'placeholder', 'get_attr', 'output'} for node in nodes):
raise RuntimeError('No Partition since no operations in the module')
# Calculate total size of the graph
total_size_of_graph = 0
for node in nodes:
if node.op == 'output':
break
total_size_of_graph += node.size_bytes.total_size
device_with_max_mem = max(self.devices, key=lambda d: d.available_mem_bytes)
if partitioner_config.mode == PartitionMode.aot_based:
self.aot_based_partition(
partitioner_config.node_to_partition_mapping,
partitioner_config.partition_to_logical_device_mapping
)
elif total_size_of_graph <= device_with_max_mem.available_mem_bytes:
self.find_single_partition(total_size_of_graph)
elif total_size_of_graph > sum([d.available_mem_bytes for d in self.devices]):
            raise RuntimeError('Devices do not have enough memory for the module')
else:
if partitioner_config.mode == PartitionMode.sparse_nn:
available_mem_bytes = self.devices[0].available_mem_bytes
if not all(device.available_mem_bytes == available_mem_bytes for device in self.devices):
                    raise RuntimeError('All devices must have the same memory size!')
# sparse_nn_partition only support same memory size
# TODO: add different size support for sparse_nn_partition
self.sparse_nn_partition(available_mem_bytes)
elif partitioner_config.mode == PartitionMode.cost_aware:
self.cost_aware_partition(
partitioner_config.transfer_rate_bytes_per_sec,
partitioner_config.node_to_latency_mapping
)
elif partitioner_config.mode == PartitionMode.kl_based:
self.kl_based_partition(
partitioner_config.transfer_rate_bytes_per_sec,
partitioner_config.node_to_latency_mapping
)
else:
self.size_based_partition()
module_with_submodules = self.do_partition()
# The DAG contains DAGNodes with info of each partition's input nodes, output nodes
# and how partitions are connected.
dag = self.dump_dag(module_with_submodules)
ret = PartitionResult(dag, module_with_submodules)
return ret
def find_single_partition(self, total_size_of_graph) -> None:
"""Only one partition (one graph on one device)."""
partition_0 = self.create_partition()
for node in self.graph_module.graph.nodes:
if node.op == 'output':
break
partition_0.nodes.add(node)
partition_0.used_mem_bytes = total_size_of_graph
partition_0.logical_device_ids = [0]
# Get the node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
return
def size_based_partition(self) -> None:
"""This method is to partition the graph based on memory size.
It uses greedy approach. The result may not be the best.
The basic idea is:
Step 1:
Find a device which has enough memory to fit the first node, create a empty partition
with the size of that device.
Then keep adding the following nodes into the partition until the partition is full.
Step 2:
Repeat Step 1 until no device left
Step 3:
If some nodes are left, create a partition for each left node (single node partition).
and then try to map those partitions into logical devices with non single node partitions.
"""
def find_device_based_on_size(node) -> Device:
"""Given a node, this function is to find a logical device
that could fit the node.
"""
mem_size_needed = get_extra_size_of(node, set())
device = Device('', -1, -1)
for d in self.devices:
if d not in occupied_devices and d.available_mem_bytes >= mem_size_needed:
device = d
break
if device.available_mem_bytes < 0:
                raise RuntimeError(str(node) + ' is too large to fit into any device')
occupied_devices.append(device)
return device
# Track partition and its left mem size
partition_to_left_mem_bytes: Dict[Partition, int] = {}
# Track all the devices that have been used
occupied_devices: List[Device] = []
partition = self.create_partition()
for node in self.graph_module.graph.nodes:
if node.op in {'call_module', 'call_method', 'call_function'}:
# Check if there are devices left
if len(self.partitions) <= len(self.devices):
total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
# Check if the current partition is the very first partition
if partition.used_mem_bytes == 0:
# Find a device to fit the first node, return available mem size
device = find_device_based_on_size(node)
occupied_devices.append(device)
# Update partition and its left mem size
partition_to_left_mem_bytes[partition] = device.available_mem_bytes
                        # Update available mem size for the current partition
partition.logical_device_ids.append(device.logical_id)
else:
# The current partition is not the first partition
# Check if the current node can fit into this partition
if partition_to_left_mem_bytes[partition] < total_size_of_input_nodes:
# Check if no device is left
if len(self.partitions) == len(self.devices):
# No device left, all the partitions before are non single node partitions
non_single_node_partitions = self.partitions[:]
# Create the first single node partition for the current node
self.create_single_node_partition(node)
continue
# Some devices are still left
device = find_device_based_on_size(node)
partition = self.create_partition()
total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
partition_to_left_mem_bytes[partition] = device.available_mem_bytes
partition.logical_device_ids.append(device.logical_id)
partition.add_node(node)
partition_to_left_mem_bytes[partition] -= total_size_of_input_nodes
# No device left, create single node partitions
else:
self.create_single_node_partition(node)
reorganize_partitions(self.partitions)
# Get the node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
# Mapping all partitions into device
found_partition_to_device_mapping = get_device_to_partitions_mapping(self.partitions, self.devices)
if not found_partition_to_device_mapping:
raise RuntimeError("Cannot Get a Valid Partition to Logical Device Mapping")
return
def do_partition(self) -> GraphModule:
"""Return a module with submodules (partitions)."""
module_with_submodules = split_module(
self.graph_module,
self.torch_module,
lambda node: self.node_to_partition[node]
)
return module_with_submodules
def dump_dag(self, module_with_submodules: GraphModule) -> DAG:
dag = DAG()
for node in module_with_submodules.graph.nodes:
if node.op == 'output':
break
if node.op in {'placeholder', 'get_attr'}:
continue
if node.target == operator.__getitem__:
continue
input_nodes : Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# When a node has two or more output nodes,
# it outputs its result to 'getitem' nodes.
# Those 'getitem' nodes are the output node for this node.
# Otherwise, the output node is this node itself.
if len(node.users) > 1:
output_nodes = list(node.users)
else:
output_nodes = [node]
partition_id = int(node.name.rsplit('_', 1)[-1])
device_ids = self.partitions[partition_id].logical_device_ids
size_bytes = self.partitions[partition_id].used_mem_bytes
dag.create_node(node, list(input_nodes), output_nodes, device_ids, size_bytes)
return dag
def create_partition(self) -> Partition:
"""Create a partition and append it to self.partitions."""
partition_id = len(self.partitions)
partition = Partition(partition_id)
self.partitions.append(partition)
return partition
def create_single_node_partition(self, node):
"""Create a partition for a single node
"""
partition = self.create_partition()
partition.add_node(node)
return
def sparse_nn_partition(self, available_mem_bytes: int) -> None:
"""This method partition a sparse nn module.
It first traverse all the nodes and do the partitions based on memory size.
If the current partition has no enough memory left for a new op node
(call_module, call_method, call_function), a new partition is created.
Different from size_based_partition, when traversing cross the boundary between
non-embedding nodes and embedding nodes, a new partition is created regardlessly.
For example, if the current node is a non-embedding node but the next node is an
embedding node, a new partition is created for the next node.
After the partition, the partitions are combined as much as possible.
The rule is that a non-embedding partition only
combines with another non-embedding one.
So as the embedding partitions.
"""
def combine_partitions_based_on_size(partitions: List[Partition], available_mem_bytes: int) -> None:
"""Combining small partitions together to keep as less partitions as possible.
Here is an example of the algorithm to do this:
Assume some partitions, we first sort them based on partiiton used memory size.
[(partition_4, 1), (partition_3, 1), (partition_2, 2), (partition_1, 7), (partition_0, 9)]
The available memory is 10.
step 1: self.find_partition_to_combine_based_on_size()
First, mark bfs level for each partition
Second, look the smallest partition, partition_4: 10 - 1 = 9
It means any partition has a used memory equal or less than 9 could combine this partition
We go from the largest and selection partition_0.
Check the bfs level for two partitions, if the level difference is less than 2,
it can be combined.
Then repeat step 1.
"""
find_combination = True
while find_combination:
# Sort partitions based on memory size
sorted_partitions = sorted(partitions, key=lambda p: p.used_mem_bytes)
# Mark bfs level
get_bfs_level_partition(self.partitions)
find_combination, partitions = \
find_partition_to_combine_based_on_size(
sorted_partitions,
available_mem_bytes,
partitions
)
return
def calculate_mem_bytes_needed(p1, p2):
"""Given two partitions, calculate how many mem bytes
are needed if two partitions are combined
"""
nodes = p1.nodes.union(p2.nodes)
mem_bytes_needed = 0
for node in nodes:
mem_bytes_needed += get_extra_size_of(node, nodes)
return mem_bytes_needed
def find_partition_to_combine_based_on_size(
sorted_partitions: List[Partition],
available_mem_bytes: int,
partitions: List[Partition]
) -> Tuple[bool, List[Partition]]:
"""step 1 in combine_partition_based_on_size()"""
find_combination = False
smallest_partition = sorted_partitions.pop(0)
for p in sorted_partitions[::-1]:
if abs(smallest_partition.bfs_level - p.bfs_level) <= 1:
# Calculate how many bytes needed if combined
mem_bytes_needed = calculate_mem_bytes_needed(p, smallest_partition)
if mem_bytes_needed <= available_mem_bytes:
combine_two_partitions(p, smallest_partition, self.partitions)
partitions.remove(smallest_partition)
partitions.remove(p)
partitions.append(self.partitions[-1])
find_combination = True
break
return find_combination, partitions
def reset_partition_in_sparse_nn(partition, new_partition=True):
if in_embedding_region:
embedding_partitions.append(partition)
else:
non_embedding_partitions.append(partition)
if new_partition:
partition = self.create_partition()
partition.left_mem_bytes = available_mem_bytes
return partition
return None
def is_embedding_node(node: Node) -> bool:
"""Check if a node is an embedding node"""
if node.op == 'call_module':
submodule = self.graph_module
for atom in str(node.target).split('.'):
if not hasattr(submodule, atom):
raise RuntimeError(f'Module {submodule} has no attribute {atom}')
submodule = getattr(submodule, atom)
if 'Embedding' in str(submodule):
return True
return False
        # Track embedding partitions and non-embedding partitions separately
embedding_partitions: List[Partition] = []
non_embedding_partitions: List[Partition] = []
# A Flag to check the boundary
in_embedding_region: bool = False
partition = self.create_partition()
for node in self.graph_module.graph.nodes:
if node.op in {'call_module', 'call_method', 'call_function'}:
# Check if crossing the boundary between embedding nodes and non embedding nodes
if is_embedding_node(node) != in_embedding_region:
# Crossing the boundary
# Check if the current partition is an empty partition
if partition.used_mem_bytes != 0:
# The current partition isn't an empty partition. Create a new one.
partition = reset_partition_in_sparse_nn(partition)
in_embedding_region = not in_embedding_region
total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
if total_size_of_input_nodes + partition.used_mem_bytes > available_mem_bytes:
partition = reset_partition_in_sparse_nn(partition)
total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
if total_size_of_input_nodes > available_mem_bytes:
                        raise RuntimeError(str(node.target) + ' is too large to fit into a device')
partition.add_node(node)
reset_partition_in_sparse_nn(partition, new_partition=False)
# Set parents and children for partitions
set_parents_and_children(self.partitions)
# Combining non-embedding partitions
combine_partitions_based_on_size(non_embedding_partitions, available_mem_bytes)
# Combining embedding partitions
combine_partitions_based_on_size(embedding_partitions, available_mem_bytes)
total_size_of_non_embedding_partitions = 0
for partition in non_embedding_partitions:
total_size_of_non_embedding_partitions += partition.used_mem_bytes
# Check if devices are enough for all partitions
if len(embedding_partitions) > len(self.devices):
msg = 'Need ' + str(len(embedding_partitions)) + ' devices, but only ' \
+ str(len(self.devices)) + ' provided'
raise RuntimeError(msg)
occupied_devices = []
for i, partition in enumerate(embedding_partitions):
# Check if all non-embedding partitions can fit into embedding partition devices
if total_size_of_non_embedding_partitions + partition.used_mem_bytes > available_mem_bytes:
raise RuntimeError(
'partition_' +
str(partition.partition_id) +
'(embedding partition) and non embedding partitions can not fit into one device'
)
else:
# Add logical device to the partition
partition.logical_device_ids = [self.devices[i].logical_id]
occupied_devices.append(self.devices[i].logical_id)
# Add logical devices to the non_embedding_partitions
for partition in non_embedding_partitions:
partition.logical_device_ids = occupied_devices
# Get the node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
return
def cost_aware_partition(
self,
transfer_rate_bytes_per_sec: float,
node_to_latency_mapping: Dict[Node, NodeLatency]
) -> None:
"""This method is to partition the fx module based on the cost.
The cost is the total latency of running the whole graph.
In partitioner_utils.py, the cost model is built.
The algorithm is:
        #1. At the beginning, each node is its own partition.
        Then we map all the partitions to the devices
        and calculate the cost.
        #2. Then try to pre-combine any two of the partitions if the two
        partitions can be combined
        (their bfs level difference is less than 2, or the two partitions are connected,
        and a partition to device mapping can still be found).
See if any partition pair could reduce the current cost.
Choose the pair that shows the minimum cost and then combine them
#3. Repeat #2 until the cost cannot be reduced.
"""
def try_combining_partitions(
p0_index,
p1_index,
partitions
) -> float:
"""Given two partitions and a list of partitions, try to combine these two partitions
and see what is the cost of the modified partition list
"""
p0 = partitions[p0_index]
p1 = partitions[p1_index]
"""If two partitions' bfs level are less than 2 or two partitions are connected to each other,
then they can be combined
"""
            if (abs(p0.bfs_level - p1.bfs_level) <= 1) or (p0 in p1.parents) or (p0 in p1.children):
combine_two_partitions(p0, p1, partitions)
# Check if a circular dependency exists after combining
if check_dependency(partitions[-1]):
return float('inf')
# Check if the modified partition list can be mapped to devices after combination
reset_partition_device(partitions)
                found_device = get_device_to_partitions_mapping(partitions, self.devices)
                if not found_device:
return float('inf')
# Calculate the new cost
partition_to_latency_mapping = get_partition_to_latency_mapping(partitions, node_to_latency_mapping)
cost = get_latency_of_partitioned_graph(partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec)
return cost
            # If the two partitions cannot be combined, the cost is inf
return float('inf')
def search_combination(
transfer_rate_bytes_per_sec,
node_to_latency_mapping
) -> bool:
"""Given transfer rate between partitions and each node's latency,
find two partitions to combine so the cost of the partitions can
be reduced.
            The algorithm is:
            1. Go through all the partition pairs and see
            if the pair of partitions can be combined.
            2. If they can be combined, the cost is calculated.
            3. Select the minimum cost and combine its corresponding partition pair.
"""
partition_to_latency_mapping = get_partition_to_latency_mapping(self.partitions, node_to_latency_mapping)
cost = get_latency_of_partitioned_graph(self.partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec)
if len(self.partitions) == 1:
return False
partition_pair: List[int] = []
for i in range(len(self.partitions) - 1):
for j in range(i + 1, len(self.partitions)):
# Try to combine the partition pair
# and see the new cost after combination
new_cost = try_combining_partitions(
i,
j,
self.partitions[:]
)
if new_cost <= cost:
partition_pair = [i, j]
cost = new_cost
reorganize_partitions(self.partitions)
# If a partition pair is found, combine them
if len(partition_pair) != 0:
p0 = self.partitions[partition_pair[0]]
p1 = self.partitions[partition_pair[1]]
combine_two_partitions(p0, p1, self.partitions)
get_bfs_level_partition(self.partitions)
reset_partition_device(self.partitions)
get_device_to_partitions_mapping(self.partitions, self.devices)
return len(partition_pair) != 0
for node in self.graph_module.graph.nodes:
if node.op not in {'placeholder', 'get_attr', 'output'}:
self.create_single_node_partition(node)
# Set up parent partitions and children partitions for each partition
set_parents_and_children(self.partitions)
# Get bfs level for each partition
get_bfs_level_partition(self.partitions)
find_combination = True
while find_combination:
# Search for a pair partition to generate the minimum new cost,
# then combine them
find_combination = search_combination(
transfer_rate_bytes_per_sec,
node_to_latency_mapping
)
# Make sure all partitions are set up correctly.
reorganize_partitions(self.partitions)
# Set up node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
return
def kl_based_partition(
self,
transfer_rate_bytes_per_sec: float,
node_to_latency_mapping: Dict[Node, NodeLatency]
) -> None:
"""This function is a cost aware partition based
on Kernighan-Lin algorithm.
First, the graph is partitioned using size_based_partition.
Then, each node is swapped with any other node in a different
partition, and at the same time, the cost is estimated after
the swapping.
For example, we have nodes n0, n1, n2, n3 and n4.
Using size_based_partition, n0 and n1 are in Partition p0.
        n2, n3 and n4 are in Partition p1. The current cost is estimated.
        We first try using n0 to swap with n2 from the other partition.
        Suppose swapping n0 and n2 shows a lower cost
        than the current cost and it is the minimum among other pairs like
        (n0, None) (this means moving n0 to Partition p1 without swapping any node back),
        (n0, n3) and (n0, n4). We then swap n0 and n2 and set the new cost
        as the current cost.
        We repeat this process for all the other nodes until all swapping pairs
        have been tried.
"""
def swap_nodes(n0, n1, p0, p1):
# Either n0 or n1 could be None
# That means we simply move the node
# to another partition
if n0 is not None:
p0.remove_node(n0)
p1.add_node(n0)
if n1 is not None:
p0.add_node(n1)
p1.remove_node(n1)
def try_swap_nodes(n0, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec):
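            # Tentatively perform the swap and score it (inf if it creates a circular
            # dependency or no valid device mapping exists), then swap back so that
            # self.partitions is unchanged when this helper returns.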
cost = float('inf')
swap_nodes(n0, n1, p0, p1)
# Reorganize partitions after swapping
reorganize_partitions(self.partitions)
# Check if there is a circular dependency after swapping
if (not check_dependency(p0)) and (not check_dependency(p1)):
reset_partition_device(self.partitions)
partition_to_latency_mapping = get_partition_to_latency_mapping(
self.partitions,
node_to_latency_mapping
)
# Check if all partitions can be mapped to logical devices after swapping
found_device = get_device_to_partitions_mapping(self.partitions, self.devices)
if not found_device:
cost = float('inf')
else:
cost = get_latency_of_partitioned_graph(
self.partitions,
partition_to_latency_mapping,
transfer_rate_bytes_per_sec
)
# Swap back and reset all partitions back to original
swap_nodes(n1, n0, p0, p1)
reorganize_partitions(self.partitions)
reset_partition_device(self.partitions)
get_device_to_partitions_mapping(self.partitions, self.devices)
return cost
def swap_node_to_partition(node, p0, p1, node_to_latency_mapping, transfer_rate_per_sec):
"""This function helps to swap one node from partition p0
with all the nodes in another partition p1
"""
p1_nodes = list(p1.nodes) + [None]
min_cost = float('inf')
node_pair: List[Node] = []
for n1 in p1_nodes:
                # Ignore the node if it is not an op node
if n1 is not None and n1.op in {'placeholder', 'get_attr'}:
continue
# Try swapping node in p0 with n1 in p1
cost = try_swap_nodes(node, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec)
if cost < min_cost:
node_pair = [node, n1]
min_cost = cost
            return min_cost, node_pair
        # First use size_based_partition
self.size_based_partition()
partition_to_latency_mapping = get_partition_to_latency_mapping(
self.partitions,
node_to_latency_mapping
)
# Calculate the cost of the partitions
cost = get_latency_of_partitioned_graph(
self.partitions,
partition_to_latency_mapping,
transfer_rate_bytes_per_sec
)
        # Keep track of the node pair that gives the best cost so far
node_pair: List[Node] = []
        # Keep track of the partition pair that the node pair belongs to
partition_pair: List[Partition] = []
# Collect all the op nodes from the graph
op_nodes = []
for n in self.graph_module.graph.nodes:
if n.op not in {'placeholder', 'get_attr', 'output'}:
op_nodes.append(n)
for node in op_nodes:
            # Find which partition the current node belongs to
p0_index = self.node_to_partition[node]
p0 = self.partitions[p0_index]
# Go through all the other partitions to swap
# with other nodes from those partitions
for p1_index, _ in enumerate(self.partitions):
if p0_index != p1_index:
p1 = self.partitions[p1_index]
new_cost, new_node_pair = swap_node_to_partition(
node,
p0,
p1,
node_to_latency_mapping,
transfer_rate_bytes_per_sec
)
# Update cost and node pair
if new_cost < cost:
cost = new_cost
node_pair = new_node_pair
partition_pair = [p0, p1]
# Do the swapping after trying all the nodes from a partition
if len(node_pair) != 0:
swap_nodes(node_pair[0], node_pair[1], partition_pair[0], partition_pair[1])
reorganize_partitions(self.partitions)
get_device_to_partitions_mapping(self.partitions, self.devices)
reorganize_partitions(self.partitions)
# Mapping the device to the partition
get_device_to_partitions_mapping(self.partitions, self.devices)
return
def aot_based_partition(self, node_to_partition_mapping, partition_to_logical_device_mapping):
"""This function helps to rebuild the partitions given the nodes and its
corresponding partition id
"""
partition_id_to_partition_mapping: Dict[int, Partition] = {}
self.node_to_partition = node_to_partition_mapping
for node in self.node_to_partition:
partition_id = self.node_to_partition[node]
# If the requested partition has not been created, create the partition
if partition_id not in partition_id_to_partition_mapping:
partition = Partition(partition_id)
self.partitions.append(partition)
partition_id_to_partition_mapping[partition_id] = partition
partition.logical_device_ids = partition_to_logical_device_mapping[partition_id]
else:
partition = partition_id_to_partition_mapping[self.node_to_partition[node]]
# Add the current node into the partition
partition.add_node(node)
| [
"[email protected]"
]
| |
bb42ad482fbb2350569ef7809947d727ac99b2f2 | 9ecfba7ed75b2869b09ec3e79c1f45dab21b9640 | /others/cropimage.py | 2583ee07c70b7822aad8ceca2237d18e83ee22a9 | [
"MIT"
]
| permissive | pection/Scraper-website | ca7af593e421d4f09bfc280d6ec24e6562e0f6c3 | 77ed1df5103e1d8222a055c19acf5af255ffa4aa | refs/heads/master | 2022-12-25T15:51:46.958483 | 2020-10-07T13:58:40 | 2020-10-07T13:58:40 | 315,717,273 | 1 | 0 | MIT | 2020-11-24T18:18:42 | 2020-11-24T18:18:41 | null | UTF-8 | Python | false | false | 817 | py | import cv2
import os
import sys
import numpy as np
from PIL import Image
num=1
path ="//Users/pection/Documents/Crop/"
#we shall store all the file names in this list
filelist=[]
for root, dirs, files in os.walk(path):
for file in files:
if(file.endswith(".png")):
filelist.append(os.path.join(root,file))
print (filelist)
# logo=Image.open('logo.png')
# logo2=Image.open('logo2.png')
# watermark = Image.open('WatermarkB5.png')
# watermark2 = Image.open('WatermarkB3.png')
#
# logoWidth = watermark.width
# logoHeight = watermark.height
# watermarkW=watermark.width
# watermarkH=watermark.height
# logo2Width = watermark2.width
# logo2Height = watermark2.height
for filename in filelist:
img = cv2.imread(filename,-1)
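    # Keep a 410x410 window starting at pixel (40, 40); this assumes every input image
    # is at least 450x450, otherwise the slice is silently smaller.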
crop_img = img[40:450, 40:450]
cv2.imwrite(filename,crop_img)
| [
"[email protected]"
]
| |
4171f3f9288b6953d7b6ea9c6d40cec41f3b8406 | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/contrib/slim/python/slim/nets/inception_v3.pyi | df9dccde040bd84ccfd994e2ec65a1450b9e965f | []
| no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | pyi | # Stubs for tensorflow.contrib.slim.python.slim.nets.inception_v3 (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.contrib import layers as layers
from tensorflow.contrib.framework.python.ops import arg_scope as arg_scope
from tensorflow.contrib.layers.python.layers import initializers as initializers, regularizers as regularizers
from tensorflow.python.framework import ops as ops
from tensorflow.python.ops import array_ops as array_ops, init_ops as init_ops, nn_ops as nn_ops, variable_scope as variable_scope
from typing import Any as Any, Optional as Optional
trunc_normal: Any
def inception_v3_base(inputs: Any, final_endpoint: str = ..., min_depth: int = ..., depth_multiplier: float = ..., scope: Optional[Any] = ...): ...
def inception_v3(inputs: Any, num_classes: int = ..., is_training: bool = ..., dropout_keep_prob: float = ..., min_depth: int = ..., depth_multiplier: float = ..., prediction_fn: Any = ..., spatial_squeeze: bool = ..., reuse: Optional[Any] = ..., scope: str = ...): ...
def inception_v3_arg_scope(weight_decay: float = ..., batch_norm_var_collection: str = ..., batch_norm_decay: float = ..., batch_norm_epsilon: float = ..., updates_collections: Any = ..., use_fused_batchnorm: bool = ...): ...
| [
"[email protected]"
]
| |
f0d8f6c720eb71434eb0ba1ce0acdcdedf4ed128 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/2e033ce6e3a2cdde5174895cadb3b406b2a013729dd641fee2cebd9f7ed97879/cv2/videoio_registry.py | 0be5f4768f19952c2adff113bfb96d3a9ccf5394 | []
| no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | # encoding: utf-8
# module cv2.videoio_registry
# from C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# functions
def getBackendName(api): # real signature unknown; restored from __doc__
"""
getBackendName(api) -> retval
. @brief Returns backend API name or "UnknownVideoAPI(xxx)"
. @param api backend ID (#VideoCaptureAPIs)
"""
pass
def getBackends(): # real signature unknown; restored from __doc__
"""
getBackends() -> retval
. @brief Returns list of all available backends
"""
pass
def getCameraBackends(): # real signature unknown; restored from __doc__
"""
getCameraBackends() -> retval
. @brief Returns list of available backends which works via `cv::VideoCapture(int index)`
"""
pass
def getStreamBackends(): # real signature unknown; restored from __doc__
"""
getStreamBackends() -> retval
. @brief Returns list of available backends which works via `cv::VideoCapture(filename)`
"""
pass
def getWriterBackends(): # real signature unknown; restored from __doc__
"""
getWriterBackends() -> retval
. @brief Returns list of available backends which works via `cv::VideoWriter()`
"""
pass
# no classes
| [
"[email protected]"
]
| |
ace559b46e79210154608496701d451bae6e9f1d | df21c2c16ecfb4a46b1d88b0474291ac67c8a05a | /app/migrations/0003_auto_20180708_1239.py | 5d2a0d3ed37768c660d5b76e1dec863b6836cb8e | []
| no_license | aditya2222/CatchUp | 245dc4d122be7d596f8928d32a33acbbd754a4f3 | 915363faf7b59c81da070a70f9587f177a20d695 | refs/heads/master | 2020-03-22T14:21:17.689064 | 2018-07-08T14:08:24 | 2018-07-08T14:08:24 | 140,172,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | # Generated by Django 2.0.7 on 2018-07-08 12:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20180708_1219'),
]
operations = [
migrations.AddField(
model_name='post',
name='CurrentUser',
field=models.CharField(blank=True, max_length=120, null=True),
),
migrations.AlterField(
model_name='post',
name='UserName',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
]
| |
e372ef2a62d72abec5ba965d40ac64c52e42e1cd | 6da9c8536378131cc28d6a9bbe2d1de7de70fbe8 | /Hackerrank/_Contests/Project_Euler/Python/pe009.py | 25a6197d9d0384d61e15f5053dfd1e8bf479f99c | []
| no_license | austinsonger/CodingChallenges | 50f61330270cb6452715e6c28ae93b4595df6aa3 | 0cdc23fb909aa06a24294d923cedd37621e56a81 | refs/heads/master | 2021-04-30T13:21:36.111770 | 2019-07-16T18:49:02 | 2019-07-16T18:49:02 | 121,293,018 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | '''
Special Pythagorean triplet
Problem 9
A Pythagorean triplet is a set of three natural
numbers, a < b < c, for which, a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet
for which a + b + c = 1000.
Find the product abc.
'''
__author__ = 'SUN'
if __name__ == '__main__':
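    # Since a < b < c and a + b + c = 1000, a must stay below 1000/3 and b below 1000/2,
    # which is why the ranges stop at 333 and 500.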
for a in range(1, 333):
for b in range(a + 1, 500):
c = 1000 - a - b
if a ** 2 + b ** 2 == c ** 2:
print("a =", a, ", b =", b, ", c =", c, ', a * b * c = ', a * b
* c)
exit()
| [
"[email protected]"
]
| |
be7023cfd8e20ca8aa5c7262dc094051426d8610 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/935e39a6b43731383c8ecd4f86063224edc819ebd6d95bfabab328fca05f4912/cython_runtime.py | 37c3804fcd8cb40135e6b055396f17c83e3f5186 | []
| no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # encoding: utf-8
# module cython_runtime
# from C:\Users\Doly\Anaconda3\lib\site-packages\scipy\special\_ellip_harm_2.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# no functions
# no classes
| [
"[email protected]"
]
| |
246b0476cf8c2531744051a05c4b6a1b6a94b575 | 71969e3559d93efbd560265db5264b1d93ddaaa2 | /LSpider/urls.py | 9206fa2394bfa78e8e9f921e98893e22ef2bdb57 | [
"MIT"
]
| permissive | morole/LSpider | e3cc28c4afd060325d12a622c587cb45841a6e6d | 1dcdd820a8c0520cc8b3c851a5ba7bd06fcbf2f8 | refs/heads/master | 2023-06-20T21:58:43.979326 | 2021-08-02T02:36:45 | 2021-08-02T02:36:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | """LSpider URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.urls import path, include
from django.contrib import admin
urlpatterns = [
# url(r'^admin/', admin.site.urls),
path('', include('web.index.urls')),
]
| [
"[email protected]"
]
| |
28e9be7467b749a9f75b1304978786d2c3f3c9d7 | 756f1c2c014b928c57fc2001347abbb1f104b696 | /python/marvin/core/caching_query.py | 6c09ef2b321a6d4aef280f971b2724df5f7a9921 | [
"BSD-3-Clause"
]
| permissive | sdss/marvin | ebe1e5325ed20fb46324ae6529bcc9acc220bd10 | db4c536a65fb2f16fee05a4f34996a7fd35f0527 | refs/heads/main | 2022-11-08T23:16:00.622114 | 2022-11-02T15:25:53 | 2022-11-02T15:25:53 | 71,501,855 | 56 | 40 | BSD-3-Clause | 2022-08-11T13:16:21 | 2016-10-20T20:30:15 | Python | UTF-8 | Python | false | false | 9,153 | py | #!/usr/bin/env python
# encoding: utf-8
"""caching_query.py
Represent functions and classes
which allow the usage of Dogpile caching with SQLAlchemy.
Introduces a query option called FromCache.
The new concepts introduced here are:
* CachingQuery - a Query subclass that caches and
retrieves results in/from dogpile.cache.
* FromCache - a query option that establishes caching
parameters on a Query
* RelationshipCache - a variant of FromCache which is specific
to a query invoked during a lazy load.
* _key_from_query - creates a cache key from
a Query.
The rest of what's here are standard SQLAlchemy and
dogpile.cache constructs.
"""
from sqlalchemy.orm.interfaces import MapperOption
from sqlalchemy.orm.query import Query
from dogpile.cache.api import NO_VALUE
class CachingQuery(Query):
"""A Query subclass which optionally loads full results from a dogpile
cache region.
The CachingQuery optionally stores additional state that allows it to
consult a dogpile.cache cache before accessing the database, in the form
of a FromCache or RelationshipCache object. Each of these objects
refer to the name of a :class:`dogpile.cache.Region` that's been configured
and stored in a lookup dictionary. When such an object has associated
itself with the CachingQuery, the corresponding :class:`dogpile.cache.Region`
is used to locate a cached result. If none is present, then the
Query is invoked normally, the results being cached.
The FromCache and RelationshipCache mapper options below represent
the "public" method of configuring this state upon the CachingQuery.
"""
def __init__(self, regions, *args, **kw):
self.cache_regions = regions
Query.__init__(self, *args, **kw)
def __iter__(self):
"""override __iter__ to pull results from dogpile
if particular attributes have been configured.
Note that this approach does *not* detach the loaded objects from
the current session. If the cache backend is an in-process cache
(like "memory") and lives beyond the scope of the current session's
transaction, those objects may be expired. The method here can be
modified to first expunge() each loaded item from the current
session before returning the list of items, so that the items
in the cache are not the same ones in the current Session.
"""
if hasattr(self, '_cache_region'):
return self.get_value(createfunc=lambda: list(Query.__iter__(self)))
else:
return Query.__iter__(self)
def _get_cache_plus_key(self):
"""Return a cache region plus key."""
dogpile_region = self.cache_regions[self._cache_region.region]
if self._cache_region.cache_key:
key = self._cache_region.cache_key
else:
key = _key_from_query(self)
return dogpile_region, key
def invalidate(self):
"""Invalidate the cache value represented by this Query."""
dogpile_region, cache_key = self._get_cache_plus_key()
dogpile_region.delete(cache_key)
def get_value(self, merge=True, createfunc=None,
expiration_time=None, ignore_expiration=False):
"""Return the value from the cache for this query.
Raise KeyError if no value present and no
createfunc specified.
"""
dogpile_region, cache_key = self._get_cache_plus_key()
# ignore_expiration means, if the value is in the cache
# but is expired, return it anyway. This doesn't make sense
# with createfunc, which says, if the value is expired, generate
# a new value.
assert not ignore_expiration or not createfunc, \
"Can't ignore expiration and also provide createfunc"
if ignore_expiration or not createfunc:
cached_value = dogpile_region.get(cache_key,
expiration_time=expiration_time,
ignore_expiration=ignore_expiration)
else:
cached_value = dogpile_region.get_or_create(
cache_key,
createfunc,
expiration_time=expiration_time
)
if cached_value is NO_VALUE:
raise KeyError(cache_key)
if merge:
cached_value = self.merge_result(cached_value, load=False)
return cached_value
def set_value(self, value):
"""Set the value in the cache for this query."""
dogpile_region, cache_key = self._get_cache_plus_key()
dogpile_region.set(cache_key, value)
def use_cache(self, backend='default'):
''' Adds the cache onto a Query instance
Parameters:
backend (str):
Type of cache backend to use. Can be 'null', 'default', or 'maps'.
Returns:
returns a SQLA query instance with caching turned on
'''
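        # Usage sketch (illustrative; assumes a Session whose query class was built with
        # query_callable(regions), as in marvin's database setup):
        #   results = session.query(SomeModel).use_cache('default').all()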
from marvin import marvindb
from marvin.db.caching import regions
assert backend in list(regions.keys()), 'backend must be a proper cache backend'
return self.options(FromCache(backend)).options(*marvindb.cache_bits)
def query_callable(regions, query_cls=CachingQuery):
def query(*arg, **kw):
return query_cls(regions, *arg, **kw)
return query
def _key_from_query(query, qualifier=None):
"""Given a Query, create a cache key.
There are many approaches to this; here we use the simplest,
which is to create an md5 hash of the text of the SQL statement,
combined with stringified versions of all the bound parameters
within it. There's a bit of a performance hit with
compiling out "query.statement" here; other approaches include
setting up an explicit cache key with a particular Query,
then combining that with the bound parameter values.
"""
stmt = query.with_labels().statement
compiled = stmt.compile()
params = compiled.params
# here we return the key as a long string. our "key mangler"
# set up with the region will boil it down to an md5.
return " ".join(
[str(compiled)] +
[str(params[k]) for k in sorted(params)])
class FromCache(MapperOption):
"""Specifies that a Query should load results from a cache."""
propagate_to_loaders = False
def __init__(self, region="default", cache_key=None):
"""Construct a new FromCache.
:param region: the cache region. Should be a
region configured in the dictionary of dogpile
regions.
:param cache_key: optional. A string cache key
that will serve as the key to the query. Use this
if your query has a huge amount of parameters (such
as when using in_()) which correspond more simply to
some other identifier.
"""
self.region = region
self.cache_key = cache_key
def process_query(self, query):
"""Process a Query during normal loading operation."""
query._cache_region = self
class RelationshipCache(MapperOption):
"""Specifies that a Query as called within a "lazy load"
should load results from a cache."""
propagate_to_loaders = True
def __init__(self, attribute, region="default", cache_key=None):
"""Construct a new RelationshipCache.
:param attribute: A Class.attribute which
indicates a particular class relationship() whose
lazy loader should be pulled from the cache.
:param region: name of the cache region.
:param cache_key: optional. A string cache key
that will serve as the key to the query, bypassing
the usual means of forming a key from the Query itself.
"""
self.region = region
self.cache_key = cache_key
self._relationship_options = {
(attribute.property.parent.class_, attribute.property.key): self
}
def process_query_conditionally(self, query):
"""Process a Query that is used within a lazy loader.
(the process_query_conditionally() method is a SQLAlchemy
hook invoked only within lazyload.)
"""
if query._current_path:
mapper, prop = query._current_path[-2:]
key = prop.key
for cls in mapper.class_.__mro__:
if (cls, key) in self._relationship_options:
relationship_option = self._relationship_options[(cls, key)]
query._cache_region = relationship_option
break
def and_(self, option):
"""Chain another RelationshipCache option to this one.
While many RelationshipCache objects can be specified on a single
Query separately, chaining them together allows for a more efficient
lookup during load.
"""
self._relationship_options.update(option._relationship_options)
return self
| [
"[email protected]"
]
| |
2de2010bec76a55f68fd7df8729f7d83ce87a3ea | fe8360d9284d8156cd557d3a757645c11849cdd9 | /models/address.py | 3c11b1443ea2136894676b06698d4e57f8b4cd02 | []
| no_license | hvanreenen/fhir-rest-server | 5a1a5bcb9a3477d9f9d133c263f61ba202db5741 | 36ae55706aba0fdfcf084dbb24bd8c73929b3e0f | refs/heads/master | 2021-01-10T23:45:06.793874 | 2016-10-20T09:57:04 | 2016-10-20T09:57:04 | 70,390,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,973 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/Address) on 2016-10-07.
# 2016, SMART Health IT.
from . import element
class Address(element.Element):
""" A postal address.
There is a variety of postal address formats defined around the world. This
format defines a superset that is the basis for all addresses around the
world.
"""
resource_name = "Address"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.city = None #type: str
""" Name of city, town etc..
Type `str`. """
self.country = None #type: str
""" Country (can be ISO 3166 3 letter code).
Type `str`. """
self.district = None #type: str
""" District name (aka county).
Type `str`. """
self.line = None #type: List[str]
""" Street name, number, direction & P.O. Box etc..
List of `str` items. """
self.period = None #type: period.Period
""" Time period when address was/is in use.
Type `Period` (represented as `dict` in JSON). """
self.postalCode = None #type: str
""" Postal code for area.
Type `str`. """
self.state = None #type: str
""" Sub-unit of country (abbreviations ok).
Type `str`. """
self.text = None #type: str
""" Text representation of the address.
Type `str`. """
self.type = None #type: str
""" postal | physical | both.
Type `str`. """
self.use = None #type: str
""" home | work | temp | old - purpose of this address.
Type `str`. """
super(Address, self).__init__(jsondict=jsondict, strict=strict)
def __str__(self):
return ''
def elementProperties(self):
js = super(Address, self).elementProperties()
js.extend([
("city", "city", str, False, None, False),
("country", "country", str, False, None, False),
("district", "district", str, False, None, False),
("line", "line", str, True, None, False),
("period", "period", period.Period, False, None, False),
("postalCode", "postalCode", str, False, None, False),
("state", "state", str, False, None, False),
("text", "text", str, False, None, False),
("type", "type", str, False, None, False),
("use", "use", str, False, None, False),
])
return js
from . import period
| [
"[email protected]"
]
| |
e4ffd83343645d489fd7f0901317a07d4bdea4b1 | c0a25bd77d98e6087c745d5fa2862c4a715a8f59 | /standupmeeting/settings.py | 241a863296e0a608133996cb32846d82c37359a1 | []
| no_license | codyowl/standupmeeting | a84f356b611bd87956b9aa15c58a6ca63fbffebc | bd2a782406901f492f54c1780e1d85d07fe51c20 | refs/heads/master | 2021-01-21T15:18:49.137211 | 2017-06-17T17:47:40 | 2017-06-17T17:47:40 | 91,837,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,870 | py | """
Django settings for standupmeeting project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z5)=jv1ho$%@891l#l)x47*zq@4*!0$v07fk@srtz+2)rps^3j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts',
'home',
'dashboard',
'core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'standupmeeting.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'standupmeeting.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'standupmeeting',
'USER': 'root',
'PASSWORD': 'root',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
) | [
"[email protected]"
]
| |
1b6a2aa29c25d01d109682bef2c4e146e7d3ae9a | 7b4cc3814338b600db560324e615cf5c3a02bff5 | /test/test_inline_response20019_ranks_sum.py | 7c843351bb5fea0cf90e0166453b3ff6628bd10a | []
| no_license | wood-run/opendota-client | 58ea278c94d3edad0daf695438d5ec2a3d90fe08 | 2cd7defca67c7efde4ee414e9dcd8685245cd167 | refs/heads/master | 2022-12-29T02:17:26.862289 | 2020-10-13T08:29:06 | 2020-10-13T08:29:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | # coding: utf-8
"""
OpenDota API
# Introduction The OpenDota API provides Dota 2 related data including advanced match data extracted from match replays. You can find data that can be used to convert hero and ability IDs and other information provided by the API from the [dotaconstants](https://github.com/odota/dotaconstants) repository. **Beginning 2018-04-22, the OpenDota API is limited to 50,000 free calls per month and 60 requests/minute** We offer a Premium Tier with unlimited API calls and higher rate limits. Check out the [API page](https://www.opendota.com/api-keys) to learn more. # noqa: E501
OpenAPI spec version: 18.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import opendota_client
from opendota_client.models.inline_response20019_ranks_sum import InlineResponse20019RanksSum # noqa: E501
from opendota_client.rest import ApiException
class TestInlineResponse20019RanksSum(unittest.TestCase):
"""InlineResponse20019RanksSum unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInlineResponse20019RanksSum(self):
"""Test InlineResponse20019RanksSum"""
# FIXME: construct object with mandatory attributes with example values
# model = opendota_client.models.inline_response20019_ranks_sum.InlineResponse20019RanksSum() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
75a51dcedafba4f54f170bc433e959f80f46a919 | 61e98b0302a43ab685be4c255b4ecf2979db55b6 | /sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/functional/too_many_nested_blocks.py | 47dbf441bd71b32547d4d652a501a6d3189ff396 | [
"BSD-3-Clause",
"EPL-2.0",
"CDDL-1.0",
"Apache-2.0",
"WTFPL",
"GPL-2.0-only",
"BSD-2-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.1",
"Classpath-exception-2.0"
]
| permissive | dzenyu/kafka | 5631c05a6de6e288baeb8955bdddf2ff60ec2a0e | d69a24bce8d108f43376271f89ecc3b81c7b6622 | refs/heads/master | 2021-07-16T12:31:09.623509 | 2021-06-28T18:22:16 | 2021-06-28T18:22:16 | 198,724,535 | 0 | 0 | Apache-2.0 | 2019-07-24T23:51:47 | 2019-07-24T23:51:46 | null | UTF-8 | Python | false | false | 2,259 | py | """Checks the maximum block level is smaller than 6 in function definitions"""
#pylint: disable=using-constant-test, missing-docstring, too-many-return-statements
def my_function():
if 1: # [too-many-nested-blocks]
for i in range(10):
if i == 2:
while True:
try:
if True:
i += 1
except IOError:
pass
if 1:
for i in range(10):
if i == 2:
while True:
try:
i += 1
except IOError:
pass
def nested_func():
if True:
for i in range(10):
while True:
if True:
if True:
yield i
nested_func()
def more_complex_function():
attr1 = attr2 = attr3 = [1, 2, 3]
if attr1:
for i in attr1:
if attr2:
return i
else:
return 'duh'
elif attr2:
for i in attr2:
if attr2:
return i
else:
return 'duh'
else:
for i in range(15):
if attr3:
return i
else:
return 'doh'
return None
def elif_function():
arg = None
if arg == 1:
return 1
elif arg == 2:
return 2
elif arg == 3:
return 3
elif arg == 4:
return 4
elif arg == 5:
return 5
elif arg == 6:
return 6
elif arg == 7:
return 7
def else_if_function():
arg = None
if arg == 1: # [too-many-nested-blocks]
return 1
else:
if arg == 2:
return 2
else:
if arg == 3:
return 3
else:
if arg == 4:
return 4
else:
if arg == 5:
return 5
else:
if arg == 6:
return 6
else:
if arg == 7:
return 7
| [
"[email protected]"
]
| |
921edfd522099ada4d11a5a777e54f9d2dca360b | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.11.16/3/1569573269.py | 7413ac80608e26f988f405d4836f82d6a23f8641 | []
| no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | import functools
import typing
import string
import random
import pytest
def leap(j: int) -> bool:
if j % 4 == 0 and (j % 100 == 0 and j % 400 != 0):
return False
elif j % 4 == 0 and (j % 100 == 0 or j % 400 != 0):
return True
else:
return False
######################################################################
## Lösung Teil 2 (Tests)
print(leap(2000))
print(leap(1660))
print(leap(1783))
print(leap(1800))
######################################################################
| [
"[email protected]"
]
| |
44743649534d60a91cc3986c48b9fcb6f15d46bd | 30d61ce0b728f31a830db6b6b1954a32551990b2 | /src/gui_config/custom/util.py | 52c871fa4478b9296be8335390e061416b42f78d | [
"MIT"
]
| permissive | hgiesel/anki_set_randomizer | 6755dc8489b703887c55a5427bbbdab858f58a65 | 1a9a22480eb6c0e7f421dc08d36d14920e43dd3e | refs/heads/master | 2022-08-24T05:45:13.339132 | 2020-01-15T17:04:26 | 2020-01-30T13:56:50 | 197,258,760 | 5 | 0 | MIT | 2022-07-20T17:28:42 | 2019-07-16T19:56:27 | JavaScript | UTF-8 | Python | false | false | 90 | py | def mapTruthValueToIcon(b):
if b:
return '✓'
else:
return '✗'
| [
"[email protected]"
]
| |
3c5b287ba292013072af0952810ed48c30cfb9e9 | 95341c85a8a116dba0d77644360ccfb346ceeb80 | /src/api-engine/api/routes/node/serializers.py | 9d954df567d1319bff4f28d77173fa89c21c0968 | [
"Apache-2.0",
"CC-BY-4.0"
]
| permissive | kuochunchang/cello | 109204905a6be17c47b6aa3268ee4bbfeadce43a | 1f778cea3a2021aabadd48e41cdd69ed1f8e979c | refs/heads/master | 2020-06-03T05:42:43.108481 | 2019-05-28T13:45:05 | 2019-05-28T13:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,661 | py | #
# SPDX-License-Identifier: Apache-2.0
#
import logging
from rest_framework import serializers
from api.common.enums import (
Operation,
NetworkType,
FabricNodeType,
FabricVersions,
HostType,
)
from api.common.serializers import PageQuerySerializer
from api.models import Node
LOG = logging.getLogger(__name__)
class NodeQuery(PageQuerySerializer, serializers.ModelSerializer):
agent_id = serializers.UUIDField(
help_text="Agent ID, only operator can use this field",
required=False,
allow_null=True,
)
class Meta:
model = Node
fields = (
"page",
"per_page",
"type",
"name",
"network_type",
"network_version",
"agent_id",
)
extra_kwargs = {"type": {"required": False}}
class NodeIDSerializer(serializers.Serializer):
id = serializers.UUIDField(help_text="ID of node")
class NodeInListSerializer(NodeIDSerializer, serializers.ModelSerializer):
agent_id = serializers.UUIDField(
help_text="Agent ID", required=False, allow_null=True
)
network_id = serializers.UUIDField(
help_text="Network ID", required=False, allow_null=True
)
class Meta:
model = Node
fields = (
"id",
"type",
"name",
"network_type",
"network_version",
"created_at",
"agent_id",
"network_id",
)
extra_kwargs = {
"id": {"required": True, "read_only": False},
"created_at": {"required": True, "read_only": False},
}
class NodeListSerializer(serializers.Serializer):
data = NodeInListSerializer(many=True, help_text="Nodes list")
total = serializers.IntegerField(
help_text="Total number of node", min_value=0
)
class NodeCreateBody(serializers.ModelSerializer):
agent_type = serializers.ChoiceField(
help_text="Agent type",
choices=HostType.to_choices(True),
required=False,
)
class Meta:
model = Node
fields = (
"network_type",
"network_version",
"type",
"agent_type",
"agent",
)
extra_kwargs = {
"network_type": {"required": True},
"network_version": {"required": True},
"type": {"required": True},
}
def validate(self, attrs):
network_type = attrs.get("network_type")
node_type = attrs.get("type")
network_version = attrs.get("network_version")
agent_type = attrs.get("agent_type")
agent = attrs.get("agent")
if network_type == NetworkType.Fabric.name.lower():
if network_version not in FabricVersions.values():
raise serializers.ValidationError("Not valid fabric version")
if node_type not in FabricNodeType.names():
raise serializers.ValidationError(
"Not valid node type for %s" % network_type
)
if agent_type is None and agent is None:
raise serializers.ValidationError("Please set agent_type or agent")
if agent_type and agent:
if agent_type != agent.type:
raise serializers.ValidationError(
"agent type not equal to agent"
)
return attrs
class NodeOperationSerializer(serializers.Serializer):
action = serializers.ChoiceField(
help_text=Operation.get_info("Operation for node:", list_str=True),
choices=Operation.to_choices(True),
)
| [
"[email protected]"
]
| |
5e93c1c35118d3f32a43a70d453bab1653d00a3c | 1e9c4294652b0f4699d85516afd54fb5697b4800 | /python_exam/0803/mnist_cnn02.py | 13b66b9af78378cf5592a9f8e0ee4e3c7dc36b17 | []
| no_license | mgh3326/GyeonggiBigDataSpecialist | 89c9fbf01036b35efca509ed3f74b9784e44ed19 | 29192a66df0913c6d9b525436772c8fd51a013ac | refs/heads/master | 2023-04-06T07:09:09.057634 | 2019-06-20T23:35:33 | 2019-06-20T23:35:33 | 138,550,772 | 3 | 2 | null | 2023-03-24T22:43:06 | 2018-06-25T06:10:59 | Jupyter Notebook | UTF-8 | Python | false | false | 4,430 | py | # -*- coding: utf-8 -*-
"""
ml_day4 (2018.08.02)
"""
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
y = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
for i in range(1000):
batch = mnist.train.next_batch(50)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
#### CNN
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
###########################################
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
x_image = tf.reshape(x, [-1,28,28,1])
## First convolutional layer
# W_conv1 = weight_variable([5, 5, 1, 32])
# b_conv1 = bias_variable([32])
W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]))
# h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# h_pool1 = max_pool_2x2(h_conv1)
conv2d01 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME')
h_conv1 = tf.nn.relu(conv2d01 + b_conv1)
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
## Second convolutional layer
# W_conv2 = weight_variable([5, 5, 32, 64])
# b_conv2 = bias_variable([64])
W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))
# h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# h_pool2 = max_pool_2x2(h_conv2)
conv2d02 = tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME')
h_conv2 = tf.nn.relu(conv2d02 + b_conv2)
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
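# Shape bookkeeping (derived from the layers above, for clarity): the 28x28x1
# input becomes 14x14x32 after the first conv/pool pair and 7x7x64 after the
# second, which is why the flatten below uses 7*7*64 features.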
## Fully connected layer
# W_fc1 = weight_variable([7 * 7 * 64, 1024])
# b_fc1 = bias_variable([1024])
W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1))
b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
## Dropout
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
## Final softmax layer
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_variable([10])
W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))
b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
## Model training and evaluation
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i%100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x:batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g"%(i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print("test accuracy %g"%accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) | [
"[email protected]"
]
| |
26accc85bb295eeec34334972d717689820a06f2 | 1c822c0d49d7b67b0896c066958148a7b0731924 | /Basic_Concepts_of_String_Manipulation/First_day!.py | d6cff37297cfcb07407b916cacdfdf68deaf9adc | [
"MIT"
]
| permissive | RKiddle/python_reg_expressions | 7e13a16475476c88543fde6dc55b53ec2fccbe37 | 9e89c1c59677ffa19a4c64a37e92bbea33fad88e | refs/heads/master | 2020-06-23T00:34:07.027628 | 2019-10-27T14:51:32 | 2019-10-27T14:51:32 | 198,446,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # Find characters in movie variable
length_string = len(movie)
# Convert to string
to_string = str(length_string)
# Predefined variable
statement = "Number of characters in this review:"
# Concatenate strings and print result
print(statement + " " + to_string)
| [
"[email protected]"
]
| |
b8fe7ae8b85c3bcd71ac6f2dae28c73ba24a674b | d7016f69993570a1c55974582cda899ff70907ec | /sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_04_01/aio/operations/_registries_operations.py | 96c7dd2ba66633f991630f8e2ddcd4c222a39f8a | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
]
| permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 10,664 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar, Union
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._registries_operations import build_get_build_source_upload_url_request, build_schedule_run_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RegistriesOperations:
"""RegistriesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _schedule_run_initial(
self,
resource_group_name: str,
registry_name: str,
run_request: "_models.RunRequest",
**kwargs: Any
) -> Optional["_models.Run"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Run"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-04-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(run_request, 'RunRequest')
request = build_schedule_run_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._schedule_run_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_schedule_run_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scheduleRun"} # type: ignore
@distributed_trace_async
async def begin_schedule_run(
self,
resource_group_name: str,
registry_name: str,
run_request: "_models.RunRequest",
**kwargs: Any
) -> AsyncLROPoller["_models.Run"]:
"""Schedules a new run based on the request parameters and add it to the run queue.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
        :param run_request: The parameters of a run that needs to be scheduled.
:type run_request: ~azure.mgmt.containerregistry.v2019_04_01.models.RunRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Run or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_04_01.models.Run]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-04-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._schedule_run_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
run_request=run_request,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_schedule_run.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scheduleRun"} # type: ignore
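    # Illustrative call pattern for the poller returned above (client/variable
    # names are placeholders, not part of this module):
    #   poller = await client.registries.begin_schedule_run(
    #       resource_group_name, registry_name, run_request)
    #   run = await poller.result()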
@distributed_trace_async
async def get_build_source_upload_url(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> "_models.SourceUploadDefinition":
"""Get the upload location for the user to be able to upload the source.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SourceUploadDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2019_04_01.models.SourceUploadDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SourceUploadDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-04-01") # type: str
request = build_get_build_source_upload_url_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
template_url=self.get_build_source_upload_url.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SourceUploadDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_build_source_upload_url.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/listBuildSourceUploadUrl"} # type: ignore
| [
"[email protected]"
]
| |
d3f0507bedcb7480314209d9473afa6749e406ff | e1f8bb28b022720445debea589c9cf091103a303 | /doc/sphinxext/mock_gui_toolkits.py | 097a3409b16793df0a2333fa9b2e06ab2289e15a | []
| no_license | demotu/matplotlib | e5a4e6c7047373b3ead918c40c97f93eb09c562d | 1662e05278ecaea064b9149c4fcb15df9f337862 | refs/heads/master | 2021-01-22T00:06:39.310427 | 2018-06-12T20:38:12 | 2018-06-12T20:38:12 | 24,751,842 | 1 | 0 | null | 2018-06-12T20:38:13 | 2014-10-03T08:38:36 | Python | UTF-8 | Python | false | false | 6,886 | py | import sys
from unittest.mock import MagicMock
class MyCairoCffi(MagicMock):
pass
class MyPyQt4(MagicMock):
class QtGui(object):
# PyQt4.QtGui public classes.
# Generated with
# textwrap.fill([name for name in dir(PyQt4.QtGui)
# if isinstance(getattr(PyQt4.QtGui, name), type)])
_QtGui_public_classes = """\
Display QAbstractButton QAbstractGraphicsShapeItem
QAbstractItemDelegate QAbstractItemView QAbstractPrintDialog
QAbstractProxyModel QAbstractScrollArea QAbstractSlider
QAbstractSpinBox QAbstractTextDocumentLayout QAction QActionEvent
QActionGroup QApplication QBitmap QBoxLayout QBrush QButtonGroup
QCalendarWidget QCheckBox QClipboard QCloseEvent QColor QColorDialog
QColumnView QComboBox QCommandLinkButton QCommonStyle QCompleter
QConicalGradient QContextMenuEvent QCursor QDataWidgetMapper QDateEdit
QDateTimeEdit QDesktopServices QDesktopWidget QDial QDialog
QDialogButtonBox QDirModel QDockWidget QDoubleSpinBox QDoubleValidator
QDrag QDragEnterEvent QDragLeaveEvent QDragMoveEvent QDropEvent
QErrorMessage QFileDialog QFileIconProvider QFileOpenEvent
QFileSystemModel QFocusEvent QFocusFrame QFont QFontComboBox
QFontDatabase QFontDialog QFontInfo QFontMetrics QFontMetricsF
QFormLayout QFrame QGesture QGestureEvent QGestureRecognizer QGlyphRun
QGradient QGraphicsAnchor QGraphicsAnchorLayout QGraphicsBlurEffect
QGraphicsColorizeEffect QGraphicsDropShadowEffect QGraphicsEffect
QGraphicsEllipseItem QGraphicsGridLayout QGraphicsItem
QGraphicsItemAnimation QGraphicsItemGroup QGraphicsLayout
QGraphicsLayoutItem QGraphicsLineItem QGraphicsLinearLayout
QGraphicsObject QGraphicsOpacityEffect QGraphicsPathItem
QGraphicsPixmapItem QGraphicsPolygonItem QGraphicsProxyWidget
QGraphicsRectItem QGraphicsRotation QGraphicsScale QGraphicsScene
QGraphicsSceneContextMenuEvent QGraphicsSceneDragDropEvent
QGraphicsSceneEvent QGraphicsSceneHelpEvent QGraphicsSceneHoverEvent
QGraphicsSceneMouseEvent QGraphicsSceneMoveEvent
QGraphicsSceneResizeEvent QGraphicsSceneWheelEvent
QGraphicsSimpleTextItem QGraphicsTextItem QGraphicsTransform
QGraphicsView QGraphicsWidget QGridLayout QGroupBox QHBoxLayout
QHeaderView QHelpEvent QHideEvent QHoverEvent QIcon QIconDragEvent
QIconEngine QIconEngineV2 QIdentityProxyModel QImage QImageIOHandler
QImageReader QImageWriter QInputContext QInputContextFactory
QInputDialog QInputEvent QInputMethodEvent QIntValidator QItemDelegate
QItemEditorCreatorBase QItemEditorFactory QItemSelection
QItemSelectionModel QItemSelectionRange QKeyEvent QKeyEventTransition
QKeySequence QLCDNumber QLabel QLayout QLayoutItem QLineEdit
QLinearGradient QListView QListWidget QListWidgetItem QMainWindow
QMatrix QMatrix2x2 QMatrix2x3 QMatrix2x4 QMatrix3x2 QMatrix3x3
QMatrix3x4 QMatrix4x2 QMatrix4x3 QMatrix4x4 QMdiArea QMdiSubWindow
QMenu QMenuBar QMessageBox QMimeSource QMouseEvent
QMouseEventTransition QMoveEvent QMovie QPageSetupDialog QPaintDevice
QPaintEngine QPaintEngineState QPaintEvent QPainter QPainterPath
QPainterPathStroker QPalette QPanGesture QPen QPicture QPictureIO
QPinchGesture QPixmap QPixmapCache QPlainTextDocumentLayout
QPlainTextEdit QPolygon QPolygonF QPrintDialog QPrintEngine
QPrintPreviewDialog QPrintPreviewWidget QPrinter QPrinterInfo
QProgressBar QProgressDialog QProxyModel QPushButton QPyTextObject
QQuaternion QRadialGradient QRadioButton QRawFont QRegExpValidator
QRegion QResizeEvent QRubberBand QScrollArea QScrollBar
QSessionManager QShortcut QShortcutEvent QShowEvent QSizeGrip
QSizePolicy QSlider QSortFilterProxyModel QSound QSpacerItem QSpinBox
QSplashScreen QSplitter QSplitterHandle QStackedLayout QStackedWidget
QStandardItem QStandardItemModel QStaticText QStatusBar
QStatusTipEvent QStringListModel QStyle QStyleFactory QStyleHintReturn
QStyleHintReturnMask QStyleHintReturnVariant QStyleOption
QStyleOptionButton QStyleOptionComboBox QStyleOptionComplex
QStyleOptionDockWidget QStyleOptionDockWidgetV2 QStyleOptionFocusRect
QStyleOptionFrame QStyleOptionFrameV2 QStyleOptionFrameV3
QStyleOptionGraphicsItem QStyleOptionGroupBox QStyleOptionHeader
QStyleOptionMenuItem QStyleOptionProgressBar QStyleOptionProgressBarV2
QStyleOptionRubberBand QStyleOptionSizeGrip QStyleOptionSlider
QStyleOptionSpinBox QStyleOptionTab QStyleOptionTabBarBase
QStyleOptionTabBarBaseV2 QStyleOptionTabV2 QStyleOptionTabV3
QStyleOptionTabWidgetFrame QStyleOptionTabWidgetFrameV2
QStyleOptionTitleBar QStyleOptionToolBar QStyleOptionToolBox
QStyleOptionToolBoxV2 QStyleOptionToolButton QStyleOptionViewItem
QStyleOptionViewItemV2 QStyleOptionViewItemV3 QStyleOptionViewItemV4
QStylePainter QStyledItemDelegate QSwipeGesture QSyntaxHighlighter
QSystemTrayIcon QTabBar QTabWidget QTableView QTableWidget
QTableWidgetItem QTableWidgetSelectionRange QTabletEvent
QTapAndHoldGesture QTapGesture QTextBlock QTextBlockFormat
QTextBlockGroup QTextBlockUserData QTextBrowser QTextCharFormat
QTextCursor QTextDocument QTextDocumentFragment QTextDocumentWriter
QTextEdit QTextFormat QTextFragment QTextFrame QTextFrameFormat
QTextImageFormat QTextInlineObject QTextItem QTextLayout QTextLength
QTextLine QTextList QTextListFormat QTextObject QTextObjectInterface
QTextOption QTextTable QTextTableCell QTextTableCellFormat
QTextTableFormat QTimeEdit QToolBar QToolBox QToolButton QToolTip
QTouchEvent QTransform QTreeView QTreeWidget QTreeWidgetItem
QTreeWidgetItemIterator QUndoCommand QUndoGroup QUndoStack QUndoView
QVBoxLayout QValidator QVector2D QVector3D QVector4D QWhatsThis
QWhatsThisClickedEvent QWheelEvent QWidget QWidgetAction QWidgetItem
QWindowStateChangeEvent QWizard QWizardPage QWorkspace
QX11EmbedContainer QX11EmbedWidget QX11Info
"""
for _name in _QtGui_public_classes.split():
locals()[_name] = type(_name, (), {})
del _name
class MySip(MagicMock):
def getapi(*args):
return 1
class MyWX(MagicMock):
class Panel(object):
pass
class ToolBar(object):
pass
class Frame(object):
pass
def setup(app):
sys.modules.update(
cairocffi=MyCairoCffi(),
PyQt4=MyPyQt4(),
sip=MySip(),
wx=MyWX(),
)
return {'parallel_read_safe': True, 'parallel_write_safe': True}
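# Assumed usage: listing this module in the Sphinx ``extensions`` option of
# conf.py activates setup() above, so the docs build imports the mocks instead
# of the real GUI toolkits, e.g. extensions = [..., 'mock_gui_toolkits'].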
| [
"[email protected]"
]
| |
6a4675054e6b1622b80d37ae794ec9fbb98e9ef6 | bdd2bbef297d6edd3d335c48ab89955925d331d5 | /encyclopedia/urls.py | 5f0ded8610846862e5b0f87a8029d45d825b1c9c | []
| no_license | michelle2014/CS50W-Wiki | 424569bb1e2fd7c83fa7ff2a98c51821bcfc04fb | 0301e48db06720b0419c5939816a9be345dff9b0 | refs/heads/master | 2023-07-28T05:05:42.512177 | 2021-09-05T05:33:06 | 2021-09-05T05:33:06 | 327,516,261 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("create", views.create, name="create"),
path("edit/<str:title>", views.edit, name="edit"),
path("search", views.search, name="search"),
path("<str:title>", views.entry, name="entry")
] | [
"[email protected]"
]
| |
c7432b46e7815589e67b5f13126792906baa874b | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/optimized_659.py | 2a149860964006fc23b9d40cde8de4ed76a7020a | []
| no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,583 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((513.189, 440.035, 538.548), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((570.351, 361.611, 332.233), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((553.44, 456.734, 234.315), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((596.433, 688.793, 431.456), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((565.199, 405.652, 345.85), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((579.294, 381.001, 348.501), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((368.44, 406.247, 367.378), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((448.219, 577.439, 439.358), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((307.69, 553.195, 375.403), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((783.564, 349.669, 391.617), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((574.444, 482.119, 400.257), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((585.011, 387.232, 281.884), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((580.031, 621.288, 368.759), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((579.496, 421.971, 322.229), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((561.886, 406.09, 420.253), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((410.629, 371.023, 343.742), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((602.083, 424.911, 407.51), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((622.903, 431.478, 254.626), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((540.708, 579.704, 242.646), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((602.373, 438.065, 358.834), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((660.617, 589.743, 366.602), (0.3, 0.69, 0.29), 21.9005)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
]
| |
27a2b8233ca588d5ce1b4954241ac87f2ee31b23 | 99e44f844d78de330391f2b17bbf2e293bf24b1b | /pytorch/tools/autograd/nested_dict.py | e1e09814199153aa94647c2246c983b2ba3ea303 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
]
| permissive | raghavnauhria/whatmt | be10d57bcd6134dd5714d0c4058abd56a1b35a13 | c20483a437c82936cb0fb8080925e37b9c4bba87 | refs/heads/master | 2022-12-04T05:39:24.601698 | 2019-07-22T09:43:30 | 2019-07-22T09:43:30 | 193,026,689 | 0 | 1 | MIT | 2022-11-28T17:50:19 | 2019-06-21T03:48:20 | C++ | UTF-8 | Python | false | false | 581 | py | # TODO: refactor nested_dict into common library with ATen
class nested_dict(object):
"""
A nested dict is a dictionary with a parent. If key lookup fails,
it recursively continues into the parent. Writes always happen to
the top level dict.
"""
def __init__(self, base, parent):
self.base, self.parent = base, parent
def __contains__(self, item):
return item in self.base or item in self.parent
def __getitem__(self, x):
r = self.base.get(x)
if r is not None:
return r
return self.parent[x]
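# Minimal usage sketch (illustrative only, not part of the generator):
#   parent = {'returns': 'Tensor', 'mode': 'forward'}
#   env = nested_dict({'mode': 'backward'}, parent)
#   env['mode']        # -> 'backward' (found in the base dict)
#   env['returns']     # -> 'Tensor'   (falls back to the parent)
#   'returns' in env   # -> True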
| [
"[email protected]"
]
| |
99286b2ac35687ea7459db1976eefff58c6ac283 | 3a3c7ab7d9cadfc5610888e07dbb9d6eaaf8aa01 | /scripts/OpenFOAM/generateBodyOBJFile.py | b2dfdaab64702e895cf9fb115ccd64fdb7f598dc | [
"MIT"
]
| permissive | cubayang/snake | 7e430e8bcbf4acf99c007e5c1a646e0e6f45280c | f78844235f4d9b815b53a707f276dd634bce7a07 | refs/heads/master | 2021-01-17T20:24:27.359901 | 2016-08-18T00:34:18 | 2016-08-18T00:34:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,641 | py | # file: generateBodyOBJFile.py
# author: Olivier Mesnard ([email protected])
# brief: Convert input coordinates file into a OBJ file.
import os
import argparse
from snake.openfoam import OBJFile
from snake import miscellaneous
def parse_command_line():
"""Parses the command-line."""
print('[info] parsing command-line ...'),
# create the parser
parser = argparse.ArgumentParser(description='Generates an .OBJ file '
'that will be readable by OpenFOAM '
'mesh generator: SnappyHexMesh',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser = argparse.ArgumentParser(description='Generates an .OBJ file '
# 'that will be readable by OpenFOAM '
# 'mesh generator: SnappyHexMesh')
# fill the parser with arguments
parser.add_argument('--file', dest='file_path',
type=str,
metavar=('<path>'),
help='path of the coordinates file to convert')
parser.add_argument('--name', dest='name',
type=str,
metavar=('<name>'),
help='name of the .OBJ file generated (no extension)')
parser.add_argument('--extrusion-limits', dest='extrusion_limits',
type=float, nargs=2,
default=[0.0, 1.0],
metavar=('start', 'end'),
help='limits of the extrusion in the 3rd direction')
parser.add_argument('--save-directory', dest='save_directory',
type=str,
default=os.getcwd(),
metavar=('<directory>'),
help='directory where to save the .obj file')
# parse given options file
parser.add_argument('--options',
type=open, action=miscellaneous.ReadOptionsFromFile,
metavar=('<path>'),
help='path of the file with options to parse')
print('done')
return parser.parse_args()
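# Illustrative invocation (file and directory names are placeholders):
#   python generateBodyOBJFile.py --file body.coords --name body \
#     --extrusion-limits 0.0 1.0 --save-directory ./triSurface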
def main():
"""Generates an .OBJ file from a given coordinates file."""
args = parse_command_line()
body = OBJFile.Body2d(args.file_path,
name=args.name,
extrusion_limits=args.extrusion_limits)
body.write(save_directory=args.save_directory)
if __name__ == '__main__':
print('\n[{}] START\n'.format(os.path.basename(__file__)))
main()
print('\n[{}] END\n'.format(os.path.basename(__file__))) | [
"[email protected]"
]
| |
51a5de5a76db69817407b3251044c8d8f122a59f | 264f392530710b287ac54f40ea805638c6348cc3 | /scripts/run_tabular_bayes_dice.py | 3326a3f91fd93e0b96222614b928658af9ee75ab | [
"Apache-2.0"
]
| permissive | google-research/dice_rl | b26dd2231b0a664f11e0ede08d8209a4ace1cd2f | 6551950608ad0472ddf6e8f4075f51793c9d2763 | refs/heads/master | 2023-08-06T21:35:15.690175 | 2023-01-30T19:26:12 | 2023-01-30T19:27:38 | 285,369,787 | 106 | 14 | Apache-2.0 | 2023-01-30T19:27:44 | 2020-08-05T18:15:53 | Python | UTF-8 | Python | false | false | 6,480 | py | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running tabular BayesDICE.
Make sure to generate the datasets prior to running this script (see
`scripts/create_dataset.py`). The default parameters here should reproduce
the published bandit and frozenlake results. For Taxi, pass in
solve_for_state_action_ratio=False.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import os
import tensorflow.compat.v2 as tf
tf.compat.v1.enable_v2_behavior()
import tensorflow_probability as tfp
import pickle
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
from dice_rl.environments.env_policies import get_target_policy
import dice_rl.environments.gridworld.navigation as navigation
import dice_rl.environments.gridworld.taxi as taxi
from dice_rl.estimators import estimator as estimator_lib
from dice_rl.estimators.tabular_bayes_dice import TabularBayesDice
import dice_rl.utils.common as common_utils
from dice_rl.data.dataset import Dataset, EnvStep, StepType
from dice_rl.data.tf_offpolicy_dataset import TFOffpolicyDataset
FLAGS = flags.FLAGS
flags.DEFINE_string('env_name', 'frozenlake', 'Environment name.')
flags.DEFINE_integer('seed', 0, 'Initial random seed.')
flags.DEFINE_integer('num_trajectory', 5, 'Number of trajectories to collect.')
flags.DEFINE_float('alpha', 0.0,
'How close is the behavior policy to optimal policy.')
flags.DEFINE_integer('max_trajectory_length', 100,
'Cutoff trajectory at this step.')
flags.DEFINE_bool('tabular_obs', True, 'Whether to use tabular observations.')
flags.DEFINE_string('load_dir', None, 'Directory to load dataset from.')
flags.DEFINE_string('save_dir', None, 'Directory to save estimation results.')
flags.DEFINE_float('gamma', 0.99, 'Discount factor.')
flags.DEFINE_integer('num_steps', 50000, 'Number of training steps.')
flags.DEFINE_integer('batch_size', 1024, 'Batch size.')
flags.DEFINE_float('zeta_learning_rate', 1e-2, 'Zeta learning rate.')
flags.DEFINE_float('nu_learning_rate', 1e-2, 'Value learning rate.')
flags.DEFINE_bool('solve_for_state_action_ratio', True,
                  'Whether to solve for the state-action density ratio (pass False for Taxi).')
flags.DEFINE_float('alpha_target', 1.0,
'How close is the target policy to optimal policy.')
flags.DEFINE_float('kl_regularizer', 1., 'LP regularizer of kl(q||p).')
flags.DEFINE_float('eps_std', 1., 'Epsilon std for reparametrization.')
def main(argv):
env_name = FLAGS.env_name
seed = FLAGS.seed
tabular_obs = FLAGS.tabular_obs
num_trajectory = FLAGS.num_trajectory
max_trajectory_length = FLAGS.max_trajectory_length
load_dir = FLAGS.load_dir
save_dir = FLAGS.save_dir
gamma = FLAGS.gamma
assert 0 <= gamma < 1.
alpha = FLAGS.alpha
alpha_target = FLAGS.alpha_target
num_steps = FLAGS.num_steps
batch_size = FLAGS.batch_size
zeta_learning_rate = FLAGS.zeta_learning_rate
nu_learning_rate = FLAGS.nu_learning_rate
solve_for_state_action_ratio = FLAGS.solve_for_state_action_ratio
eps_std = FLAGS.eps_std
kl_regularizer = FLAGS.kl_regularizer
target_policy = get_target_policy(
load_dir, env_name, tabular_obs, alpha=alpha_target)
hparam_str = ('{ENV_NAME}_tabular{TAB}_alpha{ALPHA}_seed{SEED}_'
'numtraj{NUM_TRAJ}_maxtraj{MAX_TRAJ}').format(
ENV_NAME=env_name,
TAB=tabular_obs,
ALPHA=alpha,
SEED=seed,
NUM_TRAJ=num_trajectory,
MAX_TRAJ=max_trajectory_length)
directory = os.path.join(load_dir, hparam_str)
print('Loading dataset.')
dataset = Dataset.load(directory)
print('num loaded steps', dataset.num_steps)
print('num loaded total steps', dataset.num_total_steps)
print('num loaded episodes', dataset.num_episodes)
print('num loaded total episodes', dataset.num_total_episodes)
print('behavior per-step',
estimator_lib.get_fullbatch_average(dataset, gamma=gamma))
train_hparam_str = ('eps{EPS}_kl{KL}').format(EPS=eps_std, KL=kl_regularizer)
if save_dir is not None:
# Save for a specific alpha target
target_hparam_str = hparam_str.replace(
'alpha{}'.format(alpha), 'alpha{}_alphat{}'.format(alpha, alpha_target))
save_dir = os.path.join(save_dir, target_hparam_str, train_hparam_str)
summary_writer = tf.summary.create_file_writer(logdir=save_dir)
else:
summary_writer = tf.summary.create_noop_writer()
estimator = TabularBayesDice(
dataset_spec=dataset.spec,
gamma=gamma,
solve_for_state_action_ratio=solve_for_state_action_ratio,
zeta_learning_rate=zeta_learning_rate,
nu_learning_rate=nu_learning_rate,
kl_regularizer=kl_regularizer,
eps_std=eps_std,
)
estimator.prepare_dataset(dataset, target_policy)
global_step = tf.Variable(0, dtype=tf.int64)
tf.summary.experimental.set_step(global_step)
with summary_writer.as_default():
running_losses = []
running_estimates = []
for step in range(num_steps):
loss = estimator.train_step()[0]
running_losses.append(loss)
global_step.assign_add(1)
if step % 500 == 0 or step == num_steps - 1:
print('step', step, 'losses', np.mean(running_losses, 0))
estimate = estimator.estimate_average_reward(dataset, target_policy)
tf.debugging.check_numerics(estimate, 'NaN in estimate')
running_estimates.append(estimate)
tf.print('est', tf.math.reduce_mean(estimate),
tf.math.reduce_std(estimate))
running_losses = []
if save_dir is not None:
with tf.io.gfile.GFile(os.path.join(save_dir, 'results.npy'), 'w') as f:
np.save(f, running_estimates)
print('saved results to %s' % save_dir)
print('Done!')
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
]
| |
df2805ded0f8ca965205b075e6a84753cff47e12 | b2fb3c44c67eb61c41465996c24c094071c457cc | /LeetCode/print_words_vertically.py | 3027e23f1272d85cbab87a94fe941c5d21586733 | []
| no_license | zelzhan/Challenges-and-contests | 8edd3a2f07a0538903dc885c86e15f02783821c5 | e7df9b37ad1130d37f3efbf0114d06b6f3b4a4f1 | refs/heads/master | 2022-12-28T23:16:30.807040 | 2020-10-13T10:09:22 | 2020-10-13T10:09:22 | 118,697,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | from collections import defaultdict
class Solution:
def printVertically(self, s: str) -> List[str]:
s = s.split(" ")
hashtable = defaultdict(int)
for i, string in enumerate(s):
hashtable[i] = len(string)
max_length = max(hashtable.values())
j = 0
res = []
def pop_zeroes(cont):
i = -1
while cont[i] == " ":
cont.pop()
while j != max_length:
word = []
for i, string in enumerate(s):
if hashtable[i] == 0:
word.append(" ")
else:
hashtable[i] -=1
word.append(string[j])
j+=1
pop_zeroes(word)
res.append("".join(word))
return res
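# Quick illustrative check (LeetCode 1324 sample, not part of the original submission):
#   Solution().printVertically("HOW ARE YOU")  # -> ["HAY", "ORO", "WEU"]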
| [
"[email protected]"
]
| |
dfabe356284d91b7abe48701e4cb31e026728bd1 | e8d719fe45dfbff9cbbc4ed872832cec6cabaca6 | /307_Range_Sum_Query_Mutable_TLE.py | 09a96706fa016fe861dd7404e808a7fa4a7d89a3 | []
| no_license | nlfox/leetcode | 64f4f48d7f4be6df0542e51cc7037df40bf184a3 | d61363f99de3d591ebc8cd94f62544a31a026d55 | refs/heads/master | 2020-12-21T01:43:01.792899 | 2016-11-14T23:10:12 | 2016-11-14T23:10:12 | 56,680,839 | 2 | 0 | null | 2016-05-17T17:16:37 | 2016-04-20T11:19:58 | Python | UTF-8 | Python | false | false | 1,088 | py | class NumArray(object):
def __init__(self, nums):
"""
initialize your data structure here.
:type nums: List[int]
"""
self.nums = nums
self.len = len(nums)
self.d = []
last = 0
for i in nums:
self.d.append(last)
last += i
self.d.append(last)
def update(self, i, val):
"""
:type i: int
:type val: int
:rtype: int
"""
self.nums[i] = val
last = self.d[i]
for j in xrange(i+1, self.len + 1):
last += self.nums[j - 1]
self.d[j] = last
def sumRange(self, i, j):
"""
sum of elements nums[i..j], inclusive.
:type i: int
:type j: int
:rtype: int
"""
return self.d[j + 1] - self.d[i]
# Your NumArray object will be instantiated and called as such:
numArray = NumArray([9, -8])
print numArray.update(0, 3)
print numArray.sumRange(1, 1)
print numArray.sumRange(0, 1)
print numArray.update(1, -3)
print numArray.sumRange(0, 1)
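# Expected console output of the checks above, worked out by hand:
#   None, -8, -5, None, 0   (update() returns None; the sums come from the prefix array)
# Rebuilding the prefix array inside update() is O(n), which is why this version
# exceeds the time limit (TLE) on large inputs; a Fenwick/segment tree would avoid that.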
| [
"[email protected]"
]
| |
36052de6dd45ad86930ea87779a2ffd46de82b96 | 28a78bf095125a1202842225c2d7512079017a02 | /argorithm/2884_re.py | 35e05cb9fef74bc98182ae9affe95a92feff041a | []
| no_license | sunyeongchoi/sydsyd_challenge | d322451a82c63b05097d44ee3b9fc4492645e204 | 93dd250e96b91f50215a61a3b5913a523f074445 | refs/heads/master | 2023-05-25T00:57:58.651494 | 2023-05-18T01:43:00 | 2023-05-18T01:43:00 | 293,253,085 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | H, M = map(int, input().split())
if M >= 45:
print(H, M-45)
else:
if H == 0:
H = 24
print(H-1, (60+M)-45) | [
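# Example from the problem statement (Baekjoon 2884): input "10 10" -> output "9 25",
# i.e. the alarm is simply moved 45 minutes earlier.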
"[email protected]"
]
| |
e6d2396b1679238553cf86553f1d2cbe848c4b65 | b8c4ef9ccab22717ab97ab2fb100614d962a5820 | /src/test/python/com/skalicky/python/interviewpuzzles/test_find_all_concatenated_words_in_dictionary.py | 31250fe2c29509af0bab7db00e4be68e00a269b3 | []
| no_license | Sandeep8447/interview_puzzles | 1d6c8e05f106c8d5c4c412a9f304cb118fcc90f4 | a3c1158fe70ed239f8548ace8d1443a431b644c8 | refs/heads/master | 2023-09-02T21:39:32.747747 | 2021-10-30T11:56:57 | 2021-10-30T11:56:57 | 422,867,683 | 0 | 0 | null | 2021-10-30T11:56:58 | 2021-10-30T11:55:17 | null | UTF-8 | Python | false | false | 1,397 | py | from unittest import TestCase
from src.main.python.com.skalicky.python.interviewpuzzles.find_all_concatenated_words_in_dictionary import Solution
class TestSolution(TestCase):
def test_find_all_concatenated_words_in_dictionary__when_input_contains_words_of_same_length__then_output_is_empty(
self):
self.assertSetEqual(set(), Solution.find_all_concatenated_words_in_dictionary({'cat', 'dog', 'eat'}))
def test_find_all_concatenated_words_in_dictionary__when_input_contains_multiple_concatenated_words_of_2_other_words__then_these_words_are_in_output(
self):
self.assertSetEqual({'techlead', 'catsdog'}, Solution.find_all_concatenated_words_in_dictionary(
{'tech', 'lead', 'techlead', 'cat', 'cats', 'dog', 'catsdog'}))
def test_find_all_concatenated_words_in_dictionary__when_input_contains_concatenated_word_of_3_other_words__then_this_word_is_in_output(
self):
self.assertSetEqual({'catsdog'}, Solution.find_all_concatenated_words_in_dictionary(
{'cat', 's', 'dog', 'catsdog'}))
def test_find_all_concatenated_words_in_dictionary__when_input_contains_word_concatenated_by_multiple_ways__then_this_word_is_in_output(
self):
self.assertSetEqual({'cats', 'catsdog'}, Solution.find_all_concatenated_words_in_dictionary(
{'cat', 'cats', 's', 'dog', 'catsdog'}))
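# These cases can be run with the standard library runner from the repository root, e.g.:
#   python -m unittest discover -s src/test -p "test_*.py"
# (invocation inferred from the directory layout; adjust to the project's tooling)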
| [
"[email protected]"
]
|