max_stars_repo_path (stringlengths 4–286) | max_stars_repo_name (stringlengths 5–119) | max_stars_count (int64 0–191k) | id (stringlengths 1–7) | content (stringlengths 6–1.03M) | content_cleaned (stringlengths 6–1.03M) | language (stringclasses, 111 values) | language_score (float64 0.03–1) | comments (stringlengths 0–556k) | edu_score (float64 0.32–5.03) | edu_int_score (int64 0–5) |
---|---|---|---|---|---|---|---|---|---|---|
parliament_proposal_fetcher.py | Track-your-parliament/track-your-parliament-data | 0 | 7900 | import urllib.request, json
import pandas as pd
baseUrl = 'https://avoindata.eduskunta.fi/api/v1/tables/VaskiData'
parameters = 'rows?columnName=Eduskuntatunnus&columnValue=LA%25&perPage=100'
page = 0
df = ''
while True:
print(f'Fetching page number {page}')
with urllib.request.urlopen(f'{baseUrl}/{parameters}&page={page}') as url:
data = json.loads(url.read().decode())
if page == 0:
columns = data['columnNames']
df = pd.DataFrame(columns=columns)
dataRows = data['rowData']
df = pd.concat([df, pd.DataFrame(dataRows, columns=data['columnNames'])], ignore_index=True)
if data['hasMore'] == False:
break
page = page + 1
df.to_csv('./data/parliament_proposals_raw.csv', sep=';', encoding='utf-8') | import urllib.request, json
import pandas as pd
baseUrl = 'https://avoindata.eduskunta.fi/api/v1/tables/VaskiData'
parameters = 'rows?columnName=Eduskuntatunnus&columnValue=LA%25&perPage=100'
page = 0
df = ''
while True:
print(f'Fetching page number {page}')
with urllib.request.urlopen(f'{baseUrl}/{parameters}&page={page}') as url:
data = json.loads(url.read().decode())
if page == 0:
columns = data['columnNames']
df = pd.DataFrame(columns=columns)
dataRows = data['rowData']
df = pd.concat([df, pd.DataFrame(dataRows, columns=data['columnNames'])], ignore_index=True)
if data['hasMore'] == False:
break
page = page + 1
df.to_csv('./data/parliament_proposals_raw.csv', sep=';', encoding='utf-8') | none | 1 | 3.306531 | 3 |
|
examples/Catboost_regression-scorer_usage.py | emaldonadocruz/UTuning | 0 | 7901 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 16:15:37 2021
@author: em42363
"""
# In[1]: Import functions
'''
CatBoost is a high-performance open source library for gradient boosting
on decision trees
'''
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
import pandas as pd
import seaborn as sns
import numpy as np
import os
os.chdir(os.path.dirname(__file__))
import sys
sys.path.insert(0, r'C:\Users\eduar\OneDrive\PhD\UTuning')
sys.path.insert(0, r'C:\Users\em42363\OneDrive\PhD\UTuning')
from UTuning import scorer, plots
#df = pd.read_csv(r'C:\Users\eduar\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
df = pd.read_csv(r'C:\Users\em42363\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
import random
import matplotlib.pyplot as plt
# In[1]: Split train test
'''
Perform split train test
'''
y = df['Production'].values
X = df[['Por', 'LogPerm', 'Brittle', 'TOC']].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
# In[6]: Regressor
'''
Define the regressor, fit the model and predict the estimates
'''
model = CatBoostRegressor(iterations=1000, learning_rate=0.2, loss_function='RMSEWithUncertainty',
verbose=False, random_seed=0)
model.fit(X_train, y_train)
estimates = model.predict(X_test)
# In[9]: Plot error line
'''
Use UTuning to plot error lines
'''
plots.error_line(estimates[:, 0], y_test, np.sqrt(estimates[:, 1]), Frac=1)
# %% Define the virtual ensemble
def virt_ensemble(X_train,y_train, num_samples=100, iters=1000, lr=0.1): # 100, .1
ens_preds = []
model = CatBoostRegressor(iterations=iters, learning_rate=lr, loss_function='RMSEWithUncertainty',
verbose=False, random_seed=1)
model.fit(X_train,y_train)
ens_preds = model.virtual_ensembles_predict(X_test, prediction_type='VirtEnsembles',
virtual_ensembles_count=num_samples,
thread_count=8)
return np.asarray(ens_preds)
# %%
n_quantiles = 11
perc = np.linspace(0.0, 1.00, n_quantiles)
Samples = 10
ens_preds=virt_ensemble(X_train,y_train, num_samples=Samples)
Pred_array = ens_preds[:,:,0]
Knowledge_u=np.sqrt(np.var(Pred_array,axis=1)) #Knowledge uncertainty
Data_u=np.sqrt(np.mean(ens_preds[:,:,1],axis=1)) #Data uncertainty
Sigma=Knowledge_u+Data_u
# %%
'''
We use UTuning to return the Indicator Function and plot the
accuracy plot and diagnose our model.
'''
scorer = scorer.scorer(Pred_array, y_test, Sigma)
IF_array = scorer.IndicatorFunction()
avgIF = np.mean(IF_array,axis=0)
# % Second plot test
plots.error_accuracy_plot(perc,IF_array,Pred_array,y_test,Sigma)
# %
print('Accuracy = {0:2.2f}'.format(scorer.Accuracy()))
print('Precision = {0:2.2f}'.format(scorer.Precision()))
print('Goodness = {0:2.2f}'.format(scorer.Goodness()))
| # -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 16:15:37 2021
@author: em42363
"""
# In[1]: Import functions
'''
CatBoost is a high-performance open source library for gradient boosting
on decision trees
'''
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
import pandas as pd
import seaborn as sns
import numpy as np
import os
os.chdir(os.path.dirname(__file__))
import sys
sys.path.insert(0, r'C:\Users\eduar\OneDrive\PhD\UTuning')
sys.path.insert(0, r'C:\Users\em42363\OneDrive\PhD\UTuning')
from UTuning import scorer, plots
#df = pd.read_csv(r'C:\Users\eduar\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
df = pd.read_csv(r'C:\Users\em42363\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
import random
import matplotlib.pyplot as plt
# In[1]: Split train test
'''
Perform split train test
'''
y = df['Production'].values
X = df[['Por', 'LogPerm', 'Brittle', 'TOC']].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
# In[6]: Regressor
'''
Define the regressor, fit the model and predict the estimates
'''
model = CatBoostRegressor(iterations=1000, learning_rate=0.2, loss_function='RMSEWithUncertainty',
verbose=False, random_seed=0)
model.fit(X_train, y_train)
estimates = model.predict(X_test)
# In[9]: Plot error line
'''
Use UTuning to plot error lines
'''
plots.error_line(estimates[:, 0], y_test, np.sqrt(estimates[:, 1]), Frac=1)
# %% Define the virtual ensemble
def virt_ensemble(X_train,y_train, num_samples=100, iters=1000, lr=0.1): # 100, .1
ens_preds = []
model = CatBoostRegressor(iterations=iters, learning_rate=lr, loss_function='RMSEWithUncertainty',
verbose=False, random_seed=1)
model.fit(X_train,y_train)
ens_preds = model.virtual_ensembles_predict(X_test, prediction_type='VirtEnsembles',
virtual_ensembles_count=num_samples,
thread_count=8)
return np.asarray(ens_preds)
# %%
n_quantiles = 11
perc = np.linspace(0.0, 1.00, n_quantiles)
Samples = 10
ens_preds=virt_ensemble(X_train,y_train, num_samples=Samples)
Pred_array = ens_preds[:,:,0]
Knowledge_u=np.sqrt(np.var(Pred_array,axis=1)) #Knowledge uncertainty
Data_u=np.sqrt(np.mean(ens_preds[:,:,1],axis=1)) #Data uncertainty
Sigma=Knowledge_u+Data_u
# %%
'''
We use UTuning to return the Indicator Function and plot the
accuracy plot and diagnose our model.
'''
scorer = scorer.scorer(Pred_array, y_test, Sigma)
IF_array = scorer.IndicatorFunction()
avgIF = np.mean(IF_array,axis=0)
# % Second plot test
plots.error_accuracy_plot(perc,IF_array,Pred_array,y_test,Sigma)
# %
print('Accuracy = {0:2.2f}'.format(scorer.Accuracy()))
print('Precision = {0:2.2f}'.format(scorer.Precision()))
print('Goodness = {0:2.2f}'.format(scorer.Goodness()))
| en | 0.642221 | # -*- coding: utf-8 -*- Created on Mon Sep 20 16:15:37 2021 @author: em42363 # In[1]: Import functions CatBoost is a high-performance open source library for gradient boosting on decision trees #df = pd.read_csv(r'C:\Users\eduar\OneDrive\PhD\UTuning\dataset\unconv_MV.csv') # In[1]: Split train test Perform split train test # In[6]: Regressor Define the regressor, fit the model and predict the estimates # In[9]: Plot error line Use UTuning to plot error lines # %% Define the virtual ensemble # 100, .1 # %% #Knowledge uncertainty #Data uncertainty # %% We use UTuning to return the Indicator Function and plot the accuracy plot and diagnose our model. # % Second plot test # % | 2.709847 | 3 |
sujson/_logger.py | PotasnikM/translator-to-suJSON | 2 | 7902 | import logging
from platform import system
from tqdm import tqdm
from multiprocessing import Lock
loggers = {}
# https://stackoverflow.com/questions/38543506/
class TqdmLoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super(TqdmLoggingHandler, self).__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.set_lock(Lock())
tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def setup_custom_logger(name):
"""
Create a logger with a certain name and level
"""
global loggers
if loggers.get(name):
return loggers.get(name)
formatter = logging.Formatter(
fmt='%(levelname)s: %(message)s'
)
handler = TqdmLoggingHandler()
handler.setFormatter(formatter)
if system() not in ['Windows', 'cli']:
logging.addLevelName(logging.ERROR, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.WARNING, "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.INFO, "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName(logging.DEBUG, "\033[1;35m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))
logger = logging.getLogger(name)
logger.setLevel(logging.WARNING)
# if (logger.hasHandlers()):
# logger.handlers.clear()
if logger.handlers:
logger.handlers = []
logger.addHandler(handler)
loggers[name] = logger
return logger
| import logging
from platform import system
from tqdm import tqdm
from multiprocessing import Lock
loggers = {}
# https://stackoverflow.com/questions/38543506/
class TqdmLoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super(TqdmLoggingHandler, self).__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.set_lock(Lock())
tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def setup_custom_logger(name):
"""
Create a logger with a certain name and level
"""
global loggers
if loggers.get(name):
return loggers.get(name)
formatter = logging.Formatter(
fmt='%(levelname)s: %(message)s'
)
handler = TqdmLoggingHandler()
handler.setFormatter(formatter)
if system() not in ['Windows', 'cli']:
logging.addLevelName(logging.ERROR, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.WARNING, "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.INFO, "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
logging.addLevelName(logging.DEBUG, "\033[1;35m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))
logger = logging.getLogger(name)
logger.setLevel(logging.WARNING)
# if (logger.hasHandlers()):
# logger.handlers.clear()
if logger.handlers:
logger.handlers = []
logger.addHandler(handler)
loggers[name] = logger
return logger
| en | 0.479285 | # https://stackoverflow.com/questions/38543506/ Create a logger with a certain name and level # if (logger.hasHandlers()): # logger.handlers.clear() | 2.602909 | 3 |
face-detect.py | Gicehajunior/face-recognition-detection-OpenCv-Python | 0 | 7903 | import cv2
import sys
import playsound
face_cascade = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')
# capture video using cv2
video_capture = cv2.VideoCapture(0)
while True:
# capture frame by frame, i.e, one by one
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# for each face on the projected on the frame
faces = face_cascade.detectMultiScale(
gray,
scaleFactor = 1.1,
minNeighbors = 5,
# minSize(35, 35)
)
# loop through the video faces for detection
for (x, y, w, h) in faces:
point1 = x+w
point2 = y+h
frame_color = (50, 50, 200)
rectangleBox = cv2.rectangle(frame, (x, y), (point1, point2), frame_color, 2)
cv2.imshow('video', frame)
if len(faces) > 0:
playsound.playsound('openDoorAlert.mp3', True)
if len(faces) > 1:
print("There are " + str(len(faces)) + " peoples at the gate")
else:
print("There is " + str(len(faces)) + " person at the gate")
else:
pass
if cv2.waitKey(1) & 0xFF == ord('q'):
sys.exit()
| import cv2
import sys
import playsound
face_cascade = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')
# capture video using cv2
video_capture = cv2.VideoCapture(0)
while True:
# capture frame by frame, i.e, one by one
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# for each face on the projected on the frame
faces = face_cascade.detectMultiScale(
gray,
scaleFactor = 1.1,
minNeighbors = 5,
# minSize(35, 35)
)
# loop through the video faces for detection
for (x, y, w, h) in faces:
point1 = x+w
point2 = y+h
frame_color = (50, 50, 200)
rectangleBox = cv2.rectangle(frame, (x, y), (point1, point2), frame_color, 2)
cv2.imshow('video', frame)
if len(faces) > 0:
playsound.playsound('openDoorAlert.mp3', True)
if len(faces) > 1:
print("There are " + str(len(faces)) + " peoples at the gate")
else:
print("There is " + str(len(faces)) + " person at the gate")
else:
pass
if cv2.waitKey(1) & 0xFF == ord('q'):
sys.exit()
| en | 0.860185 | # capture video using cv2 # capture frame by frame, i.e, one by one # for each face on the projected on the frame # minSize(35, 35) # loop through the video faces for detection | 3.127979 | 3 |
sis/enrollments.py | ryanlovett/sis-cli | 0 | 7904 | # vim:set et sw=4 ts=4:
import logging
import sys
import jmespath
from . import sis, classes
# logging
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logger = logging.getLogger(__name__)
# SIS endpoint
enrollments_uri = "https://apis.berkeley.edu/sis/v2/enrollments"
# apparently some courses have LAB without LEC (?)
section_codes = ['LEC', 'SES', 'WBL']
async def get_student_enrollments(app_id, app_key, identifier, term_id,
id_type='campus-uid', enrolled_only='true', primary_only='true',
course_attr='course-id'):
'''Gets a student's enrollments.'''
uri = enrollments_uri + f"/students/{identifier}"
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
"page-number": 1,
"page-size": 100, # maximum
"id-type": id_type,
"term-id": term_id,
"enrolled-only": enrolled_only,
"primary-only": primary_only,
}
enrollments = await sis.get_items(uri, params, headers, 'studentEnrollments')
logger.debug(f"enrollments: {enrollments}")
if course_attr == 'course-id':
flt = '[].classSection.class.course.identifiers[?type == `cs-course-id`].id[]'
elif course_attr == 'display-name':
flt = '[].classSection.class.course.displayName'
return jmespath.search(flt, enrollments)
async def get_section_enrollments(app_id, app_key, term_id, section_id):
'''Gets a course section's enrollments.'''
uri = enrollments_uri + f"/terms/{term_id}/classes/sections/{section_id}"
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
"page-number": 1,
"page-size": 100, # maximum
}
enrollments = await sis.get_items(uri, params, headers, 'classSectionEnrollments')
logger.info(f"{section_id}: {len(enrollments)}")
return enrollments
def section_id(section):
'''Return a section's course ID, e.g. "15807".'''
return section['id']
def section_subject_area(section):
'''Return a section's subject area, e.g. "STAT".'''
return jmespath.search('class.course.subjectArea.code', section)
def section_catalog_number(section):
'''Return a section's formatted catalog number, e.g. "215B".'''
return jmespath.search('class.course.catalogNumber.formatted', section)
def section_display_name(section):
'''Return a section's displayName, e.g. "STAT 215B".'''
return jmespath.search('class.course.displayName', section)
def section_is_primary(section):
'''Return a section's primary status.'''
return jmespath.search('association.primary', section)
def enrollment_campus_uid(enrollment):
'''Return an enrollment's campus UID.'''
expr = "student.identifiers[?disclose && type=='campus-uid'].id | [0]"
return jmespath.search(expr, enrollment)
def enrollment_campus_email(enrollment):
'''Return an enrollment's campus email if found, otherwise
return any other email.'''
expr = "student.emails[?type.code=='CAMP'].emailAddress | [0]"
email = jmespath.search(expr, enrollment)
if email: return email
expr = "student.emails[?type.code=='OTHR'].emailAddress | [0]"
return jmespath.search(expr, enrollment)
def get_enrollment_uids(enrollments):
'''Given an SIS enrollment, return the student's campus UID.'''
return list(map(lambda x: enrollment_campus_uid(x), enrollments))
def get_enrollment_emails(enrollments):
'''Given an SIS enrollment, return the student's campus email.'''
return list(map(lambda x: enrollment_campus_email(x), enrollments))
def enrollment_status(enrollment):
'''Return an enrollment's status, e.g. 'E', 'W', or 'D'.'''
return jmespath.search('enrollmentStatus.status.code', enrollment)
def filter_enrollment_status(enrollments, status):
return list(filter(lambda x: enrollment_status(x) == status, enrollments))
def status_code(constituents):
return {'enrolled':'E', 'waitlisted':'W', 'dropped':'D'}[constituents]
async def get_students(term_id, class_number, constituents, credentials, exact, identifier='campus-uid'):
'''Given a term and class section number, return the student ids.'''
if exact:
# get all enrollments for this section
enrollments = await get_section_enrollments(
credentials['enrollments_id'], credentials['enrollments_key'],
term_id, class_number
)
else:
# get the data for the specified section
section = await classes.get_sections_by_id(
credentials['classes_id'], credentials['classes_key'],
term_id, class_number, include_secondary='true'
)
# extract the subject area and catalog number, e.g. STAT C8
subject_area = section_subject_area(section)
catalog_number = section_catalog_number(section)
logger.info(f"{subject_area} {catalog_number}")
# get enrollments in all matching sections
enrollments = await get_enrollments(
credentials['enrollments_id'], credentials['enrollments_key'],
term_id, subject_area, catalog_number
)
if constituents == 'students':
constituent_enrollments = enrollments
else:
# filter for those enrollments with a specific status code
constituent_enrollments = filter_enrollment_status(
enrollments, status_code(constituents))
# function to extract an enrollment attribute
if identifier == 'campus-uid':
enrollment_attr_fn = enrollment_campus_uid
else:
enrollment_attr_fn = enrollment_campus_email
logger.debug(f"constituent_enrollments: {constituent_enrollments}")
# we convert to a set to collapse overlapping enrollments between
# lectures and labs (if not exact)
return set(map(lambda x: enrollment_attr_fn(x), constituent_enrollments))
def filter_lectures(sections, relevant_codes=section_codes):
'''
Given a list of SIS sections:
[{'code': '32227', 'description': '2019 Spring ASTRON 128 001 LAB 001'}]
return only the section codes which are lectures.
'''
codes = []
for section in sections:
if 'description' not in section: continue
desc_words = set(section['description'].split())
if len(set(desc_words) & set(relevant_codes)) > 0:
codes.append(section['code'])
return codes
async def get_lecture_section_ids(app_id, app_key, term_id, subject_area, catalog_number=None):
'''
Given a term, subject, and course number, return the lecture section ids.
We only care about the lecture enrollments since they contain a superset
of the enrollments of all other section types (lab, dis).
'''
uri = enrollments_uri + f'/terms/{term_id}/classes/sections/descriptors'
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
'page-number': 1,
"subject-area-code": subject_area
}
if catalog_number:
params["catalog-number"] = catalog_number
# Retrieve the sections associated with the course which includes
# both lecture and sections.
sections = await sis.get_items(uri, params, headers, 'fieldValues')
return filter_lectures(sections)
async def get_enrollments(app_id, app_key, term_id, subject_area, catalog_number):
'''Gets a course's enrollments from the SIS.'''
logger.info(f"get_enrollments: {subject_area} {catalog_number}")
# get the lectures
lecture_codes = await get_lecture_section_ids(app_id, app_key, term_id,
subject_area, catalog_number)
# get the enrollments in each lecture
enrollments = []
for section_id in lecture_codes:
enrollments += await get_section_enrollments(app_id, app_key, term_id, section_id)
logger.info(f'enrollments: {len(enrollments)}')
return enrollments
| # vim:set et sw=4 ts=4:
import logging
import sys
import jmespath
from . import sis, classes
# logging
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logger = logging.getLogger(__name__)
# SIS endpoint
enrollments_uri = "https://apis.berkeley.edu/sis/v2/enrollments"
# apparently some courses have LAB without LEC (?)
section_codes = ['LEC', 'SES', 'WBL']
async def get_student_enrollments(app_id, app_key, identifier, term_id,
id_type='campus-uid', enrolled_only='true', primary_only='true',
course_attr='course-id'):
'''Gets a student's enrollments.'''
uri = enrollments_uri + f"/students/{identifier}"
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
"page-number": 1,
"page-size": 100, # maximum
"id-type": id_type,
"term-id": term_id,
"enrolled-only": enrolled_only,
"primary-only": primary_only,
}
enrollments = await sis.get_items(uri, params, headers, 'studentEnrollments')
logger.debug(f"enrollments: {enrollments}")
if course_attr == 'course-id':
flt = '[].classSection.class.course.identifiers[?type == `cs-course-id`].id[]'
elif course_attr == 'display-name':
flt = '[].classSection.class.course.displayName'
return jmespath.search(flt, enrollments)
async def get_section_enrollments(app_id, app_key, term_id, section_id):
'''Gets a course section's enrollments.'''
uri = enrollments_uri + f"/terms/{term_id}/classes/sections/{section_id}"
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
"page-number": 1,
"page-size": 100, # maximum
}
enrollments = await sis.get_items(uri, params, headers, 'classSectionEnrollments')
logger.info(f"{section_id}: {len(enrollments)}")
return enrollments
def section_id(section):
'''Return a section's course ID, e.g. "15807".'''
return section['id']
def section_subject_area(section):
'''Return a section's subject area, e.g. "STAT".'''
return jmespath.search('class.course.subjectArea.code', section)
def section_catalog_number(section):
'''Return a section's formatted catalog number, e.g. "215B".'''
return jmespath.search('class.course.catalogNumber.formatted', section)
def section_display_name(section):
'''Return a section's displayName, e.g. "STAT 215B".'''
return jmespath.search('class.course.displayName', section)
def section_is_primary(section):
'''Return a section's primary status.'''
return jmespath.search('association.primary', section)
def enrollment_campus_uid(enrollment):
'''Return an enrollment's campus UID.'''
expr = "student.identifiers[?disclose && type=='campus-uid'].id | [0]"
return jmespath.search(expr, enrollment)
def enrollment_campus_email(enrollment):
'''Return an enrollment's campus email if found, otherwise
return any other email.'''
expr = "student.emails[?type.code=='CAMP'].emailAddress | [0]"
email = jmespath.search(expr, enrollment)
if email: return email
expr = "student.emails[?type.code=='OTHR'].emailAddress | [0]"
return jmespath.search(expr, enrollment)
def get_enrollment_uids(enrollments):
'''Given an SIS enrollment, return the student's campus UID.'''
return list(map(lambda x: enrollment_campus_uid(x), enrollments))
def get_enrollment_emails(enrollments):
'''Given an SIS enrollment, return the student's campus email.'''
return list(map(lambda x: enrollment_campus_email(x), enrollments))
def enrollment_status(enrollment):
'''Return an enrollment's status, e.g. 'E', 'W', or 'D'.'''
return jmespath.search('enrollmentStatus.status.code', enrollment)
def filter_enrollment_status(enrollments, status):
return list(filter(lambda x: enrollment_status(x) == status, enrollments))
def status_code(constituents):
return {'enrolled':'E', 'waitlisted':'W', 'dropped':'D'}[constituents]
async def get_students(term_id, class_number, constituents, credentials, exact, identifier='campus-uid'):
'''Given a term and class section number, return the student ids.'''
if exact:
# get all enrollments for this section
enrollments = await get_section_enrollments(
credentials['enrollments_id'], credentials['enrollments_key'],
term_id, class_number
)
else:
# get the data for the specified section
section = await classes.get_sections_by_id(
credentials['classes_id'], credentials['classes_key'],
term_id, class_number, include_secondary='true'
)
# extract the subject area and catalog number, e.g. STAT C8
subject_area = section_subject_area(section)
catalog_number = section_catalog_number(section)
logger.info(f"{subject_area} {catalog_number}")
# get enrollments in all matching sections
enrollments = await get_enrollments(
credentials['enrollments_id'], credentials['enrollments_key'],
term_id, subject_area, catalog_number
)
if constituents == 'students':
constituent_enrollments = enrollments
else:
# filter for those enrollments with a specific status code
constituent_enrollments = filter_enrollment_status(
enrollments, status_code(constituents))
# function to extract an enrollment attribute
if identifier == 'campus-uid':
enrollment_attr_fn = enrollment_campus_uid
else:
enrollment_attr_fn = enrollment_campus_email
logger.debug(f"constituent_enrollments: {constituent_enrollments}")
# we convert to a set to collapse overlapping enrollments between
# lectures and labs (if not exact)
return set(map(lambda x: enrollment_attr_fn(x), constituent_enrollments))
def filter_lectures(sections, relevant_codes=section_codes):
'''
Given a list of SIS sections:
[{'code': '32227', 'description': '2019 Spring ASTRON 128 001 LAB 001'}]
return only the section codes which are lectures.
'''
codes = []
for section in sections:
if 'description' not in section: continue
desc_words = set(section['description'].split())
if len(set(desc_words) & set(relevant_codes)) > 0:
codes.append(section['code'])
return codes
async def get_lecture_section_ids(app_id, app_key, term_id, subject_area, catalog_number=None):
'''
Given a term, subject, and course number, return the lecture section ids.
We only care about the lecture enrollments since they contain a superset
of the enrollments of all other section types (lab, dis).
'''
uri = enrollments_uri + f'/terms/{term_id}/classes/sections/descriptors'
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
'page-number': 1,
"subject-area-code": subject_area
}
if catalog_number:
params["catalog-number"] = catalog_number
# Retrieve the sections associated with the course which includes
# both lecture and sections.
sections = await sis.get_items(uri, params, headers, 'fieldValues')
return filter_lectures(sections)
async def get_enrollments(app_id, app_key, term_id, subject_area, catalog_number):
'''Gets a course's enrollments from the SIS.'''
logger.info(f"get_enrollments: {subject_area} {catalog_number}")
# get the lectures
lecture_codes = await get_lecture_section_ids(app_id, app_key, term_id,
subject_area, catalog_number)
# get the enrollments in each lecture
enrollments = []
for section_id in lecture_codes:
enrollments += await get_section_enrollments(app_id, app_key, term_id, section_id)
logger.info(f'enrollments: {len(enrollments)}')
return enrollments
| en | 0.800737 | # vim:set et sw=4 ts=4: # logging # SIS endpoint # apparently some courses have LAB without LEC (?) Gets a students enrollments. # maximum Gets a course section's enrollments. # maximum Return a section's course ID, e.g. "15807". Return a section's subject area, e.g. "STAT". Return a section's formatted catalog number, e.g. "215B". Return a section's displayName, e.g. "STAT 215B". Return a section's primary status. Return an enrollent's campus UID. Return an enrollment's campus email if found, otherwise return any other email. Given an SIS enrollment, return the student's campus UID. Given an SIS enrollment, return the student's campus email. Return an enrollment's status, e.g. 'E', 'W', or 'D'. Given a term and class section number, return the student ids. # get all enrollments for this section # get the data for the specified section # extract the subject area and catalog number, e.g. STAT C8 # get enrollments in all matching sections # filter for those enrollments with a specific status code # function to extract an enrollment attribute # we convert to a set to collapse overlapping enrollments between # lectures and labs (if not exact) Given a list of SIS sections: [{'code': '32227', 'description': '2019 Spring ASTRON 128 001 LAB 001'}] return only the section codes which are lectures. Given a term, subject, and course number, return the lecture section ids. We only care about the lecture enrollments since they contain a superset of the enrollments of all other section types (lab, dis). # Retrieve the sections associated with the course which includes # both lecture and sections. Gets a course's enrollments from the SIS. # get the lectures # get the enrollments in each lecture | 2.044141 | 2 |
app.py | Nishanth-Gobi/Da-Vinci-Code | 0 | 7905 | from flask import Flask, render_template, request, redirect, url_for
from os.path import join
from stego import Steganography
app = Flask(__name__)
UPLOAD_FOLDER = 'static/files/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
@app.route("/")
def home():
return render_template('home.html')
@app.route("/encrypt", methods=['GET', 'POST'])
def get_image():
if request.method == 'GET':
return render_template('encrypt.html')
# Check if the user has entered the secret message
if 'file' in request.files and 'Secret' in request.values:
uploaded_image = request.files['file']
message = request.values.get('Secret')
password = <PASSWORD>("key")
filepath = join(app.config['UPLOAD_FOLDER'], "cover_image.png")
uploaded_image.save(filepath)
im = Steganography(filepath=app.config['UPLOAD_FOLDER'], key=password)
im.encode(message=message)
return render_template('encrypt.html', value=filepath, image_flag=True, secret_flag=True)
return redirect(url_for('get_image'))
@app.route("/decrypt", methods=['GET', 'POST'])
def get_image_to_decrypt():
if request.method == 'GET':
return render_template('decrypt.html')
if 'key' in request.values:
password = request.values.get('key')
filepath = join(app.config['UPLOAD_FOLDER'], "stego_image.png")
im = Steganography(filepath=app.config['UPLOAD_FOLDER'], key=password)
message = im.decode()
return render_template('decrypt.html', value=filepath, message=message)
if 'file' in request.files:
uploaded_image = request.files['file']
filepath = join(app.config['UPLOAD_FOLDER'], "stego_image.png")
uploaded_image.save(filepath)
return render_template('decrypt.html', value=filepath)
if __name__ == '__main__':
app.run(debug=True)
| from flask import Flask, render_template, request, redirect, url_for
from os.path import join
from stego import Steganography
app = Flask(__name__)
UPLOAD_FOLDER = 'static/files/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
@app.route("/")
def home():
return render_template('home.html')
@app.route("/encrypt", methods=['GET', 'POST'])
def get_image():
if request.method == 'GET':
return render_template('encrypt.html')
# Check if the user has entered the secret message
if 'file' in request.files and 'Secret' in request.values:
uploaded_image = request.files['file']
message = request.values.get('Secret')
password = <PASSWORD>("key")
filepath = join(app.config['UPLOAD_FOLDER'], "cover_image.png")
uploaded_image.save(filepath)
im = Steganography(filepath=app.config['UPLOAD_FOLDER'], key=password)
im.encode(message=message)
return render_template('encrypt.html', value=filepath, image_flag=True, secret_flag=True)
return redirect(url_for('get_image'))
@app.route("/decrypt", methods=['GET', 'POST'])
def get_image_to_decrypt():
if request.method == 'GET':
return render_template('decrypt.html')
if 'key' in request.values:
password = request.values.get('key')
filepath = join(app.config['UPLOAD_FOLDER'], "stego_image.png")
im = Steganography(filepath=app.config['UPLOAD_FOLDER'], key=password)
message = im.decode()
return render_template('decrypt.html', value=filepath, message=message)
if 'file' in request.files:
uploaded_image = request.files['file']
filepath = join(app.config['UPLOAD_FOLDER'], "stego_image.png")
uploaded_image.save(filepath)
return render_template('decrypt.html', value=filepath)
if __name__ == '__main__':
app.run(debug=True)
| en | 0.762725 | # Check if the user has entered the secret message | 2.594015 | 3 |
imitation_learning/generate_demonstrations/gen_envs.py | HaiDangDang/2020-flatland | 1 | 7906 | from flatland.envs.agent_utils import RailAgentStatus
from flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters
from flatland.envs.observations import GlobalObsForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.utils.rendertools import RenderTool
import random
import sys
import os
import time
import msgpack
import json
from PIL import Image
import argparse as ap
def RandomTestParams(tid):
seed = tid * 19997 + 997
random.seed(seed)
width = 50 + random.randint(0, 100)
height = 50 + random.randint(0, 100)
nr_cities = 4 + random.randint(0, (width + height) // 10)
nr_trains = min(nr_cities * 20, 100 + random.randint(0, 100))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, 5)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
return (
seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration
)
def RandomTestParams_small(tid):
seed = tid * 19997 + 997
random.seed(seed)
nSize = random.randint(0,5)
width = 20 + nSize * 5
height = 20 + nSize * 5
nr_cities = 2 + nSize // 2 + random.randint(0,2)
nr_trains = min(nr_cities * 5, 5 + random.randint(0,5)) #, 10 + random.randint(0, 10))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, nSize)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
return (
seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration
)
def ShouldRunTest(tid):
return tid >= 7
#return tid >= 3
return True
def create_test_env(fnParams, nTest, sDir):
(seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration) = fnParams(nTest)
#if not ShouldRunTest(test_id):
# continue
rail_generator = sparse_rail_generator(
max_num_cities=nr_cities,
seed=seed,
grid_mode=False,
max_rails_between_cities=max_rails_between_cities,
max_rails_in_city=max_rails_in_cities,
)
#stochastic_data = {'malfunction_rate': malfunction_rate,
# 'min_duration': malfunction_min_duration,
# 'max_duration': malfunction_max_duration
# }
stochastic_data = MalfunctionParameters(malfunction_rate=malfunction_rate,
min_duration=malfunction_min_duration,
max_duration=malfunction_max_duration
)
observation_builder = GlobalObsForRailEnv()
DEFAULT_SPEED_RATIO_MAP = {
1.: 0.25,
1. / 2.: 0.25,
1. / 3.: 0.25,
1. / 4.: 0.25}
schedule_generator = sparse_schedule_generator(DEFAULT_SPEED_RATIO_MAP)
for iAttempt in range(5):
try:
env = RailEnv(
width=width,
height=height,
rail_generator=rail_generator,
schedule_generator=schedule_generator,
number_of_agents=nr_trains,
malfunction_generator_and_process_data=malfunction_from_params(stochastic_data),
obs_builder_object=observation_builder,
remove_agents_at_target=True
)
obs = env.reset(random_seed = seed)
break
except ValueError as oErr:
print("Error:", oErr)
width += 5
height += 5
print("Try again with larger env: (w,h):", width, height)
if not os.path.exists(sDir):
os.makedirs(sDir)
sfName = "{}/Level_{}.mpk".format(sDir, nTest)
if os.path.exists(sfName):
os.remove(sfName)
env.save(sfName)
sys.stdout.write(".")
sys.stdout.flush()
return env
#env = create_test_env(RandomTestParams_small, 0, "train-envs-small/Test_0")
def createEnvSet(nStart, nEnd, sDir, bSmall=True):
#print("Generate small envs in train-envs-small:")
print(f"Generate envs (small={bSmall}) in dir {sDir}:")
sDirImages = "train-envs-small/images/"
if not os.path.exists(sDirImages):
os.makedirs(sDirImages)
for test_id in range(nStart, nEnd, 1):
env = create_test_env(RandomTestParams_small, test_id, sDir)
oRender = RenderTool(env, gl="PILSVG")
#oRender.env = env
#oRender.set_new_rail()
oRender.render_env()
g2img = oRender.get_image()
imgPIL = Image.fromarray(g2img)
#imgPIL.show()
imgPIL.save(sDirImages + "Level_{}.png".format(test_id))
# print("Generate large envs in train-envs-1000:")
# for test_id in range(100):
# create_test_env(RandomTestParams, test_id, "train-envs-1000/Test_0")
def merge(sfEpisode, sfEnv, sfEnvOut, bJson=False):
if bJson:
with open(sfEpisode, "rb") as fEp:
oActions = json.load(fEp)
oEp = {"actions":oActions}
print("json oEp:", type(oEp), list(oEp.keys()))
else:
with open(sfEpisode, "rb") as fEp:
oEp = msgpack.load(fEp)
print("oEp:", type(oEp), list(oEp.keys()))
with open(sfEnv, "rb") as fEnv:
oEnv = msgpack.load(fEnv)
print("oEnv:", type(oEnv), list(oEnv.keys()))
# merge dicts
oEnv2 = {**oEp, **oEnv}
print("Merged keys:", list(oEnv2.keys()))
with open(sfEnvOut, "wb") as fEnv:
msgpack.dump(oEnv2, fEnv)
def printKeys1(sfEnv):
with open(sfEnv, "rb") as fEnv:
oEnv = msgpack.load(fEnv, encoding="utf-8")
print(sfEnv, "keys:", list(oEnv.keys()))
for sKey in oEnv.keys():
print("key", sKey, len(oEnv[sKey]))
if sKey == "shape":
print("shape: ", oEnv[sKey] )
def printKeys(sfEnvs):
try:
for sfEnv in sfEnvs:
printKeys1(sfEnv)
except:
# assume single env
printKeys1(sfEnvs)
def main2():
parser = ap.ArgumentParser(description='Generate envs, merge episodes into env files.')
parser.add_argument("-c", '--createEnvs', type=int, nargs=2, action="append",
metavar=("nStart", "nEnd"),
help='merge episode into env')
parser.add_argument("-d", "--outDir", type=str, nargs=1, default="./test-envs-tmp")
parser.add_argument("-m", '--merge', type=str, nargs=3, action="append",
metavar=("episode", "env", "output_env"),
help='merge episode into env')
parser.add_argument("-j", '--mergejson', type=str, nargs=3, action="append",
metavar=("json", "env", "output_env"),
help='merge json actions into env, with key actions')
parser.add_argument('-k', "--keys", type=str, action='append', nargs="+",
help='print the keys in a file')
args=parser.parse_args()
print(args)
if args.merge:
print("merge:", args.merge)
merge(*args.merge[0])
if args.mergejson:
print("merge json:", args.mergejson)
merge(*args.mergejson[0], bJson=True)
if args.keys:
print("keys:", args.keys)
printKeys(args.keys[0])
if args.outDir:
print("outDir", args.outDir)
if args.createEnvs:
print("create Envs - ", *args.createEnvs[0])
createEnvSet(*args.createEnvs[0], sDir=args.outDir)
if __name__=="__main__":
main2()
| from flatland.envs.agent_utils import RailAgentStatus
from flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters
from flatland.envs.observations import GlobalObsForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.utils.rendertools import RenderTool
import random
import sys
import os
import time
import msgpack
import json
from PIL import Image
import argparse as ap
def RandomTestParams(tid):
seed = tid * 19997 + 997
random.seed(seed)
width = 50 + random.randint(0, 100)
height = 50 + random.randint(0, 100)
nr_cities = 4 + random.randint(0, (width + height) // 10)
nr_trains = min(nr_cities * 20, 100 + random.randint(0, 100))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, 5)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
return (
seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration
)
def RandomTestParams_small(tid):
seed = tid * 19997 + 997
random.seed(seed)
nSize = random.randint(0,5)
width = 20 + nSize * 5
height = 20 + nSize * 5
nr_cities = 2 + nSize // 2 + random.randint(0,2)
nr_trains = min(nr_cities * 5, 5 + random.randint(0,5)) #, 10 + random.randint(0, 10))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, nSize)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
return (
seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration
)
def ShouldRunTest(tid):
return tid >= 7
#return tid >= 3
return True
def create_test_env(fnParams, nTest, sDir):
(seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration) = fnParams(nTest)
#if not ShouldRunTest(test_id):
# continue
rail_generator = sparse_rail_generator(
max_num_cities=nr_cities,
seed=seed,
grid_mode=False,
max_rails_between_cities=max_rails_between_cities,
max_rails_in_city=max_rails_in_cities,
)
#stochastic_data = {'malfunction_rate': malfunction_rate,
# 'min_duration': malfunction_min_duration,
# 'max_duration': malfunction_max_duration
# }
stochastic_data = MalfunctionParameters(malfunction_rate=malfunction_rate,
min_duration=malfunction_min_duration,
max_duration=malfunction_max_duration
)
observation_builder = GlobalObsForRailEnv()
DEFAULT_SPEED_RATIO_MAP = {
1.: 0.25,
1. / 2.: 0.25,
1. / 3.: 0.25,
1. / 4.: 0.25}
schedule_generator = sparse_schedule_generator(DEFAULT_SPEED_RATIO_MAP)
for iAttempt in range(5):
try:
env = RailEnv(
width=width,
height=height,
rail_generator=rail_generator,
schedule_generator=schedule_generator,
number_of_agents=nr_trains,
malfunction_generator_and_process_data=malfunction_from_params(stochastic_data),
obs_builder_object=observation_builder,
remove_agents_at_target=True
)
obs = env.reset(random_seed = seed)
break
except ValueError as oErr:
print("Error:", oErr)
width += 5
height += 5
print("Try again with larger env: (w,h):", width, height)
if not os.path.exists(sDir):
os.makedirs(sDir)
sfName = "{}/Level_{}.mpk".format(sDir, nTest)
if os.path.exists(sfName):
os.remove(sfName)
env.save(sfName)
sys.stdout.write(".")
sys.stdout.flush()
return env
#env = create_test_env(RandomTestParams_small, 0, "train-envs-small/Test_0")
def createEnvSet(nStart, nEnd, sDir, bSmall=True):
#print("Generate small envs in train-envs-small:")
print(f"Generate envs (small={bSmall}) in dir {sDir}:")
sDirImages = "train-envs-small/images/"
if not os.path.exists(sDirImages):
os.makedirs(sDirImages)
for test_id in range(nStart, nEnd, 1):
env = create_test_env(RandomTestParams_small, test_id, sDir)
oRender = RenderTool(env, gl="PILSVG")
#oRender.env = env
#oRender.set_new_rail()
oRender.render_env()
g2img = oRender.get_image()
imgPIL = Image.fromarray(g2img)
#imgPIL.show()
imgPIL.save(sDirImages + "Level_{}.png".format(test_id))
# print("Generate large envs in train-envs-1000:")
# for test_id in range(100):
# create_test_env(RandomTestParams, test_id, "train-envs-1000/Test_0")
def merge(sfEpisode, sfEnv, sfEnvOut, bJson=False):
if bJson:
with open(sfEpisode, "rb") as fEp:
oActions = json.load(fEp)
oEp = {"actions":oActions}
print("json oEp:", type(oEp), list(oEp.keys()))
else:
with open(sfEpisode, "rb") as fEp:
oEp = msgpack.load(fEp)
print("oEp:", type(oEp), list(oEp.keys()))
with open(sfEnv, "rb") as fEnv:
oEnv = msgpack.load(fEnv)
print("oEnv:", type(oEnv), list(oEnv.keys()))
# merge dicts
oEnv2 = {**oEp, **oEnv}
print("Merged keys:", list(oEnv2.keys()))
with open(sfEnvOut, "wb") as fEnv:
msgpack.dump(oEnv2, fEnv)
def printKeys1(sfEnv):
with open(sfEnv, "rb") as fEnv:
oEnv = msgpack.load(fEnv, encoding="utf-8")
print(sfEnv, "keys:", list(oEnv.keys()))
for sKey in oEnv.keys():
print("key", sKey, len(oEnv[sKey]))
if sKey == "shape":
print("shape: ", oEnv[sKey] )
def printKeys(sfEnvs):
try:
for sfEnv in sfEnvs:
printKeys1(sfEnv)
except:
# assume single env
printKeys1(sfEnvs)
def main2():
parser = ap.ArgumentParser(description='Generate envs, merge episodes into env files.')
parser.add_argument("-c", '--createEnvs', type=int, nargs=2, action="append",
metavar=("nStart", "nEnd"),
help='merge episode into env')
parser.add_argument("-d", "--outDir", type=str, nargs=1, default="./test-envs-tmp")
parser.add_argument("-m", '--merge', type=str, nargs=3, action="append",
metavar=("episode", "env", "output_env"),
help='merge episode into env')
parser.add_argument("-j", '--mergejson', type=str, nargs=3, action="append",
metavar=("json", "env", "output_env"),
help='merge json actions into env, with key actions')
parser.add_argument('-k', "--keys", type=str, action='append', nargs="+",
help='print the keys in a file')
args=parser.parse_args()
print(args)
if args.merge:
print("merge:", args.merge)
merge(*args.merge[0])
if args.mergejson:
print("merge json:", args.mergejson)
merge(*args.mergejson[0], bJson=True)
if args.keys:
print("keys:", args.keys)
printKeys(args.keys[0])
if args.outDir:
print("outDir", args.outDir)
if args.createEnvs:
print("create Envs - ", *args.createEnvs[0])
createEnvSet(*args.createEnvs[0], sDir=args.outDir)
if __name__=="__main__":
main2()
| en | 0.248223 | #, 10 + random.randint(0, 10)) #return tid >= 3 #if not ShouldRunTest(test_id): # continue #stochastic_data = {'malfunction_rate': malfunction_rate, # 'min_duration': malfunction_min_duration, # 'max_duration': malfunction_max_duration # } #env = create_test_env(RandomTestParams_small, 0, "train-envs-small/Test_0") #print("Generate small envs in train-envs-small:") #oRender.env = env #oRender.set_new_rail() #imgPIL.show() # print("Generate large envs in train-envs-1000:") # for test_id in range(100): # create_test_env(RandomTestParams, test_id, "train-envs-1000/Test_0") # merge dicts # assume single env | 1.791044 | 2 |
job-queue-portal/postgres_django_queue/djangoenv/lib/python3.8/site-packages/django_celery_results/migrations/0006_taskresult_date_created.py | Sruthi-Ganesh/postgres-django-queue | 0 | 7907 | # -*- coding: utf-8 -*-
# Generated by Django 2.2.4 on 2019-08-21 19:53
# this file is auto-generated so don't do flake8 on it
# flake8: noqa
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
import django.utils.timezone
def copy_date_done_to_date_created(apps, schema_editor):
TaskResult = apps.get_model('django_celery_results', 'taskresult')
db_alias = schema_editor.connection.alias
TaskResult.objects.using(db_alias).all().update(
date_created=models.F('date_done')
)
def reverse_copy_date_done_to_date_created(app, schema_editor):
# the reverse of 'copy_date_done_to_date_created' is do nothing
# because the 'date_created' will be removed.
pass
class Migration(migrations.Migration):
dependencies = [
('django_celery_results', '0005_taskresult_worker'),
]
operations = [
migrations.AddField(
model_name='taskresult',
name='date_created',
field=models.DateTimeField(
auto_now_add=True,
db_index=True,
default=django.utils.timezone.now,
help_text='Datetime field when the task result was created in UTC',
verbose_name='Created DateTime'
),
preserve_default=False,
),
migrations.RunPython(copy_date_done_to_date_created,
reverse_copy_date_done_to_date_created),
]
| # -*- coding: utf-8 -*-
# Generated by Django 2.2.4 on 2019-08-21 19:53
# this file is auto-generated so don't do flake8 on it
# flake8: noqa
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
import django.utils.timezone
def copy_date_done_to_date_created(apps, schema_editor):
TaskResult = apps.get_model('django_celery_results', 'taskresult')
db_alias = schema_editor.connection.alias
TaskResult.objects.using(db_alias).all().update(
date_created=models.F('date_done')
)
def reverse_copy_date_done_to_date_created(app, schema_editor):
# the reverse of 'copy_date_done_to_date_created' is do nothing
# because the 'date_created' will be removed.
pass
class Migration(migrations.Migration):
dependencies = [
('django_celery_results', '0005_taskresult_worker'),
]
operations = [
migrations.AddField(
model_name='taskresult',
name='date_created',
field=models.DateTimeField(
auto_now_add=True,
db_index=True,
default=django.utils.timezone.now,
help_text='Datetime field when the task result was created in UTC',
verbose_name='Created DateTime'
),
preserve_default=False,
),
migrations.RunPython(copy_date_done_to_date_created,
reverse_copy_date_done_to_date_created),
]
| en | 0.775403 | # -*- coding: utf-8 -*- # Generated by Django 2.2.4 on 2019-08-21 19:53 # this file is auto-generated so don't do flake8 on it # flake8: noqa # the reverse of 'copy_date_done_to_date_created' is do nothing # because the 'date_created' will be removed. | 2.172062 | 2 |
remediar/modules/http/__init__.py | fabaff/remediar | 0 | 7908 | <filename>remediar/modules/http/__init__.py
"""Support for HTTP or web server issues."""
| <filename>remediar/modules/http/__init__.py
"""Support for HTTP or web server issues."""
| en | 0.710091 | Support for HTTP or web server issues. | 1.279388 | 1 |
Image Recognition/utils/BayesianModels/Bayesian3Conv3FC.py | AlanMorningLight/PyTorch-BayesianCNN | 1 | 7909 | import torch.nn as nn
from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer
class BBB3Conv3FC(nn.Module):
"""
Simple Neural Network having 3 Convolution
and 3 FC layers with Bayesian layers.
"""
def __init__(self, outputs, inputs):
super(BBB3Conv3FC, self).__init__()
self.conv1 = BBBConv2d(inputs, 32, 5, stride=1, padding=2)
self.soft1 = nn.Softplus()
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv2 = BBBConv2d(32, 64, 5, stride=1, padding=2)
self.soft2 = nn.Softplus()
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv3 = BBBConv2d(64, 128, 5, stride=1, padding=1)
self.soft3 = nn.Softplus()
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2)
self.flatten = FlattenLayer(2 * 2 * 128)
self.fc1 = BBBLinearFactorial(2 * 2 * 128, 1000)
self.soft5 = nn.Softplus()
self.fc2 = BBBLinearFactorial(1000, 1000)
self.soft6 = nn.Softplus()
self.fc3 = BBBLinearFactorial(1000, outputs)
layers = [self.conv1, self.soft1, self.pool1, self.conv2, self.soft2, self.pool2,
self.conv3, self.soft3, self.pool3, self.flatten, self.fc1, self.soft5,
self.fc2, self.soft6, self.fc3]
self.layers = nn.ModuleList(layers)
def probforward(self, x):
'Forward pass with Bayesian weights'
kl = 0
for layer in self.layers:
if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
x, _kl, = layer.convprobforward(x)
kl += _kl
elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward):
x, _kl, = layer.fcprobforward(x)
kl += _kl
else:
x = layer(x)
logits = x
return logits, kl | import torch.nn as nn
from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer
class BBB3Conv3FC(nn.Module):
"""
Simple Neural Network having 3 Convolution
and 3 FC layers with Bayesian layers.
"""
def __init__(self, outputs, inputs):
super(BBB3Conv3FC, self).__init__()
self.conv1 = BBBConv2d(inputs, 32, 5, stride=1, padding=2)
self.soft1 = nn.Softplus()
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv2 = BBBConv2d(32, 64, 5, stride=1, padding=2)
self.soft2 = nn.Softplus()
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv3 = BBBConv2d(64, 128, 5, stride=1, padding=1)
self.soft3 = nn.Softplus()
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2)
self.flatten = FlattenLayer(2 * 2 * 128)
self.fc1 = BBBLinearFactorial(2 * 2 * 128, 1000)
self.soft5 = nn.Softplus()
self.fc2 = BBBLinearFactorial(1000, 1000)
self.soft6 = nn.Softplus()
self.fc3 = BBBLinearFactorial(1000, outputs)
layers = [self.conv1, self.soft1, self.pool1, self.conv2, self.soft2, self.pool2,
self.conv3, self.soft3, self.pool3, self.flatten, self.fc1, self.soft5,
self.fc2, self.soft6, self.fc3]
self.layers = nn.ModuleList(layers)
def probforward(self, x):
'Forward pass with Bayesian weights'
kl = 0
for layer in self.layers:
if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
x, _kl, = layer.convprobforward(x)
kl += _kl
elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward):
x, _kl, = layer.fcprobforward(x)
kl += _kl
else:
x = layer(x)
logits = x
return logits, kl | en | 0.903543 | Simple Neural Network having 3 Convolution and 3 FC layers with Bayesian layers. | 2.884863 | 3 |
custom_scripts/load_animals.py | nphilou/influence-release | 0 | 7910 | import os
from tensorflow.contrib.learn.python.learn.datasets import base
import numpy as np
import IPython
from subprocess import call
from keras.preprocessing import image
from influence.dataset import DataSet
from influence.inception_v3 import preprocess_input
BASE_DIR = 'data' # TODO: change
def fill(X, Y, idx, label, img_path, img_side):
img = image.load_img(img_path, target_size=(img_side, img_side))
x = image.img_to_array(img)
X[idx, ...] = x
Y[idx] = label
def extract_and_rename_animals():
class_maps = [
('dog', 'n02084071'),
('cat', 'n02121808'),
('bird', 'n01503061'),
('fish', 'n02512053'),
('horse', 'n02374451'),
('monkey', 'n02484322'),
('zebra', 'n02391049'),
('panda', 'n02510455'),
('lemur', 'n02496913'),
('wombat', 'n01883070'),
]
for class_string, class_id in class_maps:
class_dir = os.path.join(BASE_DIR, class_string)
print(class_dir)
call('mkdir %s' % class_dir, shell=True)
call('tar -xf %s.tar -C %s' % (os.path.join(BASE_DIR, class_id), class_dir), shell=True)
for filename in os.listdir(class_dir):
file_idx = filename.split('_')[1].split('.')[0]
src_filename = os.path.join(class_dir, filename)
dst_filename = os.path.join(class_dir, '%s_%s.JPEG' % (class_string, file_idx))
os.rename(src_filename, dst_filename)
def load_animals(num_train_ex_per_class=300,
num_test_ex_per_class=100,
num_valid_ex_per_class=0,
classes=None,
):
num_channels = 3
img_side = 299
if num_valid_ex_per_class == 0:
valid_str = ''
else:
valid_str = '_valid-%s' % num_valid_examples
if classes is None:
classes = ['dog', 'cat', 'bird', 'fish', 'horse', 'monkey', 'zebra', 'panda', 'lemur', 'wombat']
data_filename = os.path.join(BASE_DIR, 'dataset_train-%s_test-%s%s.npz' % (num_train_ex_per_class, num_test_ex_per_class, valid_str))
else:
data_filename = os.path.join(BASE_DIR, 'dataset_%s_train-%s_test-%s%s.npz' % ('-'.join(classes), num_train_ex_per_class, num_test_ex_per_class, valid_str))
num_classes = len(classes)
num_train_examples = num_train_ex_per_class * num_classes
num_test_examples = num_test_ex_per_class * num_classes
num_valid_examples = num_valid_ex_per_class * num_classes
if os.path.exists(data_filename):
print('Loading animals from disk...')
f = np.load(data_filename)
X_train = f['X_train']
X_test = f['X_test']
Y_train = f['Y_train']
Y_test = f['Y_test']
if 'X_valid' in f:
X_valid = f['X_valid']
else:
X_valid = None
if 'Y_valid' in f:
Y_valid = f['Y_valid']
else:
Y_valid = None
else:
print('Reading animals from raw images...')
X_train = np.zeros([num_train_examples, img_side, img_side, num_channels])
X_test = np.zeros([num_test_examples, img_side, img_side, num_channels])
# X_valid = np.zeros([num_valid_examples, img_side, img_side, num_channels])
X_valid = None
Y_train = np.zeros([num_train_examples])
Y_test = np.zeros([num_test_examples])
# Y_valid = np.zeros([num_valid_examples])
Y_valid = None
for class_idx, class_string in enumerate(classes):
print('class: %s' % class_string)
# For some reason, a lot of numbers are skipped.
i = 0
num_filled = 0
while num_filled < num_train_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
print(img_path)
if os.path.exists(img_path):
fill(X_train, Y_train, num_filled + (num_train_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
num_filled = 0
while num_filled < num_test_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
if os.path.exists(img_path):
fill(X_test, Y_test, num_filled + (num_test_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
num_filled = 0
while num_filled < num_valid_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
if os.path.exists(img_path):
fill(X_valid, Y_valid, num_filled + (num_valid_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
X_train = preprocess_input(X_train)
X_test = preprocess_input(X_test)
if X_valid is not None:  # X_valid stays None when no validation examples are requested
    X_valid = preprocess_input(X_valid)
np.random.seed(0)
permutation_idx = np.arange(num_train_examples)
np.random.shuffle(permutation_idx)
X_train = X_train[permutation_idx, :]
Y_train = Y_train[permutation_idx]
permutation_idx = np.arange(num_test_examples)
np.random.shuffle(permutation_idx)
X_test = X_test[permutation_idx, :]
Y_test = Y_test[permutation_idx]
if (X_valid is not None) and (Y_valid is not None):  # skip shuffling when no validation split was built
    permutation_idx = np.arange(num_valid_examples)
    np.random.shuffle(permutation_idx)
    X_valid = X_valid[permutation_idx, :]
    Y_valid = Y_valid[permutation_idx]
np.savez_compressed(data_filename, X_train=X_train, Y_train=Y_train, X_test=X_test, Y_test=Y_test, X_valid=X_valid, Y_valid=Y_valid)
train = DataSet(X_train, Y_train)
if (X_valid is not None) and (Y_valid is not None):
# validation = DataSet(X_valid, Y_valid)
validation = None
else:
validation = None
test = DataSet(X_test, Y_test)
return base.Datasets(train=train, validation=validation, test=test)
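# Usage sketch (illustrative, not part of the original module). The first call reads
# the raw JPEGs and caches everything as a single .npz under BASE_DIR; later calls
# load that cache directly.
#
#   data_sets = load_animals()                         # all 10 classes, 300/100 split
#   dogfish   = load_animals(num_train_ex_per_class=900,
#                            num_test_ex_per_class=300,
#                            classes=['dog', 'fish'])  # 2-class subset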
def load_koda():
num_channels = 3
img_side = 299
data_filename = os.path.join(BASE_DIR, 'dataset_koda.npz')
if os.path.exists(data_filename):
print('Loading Koda from disk...')
f = np.load(data_filename)
X = f['X']
Y = f['Y']
else:
# Returns all class 0
print('Reading Koda from raw images...')
image_files = [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg'))]
# Hack to get the image files in the right order
# image_files = [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and not image_file.startswith('124'))]
# image_files += [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and image_file.startswith('124'))]
num_examples = len(image_files)
X = np.zeros([num_examples, img_side, img_side, num_channels])
Y = np.zeros([num_examples])
class_idx = 0
for counter, image_file in enumerate(image_files):
img_path = os.path.join(BASE_DIR, 'koda', image_file)
fill(X, Y, counter, class_idx, img_path, img_side)
X = preprocess_input(X)
np.savez(data_filename, X=X, Y=Y)
return X, Y
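# load_koda() returns the whole Koda image folder as one preprocessed array X, with
# every entry of Y set to class 0; the result is cached in dataset_koda.npz so the
# JPEGs only have to be decoded once.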
def load_dogfish_with_koda():
classes = ['dog', 'fish']
X_test, Y_test = load_koda()
data_sets = load_animals(num_train_ex_per_class=900,
num_test_ex_per_class=300,
num_valid_ex_per_class=0,
classes=classes)
train = data_sets.train
validation = data_sets.validation
test = DataSet(X_test, Y_test)
return base.Datasets(train=train, validation=validation, test=test)
def load_dogfish_with_orig_and_koda():
classes = ['dog', 'fish']
X_test, Y_test = load_koda()
X_test = np.reshape(X_test, (X_test.shape[0], -1))
data_sets = load_animals(num_train_ex_per_class=900,
num_test_ex_per_class=300,
num_valid_ex_per_class=0,
classes=classes)
train = data_sets.train
validation = data_sets.validation
test = DataSet(
np.concatenate((data_sets.test.x, X_test), axis=0),
np.concatenate((data_sets.test.labels, Y_test), axis=0))
return base.Datasets(train=train, validation=validation, test=test)
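# Minimal smoke test (assumption: the cached .npz files or raw images are available
# under BASE_DIR). Guarded so importing this module stays side-effect free.
if __name__ == '__main__':
    sets = load_dogfish_with_koda()
    print(sets.train.x.shape, sets.test.x.shape)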
| import os
from tensorflow.contrib.learn.python.learn.datasets import base
import numpy as np
import IPython
from subprocess import call
from keras.preprocessing import image
from influence.dataset import DataSet
from influence.inception_v3 import preprocess_input
BASE_DIR = 'data' # TODO: change
def fill(X, Y, idx, label, img_path, img_side):
img = image.load_img(img_path, target_size=(img_side, img_side))
x = image.img_to_array(img)
X[idx, ...] = x
Y[idx] = label
def extract_and_rename_animals():
class_maps = [
('dog', 'n02084071'),
('cat', 'n02121808'),
('bird', 'n01503061'),
('fish', 'n02512053'),
('horse', 'n02374451'),
('monkey', 'n02484322'),
('zebra', 'n02391049'),
('panda', 'n02510455'),
('lemur', 'n02496913'),
('wombat', 'n01883070'),
]
for class_string, class_id in class_maps:
class_dir = os.path.join(BASE_DIR, class_string)
print(class_dir)
call('mkdir %s' % class_dir, shell=True)
call('tar -xf %s.tar -C %s' % (os.path.join(BASE_DIR, class_id), class_dir), shell=True)
for filename in os.listdir(class_dir):
file_idx = filename.split('_')[1].split('.')[0]
src_filename = os.path.join(class_dir, filename)
dst_filename = os.path.join(class_dir, '%s_%s.JPEG' % (class_string, file_idx))
os.rename(src_filename, dst_filename)
def load_animals(num_train_ex_per_class=300,
num_test_ex_per_class=100,
num_valid_ex_per_class=0,
classes=None,
):
num_channels = 3
img_side = 299
if num_valid_ex_per_class == 0:
valid_str = ''
else:
valid_str = '_valid-%s' % num_valid_ex_per_class  # was num_valid_examples, which is only defined further down (NameError); per-class count also matches the train/test naming
if classes is None:
classes = ['dog', 'cat', 'bird', 'fish', 'horse', 'monkey', 'zebra', 'panda', 'lemur', 'wombat']
data_filename = os.path.join(BASE_DIR, 'dataset_train-%s_test-%s%s.npz' % (num_train_ex_per_class, num_test_ex_per_class, valid_str))
else:
data_filename = os.path.join(BASE_DIR, 'dataset_%s_train-%s_test-%s%s.npz' % ('-'.join(classes), num_train_ex_per_class, num_test_ex_per_class, valid_str))
num_classes = len(classes)
num_train_examples = num_train_ex_per_class * num_classes
num_test_examples = num_test_ex_per_class * num_classes
num_valid_examples = num_valid_ex_per_class * num_classes
if os.path.exists(data_filename):
print('Loading animals from disk...')
f = np.load(data_filename)
X_train = f['X_train']
X_test = f['X_test']
Y_train = f['Y_train']
Y_test = f['Y_test']
if 'X_valid' in f:
X_valid = f['X_valid']
else:
X_valid = None
if 'Y_valid' in f:
Y_valid = f['Y_valid']
else:
Y_valid = None
else:
print('Reading animals from raw images...')
X_train = np.zeros([num_train_examples, img_side, img_side, num_channels])
X_test = np.zeros([num_test_examples, img_side, img_side, num_channels])
# X_valid = np.zeros([num_valid_examples, img_side, img_side, num_channels])
X_valid = None
Y_train = np.zeros([num_train_examples])
Y_test = np.zeros([num_test_examples])
# Y_valid = np.zeros([num_valid_examples])
Y_valid = None
for class_idx, class_string in enumerate(classes):
print('class: %s' % class_string)
# For some reason, a lot of numbers are skipped.
i = 0
num_filled = 0
while num_filled < num_train_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
print(img_path)
if os.path.exists(img_path):
fill(X_train, Y_train, num_filled + (num_train_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
num_filled = 0
while num_filled < num_test_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
if os.path.exists(img_path):
fill(X_test, Y_test, num_filled + (num_test_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
num_filled = 0
while num_filled < num_valid_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
if os.path.exists(img_path):
fill(X_valid, Y_valid, num_filled + (num_valid_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
X_train = preprocess_input(X_train)
X_test = preprocess_input(X_test)
if X_valid is not None:  # X_valid stays None when no validation examples are requested
    X_valid = preprocess_input(X_valid)
np.random.seed(0)
permutation_idx = np.arange(num_train_examples)
np.random.shuffle(permutation_idx)
X_train = X_train[permutation_idx, :]
Y_train = Y_train[permutation_idx]
permutation_idx = np.arange(num_test_examples)
np.random.shuffle(permutation_idx)
X_test = X_test[permutation_idx, :]
Y_test = Y_test[permutation_idx]
if (X_valid is not None) and (Y_valid is not None):  # skip shuffling when no validation split was built
    permutation_idx = np.arange(num_valid_examples)
    np.random.shuffle(permutation_idx)
    X_valid = X_valid[permutation_idx, :]
    Y_valid = Y_valid[permutation_idx]
np.savez_compressed(data_filename, X_train=X_train, Y_train=Y_train, X_test=X_test, Y_test=Y_test, X_valid=X_valid, Y_valid=Y_valid)
train = DataSet(X_train, Y_train)
if (X_valid is not None) and (Y_valid is not None):
# validation = DataSet(X_valid, Y_valid)
validation = None
else:
validation = None
test = DataSet(X_test, Y_test)
return base.Datasets(train=train, validation=validation, test=test)
def load_koda():
num_channels = 3
img_side = 299
data_filename = os.path.join(BASE_DIR, 'dataset_koda.npz')
if os.path.exists(data_filename):
print('Loading Koda from disk...')
f = np.load(data_filename)
X = f['X']
Y = f['Y']
else:
# Returns all class 0
print('Reading Koda from raw images...')
image_files = [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg'))]
# Hack to get the image files in the right order
# image_files = [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and not image_file.startswith('124'))]
# image_files += [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and image_file.startswith('124'))]
num_examples = len(image_files)
X = np.zeros([num_examples, img_side, img_side, num_channels])
Y = np.zeros([num_examples])
class_idx = 0
for counter, image_file in enumerate(image_files):
img_path = os.path.join(BASE_DIR, 'koda', image_file)
fill(X, Y, counter, class_idx, img_path, img_side)
X = preprocess_input(X)
np.savez(data_filename, X=X, Y=Y)
return X, Y
def load_dogfish_with_koda():
classes = ['dog', 'fish']
X_test, Y_test = load_koda()
data_sets = load_animals(num_train_ex_per_class=900,
num_test_ex_per_class=300,
num_valid_ex_per_class=0,
classes=classes)
train = data_sets.train
validation = data_sets.validation
test = DataSet(X_test, Y_test)
return base.Datasets(train=train, validation=validation, test=test)
def load_dogfish_with_orig_and_koda():
classes = ['dog', 'fish']
X_test, Y_test = load_koda()
X_test = np.reshape(X_test, (X_test.shape[0], -1))
data_sets = load_animals(num_train_ex_per_class=900,
num_test_ex_per_class=300,
num_valid_ex_per_class=0,
classes=classes)
train = data_sets.train
validation = data_sets.validation
test = DataSet(
np.concatenate((data_sets.test.x, X_test), axis=0),
np.concatenate((data_sets.test.labels, Y_test), axis=0))
return base.Datasets(train=train, validation=validation, test=test)
| en | 0.608122 | # TODO: change # X_valid = np.zeros([num_valid_examples, img_side, img_side, num_channels]) # Y_valid = np.zeros([num_valid_examples]) # For some reason, a lot of numbers are skipped. # validation = DataSet(X_valid, Y_valid) # Returns all class 0 # Hack to get the image files in the right order # image_files = [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and not image_file.startswith('124'))] # image_files += [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and image_file.startswith('124'))] | 2.286287 | 2 |
src/qiskit_aws_braket_provider/awsbackend.py | carstenblank/qiskit-aws-braket-provider | 7 | 7911 | <reponame>carstenblank/qiskit-aws-braket-provider
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from datetime import datetime, timedelta
from braket.device_schema.device_service_properties_v1 import DeviceCost
from typing import List, Dict, Optional, Any, Union, Tuple
from botocore.response import StreamingBody
from braket.aws import AwsDevice, AwsQuantumTask, AwsSession
from braket.circuits import Circuit
from braket.device_schema import DeviceCapabilities
from braket.device_schema.ionq import IonqDeviceCapabilities
from braket.device_schema.rigetti import RigettiDeviceCapabilities
from braket.device_schema.simulators import GateModelSimulatorDeviceCapabilities
from qiskit.providers import BaseBackend, JobStatus
from qiskit.providers.models import QasmBackendConfiguration, BackendProperties, BackendStatus
from qiskit.qobj import QasmQobj
from . import awsjob
from . import awsprovider
from .conversions_configuration import aws_device_2_configuration
from .conversions_properties import aws_ionq_to_properties, aws_rigetti_to_properties, aws_simulator_to_properties
from .transpilation import convert_qasm_qobj
logger = logging.getLogger(__name__)
class AWSBackend(BaseBackend):
_aws_device: AwsDevice
_configuration: QasmBackendConfiguration
_provider: 'awsprovider.AWSProvider'
def __init__(self, aws_device: AwsDevice, provider: 'awsprovider.AWSProvider' = None):
super().__init__(aws_device_2_configuration(aws_device), provider)
self._aws_device = aws_device
self._run = aws_device.run
def properties(self) -> BackendProperties:
properties: DeviceCapabilities = self._aws_device.properties
if isinstance(properties, IonqDeviceCapabilities):
return aws_ionq_to_properties(properties, self._configuration)
if isinstance(properties, RigettiDeviceCapabilities):
return aws_rigetti_to_properties(properties, self._configuration)
if isinstance(properties, GateModelSimulatorDeviceCapabilities):
return aws_simulator_to_properties(properties, self._configuration)
def status(self) -> BackendStatus:
# now = datetime.now()
# windows = self._aws_device.properties.service.executionWindows
# is_in_execution_window = windows.
status: str = self._aws_device.status
backend_status: BackendStatus = BackendStatus(
backend_name=self.name(),
backend_version=self.version(),
operational=False,
pending_jobs=0, # TODO
status_msg=status
)
if status == 'ONLINE':
backend_status.operational = True
elif status == 'OFFLINE':
backend_status.operational = False
else:
backend_status.operational = False
return backend_status
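# The mapping above is deliberately coarse: Braket reports 'ONLINE'/'OFFLINE' strings,
# which are folded into BackendStatus.operational, while execution windows and the
# real pending-task count are not consulted yet (see the commented-out lines and the
# TODO on pending_jobs).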
def _get_job_data_s3_folder(self, job_id):
return f"results-{self.name()}-{job_id}"
@staticmethod
def _exists_file(s3_client, s3_bucket: str, file: str):
result: dict = s3_client.list_objects_v2(
Bucket=s3_bucket,
Prefix=file
)
# TODO: error handling
return result['KeyCount'] != 0
def _save_job_task_arns(self, job_id: str, task_arns: List[str],
s3_bucket: Optional[str] = None) -> AwsSession.S3DestinationFolder:
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json'
if AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does already exist in the bucket {used_s3_bucket}")
result = s3_client.put_object(Body=json.dumps(task_arns).encode(), Bucket=used_s3_bucket, Key=file)
# TODO: error handling
return used_s3_bucket, self._get_job_data_s3_folder(job_id=job_id)
def _delete_job_task_arns(self, job_id: str, s3_bucket: Optional[str] = None):
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.delete_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
def _load_job_task_arns(self, job_id: str, s3_bucket: Optional[str] = None) -> List[str]:
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.get_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
streaming_body: StreamingBody = result['Body']
data: bytes = streaming_body.read()
task_arns = json.loads(data.decode())
return task_arns
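# S3 layout used by the save/load/delete helpers in this class (one prefix per qobj):
#
#   <bucket>/results-<backend-name>-<job_id>/task_arns.json         list of Braket task ARNs
#   <bucket>/results-<backend-name>-<job_id>/qiskit_qobj_data.json  serialized qobj + extra_data
#
# Braket itself writes the per-task result objects into the same prefix, since run()
# passes that folder as the s3_destination_folder.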
def _save_job_data_s3(self, qobj: QasmQobj, s3_bucket: Optional[str] = None,
extra_data: Optional[dict] = None) -> AwsSession.S3DestinationFolder:
used_s3_bucket: str = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=qobj.qobj_id)}/qiskit_qobj_data.json'
if AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' already exists at the bucket {used_s3_bucket}")
body = {
'qobj_id': qobj.qobj_id,
'qobj': qobj.to_dict()
}
if extra_data:
body['extra_data'] = extra_data
result = s3_client.put_object(Body=json.dumps(body).encode(), Bucket=used_s3_bucket, Key=file)
# TODO: error handling
return used_s3_bucket, self._get_job_data_s3_folder(job_id=qobj.qobj_id)
def _delete_job_data_s3(self, job_id: str, s3_bucket: Optional[str] = None):
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/qiskit_qobj_data.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.delete_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
def _load_job_data_s3(self, job_id: str, s3_bucket: Optional[str] = None) -> Tuple[QasmQobj, dict]:
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/qiskit_qobj_data.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.get_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
streaming_body: StreamingBody = result['Body']
data: bytes = streaming_body.read()
stored_experiment_data = json.loads(data.decode())
assert 'qobj' in stored_experiment_data
qobj_raw = stored_experiment_data['qobj']
qobj = QasmQobj.from_dict(qobj_raw)
extra_data = stored_experiment_data.get('extra_data', {})
return qobj, extra_data
def _create_task(self, job_id: str, qc: Circuit, shots: int, s3_bucket: Optional[str] = None) -> AwsQuantumTask:
used_s3_bucket: str = s3_bucket or self._provider.get_default_bucket()
task: AwsQuantumTask = self._aws_device.run(
task_specification=qc,
s3_destination_folder=(used_s3_bucket, self._get_job_data_s3_folder(job_id)),
shots=shots
)
return task
def jobs(
self,
limit: int = 10,
skip: int = 0,
status: Optional[Union[JobStatus, str, List[Union[JobStatus, str]]]] = None,
job_name: Optional[str] = None,
start_datetime: Optional[datetime] = None,
end_datetime: Optional[datetime] = None,
job_tags: Optional[List[str]] = None,
job_tags_operator: Optional[str] = "OR",
descending: bool = True,
db_filter: Optional[Dict[str, Any]] = None
) -> List['awsjob.AWSJob']:
# TODO: use job tags as meta data on s3, else use the method of active_jobs
pass
def active_jobs(self, limit: int = 10) -> List['awsjob.AWSJob']:
client = self._provider._aws_session.braket_client
task_arns = []
nextToken = 'init'
while nextToken is not None:
result: dict = client.search_quantum_tasks(
filters=[{
'name': self.name(),
'operator': 'EQUAL',
'values': ['CREATED', 'QUEUED', 'RUNNING']
}
],
maxResults=limit,
nextToken=None if nextToken == 'init' or nextToken is None else nextToken
)
# TODO: build all task_arns, query s3 for all keys with task_arns.json, see to which task a job associated, load the jobs via job_id
pass
def retrieve_job(self, job_id: str, s3_bucket: Optional[str] = None) -> 'awsjob.AWSJob':
qobj, extra_data = self._load_job_data_s3(job_id=job_id, s3_bucket=s3_bucket)
arns = self._load_job_task_arns(job_id=job_id, s3_bucket=s3_bucket)
tasks = [AwsQuantumTask(arn=arn) for arn in arns]
job = awsjob.AWSJob(
job_id=job_id,
qobj=qobj,
tasks=tasks,
extra_data=extra_data,
s3_bucket=s3_bucket,
backend=self
)
return job
def estimate_costs(self, qobj: QasmQobj) -> Optional[float]:
shots = qobj.config.shots
no_experiments = len(qobj.experiments)
cost: DeviceCost = self._aws_device.properties.service.deviceCost
if cost.unit == 'shot':
return shots * no_experiments * cost.price
elif cost.unit == 'hour':
time_per_experiment = timedelta(seconds=10) # TODO: make this a better estimate: depends on no_qubits and depth
total_time = shots * no_experiments * time_per_experiment
return total_time.total_seconds() / 60 / 60 * cost.price
else:
return None
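# Worked example (prices are illustrative only): a qobj with 3 experiments and
# 1000 shots on a per-shot device costing $0.00035/shot is estimated at
# 3 * 1000 * 0.00035 = $1.05. For per-hour devices the fixed 10 s/experiment guess
# above makes the figure a rough estimate at best.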
def run(self, qobj: QasmQobj, s3_bucket: Optional[str] = None, extra_data: Optional[dict] = None):
# If we get here, then we can continue with running, else ValueError!
circuits: List[Circuit] = list(convert_qasm_qobj(qobj))
shots = qobj.config.shots
tasks: List[AwsQuantumTask] = []
try:
s3_location: AwsSession.S3DestinationFolder = self._save_job_data_s3(qobj, s3_bucket=s3_bucket, extra_data=extra_data)
for circuit in circuits:
task = self._aws_device.run(
task_specification=circuit,
s3_destination_folder=s3_location,
shots=shots
)
tasks.append(task)
task_arns = [t.id for t in tasks]
self._save_job_task_arns(job_id=qobj.qobj_id, task_arns=task_arns, s3_bucket=s3_location[0])
except Exception as ex:
logger.error(f'During creation of tasks an error occurred: {ex}')
logger.error(f'Cancelling all tasks {len(tasks)}!')
for task in tasks:
logger.error(f'Attempt to cancel {task.id}...')
task.cancel()
logger.error(f'State of {task.id}: {task.state()}.')
self._delete_job_task_arns(qobj.qobj_id, s3_bucket=s3_bucket)
self._delete_job_data_s3(qobj.qobj_id, s3_bucket=s3_bucket)
raise ex
job = awsjob.AWSJob(
job_id=qobj.qobj_id,
qobj=qobj,
tasks=tasks,
extra_data=extra_data,
s3_bucket=s3_location[0],
backend=self
)
return job
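# Usage sketch (hypothetical names; assumes a configured awsprovider.AWSProvider):
#
#   provider = awsprovider.AWSProvider(...)
#   backend = provider.get_backend('<device name>')   # an AWSBackend instance
#   job = backend.run(qobj)                           # one Braket task per experiment
#   job = backend.retrieve_job(qobj.qobj_id)          # later: rebuild the job from S3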
| # Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from datetime import datetime, timedelta
from braket.device_schema.device_service_properties_v1 import DeviceCost
from typing import List, Dict, Optional, Any, Union, Tuple
from botocore.response import StreamingBody
from braket.aws import AwsDevice, AwsQuantumTask, AwsSession
from braket.circuits import Circuit
from braket.device_schema import DeviceCapabilities
from braket.device_schema.ionq import IonqDeviceCapabilities
from braket.device_schema.rigetti import RigettiDeviceCapabilities
from braket.device_schema.simulators import GateModelSimulatorDeviceCapabilities
from qiskit.providers import BaseBackend, JobStatus
from qiskit.providers.models import QasmBackendConfiguration, BackendProperties, BackendStatus
from qiskit.qobj import QasmQobj
from . import awsjob
from . import awsprovider
from .conversions_configuration import aws_device_2_configuration
from .conversions_properties import aws_ionq_to_properties, aws_rigetti_to_properties, aws_simulator_to_properties
from .transpilation import convert_qasm_qobj
logger = logging.getLogger(__name__)
class AWSBackend(BaseBackend):
_aws_device: AwsDevice
_configuration: QasmBackendConfiguration
_provider: 'awsprovider.AWSProvider'
def __init__(self, aws_device: AwsDevice, provider: 'awsprovider.AWSProvider' = None):
super().__init__(aws_device_2_configuration(aws_device), provider)
self._aws_device = aws_device
self._run = aws_device.run
def properties(self) -> BackendProperties:
properties: DeviceCapabilities = self._aws_device.properties
if isinstance(properties, IonqDeviceCapabilities):
return aws_ionq_to_properties(properties, self._configuration)
if isinstance(properties, RigettiDeviceCapabilities):
return aws_rigetti_to_properties(properties, self._configuration)
if isinstance(properties, GateModelSimulatorDeviceCapabilities):
return aws_simulator_to_properties(properties, self._configuration)
def status(self) -> BackendStatus:
# now = datetime.now()
# windows = self._aws_device.properties.service.executionWindows
# is_in_execution_window = windows.
status: str = self._aws_device.status
backend_status: BackendStatus = BackendStatus(
backend_name=self.name(),
backend_version=self.version(),
operational=False,
pending_jobs=0, # TODO
status_msg=status
)
if status == 'ONLINE':
backend_status.operational = True
elif status == 'OFFLINE':
backend_status.operational = False
else:
backend_status.operational = False
return backend_status
def _get_job_data_s3_folder(self, job_id):
return f"results-{self.name()}-{job_id}"
@staticmethod
def _exists_file(s3_client, s3_bucket: str, file: str):
result: dict = s3_client.list_objects_v2(
Bucket=s3_bucket,
Prefix=file
)
# TODO: error handling
return result['KeyCount'] != 0
def _save_job_task_arns(self, job_id: str, task_arns: List[str],
s3_bucket: Optional[str] = None) -> AwsSession.S3DestinationFolder:
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json'
if AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does already exist in the bucket {used_s3_bucket}")
result = s3_client.put_object(Body=json.dumps(task_arns).encode(), Bucket=used_s3_bucket, Key=file)
# TODO: error handling
return used_s3_bucket, self._get_job_data_s3_folder(job_id=job_id)
def _delete_job_task_arns(self, job_id: str, s3_bucket: Optional[str] = None):
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.delete_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
def _load_job_task_arns(self, job_id: str, s3_bucket: Optional[str] = None) -> List[str]:
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.get_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
streaming_body: StreamingBody = result['Body']
data: bytes = streaming_body.read()
task_arns = json.loads(data.decode())
return task_arns
def _save_job_data_s3(self, qobj: QasmQobj, s3_bucket: Optional[str] = None,
extra_data: Optional[dict] = None) -> AwsSession.S3DestinationFolder:
used_s3_bucket: str = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=qobj.qobj_id)}/qiskit_qobj_data.json'
if AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' already exists at the bucket {used_s3_bucket}")
body = {
'qobj_id': qobj.qobj_id,
'qobj': qobj.to_dict()
}
if extra_data:
body['extra_data'] = extra_data
result = s3_client.put_object(Body=json.dumps(body).encode(), Bucket=used_s3_bucket, Key=file)
# TODO: error handling
return used_s3_bucket, self._get_job_data_s3_folder(job_id=qobj.qobj_id)
def _delete_job_data_s3(self, job_id: str, s3_bucket: Optional[str] = None):
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/qiskit_qobj_data.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.delete_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
def _load_job_data_s3(self, job_id: str, s3_bucket: Optional[str] = None) -> Tuple[QasmQobj, dict]:
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/qiskit_qobj_data.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.get_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
streaming_body: StreamingBody = result['Body']
data: bytes = streaming_body.read()
stored_experiment_data = json.loads(data.decode())
assert 'qobj' in stored_experiment_data
qobj_raw = stored_experiment_data['qobj']
qobj = QasmQobj.from_dict(qobj_raw)
extra_data = stored_experiment_data.get('extra_data', {})
return qobj, extra_data
def _create_task(self, job_id: str, qc: Circuit, shots: int, s3_bucket: Optional[str] = None) -> AwsQuantumTask:
used_s3_bucket: str = s3_bucket or self._provider.get_default_bucket()
task: AwsQuantumTask = self._aws_device.run(
task_specification=qc,
s3_destination_folder=(used_s3_bucket, self._get_job_data_s3_folder(job_id)),
shots=shots
)
return task
def jobs(
self,
limit: int = 10,
skip: int = 0,
status: Optional[Union[JobStatus, str, List[Union[JobStatus, str]]]] = None,
job_name: Optional[str] = None,
start_datetime: Optional[datetime] = None,
end_datetime: Optional[datetime] = None,
job_tags: Optional[List[str]] = None,
job_tags_operator: Optional[str] = "OR",
descending: bool = True,
db_filter: Optional[Dict[str, Any]] = None
) -> List['awsjob.AWSJob']:
# TODO: use job tags as meta data on s3, else use the method of active_jobs
pass
def active_jobs(self, limit: int = 10) -> List['awsjob.AWSJob']:
client = self._provider._aws_session.braket_client
task_arns = []
nextToken = 'init'
while nextToken is not None:
result: dict = client.search_quantum_tasks(
filters=[{
'name': self.name(),
'operator': 'EQUAL',
'values': ['CREATED', 'QUEUED', 'RUNNING']
}
],
maxResults=limit,
nextToken=None if nextToken == 'init' or nextToken is None else nextToken
)
# TODO: build all task_arns, query s3 for all keys with task_arns.json, see to which task a job associated, load the jobs via job_id
pass
def retrieve_job(self, job_id: str, s3_bucket: Optional[str] = None) -> 'awsjob.AWSJob':
qobj, extra_data = self._load_job_data_s3(job_id=job_id, s3_bucket=s3_bucket)
arns = self._load_job_task_arns(job_id=job_id, s3_bucket=s3_bucket)
tasks = [AwsQuantumTask(arn=arn) for arn in arns]
job = awsjob.AWSJob(
job_id=job_id,
qobj=qobj,
tasks=tasks,
extra_data=extra_data,
s3_bucket=s3_bucket,
backend=self
)
return job
def estimate_costs(self, qobj: QasmQobj) -> Optional[float]:
shots = qobj.config.shots
no_experiments = len(qobj.experiments)
cost: DeviceCost = self._aws_device.properties.service.deviceCost
if cost.unit == 'shot':
return shots * no_experiments * cost.price
elif cost.unit == 'hour':
time_per_experiment = timedelta(seconds=10) # TODO: make this a better estimate: depends on no_qubits and depth
total_time = shots * no_experiments * time_per_experiment
return total_time.total_seconds() / 60 / 60 * cost.price
else:
return None
def run(self, qobj: QasmQobj, s3_bucket: Optional[str] = None, extra_data: Optional[dict] = None):
# If we get here, then we can continue with running, else ValueError!
circuits: List[Circuit] = list(convert_qasm_qobj(qobj))
shots = qobj.config.shots
tasks: List[AwsQuantumTask] = []
try:
s3_location: AwsSession.S3DestinationFolder = self._save_job_data_s3(qobj, s3_bucket=s3_bucket, extra_data=extra_data)
for circuit in circuits:
task = self._aws_device.run(
task_specification=circuit,
s3_destination_folder=s3_location,
shots=shots
)
tasks.append(task)
task_arns = [t.id for t in tasks]
self._save_job_task_arns(job_id=qobj.qobj_id, task_arns=task_arns, s3_bucket=s3_location[0])
except Exception as ex:
logger.error(f'During creation of tasks an error occurred: {ex}')
logger.error(f'Cancelling all tasks {len(tasks)}!')
for task in tasks:
logger.error(f'Attempt to cancel {task.id}...')
task.cancel()
logger.error(f'State of {task.id}: {task.state()}.')
self._delete_job_task_arns(qobj.qobj_id, s3_bucket=s3_bucket)
self._delete_job_data_s3(qobj.qobj_id, s3_bucket=s3_bucket)
raise ex
job = awsjob.AWSJob(
job_id=qobj.qobj_id,
qobj=qobj,
tasks=tasks,
extra_data=extra_data,
s3_bucket=s3_location[0],
backend=self
)
return job | en | 0.70666 | # Copyright 2020 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # now = datetime.now() # windows = self._aws_device.properties.service.executionWindows # is_in_execution_window = windows. # TODO # TODO: error handling # TODO: error handling # TODO: error handling # TODO: error handling # TODO: error handling # TODO: error handling # TODO: error handling # TODO: use job tags as meta data on s3, else use the method of active_jobs # TODO: build all task_arns, query s3 for all keys with task_arns.json, see to which task a job associated, load the jobs via job_id # TODO: make this a better estimate: depends on no_qubits and depth # If we get here, then we can continue with running, else ValueError! | 1.407782 | 1 |
test/unit/Algorithms/GenericLinearTransportTest.py | thirtywang/OpenPNM | 0 | 7912 | import OpenPNM
import numpy as np
import OpenPNM.Physics.models as pm
class GenericLinearTransportTest:
def setup_class(self):
self.net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
self.phase = OpenPNM.Phases.GenericPhase(network=self.net)
Ps = self.net.Ps
Ts = self.net.Ts
self.phys = OpenPNM.Physics.GenericPhysics(network=self.net,
phase=self.phase,
pores=Ps, throats=Ts)
self.phys['throat.cond'] = 5e-8
self.alg = OpenPNM.Algorithms.GenericLinearTransport(network=self.net,
phase=self.phase)
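# Shared fixture (summary added for clarity): every test below runs against the same
# 5 x 5 x 5 cubic network (125 pores), a generic phase/physics pair with a uniform
# throat conductance of 5e-8, and a single GenericLinearTransport algorithm instance.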
def test_set_BC_modes_pores(self):
BC1_pores = np.arange(25, 35)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC1_pores)
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == BC1_pores)
BC2_pores = np.arange(43, 50)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC2_pores,
mode='merge')
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == np.concatenate((BC1_pores, BC2_pores)))
BC3_pores = np.arange(4, 9)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC3_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == BC3_pores)
BC4_pores = [11, 90]
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
pores=BC4_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Neumann')
assert np.all(ptest == BC4_pores)
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=BC1_pores,
bcvalue=0.3)
ptest = self.alg.pores('pore.Dirichlet')
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=self.alg.Ps,
mode='remove')
Dp = np.sum(self.alg['pore.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'pore.Neumann'
assert (label not in self.alg.labels())
def test_set_BC_modes_throats(self):
BC1_throats = np.arange(25, 35)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC1_throats)
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == BC1_throats)
BC2_throats = np.arange(43, 50)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC2_throats,
mode='merge')
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == np.concatenate((BC1_throats, BC2_throats)))
BC3_throats = np.arange(4, 9)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC3_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == BC3_throats)
BC4_throats = [11, 90]
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
throats=BC4_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Neumann')
assert np.all(t_test == BC4_throats)
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=BC1_throats,
bcvalue=0.3)
t_test = self.alg.throats('throat.Dirichlet')
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=self.alg.Ts,
mode='remove')
Dp = np.sum(self.alg['throat.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'throat.Neumann'
assert (label not in self.alg.labels())
def test_set_BC_modes_with_boolean_masks_pores(self):
BC1_pores = np.zeros(self.alg.Np, dtype='bool')
BC1_pores[np.arange(25, 35)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC1_pores)
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == self.alg._parse_locations(BC1_pores))
BC2_pores = np.zeros(self.alg.Np, dtype='bool')
BC2_pores[np.arange(43, 50)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC2_pores,
mode='merge')
ptest = self.alg.pores('pore.Dirichlet')
B1 = self.alg._parse_locations(BC1_pores)
B2 = self.alg._parse_locations(BC2_pores)
assert np.all(ptest == np.concatenate((B1, B2)))
BC3_pores = np.zeros(self.alg.Np, dtype='bool')
BC3_pores[np.arange(4, 9)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC3_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == self.alg._parse_locations(BC3_pores))
BC4_pores = np.zeros(self.alg.Np, dtype='bool')
BC4_pores[[11, 90]] = True
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
pores=BC4_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Neumann')
assert np.all(ptest == self.alg._parse_locations(BC4_pores))
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=BC1_pores,
bcvalue=0.3)
ptest = self.alg.pores('pore.Dirichlet')
removed_p = self.alg._parse_locations(self.alg.Ps)
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=removed_p,
mode='remove')
Dp = np.sum(self.alg['pore.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'pore.Neumann'
assert (label not in self.alg.labels())
def test_set_BC_modes_with_boolean_masks_throats(self):
BC1_throats = np.zeros(self.alg.Nt, dtype='bool')
BC1_throats[np.arange(25, 35)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC1_throats)
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == self.alg._parse_locations(BC1_throats))
BC2_throats = np.zeros(self.alg.Nt, dtype='bool')
BC2_throats[np.arange(43, 50)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC2_throats,
mode='merge')
t_test = self.alg.throats('throat.Dirichlet')
B1 = self.alg._parse_locations(BC1_throats)
B2 = self.alg._parse_locations(BC2_throats)
assert np.all(t_test == np.concatenate((B1, B2)))
BC3_throats = np.zeros(self.alg.Nt, dtype='bool')
BC3_throats[np.arange(4, 9)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC3_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == self.alg._parse_locations(BC3_throats))
BC4_throats = np.zeros(self.alg.Nt, dtype='bool')
BC4_throats[[11, 90]] = True
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
throats=BC4_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Neumann')
assert np.all(t_test == self.alg._parse_locations(BC4_throats))
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=BC1_throats,
bcvalue=0.3)
t_test = self.alg.throats('throat.Dirichlet')
removed_t = self.alg._parse_locations(self.alg.Ts)
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=removed_t,
mode='remove')
Dp = np.sum(self.alg['throat.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'pore.Neumann'
assert (label not in self.alg.labels())
def test_super_pore_conductance(self):
g_super = []
BC1_pores = np.arange(20, 30)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.4,
pores=BC1_pores)
BC2_pores = np.arange(45, 66)
self.alg.set_boundary_conditions(bctype='Neumann_group',
bcvalue=1.4e-10,
pores=BC2_pores)
g_super.append(2e-12)
BC3_pores = np.arange(87, 94)
self.alg.set_boundary_conditions(bctype='Neumann_group',
bcvalue=-0.9e-10,
pores=BC3_pores)
g_super.append(np.ones(len(BC3_pores)) * 1.5e-12)
BC4_pores = np.arange(3, 7)
self.alg.set_boundary_conditions(bctype='Neumann_group',
bcvalue=0.1e-10,
pores=BC4_pores)
g_super.append(np.array([6.42e-13]))
self.alg.run(conductance='throat.cond',
quantity='pore.mole_fraction',
super_pore_conductance=g_super)
self.alg.return_results()
r1 = self.alg.rate(BC1_pores)[0]
r2 = self.alg.rate(BC2_pores)[0]
r3 = self.alg.rate(BC3_pores)[0]
r4 = self.alg.rate(BC4_pores)[0]
assert np.absolute(r1 + r2 + r3 + r4) < 1e-20
assert np.size(self.alg.super_pore_conductance[0]) == 1
assert np.size(self.alg.super_pore_conductance[1]) == 7
assert np.size(self.alg.super_pore_conductance[2]) == 1
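# The test above is essentially a mass-balance check: with one Dirichlet group and
# three Neumann_group boundaries, the rates over all four groups must sum to ~0,
# and super_pore_conductance keeps whatever shape each group was given
# (scalar, length-7 array, length-1 array).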
def test_source_term_modes(self):
self.phys['pore.item1'] = 0.5e-12
self.phys['pore.item2'] = 2.5
self.phys['pore.item3'] = -1.4e-11
self.phys.models.add(propname='pore.A',
model=pm.generic_source_term.power_law,
A1='pore.item1',
A2='pore.item2',
A3='pore.item3',
x='mole_fraction',
return_rate=False,
regen_mode='on_demand')
self.phys.models.add(propname='pore.B',
model=pm.generic_source_term.linear,
A1='pore.item1',
A2='pore.item3',
x='mole_fraction',
return_rate=False,
regen_mode='on_demand')
S1_pores = np.arange(25, 35)
self.alg.set_source_term(source_name=['pore.A', 'pore.B'],
pores=S1_pores)
mask1 = ~np.isnan(self.alg['pore.source_nonlinear_s1_A'])
mask2 = ~np.isnan(self.alg['pore.source_nonlinear_s2_A'])
assert np.all(self.alg.Ps[mask1] == S1_pores)
assert np.all(self.alg.Ps[mask2] == S1_pores)
self.alg.set_source_term(source_name='pore.A',
pores=[26], x0=np.ones(self.phys.Np),
mode='update')
assert self.alg['pore.source_nonlinear_s1_A'][26] == 1.25e-12
S2_pores = np.array([30, 31])
self.alg.set_source_term(source_name='pore.A',
pores=S2_pores,
mode='overwrite')
mask1 = ~np.isnan(self.alg['pore.source_nonlinear_s1_A'])
assert np.all(self.alg.Ps[mask1] == S2_pores)
self.alg.set_source_term(source_name='pore.B',
pores=S1_pores,
mode='remove')
mask1 = np.isnan(self.alg['pore.source_nonlinear_s1_B'])
assert np.all(self.alg.Ps[mask1] == self.alg.Ps)
self.alg.set_source_term(source_name=['pore.A', 'pore.B'],
pores=self.alg.Ps,
mode='remove')
assert ('pore.source_B' in self.alg.labels())
assert ('pore.source_A' in self.alg.labels())
self.alg.set_source_term(source_name=['pore.A', 'pore.B'],
mode='remove')
assert ('pore.source_B' not in self.alg.labels())
assert ('pore.source_A' not in self.alg.labels())
| import OpenPNM
import numpy as np
import OpenPNM.Physics.models as pm
class GenericLinearTransportTest:
def setup_class(self):
self.net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
self.phase = OpenPNM.Phases.GenericPhase(network=self.net)
Ps = self.net.Ps
Ts = self.net.Ts
self.phys = OpenPNM.Physics.GenericPhysics(network=self.net,
phase=self.phase,
pores=Ps, throats=Ts)
self.phys['throat.cond'] = 5e-8
self.alg = OpenPNM.Algorithms.GenericLinearTransport(network=self.net,
phase=self.phase)
def test_set_BC_modes_pores(self):
BC1_pores = np.arange(25, 35)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC1_pores)
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == BC1_pores)
BC2_pores = np.arange(43, 50)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC2_pores,
mode='merge')
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == np.concatenate((BC1_pores, BC2_pores)))
BC3_pores = np.arange(4, 9)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC3_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == BC3_pores)
BC4_pores = [11, 90]
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
pores=BC4_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Neumann')
assert np.all(ptest == BC4_pores)
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=BC1_pores,
bcvalue=0.3)
ptest = self.alg.pores('pore.Dirichlet')
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=self.alg.Ps,
mode='remove')
Dp = np.sum(self.alg['pore.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'pore.Neumann'
assert (label not in self.alg.labels())
def test_set_BC_modes_throats(self):
BC1_throats = np.arange(25, 35)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC1_throats)
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == BC1_throats)
BC2_throats = np.arange(43, 50)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC2_throats,
mode='merge')
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == np.concatenate((BC1_throats, BC2_throats)))
BC3_throats = np.arange(4, 9)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC3_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == BC3_throats)
BC4_throats = [11, 90]
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
throats=BC4_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Neumann')
assert np.all(t_test == BC4_throats)
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=BC1_throats,
bcvalue=0.3)
t_test = self.alg.throats('throat.Dirichlet')
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=self.alg.Ts,
mode='remove')
Dp = np.sum(self.alg['throat.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'throat.Neumann'
assert (label not in self.alg.labels())
def test_set_BC_modes_with_boolean_masks_pores(self):
BC1_pores = np.zeros(self.alg.Np, dtype='bool')
BC1_pores[np.arange(25, 35)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC1_pores)
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == self.alg._parse_locations(BC1_pores))
BC2_pores = np.zeros(self.alg.Np, dtype='bool')
BC2_pores[np.arange(43, 50)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC2_pores,
mode='merge')
ptest = self.alg.pores('pore.Dirichlet')
B1 = self.alg._parse_locations(BC1_pores)
B2 = self.alg._parse_locations(BC2_pores)
assert np.all(ptest == np.concatenate((B1, B2)))
BC3_pores = np.zeros(self.alg.Np, dtype='bool')
BC3_pores[np.arange(4, 9)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
pores=BC3_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Dirichlet')
assert np.all(ptest == self.alg._parse_locations(BC3_pores))
BC4_pores = np.zeros(self.alg.Np, dtype='bool')
BC4_pores[[11, 90]] = True
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
pores=BC4_pores,
mode='overwrite')
ptest = self.alg.pores('pore.Neumann')
assert np.all(ptest == self.alg._parse_locations(BC4_pores))
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=BC1_pores,
bcvalue=0.3)
ptest = self.alg.pores('pore.Dirichlet')
removed_p = self.alg._parse_locations(self.alg.Ps)
self.alg.set_boundary_conditions(bctype='Dirichlet',
pores=removed_p,
mode='remove')
Dp = np.sum(self.alg['pore.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'pore.Neumann'
assert (label not in self.alg.labels())
def test_set_BC_modes_with_boolean_masks_throats(self):
BC1_throats = np.zeros(self.alg.Nt, dtype='bool')
BC1_throats[np.arange(25, 35)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC1_throats)
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == self.alg._parse_locations(BC1_throats))
BC2_throats = np.zeros(self.alg.Nt, dtype='bool')
BC2_throats[np.arange(43, 50)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC2_throats,
mode='merge')
t_test = self.alg.throats('throat.Dirichlet')
B1 = self.alg._parse_locations(BC1_throats)
B2 = self.alg._parse_locations(BC2_throats)
assert np.all(t_test == np.concatenate((B1, B2)))
BC3_throats = np.zeros(self.alg.Nt, dtype='bool')
BC3_throats[np.arange(4, 9)] = True
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.8,
throats=BC3_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Dirichlet')
assert np.all(t_test == self.alg._parse_locations(BC3_throats))
BC4_throats = np.zeros(self.alg.Nt, dtype='bool')
BC4_throats[[11, 90]] = True
self.alg.set_boundary_conditions(bctype='Neumann',
bcvalue=0.5,
throats=BC4_throats,
mode='overwrite')
t_test = self.alg.throats('throat.Neumann')
assert np.all(t_test == self.alg._parse_locations(BC4_throats))
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=BC1_throats,
bcvalue=0.3)
t_test = self.alg.throats('throat.Dirichlet')
removed_t = self.alg._parse_locations(self.alg.Ts)
self.alg.set_boundary_conditions(bctype='Dirichlet',
throats=removed_t,
mode='remove')
Dp = np.sum(self.alg['throat.Dirichlet'])
assert Dp == 0
self.alg.set_boundary_conditions(bctype='Neumann',
mode='remove')
label = 'pore.Neumann'
assert (label not in self.alg.labels())
def test_super_pore_conductance(self):
g_super = []
BC1_pores = np.arange(20, 30)
self.alg.set_boundary_conditions(bctype='Dirichlet',
bcvalue=0.4,
pores=BC1_pores)
BC2_pores = np.arange(45, 66)
self.alg.set_boundary_conditions(bctype='Neumann_group',
bcvalue=1.4e-10,
pores=BC2_pores)
g_super.append(2e-12)
BC3_pores = np.arange(87, 94)
self.alg.set_boundary_conditions(bctype='Neumann_group',
bcvalue=-0.9e-10,
pores=BC3_pores)
g_super.append(np.ones(len(BC3_pores)) * 1.5e-12)
BC4_pores = np.arange(3, 7)
self.alg.set_boundary_conditions(bctype='Neumann_group',
bcvalue=0.1e-10,
pores=BC4_pores)
g_super.append(np.array([6.42e-13]))
self.alg.run(conductance='throat.cond',
quantity='pore.mole_fraction',
super_pore_conductance=g_super)
self.alg.return_results()
r1 = self.alg.rate(BC1_pores)[0]
r2 = self.alg.rate(BC2_pores)[0]
r3 = self.alg.rate(BC3_pores)[0]
r4 = self.alg.rate(BC4_pores)[0]
assert np.absolute(r1 + r2 + r3 + r4) < 1e-20
assert np.size(self.alg.super_pore_conductance[0]) == 1
assert np.size(self.alg.super_pore_conductance[1]) == 7
assert np.size(self.alg.super_pore_conductance[2]) == 1
def test_source_term_modes(self):
self.phys['pore.item1'] = 0.5e-12
self.phys['pore.item2'] = 2.5
self.phys['pore.item3'] = -1.4e-11
self.phys.models.add(propname='pore.A',
model=pm.generic_source_term.power_law,
A1='pore.item1',
A2='pore.item2',
A3='pore.item3',
x='mole_fraction',
return_rate=False,
regen_mode='on_demand')
self.phys.models.add(propname='pore.B',
model=pm.generic_source_term.linear,
A1='pore.item1',
A2='pore.item3',
x='mole_fraction',
return_rate=False,
regen_mode='on_demand')
S1_pores = np.arange(25, 35)
self.alg.set_source_term(source_name=['pore.A', 'pore.B'],
pores=S1_pores)
mask1 = ~np.isnan(self.alg['pore.source_nonlinear_s1_A'])
mask2 = ~np.isnan(self.alg['pore.source_nonlinear_s2_A'])
assert np.all(self.alg.Ps[mask1] == S1_pores)
assert np.all(self.alg.Ps[mask2] == S1_pores)
self.alg.set_source_term(source_name='pore.A',
pores=[26], x0=np.ones(self.phys.Np),
mode='update')
assert self.alg['pore.source_nonlinear_s1_A'][26] == 1.25e-12
S2_pores = np.array([30, 31])
self.alg.set_source_term(source_name='pore.A',
pores=S2_pores,
mode='overwrite')
mask1 = ~np.isnan(self.alg['pore.source_nonlinear_s1_A'])
assert np.all(self.alg.Ps[mask1] == S2_pores)
self.alg.set_source_term(source_name='pore.B',
pores=S1_pores,
mode='remove')
mask1 = np.isnan(self.alg['pore.source_nonlinear_s1_B'])
assert np.all(self.alg.Ps[mask1] == self.alg.Ps)
self.alg.set_source_term(source_name=['pore.A', 'pore.B'],
pores=self.alg.Ps,
mode='remove')
assert ('pore.source_B' in self.alg.labels())
assert ('pore.source_A' in self.alg.labels())
self.alg.set_source_term(source_name=['pore.A', 'pore.B'],
mode='remove')
assert ('pore.source_B' not in self.alg.labels())
assert ('pore.source_A' not in self.alg.labels())
| none | 1 | 2.206882 | 2 |
|
EC2 Auto Clean Room Forensics/Lambda-Functions/snapshotForRemediation.py | spartantri/aws-security-automation | 0 | 7913 | <filename>EC2 Auto Clean Room Forensics/Lambda-Functions/snapshotForRemediation.py
# MIT No Attribution
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import boto3
import os
def lambda_handler(event, context):
# TODO implement
print(event)
client = boto3.client('ec2')
instanceID = event.get('instanceID')
response = client.describe_instances(
InstanceIds=[
instanceID
]
)
volumeID = response['Reservations'][0]['Instances'][0]['BlockDeviceMappings'][0]['Ebs']['VolumeId']
print(volumeID)
SnapShotDetails = client.create_snapshot(
Description='Isolated Instance',
VolumeId=volumeID
)
client.create_tags(Resources=[SnapShotDetails['SnapshotId']], Tags=[{'Key': 'Name', 'Value': instanceID}])
# TODO Dump Response into S3 - response
# TODO Dump Response details into Snapshot - SnapShotDetails['SnapshotId']
print(response)
print(SnapShotDetails['SnapshotId'])
response = client.modify_instance_attribute(
Groups=[
os.environ['ISOLATED_SECUTRITYGROUP'],
],
InstanceId=instanceID
)
tagresponse = client.create_tags(
Resources=[
instanceID,
],
Tags=[
{
'Key': 'IsIsolated',
'Value': 'InstanceIsolated'
},
]
)
waiter = client.get_waiter('snapshot_completed')
waiter.wait(
SnapshotIds=[
SnapShotDetails['SnapshotId'],
]
)
# event['SnapshotId'] = SnapShotDetails['SnapshotId']
return SnapShotDetails['SnapshotId']
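# Example test event (hypothetical instance id) for exercising this handler:
#
#   { "instanceID": "i-0123456789abcdef0" }
#
# The function also requires the ISOLATED_SECUTRITYGROUP environment variable
# (spelling as used above) to contain the id of the quarantine security group.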
| <filename>EC2 Auto Clean Room Forensics/Lambda-Functions/snapshotForRemediation.py
# MIT No Attribution
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import boto3
import os
def lambda_handler(event, context):
# TODO implement
print(event)
client = boto3.client('ec2')
instanceID = event.get('instanceID')
response = client.describe_instances(
InstanceIds=[
instanceID
]
)
volumeID = response['Reservations'][0]['Instances'][0]['BlockDeviceMappings'][0]['Ebs']['VolumeId']
print(volumeID)
SnapShotDetails = client.create_snapshot(
Description='Isolated Instance',
VolumeId=volumeID
)
client.create_tags(Resources=[SnapShotDetails['SnapshotId']], Tags=[{'Key': 'Name', 'Value': instanceID}])
# TODO Dump Response into S3 - response
# TODO Dump Response details into Snapshot - SnapShotDetails['SnapshotId']
print(response)
print(SnapShotDetails['SnapshotId'])
response = client.modify_instance_attribute(
Groups=[
os.environ['ISOLATED_SECUTRITYGROUP'],
],
InstanceId=instanceID
)
tagresponse = client.create_tags(
Resources=[
instanceID,
],
Tags=[
{
'Key': 'IsIsolated',
'Value': 'InstanceIsolated'
},
]
)
waiter = client.get_waiter('snapshot_completed')
waiter.wait(
SnapshotIds=[
SnapShotDetails['SnapshotId'],
]
)
# event['SnapshotId'] = SnapShotDetails['SnapshotId']
return SnapShotDetails['SnapshotId']
| en | 0.728042 | # MIT No Attribution # Permission is hereby granted, free of charge, to any person obtaining a copy of this # software and associated documentation files (the "Software"), to deal in the Software # without restriction, including without limitation the rights to use, copy, modify, # merge, publish, distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A # PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # TODO implement # TODO Dump Response into S3 - response # TODO Dump Response details into Snapshot - SnapShotDetails['SnapshotId'] # event['SnapshotId'] = SnapShotDetails['SnapshotId'] | 2.239901 | 2 |
gpu_bdb/queries/q26/gpu_bdb_query_26.py | VibhuJawa/gpu-bdb | 62 | 7914 | <reponame>VibhuJawa/gpu-bdb<gh_stars>10-100
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
train_clustering_model,
run_query,
)
from bdb_tools.q26_utils import (
Q26_CATEGORY,
Q26_ITEM_COUNT,
N_CLUSTERS,
CLUSTER_ITERATIONS,
N_ITER,
read_tables
)
import numpy as np
from dask import delayed
def agg_count_distinct(df, group_key, counted_key):
"""Returns a Series that is the result of counting distinct instances of 'counted_key' within each 'group_key'.
The series' index will have one entry per unique 'group_key' value.
Workaround for lack of nunique aggregate function on Dask df.
"""
return (
df.drop_duplicates([group_key, counted_key])
.groupby(group_key)[counted_key]
.count()
)
def get_clusters(client, kmeans_input_df):
import dask_cudf
ml_tasks = [
delayed(train_clustering_model)(df, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER)
for df in kmeans_input_df.to_delayed()
]
results_dict = client.compute(*ml_tasks, sync=True)
output = kmeans_input_df.index.to_frame().reset_index(drop=True)
labels_final = dask_cudf.from_cudf(
results_dict["cid_labels"], npartitions=output.npartitions
)
output["label"] = labels_final.reset_index()[0]
# Sort based on CDH6.1 q26-result formatting
output = output.sort_values(["ss_customer_sk"])
# Based on CDH6.1 q26-result formatting
results_dict["cid_labels"] = output
return results_dict
def main(client, config):
import cudf
ss_ddf, items_ddf = benchmark(
read_tables,
config=config,
compute_result=config["get_read_time"],
)
items_filtered = items_ddf[items_ddf.i_category == Q26_CATEGORY].reset_index(
drop=True
)
items_filtered = items_filtered[["i_item_sk", "i_class_id"]]
f_ss_ddf = ss_ddf[ss_ddf["ss_customer_sk"].notnull()].reset_index(drop=True)
merged_ddf = f_ss_ddf.merge(
items_filtered, left_on="ss_item_sk", right_on="i_item_sk", how="inner"
)
keep_cols = ["ss_customer_sk", "i_class_id"]
merged_ddf = merged_ddf[keep_cols]
# One-Hot-Encode i_class_id
merged_ddf = merged_ddf.map_partitions(
cudf.get_dummies,
columns=["i_class_id"],
prefix="id",
cats={"i_class_id": np.arange(1, 16, dtype="int32")},
prefix_sep="",
dtype="float32",
)
merged_ddf["total"] = 1.0 # Will keep track of total count
all_categories = ["total"] + ["id%d" % i for i in range(1, 16)]
# Aggregate using agg to get sorted ss_customer_sk
agg_dict = dict.fromkeys(all_categories, "sum")
rollup_ddf = merged_ddf.groupby("ss_customer_sk").agg(agg_dict)
rollup_ddf = rollup_ddf[rollup_ddf.total > Q26_ITEM_COUNT][all_categories[1:]]
# Prepare data for KMeans clustering
rollup_ddf = rollup_ddf.astype("float64")
kmeans_input_df = rollup_ddf.persist()
results_dict = get_clusters(client=client, kmeans_input_df=kmeans_input_df)
return results_dict
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
| #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
train_clustering_model,
run_query,
)
from bdb_tools.q26_utils import (
Q26_CATEGORY,
Q26_ITEM_COUNT,
N_CLUSTERS,
CLUSTER_ITERATIONS,
N_ITER,
read_tables
)
import numpy as np
from dask import delayed
def agg_count_distinct(df, group_key, counted_key):
"""Returns a Series that is the result of counting distinct instances of 'counted_key' within each 'group_key'.
The series' index will have one entry per unique 'group_key' value.
Workaround for lack of nunique aggregate function on Dask df.
"""
return (
df.drop_duplicates([group_key, counted_key])
.groupby(group_key)[counted_key]
.count()
)
def get_clusters(client, kmeans_input_df):
import dask_cudf
ml_tasks = [
delayed(train_clustering_model)(df, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER)
for df in kmeans_input_df.to_delayed()
]
results_dict = client.compute(*ml_tasks, sync=True)
output = kmeans_input_df.index.to_frame().reset_index(drop=True)
labels_final = dask_cudf.from_cudf(
results_dict["cid_labels"], npartitions=output.npartitions
)
output["label"] = labels_final.reset_index()[0]
# Sort based on CDH6.1 q26-result formatting
output = output.sort_values(["ss_customer_sk"])
# Based on CDH6.1 q26-result formatting
results_dict["cid_labels"] = output
return results_dict
def main(client, config):
import cudf
ss_ddf, items_ddf = benchmark(
read_tables,
config=config,
compute_result=config["get_read_time"],
)
items_filtered = items_ddf[items_ddf.i_category == Q26_CATEGORY].reset_index(
drop=True
)
items_filtered = items_filtered[["i_item_sk", "i_class_id"]]
f_ss_ddf = ss_ddf[ss_ddf["ss_customer_sk"].notnull()].reset_index(drop=True)
merged_ddf = f_ss_ddf.merge(
items_filtered, left_on="ss_item_sk", right_on="i_item_sk", how="inner"
)
keep_cols = ["ss_customer_sk", "i_class_id"]
merged_ddf = merged_ddf[keep_cols]
# One-Hot-Encode i_class_id
merged_ddf = merged_ddf.map_partitions(
cudf.get_dummies,
columns=["i_class_id"],
prefix="id",
cats={"i_class_id": np.arange(1, 16, dtype="int32")},
prefix_sep="",
dtype="float32",
)
merged_ddf["total"] = 1.0 # Will keep track of total count
all_categories = ["total"] + ["id%d" % i for i in range(1, 16)]
# Aggregate using agg to get sorted ss_customer_sk
agg_dict = dict.fromkeys(all_categories, "sum")
rollup_ddf = merged_ddf.groupby("ss_customer_sk").agg(agg_dict)
rollup_ddf = rollup_ddf[rollup_ddf.total > Q26_ITEM_COUNT][all_categories[1:]]
# Prepare data for KMeans clustering
rollup_ddf = rollup_ddf.astype("float64")
kmeans_input_df = rollup_ddf.persist()
results_dict = get_clusters(client=client, kmeans_input_df=kmeans_input_df)
return results_dict
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main) | en | 0.827343 | # # Copyright (c) 2019-2022, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Returns a Series that is the result of counting distinct instances of 'counted_key' within each 'group_key'. The series' index will have one entry per unique 'group_key' value. Workaround for lack of nunique aggregate function on Dask df. # Sort based on CDH6.1 q26-result formatting # Based on CDH6.1 q26-result formatting # One-Hot-Encode i_class_id # Will keep track of total count # Aggregate using agg to get sorted ss_customer_sk # Prepare data for KMeans clustering | 2.366876 | 2 |
tests/test_intbounds.py | alex/optimizer-model | 4 | 7915 | from optimizer.utils.intbounds import IntBounds
class TestIntBounds(object):
def test_make_gt(self):
i0 = IntBounds()
i1 = i0.make_gt(IntBounds(10, 10))
assert i1.lower == 11
def test_make_gt_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_gt(IntBounds(10, 10)).make_gt(IntBounds(0, 0))
assert i1.lower == 11
def test_make_lt(self):
i0 = IntBounds()
i1 = i0.make_lt(IntBounds(10, 10))
assert i1.upper == 9
def test_make_lt_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_lt(IntBounds(0, 0)).make_lt(IntBounds(10, 10))
assert i1.upper == -1
def test_both_bounds(self):
i0 = IntBounds()
i1 = i0.make_lt(IntBounds(10, 10)).make_gt(IntBounds(0, 0))
assert i1.upper == 9
assert i1.lower == 1
i2 = i0.make_gt(IntBounds(0, 0)).make_lt(IntBounds(10, 10))
assert i2.lower == 1
assert i2.upper == 9
def test_make_le_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_le(IntBounds(0, 0)).make_le(IntBounds(2, 2))
assert i1.upper == 0
def test_make_ge_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_ge(IntBounds(10, 10)).make_ge(IntBounds(0, 0))
assert i1.lower == 10
| from optimizer.utils.intbounds import IntBounds
class TestIntBounds(object):
def test_make_gt(self):
i0 = IntBounds()
i1 = i0.make_gt(IntBounds(10, 10))
assert i1.lower == 11
def test_make_gt_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_gt(IntBounds(10, 10)).make_gt(IntBounds(0, 0))
assert i1.lower == 11
def test_make_lt(self):
i0 = IntBounds()
i1 = i0.make_lt(IntBounds(10, 10))
assert i1.upper == 9
def test_make_lt_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_lt(IntBounds(0, 0)).make_lt(IntBounds(10, 10))
assert i1.upper == -1
def test_both_bounds(self):
i0 = IntBounds()
i1 = i0.make_lt(IntBounds(10, 10)).make_gt(IntBounds(0, 0))
assert i1.upper == 9
assert i1.lower == 1
i2 = i0.make_gt(IntBounds(0, 0)).make_lt(IntBounds(10, 10))
assert i2.lower == 1
assert i2.upper == 9
def test_make_le_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_le(IntBounds(0, 0)).make_le(IntBounds(2, 2))
assert i1.upper == 0
def test_make_ge_already_bounded(self):
i0 = IntBounds()
i1 = i0.make_ge(IntBounds(10, 10)).make_ge(IntBounds(0, 0))
assert i1.lower == 10
| none | 1 | 3.084882 | 3 |
|
tdclient/test/database_model_test.py | minchuang/td-client-python | 2 | 7916 | <reponame>minchuang/td-client-python
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
try:
from unittest import mock
except ImportError:
import mock
from tdclient import models
from tdclient.test.test_helper import *
def setup_function(function):
unset_environ()
def test_database():
client = mock.MagicMock()
database = models.Database(client, "sample_datasets", tables=["nasdaq", "www_access"], count=12345, created_at="created_at", updated_at="updated_at", org_name="org_name", permission="administrator")
assert database.org_name == "org_name"
assert database.permission == "administrator"
assert database.count == 12345
assert database.name == "sample_datasets"
assert database.tables() == ["nasdaq", "www_access"]
assert database.created_at == "created_at"
assert database.updated_at == "updated_at"
def test_database_update_tables():
client = mock.MagicMock()
client.tables = mock.MagicMock(return_value=[
models.Table(client, "sample_datasets", "foo", "type", "schema", "count"),
models.Table(client, "sample_datasets", "bar", "type", "schema", "count"),
models.Table(client, "sample_datasets", "baz", "type", "schema", "count"),
])
database = models.Database(client, "sample_datasets", tables=None, count=12345, created_at="created_at", updated_at="updated_at", org_name="org_name", permission="administrator")
tables = database.tables()
assert [ table.name for table in tables ] == ["foo", "bar", "baz"]
client.tables.assert_called_with("sample_datasets")
| #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
try:
from unittest import mock
except ImportError:
import mock
from tdclient import models
from tdclient.test.test_helper import *
def setup_function(function):
unset_environ()
def test_database():
client = mock.MagicMock()
database = models.Database(client, "sample_datasets", tables=["nasdaq", "www_access"], count=12345, created_at="created_at", updated_at="updated_at", org_name="org_name", permission="administrator")
assert database.org_name == "org_name"
assert database.permission == "administrator"
assert database.count == 12345
assert database.name == "sample_datasets"
assert database.tables() == ["nasdaq", "www_access"]
assert database.created_at == "created_at"
assert database.updated_at == "updated_at"
def test_database_update_tables():
client = mock.MagicMock()
client.tables = mock.MagicMock(return_value=[
models.Table(client, "sample_datasets", "foo", "type", "schema", "count"),
models.Table(client, "sample_datasets", "bar", "type", "schema", "count"),
models.Table(client, "sample_datasets", "baz", "type", "schema", "count"),
])
database = models.Database(client, "sample_datasets", tables=None, count=12345, created_at="created_at", updated_at="updated_at", org_name="org_name", permission="administrator")
tables = database.tables()
assert [ table.name for table in tables ] == ["foo", "bar", "baz"]
client.tables.assert_called_with("sample_datasets") | ru | 0.26433 | #!/usr/bin/env python | 2.499113 | 2 |
setup.py | ballcap231/fireTS | 0 | 7917 | from setuptools import setup
dependencies = [
'numpy',
'scipy',
'scikit-learn',
]
setup(
name='fireTS',
version='0.0.7',
description='A python package for multi-variate time series prediction',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
url='https://github.com/jxx123/fireTS.git',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['fireTS'],
install_requires=dependencies,
include_package_data=True,
zip_safe=False)
| from setuptools import setup
dependencies = [
'numpy',
'scipy',
'scikit-learn',
]
setup(
name='fireTS',
version='0.0.7',
description='A python package for multi-variate time series prediction',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
url='https://github.com/jxx123/fireTS.git',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['fireTS'],
install_requires=dependencies,
include_package_data=True,
zip_safe=False)
| none | 1 | 1.2662 | 1 |
|
euler/py/project_019.py | heyihan/scodes | 0 | 7918 | <filename>euler/py/project_019.py
# https://projecteuler.net/problem=19
def is_leap(year):
if year%4 != 0:
return False
if year%100 == 0 and year%400 != 0:
return False
return True
def year_days(year):
if is_leap(year):
return 366
return 365
def month_days(month, year):
if month == 4 or month == 6 or month == 9 or month == 11:
return 30
if month == 2:
if is_leap(year):
return 29
return 28
return 31
day_19000101 = 1
days_1900 = year_days(1900)
day_next_day1 = (day_19000101 + days_1900)%7
print(day_19000101, days_1900, day_next_day1)
sum = 0
for i in range(1901, 2001):
for j in range(1, 13):
if day_next_day1 == 0:
print(i, j)
sum = sum + 1
days = month_days(j, i)
day_next_day1 = (day_next_day1 + days)%7
#print(i, j, days, day_next_day1)
print(sum)
| <filename>euler/py/project_019.py
# https://projecteuler.net/problem=19
def is_leap(year):
if year%4 != 0:
return False
if year%100 == 0 and year%400 != 0:
return False
return True
def year_days(year):
if is_leap(year):
return 366
return 365
def month_days(month, year):
if month == 4 or month == 6 or month == 9 or month == 11:
return 30
if month == 2:
if is_leap(year):
return 29
return 28
return 31
day_19000101 = 1
days_1900 = year_days(1900)
day_next_day1 = (day_19000101 + days_1900)%7
print(day_19000101, days_1900, day_next_day1)
sum = 0
for i in range(1901, 2001):
for j in range(1, 13):
if day_next_day1 == 0:
print(i, j)
sum = sum + 1
days = month_days(j, i)
day_next_day1 = (day_next_day1 + days)%7
#print(i, j, days, day_next_day1)
print(sum)
| en | 0.149129 | # https://projecteuler.net/problem=19 #print(i, j, days, day_next_day1) | 3.605942 | 4 |
address_book/address_book.py | wowsuchnamaste/address_book | 0 | 7919 | """A simple address book."""
from ._tools import generate_uuid
class AddressBook:
"""
A simple address book.
"""
def __init__(self):
self._entries = []
def add_entry(self, entry):
"""Add an entry to the address book."""
self._entries.append(entry)
def get_entries(self):
"""Returns a list of all entries in the address book.
:return: ``list`` of ``Person`` objects.
"""
return self._entries
def get_entry(self, name):
entry = [entry for entry in self._entries if entry.name == name]
return entry[0]
class Entry:
def __init__(
self,
name,
first_name=None,
last_name=None,
address=None,
phone_number=None,
email=None,
organization=None,
):
self._uuid = generate_uuid()
self.name = name
self.first_name = first_name
self.last_name = last_name
self._parse_name(name)
self.address = address
self.phone_number = phone_number
self.email = email
self.organization = organization
def __repr__(self):
return self.name
def _parse_name(self, name):
"""
Parse whatever is passed as ``name`` and update ``self.name`` from that.
:param name: A person's name as string or dictionary.
:return: The method doesn't return anything.
"""
if type(name) == dict:
self.first_name = name["first_name"]
self.last_name = name["last_name"]
self.name = self.first_name + " " + self.last_name
| """A simple address book."""
from ._tools import generate_uuid
class AddressBook:
"""
A simple address book.
"""
def __init__(self):
self._entries = []
def add_entry(self, entry):
"""Add an entry to the address book."""
self._entries.append(entry)
def get_entries(self):
"""Returns a list of all entries in the address book.
:return: ``list`` of ``Person`` objects.
"""
return self._entries
def get_entry(self, name):
entry = [entry for entry in self._entries if entry.name == name]
return entry[0]
class Entry:
def __init__(
self,
name,
first_name=None,
last_name=None,
address=None,
phone_number=None,
email=None,
organization=None,
):
self._uuid = generate_uuid()
self.name = name
self.first_name = first_name
self.last_name = last_name
self._parse_name(name)
self.address = address
self.phone_number = phone_number
self.email = email
self.organization = organization
def __repr__(self):
return self.name
def _parse_name(self, name):
"""
Parse whatever is passed as ``name`` and update ``self.name`` from that.
:param name: A person's name as string or dictionary.
:return: The method doesn't return anything.
"""
if type(name) == dict:
self.first_name = name["first_name"]
self.last_name = name["last_name"]
self.name = self.first_name + " " + self.last_name
| en | 0.922794 | A simple address book. A simple address book. Add an entry to the address book. Returns a list of all entries in the address book. :return: ``list`` of ``Person`` objects. Parse whatever is passed as ``name`` and update ``self.name`` from that. :param name: A person's name as string or dictionary. :return: The method doesn't return anything. | 3.717318 | 4 |
inference.py | zzhang87/ChestXray | 0 | 7920 | <filename>inference.py
import keras
import numpy as np
import pandas as pd
import cv2
import os
import json
import pdb
import argparse
import math
import copy
from vis.visualization import visualize_cam, overlay, visualize_activation
from vis.utils.utils import apply_modifications
from shutil import rmtree
import matplotlib.cm as cm
from matplotlib import pyplot as plt
from sklearn import metrics
import keras.backend as K
from keras import activations
from keras.applications.inception_v3 import preprocess_input as inception_pre
from keras.applications.mobilenet import preprocess_input as mobilenet_pre
from keras.applications.resnet50 import preprocess_input as resnet_pre
from keras.applications.densenet import preprocess_input as densenet_pre
from datagenerator import ImageDataGenerator
from utils import load_model
def getCAM(model, image):
# weights of the final fully-connected layer
weights = model.layers[-1].get_weights()[0]
# activation before the last global pooling
for layer in reversed(model.layers):
if len(layer.output_shape) > 2:
break
function = K.function([model.layers[0].input, K.learning_phase()], [layer.output])
activation = np.squeeze(function([image, 0])[0])
# weighted sum of the activation map
CAM = np.dot(activation, weights)
return CAM
def main():
ap = argparse.ArgumentParser()
ap.add_argument('--ckpt_path', help = 'Path to the model checkpoint.')
ap.add_argument('--image_path', help = 'Path to the image to run inference on.')
ap.add_argument('--bnbox', help = 'Path to the bounding box annotation, if applies.')
ap.add_argument('--threshold', default = 0.5, help = 'Threshold for displaying the Class Activation Map.')
args = ap.parse_args()
model_dir = os.path.dirname(args.ckpt_path)
with open(os.path.join(model_dir, 'label_map.json'), 'r') as f:
label_map = json.load(f)
num_class = len(list(label_map.keys()))
model, model_config = load_model(model_dir, args.ckpt_path)
model_name = model_config['model_name']
if model_name in ['inception']:
image_size = 299
else:
image_size = 224
preprocess_input = {
'inception': inception_pre,
'resnet': resnet_pre,
'mobilenet': mobilenet_pre,
'densenet': densenet_pre
}
if args.bnbox is not None:
annotation = pd.read_csv(args.bnbox)
image_index = os.path.basename(args.image_path)
indices = np.where(annotation['Image Index'] == image_index)[0]
bnbox = {}
for i in indices:
disease = annotation['Finding Label'][i]
x = int(annotation['Bbox [x'][i] + 0.5)
y = int(annotation['y'][i] + 0.5)
w = int(annotation['w'][i] + 0.5)
h = int(annotation['h]'][i] + 0.5)
bnbox[disease] = [x, y, x + w, y + h]
image = cv2.imread(args.image_path)
img = cv2.resize(image, (image_size, image_size))
img = preprocess_input[model_name](img.astype(np.float32))
img = np.expand_dims(img, axis = 0)
predictions = np.squeeze(model.predict(img))
CAM = getCAM(model, img)
cv2.namedWindow("ChestXray", cv2.WINDOW_NORMAL)
for key, value in label_map.items():
heatmap = CAM[:,:,int(key)]
heatmap -= heatmap.min()
heatmap *= 255.0 / heatmap.max()
heatmap[np.where(heatmap < args.threshold * 255)] *= 0.1
heatmap = cv2.applyColorMap(heatmap.astype(np.uint8), cv2.COLORMAP_JET)
heatmap = cv2.resize(heatmap, image.shape[:2], cv2.INTER_AREA)
overlay_img = overlay(heatmap, image, alpha = 0.4)
cv2.putText(overlay_img, "{}: {:.2%}".format(value, predictions[int(key)]),
(30,30), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255,255,255), 2)
if value in bnbox.keys():
box = bnbox[value]
cv2.rectangle(overlay_img, (box[0], box[1]), (box[2], box[3]),
color = (0, 180, 0), thickness = 2)
cv2.imshow("ChestXray", overlay_img)
cv2.waitKey()
plt.show()
print('{}: {:.2%}'.format(value, predictions[int(key)]))
cv2.destroyAllWindows()
if __name__ == "__main__":
main() | <filename>inference.py
import keras
import numpy as np
import pandas as pd
import cv2
import os
import json
import pdb
import argparse
import math
import copy
from vis.visualization import visualize_cam, overlay, visualize_activation
from vis.utils.utils import apply_modifications
from shutil import rmtree
import matplotlib.cm as cm
from matplotlib import pyplot as plt
from sklearn import metrics
import keras.backend as K
from keras import activations
from keras.applications.inception_v3 import preprocess_input as inception_pre
from keras.applications.mobilenet import preprocess_input as mobilenet_pre
from keras.applications.resnet50 import preprocess_input as resnet_pre
from keras.applications.densenet import preprocess_input as densenet_pre
from datagenerator import ImageDataGenerator
from utils import load_model
def getCAM(model, image):
# weights of the final fully-connected layer
weights = model.layers[-1].get_weights()[0]
# activation before the last global pooling
for layer in reversed(model.layers):
if len(layer.output_shape) > 2:
break
function = K.function([model.layers[0].input, K.learning_phase()], [layer.output])
activation = np.squeeze(function([image, 0])[0])
# weighted sum of the activation map
CAM = np.dot(activation, weights)
return CAM
def main():
ap = argparse.ArgumentParser()
ap.add_argument('--ckpt_path', help = 'Path to the model checkpoint.')
ap.add_argument('--image_path', help = 'Path to the image to run inference on.')
ap.add_argument('--bnbox', help = 'Path to the bounding box annotation, if applies.')
ap.add_argument('--threshold', default = 0.5, help = 'Threshold for displaying the Class Activation Map.')
args = ap.parse_args()
model_dir = os.path.dirname(args.ckpt_path)
with open(os.path.join(model_dir, 'label_map.json'), 'r') as f:
label_map = json.load(f)
num_class = len(list(label_map.keys()))
model, model_config = load_model(model_dir, args.ckpt_path)
model_name = model_config['model_name']
if model_name in ['inception']:
image_size = 299
else:
image_size = 224
preprocess_input = {
'inception': inception_pre,
'resnet': resnet_pre,
'mobilenet': mobilenet_pre,
'densenet': densenet_pre
}
if args.bnbox is not None:
annotation = pd.read_csv(args.bnbox)
image_index = os.path.basename(args.image_path)
indices = np.where(annotation['Image Index'] == image_index)[0]
bnbox = {}
for i in indices:
disease = annotation['Finding Label'][i]
x = int(annotation['Bbox [x'][i] + 0.5)
y = int(annotation['y'][i] + 0.5)
w = int(annotation['w'][i] + 0.5)
h = int(annotation['h]'][i] + 0.5)
bnbox[disease] = [x, y, x + w, y + h]
image = cv2.imread(args.image_path)
img = cv2.resize(image, (image_size, image_size))
img = preprocess_input[model_name](img.astype(np.float32))
img = np.expand_dims(img, axis = 0)
predictions = np.squeeze(model.predict(img))
CAM = getCAM(model, img)
cv2.namedWindow("ChestXray", cv2.WINDOW_NORMAL)
for key, value in label_map.items():
heatmap = CAM[:,:,int(key)]
heatmap -= heatmap.min()
heatmap *= 255.0 / heatmap.max()
heatmap[np.where(heatmap < args.threshold * 255)] *= 0.1
heatmap = cv2.applyColorMap(heatmap.astype(np.uint8), cv2.COLORMAP_JET)
heatmap = cv2.resize(heatmap, image.shape[:2], cv2.INTER_AREA)
overlay_img = overlay(heatmap, image, alpha = 0.4)
cv2.putText(overlay_img, "{}: {:.2%}".format(value, predictions[int(key)]),
(30,30), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255,255,255), 2)
if value in bnbox.keys():
box = bnbox[value]
cv2.rectangle(overlay_img, (box[0], box[1]), (box[2], box[3]),
color = (0, 180, 0), thickness = 2)
cv2.imshow("ChestXray", overlay_img)
cv2.waitKey()
plt.show()
print('{}: {:.2%}'.format(value, predictions[int(key)]))
cv2.destroyAllWindows()
if __name__ == "__main__":
main() | en | 0.775805 | # weights of the final fully-connected layer # activation before the last global pooling # weighted sum of the activation map | 2.38967 | 2 |
test/DQueueTest.py | MistSun-Chen/py_verifier | 0 | 7921 | from libTask import Queue
from common import configParams
from common import common
def main():
cp = configParams.ConfigParams("config.json")
detectGeneralQueue = Queue.DQueue(cp, len(cp.detect_general_ids), cp.modelPath, common.GENERALDETECT_METHOD_ID,
cp.GPUDevices, cp.detect_general_ids)
print("Run Into Next step")
smokeQueue = Queue.DQueue(cp, len(cp.smoke_ids), cp.modelPath, common.PEOPLESMOKE_METHOD_ID,cp.GPUDevices, cp.smoke_ids)
if __name__ == '__main__':
main() | from libTask import Queue
from common import configParams
from common import common
def main():
cp = configParams.ConfigParams("config.json")
detectGeneralQueue = Queue.DQueue(cp, len(cp.detect_general_ids), cp.modelPath, common.GENERALDETECT_METHOD_ID,
cp.GPUDevices, cp.detect_general_ids)
print("Run Into Next step")
smokeQueue = Queue.DQueue(cp, len(cp.smoke_ids), cp.modelPath, common.PEOPLESMOKE_METHOD_ID,cp.GPUDevices, cp.smoke_ids)
if __name__ == '__main__':
main() | none | 1 | 1.978631 | 2 |
|
config.py | volgachen/Chinese-Tokenization | 0 | 7922 | class Config:
ngram = 2
train_set = "data/rmrb.txt"
modified_train_set = "data/rmrb_modified.txt"
test_set = ""
model_file = ""
param_file = ""
word_max_len = 10
proposals_keep_ratio = 1.0
use_re = 1
subseq_num = 15 | class Config:
ngram = 2
train_set = "data/rmrb.txt"
modified_train_set = "data/rmrb_modified.txt"
test_set = ""
model_file = ""
param_file = ""
word_max_len = 10
proposals_keep_ratio = 1.0
use_re = 1
subseq_num = 15 | none | 1 | 1.774057 | 2 |
|
src/Knn-Tensor.py | python-itb/knn-from-scratch | 0 | 7923 | <reponame>python-itb/knn-from-scratch
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 13 18:52:28 2018
@author: amajidsinar
"""
from sklearn import datasets
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('seaborn-white')
iris = datasets.load_iris()
dataset = iris.data
# only take 0th and 1th column for X
data_known = iris.data[:,:2]
# y
label_known = iris.target
# the hard part
# so matplotlib does not readily support labeling based on class
# but we know that one of the feature of plt is that a plt call would give those set of number
# the same color
category = np.unique(label_known)
for i in category:
plt.scatter(data_known[label_known==i][:,0],data_known[label_known==i][:,1],label=i)
# Unknown class of a data
data_unknown = np.array([[5.7,3.3],[5.6,3.4],[6.4,3],[8.2,2.2]])
plt.scatter(data_unknown[:,0],data_unknown[:,1], label='?')
plt.legend()
#-------------
# Euclidean Distance
diff = data_known - data_unknown.reshape(data_unknown.shape[0],1,data_unknown.shape[1])
distance = (diff**2).sum(2)
#return sorted index of distance
dist_index = np.argsort(distance)
label = label_known[dist_index]
#for k in [1,2,3,4,5,6,7,8,9,10]:
#keep the rank
k = 10
label = label[:,:k]
label_predict = []
for i in range(data_unknown.shape[0]):
values,counts = np.unique(label[i], return_counts=True)
ind = np.argmax(counts)
label_predict.append(values[ind])
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 13 18:52:28 2018
@author: amajidsinar
"""
from sklearn import datasets
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('seaborn-white')
iris = datasets.load_iris()
dataset = iris.data
# only take 0th and 1th column for X
data_known = iris.data[:,:2]
# y
label_known = iris.target
# the hard part
# so matplotlib does not readily support labeling based on class
# but we know that one of the feature of plt is that a plt call would give those set of number
# the same color
category = np.unique(label_known)
for i in category:
plt.scatter(data_known[label_known==i][:,0],data_known[label_known==i][:,1],label=i)
# Unknown class of a data
data_unknown = np.array([[5.7,3.3],[5.6,3.4],[6.4,3],[8.2,2.2]])
plt.scatter(data_unknown[:,0],data_unknown[:,1], label='?')
plt.legend()
#-------------
# Euclidean Distance
diff = data_known - data_unknown.reshape(data_unknown.shape[0],1,data_unknown.shape[1])
distance = (diff**2).sum(2)
#return sorted index of distance
dist_index = np.argsort(distance)
label = label_known[dist_index]
#for k in [1,2,3,4,5,6,7,8,9,10]:
#keep the rank
k = 10
label = label[:,:k]
label_predict = []
for i in range(data_unknown.shape[0]):
values,counts = np.unique(label[i], return_counts=True)
ind = np.argmax(counts)
label_predict.append(values[ind]) | en | 0.855187 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Tue Feb 13 18:52:28 2018 @author: amajidsinar # only take 0th and 1th column for X # y # the hard part # so matplotlib does not readily support labeling based on class # but we know that one of the feature of plt is that a plt call would give those set of number # the same color # Unknown class of a data #------------- # Euclidean Distance #return sorted index of distance #for k in [1,2,3,4,5,6,7,8,9,10]: #keep the rank | 3.263841 | 3 |
de_test_tron2.py | volpepe/detectron2-ResNeSt | 0 | 7924 | import torch, torchvision
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
import argparse, time
def parse_args():
p = argparse.ArgumentParser()
p.add_argument("-i", "--image", type=str, help="Path to image to segment")
p.add_argument("-m", "--model", type=str, help="Model to use", default="COCO-InstanceSegmentation/mask_cascade_rcnn_ResNeSt_200_FPN_syncBN_all_tricks_3x.yaml")
p.add_argument("-t", "--threshold", type=float, help="Threshold for model detections", default=0.4)
p.add_argument("-rs", "--use_resnest", type=bool, help="Whether the selected model uses ResNeSt backbone or no", default=True)
return p.parse_args()
def start_segment(args):
img = args.image
model = args.model
thresh = args.threshold
use_resnest = args.use_resnest
im = cv2.imread(img)
# get default cfg file
cfg = get_cfg()
# replace cfg from specific model yaml file
cfg.merge_from_file(model_zoo.get_config_file(model))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = thresh # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model, resnest=use_resnest)
predictor = DefaultPredictor(cfg)
start = time.time()
outputs = predictor(im)
print("Time eplased: {}".format(time.time() - start))
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2) #rgb image (::-1)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2.imwrite("output.jpg", out.get_image()[:, :, ::-1])
if __name__ == "__main__":
args = parse_args()
start_segment(args) | import torch, torchvision
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
import argparse, time
def parse_args():
p = argparse.ArgumentParser()
p.add_argument("-i", "--image", type=str, help="Path to image to segment")
p.add_argument("-m", "--model", type=str, help="Model to use", default="COCO-InstanceSegmentation/mask_cascade_rcnn_ResNeSt_200_FPN_syncBN_all_tricks_3x.yaml")
p.add_argument("-t", "--threshold", type=float, help="Threshold for model detections", default=0.4)
p.add_argument("-rs", "--use_resnest", type=bool, help="Whether the selected model uses ResNeSt backbone or no", default=True)
return p.parse_args()
def start_segment(args):
img = args.image
model = args.model
thresh = args.threshold
use_resnest = args.use_resnest
im = cv2.imread(img)
# get default cfg file
cfg = get_cfg()
# replace cfg from specific model yaml file
cfg.merge_from_file(model_zoo.get_config_file(model))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = thresh # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model, resnest=use_resnest)
predictor = DefaultPredictor(cfg)
start = time.time()
outputs = predictor(im)
print("Time eplased: {}".format(time.time() - start))
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2) #rgb image (::-1)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2.imwrite("output.jpg", out.get_image()[:, :, ::-1])
if __name__ == "__main__":
args = parse_args()
start_segment(args) | en | 0.765922 | # import some common libraries # import some common detectron2 utilities # get default cfg file # replace cfg from specific model yaml file # set threshold for this model # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well #rgb image (::-1) | 2.291868 | 2 |
pika/data.py | Pankrat/pika | 0 | 7925 | """AMQP Table Encoding/Decoding"""
import struct
import decimal
import calendar
from datetime import datetime
from pika import exceptions
from pika.compat import unicode_type, PY2, long, as_bytes
def encode_short_string(pieces, value):
"""Encode a string value as short string and append it to pieces list
returning the size of the encoded value.
:param list pieces: Already encoded values
:param value: String value to encode
:type value: str or unicode
:rtype: int
"""
encoded_value = as_bytes(value)
length = len(encoded_value)
# 4.2.5.3
# Short strings, stored as an 8-bit unsigned integer length followed by zero
# or more octets of data. Short strings can carry up to 255 octets of UTF-8
# data, but may not contain binary zero octets.
# ...
# 4.2.5.5
# The server SHOULD validate field names and upon receiving an invalid field
# name, it SHOULD signal a connection exception with reply code 503 (syntax
# error).
# -> validate length (avoid truncated utf-8 / corrupted data), but skip null
# byte check.
if length > 255:
raise exceptions.ShortStringTooLong(encoded_value)
pieces.append(struct.pack('B', length))
pieces.append(encoded_value)
return 1 + length
if PY2:
def decode_short_string(encoded, offset):
"""Decode a short string value from ``encoded`` data at ``offset``.
"""
length = struct.unpack_from('B', encoded, offset)[0]
offset += 1
# Purely for compatibility with original python2 code. No idea what
# and why this does.
value = encoded[offset:offset + length]
try:
value = bytes(value)
except UnicodeEncodeError:
pass
offset += length
return value, offset
else:
def decode_short_string(encoded, offset):
"""Decode a short string value from ``encoded`` data at ``offset``.
"""
length = struct.unpack_from('B', encoded, offset)[0]
offset += 1
value = encoded[offset:offset + length].decode('utf8')
offset += length
return value, offset
def encode_table(pieces, table):
"""Encode a dict as an AMQP table appending the encded table to the
pieces list passed in.
:param list pieces: Already encoded frame pieces
:param dict table: The dict to encode
:rtype: int
"""
table = table or {}
length_index = len(pieces)
pieces.append(None) # placeholder
tablesize = 0
for (key, value) in table.items():
tablesize += encode_short_string(pieces, key)
tablesize += encode_value(pieces, value)
pieces[length_index] = struct.pack('>I', tablesize)
return tablesize + 4
def encode_value(pieces, value):
"""Encode the value passed in and append it to the pieces list returning
the the size of the encoded value.
:param list pieces: Already encoded values
:param any value: The value to encode
:rtype: int
"""
if PY2:
if isinstance(value, basestring):
if isinstance(value, unicode_type):
value = value.encode('utf-8')
pieces.append(struct.pack('>cI', b'S', len(value)))
pieces.append(value)
return 5 + len(value)
else:
# support only str on Python 3
if isinstance(value, str):
value = value.encode('utf-8')
pieces.append(struct.pack('>cI', b'S', len(value)))
pieces.append(value)
return 5 + len(value)
if isinstance(value, bool):
pieces.append(struct.pack('>cB', b't', int(value)))
return 2
if isinstance(value, long):
pieces.append(struct.pack('>cq', b'l', value))
return 9
elif isinstance(value, int):
pieces.append(struct.pack('>ci', b'I', value))
return 5
elif isinstance(value, decimal.Decimal):
value = value.normalize()
if value.as_tuple().exponent < 0:
decimals = -value.as_tuple().exponent
raw = int(value * (decimal.Decimal(10) ** decimals))
pieces.append(struct.pack('>cBi', b'D', decimals, raw))
else:
# per spec, the "decimals" octet is unsigned (!)
pieces.append(struct.pack('>cBi', b'D', 0, int(value)))
return 6
elif isinstance(value, datetime):
pieces.append(struct.pack('>cQ', b'T',
calendar.timegm(value.utctimetuple())))
return 9
elif isinstance(value, dict):
pieces.append(struct.pack('>c', b'F'))
return 1 + encode_table(pieces, value)
elif isinstance(value, list):
p = []
for v in value:
encode_value(p, v)
piece = b''.join(p)
pieces.append(struct.pack('>cI', b'A', len(piece)))
pieces.append(piece)
return 5 + len(piece)
elif value is None:
pieces.append(struct.pack('>c', b'V'))
return 1
else:
raise exceptions.UnsupportedAMQPFieldException(pieces, value)
def decode_table(encoded, offset):
"""Decode the AMQP table passed in from the encoded value returning the
decoded result and the number of bytes read plus the offset.
:param str encoded: The binary encoded data to decode
:param int offset: The starting byte offset
:rtype: tuple
"""
result = {}
tablesize = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
limit = offset + tablesize
while offset < limit:
key, offset = decode_short_string(encoded, offset)
value, offset = decode_value(encoded, offset)
result[key] = value
return result, offset
def decode_value(encoded, offset):
"""Decode the value passed in returning the decoded value and the number
of bytes read in addition to the starting offset.
:param str encoded: The binary encoded data to decode
:param int offset: The starting byte offset
:rtype: tuple
:raises: pika.exceptions.InvalidFieldTypeException
"""
# slice to get bytes in Python 3 and str in Python 2
kind = encoded[offset:offset + 1]
offset += 1
# Bool
if kind == b't':
value = struct.unpack_from('>B', encoded, offset)[0]
value = bool(value)
offset += 1
# Short-Short Int
elif kind == b'b':
value = struct.unpack_from('>B', encoded, offset)[0]
offset += 1
# Short-Short Unsigned Int
elif kind == b'B':
value = struct.unpack_from('>b', encoded, offset)[0]
offset += 1
# Short Int
elif kind == b'U':
value = struct.unpack_from('>h', encoded, offset)[0]
offset += 2
# Short Unsigned Int
elif kind == b'u':
value = struct.unpack_from('>H', encoded, offset)[0]
offset += 2
# Long Int
elif kind == b'I':
value = struct.unpack_from('>i', encoded, offset)[0]
offset += 4
# Long Unsigned Int
elif kind == b'i':
value = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
# Long-Long Int
elif kind == b'L':
value = long(struct.unpack_from('>q', encoded, offset)[0])
offset += 8
# Long-Long Unsigned Int
elif kind == b'l':
value = long(struct.unpack_from('>Q', encoded, offset)[0])
offset += 8
# Float
elif kind == b'f':
value = long(struct.unpack_from('>f', encoded, offset)[0])
offset += 4
# Double
elif kind == b'd':
value = long(struct.unpack_from('>d', encoded, offset)[0])
offset += 8
# Decimal
elif kind == b'D':
decimals = struct.unpack_from('B', encoded, offset)[0]
offset += 1
raw = struct.unpack_from('>i', encoded, offset)[0]
offset += 4
value = decimal.Decimal(raw) * (decimal.Decimal(10) ** -decimals)
# Short String
elif kind == b's':
value, offset = decode_short_string(encoded, offset)
# Long String
elif kind == b'S':
length = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
value = encoded[offset:offset + length].decode('utf8')
offset += length
# Field Array
elif kind == b'A':
length = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
offset_end = offset + length
value = []
while offset < offset_end:
v, offset = decode_value(encoded, offset)
value.append(v)
# Timestamp
elif kind == b'T':
value = datetime.utcfromtimestamp(struct.unpack_from('>Q', encoded,
offset)[0])
offset += 8
# Field Table
elif kind == b'F':
(value, offset) = decode_table(encoded, offset)
# Null / Void
elif kind == b'V':
value = None
else:
raise exceptions.InvalidFieldTypeException(kind)
return value, offset
| """AMQP Table Encoding/Decoding"""
import struct
import decimal
import calendar
from datetime import datetime
from pika import exceptions
from pika.compat import unicode_type, PY2, long, as_bytes
def encode_short_string(pieces, value):
"""Encode a string value as short string and append it to pieces list
returning the size of the encoded value.
:param list pieces: Already encoded values
:param value: String value to encode
:type value: str or unicode
:rtype: int
"""
encoded_value = as_bytes(value)
length = len(encoded_value)
# 4.2.5.3
# Short strings, stored as an 8-bit unsigned integer length followed by zero
# or more octets of data. Short strings can carry up to 255 octets of UTF-8
# data, but may not contain binary zero octets.
# ...
# 4.2.5.5
# The server SHOULD validate field names and upon receiving an invalid field
# name, it SHOULD signal a connection exception with reply code 503 (syntax
# error).
# -> validate length (avoid truncated utf-8 / corrupted data), but skip null
# byte check.
if length > 255:
raise exceptions.ShortStringTooLong(encoded_value)
pieces.append(struct.pack('B', length))
pieces.append(encoded_value)
return 1 + length
if PY2:
def decode_short_string(encoded, offset):
"""Decode a short string value from ``encoded`` data at ``offset``.
"""
length = struct.unpack_from('B', encoded, offset)[0]
offset += 1
# Purely for compatibility with original python2 code. No idea what
# and why this does.
value = encoded[offset:offset + length]
try:
value = bytes(value)
except UnicodeEncodeError:
pass
offset += length
return value, offset
else:
def decode_short_string(encoded, offset):
"""Decode a short string value from ``encoded`` data at ``offset``.
"""
length = struct.unpack_from('B', encoded, offset)[0]
offset += 1
value = encoded[offset:offset + length].decode('utf8')
offset += length
return value, offset
def encode_table(pieces, table):
"""Encode a dict as an AMQP table appending the encded table to the
pieces list passed in.
:param list pieces: Already encoded frame pieces
:param dict table: The dict to encode
:rtype: int
"""
table = table or {}
length_index = len(pieces)
pieces.append(None) # placeholder
tablesize = 0
for (key, value) in table.items():
tablesize += encode_short_string(pieces, key)
tablesize += encode_value(pieces, value)
pieces[length_index] = struct.pack('>I', tablesize)
return tablesize + 4
def encode_value(pieces, value):
"""Encode the value passed in and append it to the pieces list returning
the the size of the encoded value.
:param list pieces: Already encoded values
:param any value: The value to encode
:rtype: int
"""
if PY2:
if isinstance(value, basestring):
if isinstance(value, unicode_type):
value = value.encode('utf-8')
pieces.append(struct.pack('>cI', b'S', len(value)))
pieces.append(value)
return 5 + len(value)
else:
# support only str on Python 3
if isinstance(value, str):
value = value.encode('utf-8')
pieces.append(struct.pack('>cI', b'S', len(value)))
pieces.append(value)
return 5 + len(value)
if isinstance(value, bool):
pieces.append(struct.pack('>cB', b't', int(value)))
return 2
if isinstance(value, long):
pieces.append(struct.pack('>cq', b'l', value))
return 9
elif isinstance(value, int):
pieces.append(struct.pack('>ci', b'I', value))
return 5
elif isinstance(value, decimal.Decimal):
value = value.normalize()
if value.as_tuple().exponent < 0:
decimals = -value.as_tuple().exponent
raw = int(value * (decimal.Decimal(10) ** decimals))
pieces.append(struct.pack('>cBi', b'D', decimals, raw))
else:
# per spec, the "decimals" octet is unsigned (!)
pieces.append(struct.pack('>cBi', b'D', 0, int(value)))
return 6
elif isinstance(value, datetime):
pieces.append(struct.pack('>cQ', b'T',
calendar.timegm(value.utctimetuple())))
return 9
elif isinstance(value, dict):
pieces.append(struct.pack('>c', b'F'))
return 1 + encode_table(pieces, value)
elif isinstance(value, list):
p = []
for v in value:
encode_value(p, v)
piece = b''.join(p)
pieces.append(struct.pack('>cI', b'A', len(piece)))
pieces.append(piece)
return 5 + len(piece)
elif value is None:
pieces.append(struct.pack('>c', b'V'))
return 1
else:
raise exceptions.UnsupportedAMQPFieldException(pieces, value)
def decode_table(encoded, offset):
"""Decode the AMQP table passed in from the encoded value returning the
decoded result and the number of bytes read plus the offset.
:param str encoded: The binary encoded data to decode
:param int offset: The starting byte offset
:rtype: tuple
"""
result = {}
tablesize = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
limit = offset + tablesize
while offset < limit:
key, offset = decode_short_string(encoded, offset)
value, offset = decode_value(encoded, offset)
result[key] = value
return result, offset
def decode_value(encoded, offset):
"""Decode the value passed in returning the decoded value and the number
of bytes read in addition to the starting offset.
:param str encoded: The binary encoded data to decode
:param int offset: The starting byte offset
:rtype: tuple
:raises: pika.exceptions.InvalidFieldTypeException
"""
# slice to get bytes in Python 3 and str in Python 2
kind = encoded[offset:offset + 1]
offset += 1
# Bool
if kind == b't':
value = struct.unpack_from('>B', encoded, offset)[0]
value = bool(value)
offset += 1
# Short-Short Int
elif kind == b'b':
value = struct.unpack_from('>B', encoded, offset)[0]
offset += 1
# Short-Short Unsigned Int
elif kind == b'B':
value = struct.unpack_from('>b', encoded, offset)[0]
offset += 1
# Short Int
elif kind == b'U':
value = struct.unpack_from('>h', encoded, offset)[0]
offset += 2
# Short Unsigned Int
elif kind == b'u':
value = struct.unpack_from('>H', encoded, offset)[0]
offset += 2
# Long Int
elif kind == b'I':
value = struct.unpack_from('>i', encoded, offset)[0]
offset += 4
# Long Unsigned Int
elif kind == b'i':
value = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
# Long-Long Int
elif kind == b'L':
value = long(struct.unpack_from('>q', encoded, offset)[0])
offset += 8
# Long-Long Unsigned Int
elif kind == b'l':
value = long(struct.unpack_from('>Q', encoded, offset)[0])
offset += 8
# Float
elif kind == b'f':
value = long(struct.unpack_from('>f', encoded, offset)[0])
offset += 4
# Double
elif kind == b'd':
value = long(struct.unpack_from('>d', encoded, offset)[0])
offset += 8
# Decimal
elif kind == b'D':
decimals = struct.unpack_from('B', encoded, offset)[0]
offset += 1
raw = struct.unpack_from('>i', encoded, offset)[0]
offset += 4
value = decimal.Decimal(raw) * (decimal.Decimal(10) ** -decimals)
# Short String
elif kind == b's':
value, offset = decode_short_string(encoded, offset)
# Long String
elif kind == b'S':
length = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
value = encoded[offset:offset + length].decode('utf8')
offset += length
# Field Array
elif kind == b'A':
length = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
offset_end = offset + length
value = []
while offset < offset_end:
v, offset = decode_value(encoded, offset)
value.append(v)
# Timestamp
elif kind == b'T':
value = datetime.utcfromtimestamp(struct.unpack_from('>Q', encoded,
offset)[0])
offset += 8
# Field Table
elif kind == b'F':
(value, offset) = decode_table(encoded, offset)
# Null / Void
elif kind == b'V':
value = None
else:
raise exceptions.InvalidFieldTypeException(kind)
return value, offset
| en | 0.67811 | AMQP Table Encoding/Decoding Encode a string value as short string and append it to pieces list returning the size of the encoded value. :param list pieces: Already encoded values :param value: String value to encode :type value: str or unicode :rtype: int # 4.2.5.3 # Short strings, stored as an 8-bit unsigned integer length followed by zero # or more octets of data. Short strings can carry up to 255 octets of UTF-8 # data, but may not contain binary zero octets. # ... # 4.2.5.5 # The server SHOULD validate field names and upon receiving an invalid field # name, it SHOULD signal a connection exception with reply code 503 (syntax # error). # -> validate length (avoid truncated utf-8 / corrupted data), but skip null # byte check. Decode a short string value from ``encoded`` data at ``offset``. # Purely for compatibility with original python2 code. No idea what # and why this does. Decode a short string value from ``encoded`` data at ``offset``. Encode a dict as an AMQP table appending the encded table to the pieces list passed in. :param list pieces: Already encoded frame pieces :param dict table: The dict to encode :rtype: int # placeholder Encode the value passed in and append it to the pieces list returning the the size of the encoded value. :param list pieces: Already encoded values :param any value: The value to encode :rtype: int # support only str on Python 3 # per spec, the "decimals" octet is unsigned (!) Decode the AMQP table passed in from the encoded value returning the decoded result and the number of bytes read plus the offset. :param str encoded: The binary encoded data to decode :param int offset: The starting byte offset :rtype: tuple Decode the value passed in returning the decoded value and the number of bytes read in addition to the starting offset. :param str encoded: The binary encoded data to decode :param int offset: The starting byte offset :rtype: tuple :raises: pika.exceptions.InvalidFieldTypeException # slice to get bytes in Python 3 and str in Python 2 # Bool # Short-Short Int # Short-Short Unsigned Int # Short Int # Short Unsigned Int # Long Int # Long Unsigned Int # Long-Long Int # Long-Long Unsigned Int # Float # Double # Decimal # Short String # Long String # Field Array # Timestamp # Field Table # Null / Void | 2.773486 | 3 |
tests/fixtures/data_sets/service/dummy/dummy_configurable.py | Agi-dev/pylaas_core | 0 | 7926 | from pylaas_core.abstract.abstract_service import AbstractService
import time
from pylaas_core.interface.technical.container_configurable_aware_interface import ContainerConfigurableAwareInterface
class DummyConfigurable(AbstractService, ContainerConfigurableAwareInterface):
def __init__(self) -> None:
super().__init__()
self._microtime = int(round(time.time() * 1000))
self._configs = None
def set_configs(self, configurations):
self._configs = configurations
return self
| from pylaas_core.abstract.abstract_service import AbstractService
import time
from pylaas_core.interface.technical.container_configurable_aware_interface import ContainerConfigurableAwareInterface
class DummyConfigurable(AbstractService, ContainerConfigurableAwareInterface):
def __init__(self) -> None:
super().__init__()
self._microtime = int(round(time.time() * 1000))
self._configs = None
def set_configs(self, configurations):
self._configs = configurations
return self
| none | 1 | 2.365911 | 2 |
|
blogtech/src/blog/views.py | IVAN-URBACZKA/django-blog | 0 | 7927 | from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.views.generic import ListView, DetailView, CreateView, DeleteView, UpdateView
from .models import BlogPost
from django.contrib.auth.decorators import login_required
class BlogPostHomeView(ListView):
model = BlogPost
context_object_name = "posts"
class BlogPostDetailsView(DetailView):
model = BlogPost
context_object_name = "post"
@method_decorator(login_required, name='dispatch')
class BlogPostCreateView(CreateView):
model = BlogPost
fields = ['title', 'image','author', 'category', 'content']
def get_success_url(self):
return reverse('posts:home')
@method_decorator(login_required, name='dispatch')
class BlogPostUpdateView(UpdateView):
model = BlogPost
fields = ['title', 'author', 'category', 'content']
template_name = 'blog/blogpost_update.html'
@method_decorator(login_required, name='dispatch')
class BlogPostDeleteView(DeleteView):
model = BlogPost
success_url = reverse_lazy('posts:home') | from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.views.generic import ListView, DetailView, CreateView, DeleteView, UpdateView
from .models import BlogPost
from django.contrib.auth.decorators import login_required
class BlogPostHomeView(ListView):
model = BlogPost
context_object_name = "posts"
class BlogPostDetailsView(DetailView):
model = BlogPost
context_object_name = "post"
@method_decorator(login_required, name='dispatch')
class BlogPostCreateView(CreateView):
model = BlogPost
fields = ['title', 'image','author', 'category', 'content']
def get_success_url(self):
return reverse('posts:home')
@method_decorator(login_required, name='dispatch')
class BlogPostUpdateView(UpdateView):
model = BlogPost
fields = ['title', 'author', 'category', 'content']
template_name = 'blog/blogpost_update.html'
@method_decorator(login_required, name='dispatch')
class BlogPostDeleteView(DeleteView):
model = BlogPost
success_url = reverse_lazy('posts:home') | none | 1 | 2.19136 | 2 |
|
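A hypothetical urls.py showing how the class-based views above could be routed; the "posts" app namespace and the "home" route name are inferred from the reverse('posts:home') / reverse_lazy('posts:home') calls, everything else is an assumption:

from django.urls import path
from . import views

app_name = "posts"

urlpatterns = [
    path("", views.BlogPostHomeView.as_view(), name="home"),
    path("<int:pk>/", views.BlogPostDetailsView.as_view(), name="detail"),
    path("create/", views.BlogPostCreateView.as_view(), name="create"),
    path("<int:pk>/update/", views.BlogPostUpdateView.as_view(), name="update"),
    path("<int:pk>/delete/", views.BlogPostDeleteView.as_view(), name="delete"),
]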
apc_deep_vision/python/generate_data.py | Juxi/apb-baseline | 9 | 7928 | #! /usr/bin/env python
# ********************************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2015, University of Colorado, Boulder
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the University of Colorado Boulder
# nor the names of its contributors may be
# used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ********************************************************************/
import cv2
import os
import numpy as np
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("proposal_path", type=str,
help="relative path from python script to proposals, no slash")
parser.add_argument("--view", default=None,
help="true/1 shows each masked image")
args = parser.parse_args()
# args.proposal_path = "../test_proposals"
# args.proposal_path = args.proposal_path
included_extenstions = ['txt']
image_names = [fn[0:len(fn)-4] for fn in os.listdir(args.proposal_path)
if any(fn.endswith(ext) for ext in included_extenstions)]
for image_name in image_names:
load_path = args.proposal_path + '/' + image_name
image = cv2.imread(load_path + ".jpeg")
data = np.loadtxt(load_path + ".txt", str)
# If there is only one line, force data to be a list of lists anyway
# Note, only works for our data as first list item is a string
if isinstance(data[0], basestring):
data = [data]
# If any line does not conform to classification tl_x tl_y br_x br_y
# then forget about it
skip = False
for line in data:
if len(line) < 5:
skip = True
if skip:
continue
for i, proposal in zip(range(0,len(data)),data):
mask = cv2.imread(load_path + '_mask{0:04d}.jpeg'.format(i))
mask = np.invert(mask)
maskGray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
ret, maskGray = cv2.threshold(maskGray,128,255,cv2.THRESH_BINARY)
print load_path + '_mask{0:04d}.jpeg'.format(i)
cropped = image[float(proposal[2]):float(proposal[4]), float(proposal[1]):float(proposal[3])]
masked = cv2.bitwise_and(cropped, cropped, mask = maskGray)
if args.view:
cv2.imshow("original", masked)
cv2.waitKey(0)
mask_directory = args.proposal_path + '/masked/' + proposal[0];
crop_directory = args.proposal_path + '/cropped/' + proposal[0];
if not os.path.exists(mask_directory):
os.makedirs(mask_directory)
if not os.path.exists(crop_directory):
os.makedirs(crop_directory)
cv2.imwrite(mask_directory + '/{}_{}.jpeg'.format(image_name,i), masked)
cv2.imwrite(crop_directory + '/{}_{}.jpeg'.format(image_name,i), cropped)
# item = data[]
# cropped = image[70:170, 440:540]
# startY:endY, startX:endX
# startX:startY, endX:endY
#
| #! /usr/bin/env python
# ********************************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2015, University of Colorado, Boulder
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the University of Colorado Boulder
# nor the names of its contributors may be
# used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ********************************************************************/
import cv2
import os
import numpy as np
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("proposal_path", type=str,
help="relative path from python script to proposals, no slash")
parser.add_argument("--view", default=None,
help="true/1 shows each masked image")
args = parser.parse_args()
# args.proposal_path = "../test_proposals"
# args.proposal_path = args.proposal_path
included_extenstions = ['txt']
image_names = [fn[0:len(fn)-4] for fn in os.listdir(args.proposal_path)
if any(fn.endswith(ext) for ext in included_extenstions)]
for image_name in image_names:
load_path = args.proposal_path + '/' + image_name
image = cv2.imread(load_path + ".jpeg")
data = np.loadtxt(load_path + ".txt", str)
# If there is only one line, force data to be a list of lists anyway
# Note, only works for our data as first list item is a string
if isinstance(data[0], basestring):
data = [data]
# If any line does not conform to classification tl_x tl_y br_x br_y
# then forget about it
skip = False
for line in data:
if len(line) < 5:
skip = True
if skip:
continue
for i, proposal in zip(range(0,len(data)),data):
mask = cv2.imread(load_path + '_mask{0:04d}.jpeg'.format(i))
mask = np.invert(mask)
maskGray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
ret, maskGray = cv2.threshold(maskGray,128,255,cv2.THRESH_BINARY)
print load_path + '_mask{0:04d}.jpeg'.format(i)
cropped = image[float(proposal[2]):float(proposal[4]), float(proposal[1]):float(proposal[3])]
masked = cv2.bitwise_and(cropped, cropped, mask = maskGray)
if args.view:
cv2.imshow("original", masked)
cv2.waitKey(0)
mask_directory = args.proposal_path + '/masked/' + proposal[0];
crop_directory = args.proposal_path + '/cropped/' + proposal[0];
if not os.path.exists(mask_directory):
os.makedirs(mask_directory)
if not os.path.exists(crop_directory):
os.makedirs(crop_directory)
cv2.imwrite(mask_directory + '/{}_{}.jpeg'.format(image_name,i), masked)
cv2.imwrite(crop_directory + '/{}_{}.jpeg'.format(image_name,i), cropped)
# item = data[]
# cropped = image[70:170, 440:540]
# startY:endY, startX:endX
# startX:startY, endX:endY
#
| en | 0.691273 | #! /usr/bin/env python # ******************************************************************** # Software License Agreement (BSD License) # # Copyright (c) 2015, University of Colorado, Boulder # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the University of Colorado Boulder # nor the names of its contributors may be # used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ********************************************************************/ # args.proposal_path = "../test_proposals" # args.proposal_path = args.proposal_path # If there is only one line, force data to be a list of lists anyway # Note, only works for our data as first list item is a string # If any line does not conform to classification tl_x tl_y br_x br_y # then forget about it # item = data[] # cropped = image[70:170, 440:540] # startY:endY, startX:endX # startX:startY, endX:endY # | 1.087199 | 1 |
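The trailing comments in the script above describe NumPy's row-major slicing; a small self-contained sketch of the same crop-then-mask steps (array sizes and coordinates are made up):

import cv2
import numpy as np

image = np.zeros((200, 300, 3), dtype=np.uint8)          # 200 rows (y) by 300 columns (x)
cropped = image[70:170, 40:140]                           # image[startY:endY, startX:endX]
mask = np.full(cropped.shape[:2], 255, dtype=np.uint8)    # single-channel mask, same height/width
masked = cv2.bitwise_and(cropped, cropped, mask=mask)
print(cropped.shape, masked.shape)                        # (100, 100, 3) (100, 100, 3)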
stats.py | shirshanka/fact-ory | 0 | 7929 | import numpy as np;
import sys
import matplotlib.pyplot as plt;
from matplotlib import cm;
from termcolor import colored;
class Stats():
def __init__(self, param1_range, param2_range):
self._total_times = 0;
self._total_time = 0.0;
self._wrong_answers = [];
self._time_dict = {};
self._param1_range = param1_range
self._param2_range = param2_range
self._param1_length = param1_range[1] - param1_range[0] + 1
self._param2_length = param2_range[1] - param2_range[0] + 1
self._red_color = 1.0
self._green_color = 0.3
self._cream_color = 0.6
self._default_color = np.nan
self._wrong_color = 1000.0
self._time_penalty = 2.0 # time penalty for wrong answer is 5 seconds
self._result_matrix = np.full((self._param1_length, self._param2_length), self._default_color)
def add_statistic(self, operator, param1,param2,ans,time_diff):
self.add_time_statistic(param1, param2, time_diff)
x_axis = param1 - self._param1_range[0]
y_axis = param2 - self._param2_range[0]
curr_value = self._result_matrix[x_axis][y_axis]
incr_value = time_diff
if (operator.evaluate(param1, param2) != ans):
# wrong answer
self.add_wrong_answer(param1,param2,ans)
incr_value = incr_value + self._time_penalty
else:
# right answer: do nothing
pass
if np.isnan(curr_value):
self._result_matrix[x_axis][y_axis] = incr_value
else:
self._result_matrix[x_axis][y_axis] = curr_value + incr_value
def add_time_statistic(self, param1, param2, time_diff):
self._total_times = self._total_times +1;
self._total_time = self._total_time + time_diff;
if not self._time_dict.has_key(param1):
self._time_dict[param1] = []
if not self._time_dict.has_key(param2):
self._time_dict[param2] = []
self._time_dict[param1].append(time_diff)
self._time_dict[param2].append(time_diff)
def add_wrong_answer(self, param1, param2, answer_given):
self._wrong_answers.append((param1,param2, answer_given))
def get_avg_time(self):
return (self._total_time / self._total_times);
def print_stats(self, operator):
sys.stdout.write("You took an average of %0.2f seconds to answer each question!\n" % self.get_avg_time());
if self._wrong_answers != []:
print("Here were the answers you got wrong...")
for (f1,f2,ans) in self._wrong_answers:
print ("%d %s %d = " % (f1,operator.symbol,f2)), colored("%d" % ans, "red"), "Correct answer is ", colored("%d" % operator.evaluate(f1,f2), "green")
row_labels = range(self._param1_range[0],self._param1_range[1]+1)
col_labels = range(self._param2_range[0],self._param2_range[1]+1)
#plt.matshow(self._result_matrix, cmap=cm.Spectral_r, vmin=0, vmax=1)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(self._result_matrix, interpolation='nearest', vmin=0)
fig.colorbar(cax)
plt.gca().set_aspect('auto')
row_ticks = range(len(row_labels))
col_ticks = range(len(col_labels))
if (len(row_labels) > 10):
skip_every = int(len(row_labels) / 10);
row_labels = row_labels[0::skip_every]
row_ticks = row_ticks[0::skip_every]
if (len(col_labels) > 10):
skip_every = int(len(col_labels)/10)
col_labels = col_labels[0::skip_every]
col_ticks = col_ticks[0::skip_every]
plt.xticks(col_ticks, col_labels)
plt.yticks(row_ticks, row_labels)
plt.show()
if __name__=="__main__":
print "hello world"
| import numpy as np;
import sys
import matplotlib.pyplot as plt;
from matplotlib import cm;
from termcolor import colored;
class Stats():
def __init__(self, param1_range, param2_range):
self._total_times = 0;
self._total_time = 0.0;
self._wrong_answers = [];
self._time_dict = {};
self._param1_range = param1_range
self._param2_range = param2_range
self._param1_length = param1_range[1] - param1_range[0] + 1
self._param2_length = param2_range[1] - param2_range[0] + 1
self._red_color = 1.0
self._green_color = 0.3
self._cream_color = 0.6
self._default_color = np.nan
self._wrong_color = 1000.0
self._time_penalty = 2.0 # time penalty for wrong answer is 5 seconds
self._result_matrix = np.full((self._param1_length, self._param2_length), self._default_color)
def add_statistic(self, operator, param1,param2,ans,time_diff):
self.add_time_statistic(param1, param2, time_diff)
x_axis = param1 - self._param1_range[0]
y_axis = param2 - self._param2_range[0]
curr_value = self._result_matrix[x_axis][y_axis]
incr_value = time_diff
if (operator.evaluate(param1, param2) != ans):
# wrong answer
self.add_wrong_answer(param1,param2,ans)
incr_value = incr_value + self._time_penalty
else:
# right answer: do nothing
pass
if np.isnan(curr_value):
self._result_matrix[x_axis][y_axis] = incr_value
else:
self._result_matrix[x_axis][y_axis] = curr_value + incr_value
def add_time_statistic(self, param1, param2, time_diff):
self._total_times = self._total_times +1;
self._total_time = self._total_time + time_diff;
if not self._time_dict.has_key(param1):
self._time_dict[param1] = []
if not self._time_dict.has_key(param2):
self._time_dict[param2] = []
self._time_dict[param1].append(time_diff)
self._time_dict[param2].append(time_diff)
def add_wrong_answer(self, param1, param2, answer_given):
self._wrong_answers.append((param1,param2, answer_given))
def get_avg_time(self):
return (self._total_time / self._total_times);
def print_stats(self, operator):
sys.stdout.write("You took an average of %0.2f seconds to answer each question!\n" % self.get_avg_time());
if self._wrong_answers != []:
print("Here were the answers you got wrong...")
for (f1,f2,ans) in self._wrong_answers:
print ("%d %s %d = " % (f1,operator.symbol,f2)), colored("%d" % ans, "red"), "Correct answer is ", colored("%d" % operator.evaluate(f1,f2), "green")
row_labels = range(self._param1_range[0],self._param1_range[1]+1)
col_labels = range(self._param2_range[0],self._param2_range[1]+1)
#plt.matshow(self._result_matrix, cmap=cm.Spectral_r, vmin=0, vmax=1)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(self._result_matrix, interpolation='nearest', vmin=0)
fig.colorbar(cax)
plt.gca().set_aspect('auto')
row_ticks = range(len(row_labels))
col_ticks = range(len(col_labels))
if (len(row_labels) > 10):
skip_every = int(len(row_labels) / 10);
row_labels = row_labels[0::skip_every]
row_ticks = row_ticks[0::skip_every]
if (len(col_labels) > 10):
skip_every = int(len(col_labels)/10)
col_labels = col_labels[0::skip_every]
col_ticks = col_ticks[0::skip_every]
plt.xticks(col_ticks, col_labels)
plt.yticks(row_ticks, row_labels)
plt.show()
if __name__=="__main__":
print "hello world"
| en | 0.630466 | # time penalty for wrong answer is 5 seconds # wrong answer # right answer: do nothing #plt.matshow(self._result_matrix, cmap=cm.Spectral_r, vmin=0, vmax=1) | 2.930981 | 3 |
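A sketch of how the Stats class above might be driven; the operator object is a stand-in (Stats only needs .symbol and .evaluate), and since the module itself is Python 2 code (dict.has_key, print statements) the sketch targets Python 2 as well:

class Mult(object):
    symbol = "x"
    @staticmethod
    def evaluate(a, b):
        return a * b

stats = Stats((2, 12), (2, 12))
stats.add_statistic(Mult, 3, 4, 12, 1.7)   # correct answer, answered in 1.7 s
stats.add_statistic(Mult, 6, 7, 41, 3.1)   # wrong answer, the 2 s penalty is added to the heatmap
stats.print_stats(Mult)                    # prints the average time and shows the matplotlib heatmap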
examples/peptidecutter/advanced.py | zjuchenyuan/EasyLogin | 33 | 7930 |
from EasyLogin import EasyLogin
from pprint import pprint
def peptidecutter(oneprotein):
a = EasyLogin(proxy="socks5://127.0.0.1:1080") #speed up by using proxy
a.post("http://web.expasy.org/cgi-bin/peptide_cutter/peptidecutter.pl",
"protein={}&enzyme_number=all_enzymes&special_enzyme=Chym&min_prob=&block_size=60&alphtable=alphtable&cleave_number=all&cleave_exactly=&cleave_range_min=&cleave_range_max=".format(oneprotein)
)
table=a.b.find("table",{"class":"proteomics2"})
tds=table.find_all("td")
result = []
oneline = []
i = 0
for td in tds:
i+=1
if i==1:
content = td.text
elif i==2:
content = int(td.text)
else:
content = [int(i) for i in td.text.split()]
oneline.append(content)
if i==3:
result.append(oneline)
oneline=[]
i=0
return result
def fasta_reader(filename):
filecontents = open(filename).read().split("\n")
name = ""
thedata = ""
result=[]
for line in filecontents:
if not len(line): continue
if line[0]=='>':
if len(thedata):
result.append([name,thedata])
thedata = ""
name = line
else:
thedata += line
result.append([name,thedata])#don't forget the last one
return result
def peptidecutter_more(filename):
return [ [name,peptidecutter(oneprotein)] for name,oneprotein in fasta_reader(filename) ]
if __name__ == "__main__":
#pprint(peptidecutter("SERVELAT"))
import sys
pprint(peptidecutter_more(sys.argv[1]))
| from EasyLogin import EasyLogin
from pprint import pprint
def peptidecutter(oneprotein):
a = EasyLogin(proxy="socks5://127.0.0.1:1080") #speed up by using proxy
a.post("http://web.expasy.org/cgi-bin/peptide_cutter/peptidecutter.pl",
"protein={}&enzyme_number=all_enzymes&special_enzyme=Chym&min_prob=&block_size=60&alphtable=alphtable&cleave_number=all&cleave_exactly=&cleave_range_min=&cleave_range_max=".format(oneprotein)
)
table=a.b.find("table",{"class":"proteomics2"})
tds=table.find_all("td")
result = []
oneline = []
i = 0
for td in tds:
i+=1
if i==1:
content = td.text
elif i==2:
content = int(td.text)
else:
content = [int(i) for i in td.text.split()]
oneline.append(content)
if i==3:
result.append(oneline)
oneline=[]
i=0
return result
def fasta_reader(filename):
filecontents = open(filename).read().split("\n")
name = ""
thedata = ""
result=[]
for line in filecontents:
if not len(line): continue
if line[0]=='>':
if len(thedata):
result.append([name,thedata])
thedata = ""
name = line
else:
thedata += line
result.append([name,thedata])#don't forget the last one
return result
def peptidecutter_more(filename):
return [ [name,peptidecutter(oneprotein)] for name,oneprotein in fasta_reader(filename) ]
if __name__ == "__main__":
#pprint(peptidecutter("SERVELAT"))
import sys
pprint(peptidecutter_more(sys.argv[1])) | en | 0.686117 | #speed up by using proxy #don't forget the last one #pprint(peptidecutter("SERVELAT")) | 2.676969 | 3 |
pgn2fixture/tests/test_utils.py | pointerish/pgn2fixture | 3 | 7931 | import unittest
from .. import utils
class TestUtils(unittest.TestCase):
def setUp(self) -> None:
self.pgn_string = '''
[Event "US Championship 1963/64"]
[Site "New York, NY USA"]
[Date "1964.01.01"]
[EventDate "1963.??.??"]
[Round "11"][Result "0-1"]
[White "<NAME>"]
[Black "<NAME>"]
[ECO "A33"]
[WhiteElo "?"]
[BlackElo "?"][PlyCount "112"]
1. c4 0-1'''
def test_clean(self):
result = ['Event "US Championship 1963/64"', 'Site "New York, NY USA"', 'Date "1964.01.01"', 'EventDate "1963.??.??"', 'Round "11"', 'Result "0-1"',
'White "<NAME>"', 'Black "<NAME>"', 'ECO "A33"', 'WhiteElo "?"', 'BlackElo "?"', 'PlyCount "112"', '1. c4 0-1']
self.assertEqual(utils.clean(self.pgn_string), result)
def test_extract_tag_roster(self):
result = {'event': 'US Championship 1963/64', 'site': 'New York, NY USA', 'date': '1964.01.01', 'eventdate': '1963.??.??', 'round': '11', 'result': '0-1',
'white': '<NAME>', 'black': '<NAME>', 'eco': 'A33', 'whiteelo': '?', 'blackelo': '?', 'plycount': '112', 'moves': '1. c4 0-1'}
self.assertEqual(utils.extract_tag_roster(self.pgn_string), result)
| import unittest
from .. import utils
class TestUtils(unittest.TestCase):
def setUp(self) -> None:
self.pgn_string = '''
[Event "US Championship 1963/64"]
[Site "New York, NY USA"]
[Date "1964.01.01"]
[EventDate "1963.??.??"]
[Round "11"][Result "0-1"]
[White "<NAME>"]
[Black "<NAME>"]
[ECO "A33"]
[WhiteElo "?"]
[BlackElo "?"][PlyCount "112"]
1. c4 0-1'''
def test_clean(self):
result = ['Event "US Championship 1963/64"', 'Site "New York, NY USA"', 'Date "1964.01.01"', 'EventDate "1963.??.??"', 'Round "11"', 'Result "0-1"',
'White "<NAME>"', 'Black "<NAME>"', 'ECO "A33"', 'WhiteElo "?"', 'BlackElo "?"', 'PlyCount "112"', '1. c4 0-1']
self.assertEqual(utils.clean(self.pgn_string), result)
def test_extract_tag_roster(self):
result = {'event': 'US Championship 1963/64', 'site': 'New York, NY USA', 'date': '1964.01.01', 'eventdate': '1963.??.??', 'round': '11', 'result': '0-1',
'white': '<NAME>', 'black': '<NAME>', 'eco': 'A33', 'whiteelo': '?', 'blackelo': '?', 'plycount': '112', 'moves': '1. c4 0-1'}
self.assertEqual(utils.extract_tag_roster(self.pgn_string), result)
| en | 0.553976 | [Event "US Championship 1963/64"] [Site "New York, NY USA"] [Date "1964.01.01"] [EventDate "1963.??.??"] [Round "11"][Result "0-1"] [White "<NAME>"] [Black "<NAME>"] [ECO "A33"] [WhiteElo "?"] [BlackElo "?"][PlyCount "112"] 1. c4 0-1 | 2.872694 | 3 |
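These tests follow the stock unittest layout, so the standard loader can run them directly; a minimal runner sketch (assumes the pgn2fixture package is importable from the working directory):

import unittest

suite = unittest.defaultTestLoader.loadTestsFromName("pgn2fixture.tests.test_utils")
unittest.TextTestRunner(verbosity=2).run(suite)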
manila/tests/share/test_snapshot_access.py | gouthampacha/manila | 3 | 7932 | # Copyright (c) 2016 <NAME>, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import mock
from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila.share import snapshot_access
from manila import test
from manila.tests import db_utils
from manila import utils
@ddt.ddt
class SnapshotAccessTestCase(test.TestCase):
def setUp(self):
super(SnapshotAccessTestCase, self).setUp()
self.driver = self.mock_class("manila.share.driver.ShareDriver",
mock.Mock())
self.snapshot_access = snapshot_access.ShareSnapshotInstanceAccess(
db, self.driver)
self.context = context.get_admin_context()
share = db_utils.create_share()
self.snapshot = db_utils.create_snapshot(share_id=share['id'])
self.snapshot_instance = db_utils.create_snapshot_instance(
snapshot_id=self.snapshot['id'],
share_instance_id=self.snapshot['share']['instance']['id'])
@ddt.data(constants.ACCESS_STATE_QUEUED_TO_APPLY,
constants.ACCESS_STATE_QUEUED_TO_DENY)
def test_update_access_rules(self, state):
rules = []
for i in range(2):
rules.append({
'id': 'id-%s' % i,
'state': state,
'access_id': 'rule_id%s' % i
})
all_rules = copy.deepcopy(rules)
all_rules.append({
'id': 'id-3',
'state': constants.ACCESS_STATE_ERROR,
'access_id': 'rule_id3'
})
snapshot_instance_get = self.mock_object(
db, 'share_snapshot_instance_get',
mock.Mock(return_value=self.snapshot_instance))
snap_get_all_for_snap_instance = self.mock_object(
db, 'share_snapshot_access_get_all_for_snapshot_instance',
mock.Mock(return_value=all_rules))
self.mock_object(db, 'share_snapshot_instance_access_update')
self.mock_object(self.driver, 'snapshot_update_access')
self.mock_object(self.snapshot_access, '_check_needs_refresh',
mock.Mock(return_value=False))
self.mock_object(db, 'share_snapshot_instance_access_delete')
self.snapshot_access.update_access_rules(self.context,
self.snapshot_instance['id'])
snapshot_instance_get.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'], with_share_data=True)
snap_get_all_for_snap_instance.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'])
if state == constants.ACCESS_STATE_QUEUED_TO_APPLY:
self.driver.snapshot_update_access.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance, rules, add_rules=rules,
delete_rules=[], share_server=None)
else:
self.driver.snapshot_update_access.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance, [], add_rules=[],
delete_rules=rules, share_server=None)
def test_update_access_rules_delete_all_rules(self):
rules = []
for i in range(2):
rules.append({
'id': 'id-%s' % i,
'state': constants.ACCESS_STATE_QUEUED_TO_DENY,
'access_id': 'rule_id%s' % i
})
snapshot_instance_get = self.mock_object(
db, 'share_snapshot_instance_get',
mock.Mock(return_value=self.snapshot_instance))
snap_get_all_for_snap_instance = self.mock_object(
db, 'share_snapshot_access_get_all_for_snapshot_instance',
mock.Mock(side_effect=[rules, []]))
self.mock_object(db, 'share_snapshot_instance_access_update')
self.mock_object(self.driver, 'snapshot_update_access')
self.mock_object(db, 'share_snapshot_instance_access_delete')
self.snapshot_access.update_access_rules(self.context,
self.snapshot_instance['id'],
delete_all_rules=True)
snapshot_instance_get.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'], with_share_data=True)
snap_get_all_for_snap_instance.assert_called_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'])
self.driver.snapshot_update_access.assert_called_with(
utils.IsAMatcher(context.RequestContext), self.snapshot_instance,
[], add_rules=[], delete_rules=rules, share_server=None)
def test_update_access_rules_exception(self):
rules = []
for i in range(2):
rules.append({
'id': 'id-%s' % i,
'state': constants.ACCESS_STATE_APPLYING,
'access_id': 'rule_id%s' % i
})
snapshot_instance_get = self.mock_object(
db, 'share_snapshot_instance_get',
mock.Mock(return_value=self.snapshot_instance))
snap_get_all_for_snap_instance = self.mock_object(
db, 'share_snapshot_access_get_all_for_snapshot_instance',
mock.Mock(return_value=rules))
self.mock_object(db, 'share_snapshot_instance_access_update')
self.mock_object(self.driver, 'snapshot_update_access',
mock.Mock(side_effect=exception.NotFound))
self.assertRaises(exception.NotFound,
self.snapshot_access.update_access_rules,
self.context, self.snapshot_instance['id'])
snapshot_instance_get.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'], with_share_data=True)
snap_get_all_for_snap_instance.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'])
self.driver.snapshot_update_access.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), self.snapshot_instance,
rules, add_rules=rules, delete_rules=[], share_server=None)
| # Copyright (c) 2016 <NAME>, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import mock
from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila.share import snapshot_access
from manila import test
from manila.tests import db_utils
from manila import utils
@ddt.ddt
class SnapshotAccessTestCase(test.TestCase):
def setUp(self):
super(SnapshotAccessTestCase, self).setUp()
self.driver = self.mock_class("manila.share.driver.ShareDriver",
mock.Mock())
self.snapshot_access = snapshot_access.ShareSnapshotInstanceAccess(
db, self.driver)
self.context = context.get_admin_context()
share = db_utils.create_share()
self.snapshot = db_utils.create_snapshot(share_id=share['id'])
self.snapshot_instance = db_utils.create_snapshot_instance(
snapshot_id=self.snapshot['id'],
share_instance_id=self.snapshot['share']['instance']['id'])
@ddt.data(constants.ACCESS_STATE_QUEUED_TO_APPLY,
constants.ACCESS_STATE_QUEUED_TO_DENY)
def test_update_access_rules(self, state):
rules = []
for i in range(2):
rules.append({
'id': 'id-%s' % i,
'state': state,
'access_id': 'rule_id%s' % i
})
all_rules = copy.deepcopy(rules)
all_rules.append({
'id': 'id-3',
'state': constants.ACCESS_STATE_ERROR,
'access_id': 'rule_id3'
})
snapshot_instance_get = self.mock_object(
db, 'share_snapshot_instance_get',
mock.Mock(return_value=self.snapshot_instance))
snap_get_all_for_snap_instance = self.mock_object(
db, 'share_snapshot_access_get_all_for_snapshot_instance',
mock.Mock(return_value=all_rules))
self.mock_object(db, 'share_snapshot_instance_access_update')
self.mock_object(self.driver, 'snapshot_update_access')
self.mock_object(self.snapshot_access, '_check_needs_refresh',
mock.Mock(return_value=False))
self.mock_object(db, 'share_snapshot_instance_access_delete')
self.snapshot_access.update_access_rules(self.context,
self.snapshot_instance['id'])
snapshot_instance_get.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'], with_share_data=True)
snap_get_all_for_snap_instance.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'])
if state == constants.ACCESS_STATE_QUEUED_TO_APPLY:
self.driver.snapshot_update_access.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance, rules, add_rules=rules,
delete_rules=[], share_server=None)
else:
self.driver.snapshot_update_access.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance, [], add_rules=[],
delete_rules=rules, share_server=None)
def test_update_access_rules_delete_all_rules(self):
rules = []
for i in range(2):
rules.append({
'id': 'id-%s' % i,
'state': constants.ACCESS_STATE_QUEUED_TO_DENY,
'access_id': 'rule_id%s' % i
})
snapshot_instance_get = self.mock_object(
db, 'share_snapshot_instance_get',
mock.Mock(return_value=self.snapshot_instance))
snap_get_all_for_snap_instance = self.mock_object(
db, 'share_snapshot_access_get_all_for_snapshot_instance',
mock.Mock(side_effect=[rules, []]))
self.mock_object(db, 'share_snapshot_instance_access_update')
self.mock_object(self.driver, 'snapshot_update_access')
self.mock_object(db, 'share_snapshot_instance_access_delete')
self.snapshot_access.update_access_rules(self.context,
self.snapshot_instance['id'],
delete_all_rules=True)
snapshot_instance_get.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'], with_share_data=True)
snap_get_all_for_snap_instance.assert_called_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'])
self.driver.snapshot_update_access.assert_called_with(
utils.IsAMatcher(context.RequestContext), self.snapshot_instance,
[], add_rules=[], delete_rules=rules, share_server=None)
def test_update_access_rules_exception(self):
rules = []
for i in range(2):
rules.append({
'id': 'id-%s' % i,
'state': constants.ACCESS_STATE_APPLYING,
'access_id': 'rule_id%s' % i
})
snapshot_instance_get = self.mock_object(
db, 'share_snapshot_instance_get',
mock.Mock(return_value=self.snapshot_instance))
snap_get_all_for_snap_instance = self.mock_object(
db, 'share_snapshot_access_get_all_for_snapshot_instance',
mock.Mock(return_value=rules))
self.mock_object(db, 'share_snapshot_instance_access_update')
self.mock_object(self.driver, 'snapshot_update_access',
mock.Mock(side_effect=exception.NotFound))
self.assertRaises(exception.NotFound,
self.snapshot_access.update_access_rules,
self.context, self.snapshot_instance['id'])
snapshot_instance_get.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'], with_share_data=True)
snap_get_all_for_snap_instance.assert_called_once_with(
utils.IsAMatcher(context.RequestContext),
self.snapshot_instance['id'])
self.driver.snapshot_update_access.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), self.snapshot_instance,
rules, add_rules=rules, delete_rules=[], share_server=None)
| en | 0.856152 | # Copyright (c) 2016 <NAME>, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. | 1.888933 | 2 |
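The test case above leans on ddt to parametrize a single test method over several access states; a stripped-down illustration of the same @ddt.ddt / @ddt.data pattern outside of manila:

import unittest
import ddt

@ddt.ddt
class QueueStateTests(unittest.TestCase):

    @ddt.data("queued_to_apply", "queued_to_deny")   # one generated test per value
    def test_state_is_queued(self, state):
        self.assertTrue(state.startswith("queued_to_"))

if __name__ == "__main__":
    unittest.main()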
packages/pyright-internal/src/tests/samples/unnecessaryCast1.py | sasano8/pyright | 4,391 | 7933 | # This sample tests the type checker's reportUnnecessaryCast feature.
from typing import cast, Union
def foo(a: int):
# This should generate an error if
# reportUnnecessaryCast is enabled.
b = cast(int, a)
c: Union[int, str] = "hello"
d = cast(int, c)
| # This sample tests the type checker's reportUnnecessaryCast feature.
from typing import cast, Union
def foo(a: int):
# This should generate an error if
# reportUnnecessaryCast is enabled.
b = cast(int, a)
c: Union[int, str] = "hello"
d = cast(int, c)
| en | 0.69311 | # This sample tests the type checker's reportUnnecessaryCast feature. # This should generate an error if # reportUnnecessaryCast is enabled. | 2.443084 | 2 |
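reportUnnecessaryCast is an opt-in pyright diagnostic; a small sketch of what it does and does not flag (the config fragment is illustrative):

# pyrightconfig.json (illustrative): { "reportUnnecessaryCast": "error" }
from typing import Union, cast

value: Union[int, str] = 5
narrowed = cast(int, value)      # meaningful cast: narrows Union[int, str] to int, not reported
redundant = cast(int, narrowed)  # operand is already int, so this is the cast the rule reports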
Python/1238.py | ArikBartzadok/beecrowd-challenges | 0 | 7934 | def execucoes():
return int(input())
def entradas():
return input().split(' ')
def imprimir(v):
print(v)
def tamanho_a(a):
return len(a)
def tamanho_b(b):
return len(b)
def diferenca_tamanhos(a, b):
return (len(a) <= len(b))
def analisar(e, i, s):
a, b = e
if(diferenca_tamanhos(a, b)):
for i in range(tamanho_a(a)):
s += a[i]
s += b[i]
s += b[tamanho_a(a):]
else:
for i in range(tamanho_b(b)):
s += a[i]
s += b[i]
s += a[tamanho_b(b):]
return s
def combinador():
n = execucoes()
for i in range(n): imprimir(analisar(entradas(), i, ''))
combinador() | def execucoes():
return int(input())
def entradas():
return input().split(' ')
def imprimir(v):
print(v)
def tamanho_a(a):
return len(a)
def tamanho_b(b):
return len(b)
def diferenca_tamanhos(a, b):
return (len(a) <= len(b))
def analisar(e, i, s):
a, b = e
if(diferenca_tamanhos(a, b)):
for i in range(tamanho_a(a)):
s += a[i]
s += b[i]
s += b[tamanho_a(a):]
else:
for i in range(tamanho_b(b)):
s += a[i]
s += b[i]
s += a[tamanho_b(b):]
return s
def combinador():
n = execucoes()
for i in range(n): imprimir(analisar(entradas(), i, ''))
combinador() | none | 1 | 3.400173 | 3 |
|
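A worked example of the interleaving performed by analisar() above, rewritten as a plain function without the input/output plumbing:

def combine(a, b):
    n = min(len(a), len(b))
    merged = "".join(a[i] + b[i] for i in range(n))
    return merged + a[n:] + b[n:]   # append the leftover tail of the longer string

print(combine("abc", "de"))    # adbec
print(combine("xy", "abcd"))   # xaybcd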
metadata_service/api/popular_tables.py | worldwise001/amundsenmetadatalibrary | 0 | 7935 | from http import HTTPStatus
from typing import Iterable, Union, Mapping
from flask import request
from flask_restful import Resource, fields, marshal
from metadata_service.proxy import get_proxy_client
popular_table_fields = {
'database': fields.String,
'cluster': fields.String,
'schema': fields.String,
'table_name': fields.String(attribute='name'),
'table_description': fields.String(attribute='description'), # Optional
}
popular_tables_fields = {
'popular_tables': fields.List(fields.Nested(popular_table_fields))
}
class PopularTablesAPI(Resource):
"""
PopularTables API
"""
def __init__(self) -> None:
self.client = get_proxy_client()
def get(self) -> Iterable[Union[Mapping, int, None]]:
limit = request.args.get('limit', 10)
popular_tables = self.client.get_popular_tables(num_entries=limit)
return marshal({'popular_tables': popular_tables}, popular_tables_fields), HTTPStatus.OK
| from http import HTTPStatus
from typing import Iterable, Union, Mapping
from flask import request
from flask_restful import Resource, fields, marshal
from metadata_service.proxy import get_proxy_client
popular_table_fields = {
'database': fields.String,
'cluster': fields.String,
'schema': fields.String,
'table_name': fields.String(attribute='name'),
'table_description': fields.String(attribute='description'), # Optional
}
popular_tables_fields = {
'popular_tables': fields.List(fields.Nested(popular_table_fields))
}
class PopularTablesAPI(Resource):
"""
PopularTables API
"""
def __init__(self) -> None:
self.client = get_proxy_client()
def get(self) -> Iterable[Union[Mapping, int, None]]:
limit = request.args.get('limit', 10)
popular_tables = self.client.get_popular_tables(num_entries=limit)
return marshal({'popular_tables': popular_tables}, popular_tables_fields), HTTPStatus.OK
| en | 0.35124 | # Optional PopularTables API | 2.328978 | 2 |
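A sketch of how the marshalling above behaves; the sample record is made up, and flask_restful.marshal maps the 'name' and 'description' attributes onto the 'table_name' and 'table_description' keys declared in popular_table_fields:

from flask_restful import marshal

sample = [{"database": "hive", "cluster": "gold", "schema": "core",
           "name": "fact_rides", "description": "trip-level facts"}]
payload = marshal({"popular_tables": sample}, popular_tables_fields)
print(payload["popular_tables"][0]["table_name"])   # fact_rides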
tests/test1.py | SaijC/manhwaDownloader | 0 | 7936 | import requests
import logging
import cfscrape
import os
from manhwaDownloader.constants import CONSTANTS as CONST
logging.basicConfig(level=logging.DEBUG)
folderPath = os.path.join(CONST.OUTPUTPATH, 'serious-taste-of-forbbiden-fruit')
logging.info(len([file for file in os.walk(folderPath)]))
walkList = [file for file in os.walk(folderPath)]
chapterDicts = dict()
for folder, _, files in walkList[1:]:
chapterDicts.update({folder: files})
print(chapterDicts) | import requests
import logging
import cfscrape
import os
from manhwaDownloader.constants import CONSTANTS as CONST
logging.basicConfig(level=logging.DEBUG)
folderPath = os.path.join(CONST.OUTPUTPATH, 'serious-taste-of-forbbiden-fruit')
logging.info(len([file for file in os.walk(folderPath)]))
walkList = [file for file in os.walk(folderPath)]
chapterDicts = dict()
for folder, _, files in walkList[1:]:
chapterDicts.update({folder: files})
print(chapterDicts) | none | 1 | 1.997639 | 2 |
|
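The os.walk loop above builds a {chapter_folder: [files]} mapping; the same pattern without the project-specific constants (the folder layout is hypothetical):

import os

folder_path = "downloads/serious-taste-of-forbbiden-fruit"
chapter_dicts = {}
for folder, _, files in list(os.walk(folder_path))[1:]:   # entry [0] is the root folder itself
    chapter_dicts[folder] = files
print(chapter_dicts)   # e.g. {'downloads/.../chapter-1': ['001.jpg', '002.jpg'], ...}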
others/Keras_custom_error.py | rahasayantan/Work-For-Reference | 0 | 7937 | # define custom R2 metrics for Keras backend
from keras import backend as K
def r2_keras(y_true, y_pred):
SS_res = K.sum(K.square( y_true - y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
# base model architecture definition
def model():
model = Sequential()
#input layer
model.add(Dense(input_dims, input_dim=input_dims))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
# hidden layers
model.add(Dense(input_dims))
model.add(BatchNormalization())
model.add(Activation(act_func))
model.add(Dropout(0.3))
model.add(Dense(input_dims//2))
model.add(BatchNormalization())
model.add(Activation(act_func))
model.add(Dropout(0.3))
model.add(Dense(input_dims//4, activation=act_func))
# output layer (y_pred)
model.add(Dense(1, activation='linear'))
# compile this model
model.compile(loss='mean_squared_error', # one may use 'mean_absolute_error' as alternative
optimizer='adam',
metrics=[r2_keras] # you can add several if needed
)
# Visualize NN architecture
print(model.summary())
return model
################K2
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import RobustScaler
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, InputLayer, GaussianNoise
from keras.wrappers.scikit_learn import KerasRegressor
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
#
# Data preparation
#
y_train = train['y'].values
id_test = test['ID']
num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)
# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)
# Scaling features
scaler = RobustScaler()
df_all = scaler.fit_transform(df_all)
train = df_all[:num_train]
test = df_all[num_train:]
# Keep only the most contributing features
sfm = SelectFromModel(LassoCV())
sfm.fit(train, y_train)
train = sfm.transform(train)
test = sfm.transform(test)
print ('Number of features : %d' % train.shape[1])
def r2_keras(y_true, y_pred):
SS_res = K.sum(K.square( y_true - y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
def build_model_fn(neurons=20, noise=0.25):
model = Sequential()
model.add(InputLayer(input_shape=(train.shape[1],)))
model.add(GaussianNoise(noise))
model.add(Dense(neurons, activation='tanh'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mean_squared_error', optimizer='nadam', metrics=[r2_keras])
return model
#
# Tuning model parameters
#
model = KerasRegressor(build_fn=build_model_fn, epochs=75, verbose=0)
gsc = GridSearchCV(
estimator=model,
param_grid={
#'neurons': range(18,31,4),
'noise': [x/20.0 for x in range(3, 7)],
},
#scoring='r2',
scoring='neg_mean_squared_error',
cv=5
)
grid_result = gsc.fit(train, y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
for test_mean, test_stdev, train_mean, train_stdev, param in zip(
grid_result.cv_results_['mean_test_score'],
grid_result.cv_results_['std_test_score'],
grid_result.cv_results_['mean_train_score'],
grid_result.cv_results_['std_train_score'],
grid_result.cv_results_['params']):
print("Train: %f (%f) // Test : %f (%f) with: %r" % (train_mean, train_stdev, test_mean, test_stdev, param))
#
# Train model with best params for submission
#
model = build_model_fn(**grid_result.best_params_)
model.fit(train, y_train, epochs=75, verbose=2)
y_test = model.predict(test).flatten()
df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('mercedes-submission.csv', index=False)
#########################
import pandas as pd
import numpy as np
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.decomposition import PCA, FastICA
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline, Pipeline, _name_estimators
from sklearn.linear_model import ElasticNet, ElasticNetCV
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import r2_score
from sklearn.base import BaseEstimator, TransformerMixin
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train['y'].values
y_mean = np.mean(y_train)
id_test = test['ID']
num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)
# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)
train = df_all[:num_train]
test = df_all[num_train:]
class AddColumns(BaseEstimator, TransformerMixin):
def __init__(self, transform_=None):
self.transform_ = transform_
def fit(self, X, y=None):
self.transform_.fit(X, y)
return self
def transform(self, X, y=None):
xform_data = self.transform_.transform(X, y)
return np.append(X, xform_data, axis=1)
class LogExpPipeline(Pipeline):
def fit(self, X, y):
super(LogExpPipeline, self).fit(X, np.log1p(y))
def predict(self, X):
return np.expm1(super(LogExpPipeline, self).predict(X))
#
# Model/pipeline with scaling,pca,svm
#
svm_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
PCA(),
SVR(kernel='rbf', C=1.0, epsilon=0.05)]))
# results = cross_val_score(svm_pipe, train, y_train, cv=5, scoring='r2')
# print("SVM score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()
#
# Model/pipeline with scaling,pca,ElasticNet
#
en_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
PCA(n_components=125),
ElasticNet(alpha=0.001, l1_ratio=0.1)]))
#
# XGBoost model
#
xgb_model = xgb.sklearn.XGBRegressor(max_depth=4, learning_rate=0.005, subsample=0.921,
objective='reg:linear', n_estimators=1300, base_score=y_mean)
xgb_pipe = Pipeline(_name_estimators([AddColumns(transform_=PCA(n_components=10)),
AddColumns(transform_=FastICA(n_components=10, max_iter=500)),
xgb_model]))
# results = cross_val_score(xgb_model, train, y_train, cv=5, scoring='r2')
# print("XGB score: %.4f (%.4f)" % (results.mean(), results.std()))
#
# Random Forest
#
rf_model = RandomForestRegressor(n_estimators=250, n_jobs=4, min_samples_split=25,
min_samples_leaf=25, max_depth=3)
# results = cross_val_score(rf_model, train, y_train, cv=5, scoring='r2')
# print("RF score: %.4f (%.4f)" % (results.mean(), results.std()))
#
# Now the training and stacking part. In previous version i just tried to train each model and
# find the best combination, that lead to a horrible score (Overfit?). Code below does out-of-fold
# training/predictions and then we combine the final results.
#
# Read here for more explanation (This code was borrowed/adapted) :
#
class Ensemble(object):
def __init__(self, n_splits, stacker, base_models):
self.n_splits = n_splits
self.stacker = stacker
self.base_models = base_models
def fit_predict(self, X, y, T):
X = np.array(X)
y = np.array(y)
T = np.array(T)
folds = list(KFold(n_splits=self.n_splits, shuffle=True, random_state=2016).split(X, y))
S_train = np.zeros((X.shape[0], len(self.base_models)))
S_test = np.zeros((T.shape[0], len(self.base_models)))
for i, clf in enumerate(self.base_models):
S_test_i = np.zeros((T.shape[0], self.n_splits))
for j, (train_idx, test_idx) in enumerate(folds):
X_train = X[train_idx]
y_train = y[train_idx]
X_holdout = X[test_idx]
y_holdout = y[test_idx]
clf.fit(X_train, y_train)
y_pred = clf.predict(X_holdout)[:]
print ("Model %d fold %d score %f" % (i, j, r2_score(y_holdout, y_pred)))
S_train[test_idx, i] = y_pred
S_test_i[:, j] = clf.predict(T)[:]
S_test[:, i] = S_test_i.mean(axis=1)
# results = cross_val_score(self.stacker, S_train, y, cv=5, scoring='r2')
# print("Stacker score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()
self.stacker.fit(S_train, y)
res = self.stacker.predict(S_test)[:]
return res
stack = Ensemble(n_splits=5,
#stacker=ElasticNetCV(l1_ratio=[x/10.0 for x in range(1,10)]),
stacker=ElasticNet(l1_ratio=0.1, alpha=1.4),
base_models=(svm_pipe, en_pipe, xgb_pipe, rf_model))
y_test = stack.fit_predict(train, y_train, test)
df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('submission.csv', index=False)
#############################
'''This example demonstrates the use of Convolution1D for text classification.
Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on Intel i5 2.4Ghz CPU.
10s/epoch on Tesla K40 GPU.
'''
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
# set parameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 2
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
| # define custom R2 metrics for Keras backend
from keras import backend as K
def r2_keras(y_true, y_pred):
SS_res = K.sum(K.square( y_true - y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
# base model architecture definition
def model():
model = Sequential()
#input layer
model.add(Dense(input_dims, input_dim=input_dims))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
# hidden layers
model.add(Dense(input_dims))
model.add(BatchNormalization())
model.add(Activation(act_func))
model.add(Dropout(0.3))
model.add(Dense(input_dims//2))
model.add(BatchNormalization())
model.add(Activation(act_func))
model.add(Dropout(0.3))
model.add(Dense(input_dims//4, activation=act_func))
# output layer (y_pred)
model.add(Dense(1, activation='linear'))
# compile this model
model.compile(loss='mean_squared_error', # one may use 'mean_absolute_error' as alternative
optimizer='adam',
metrics=[r2_keras] # you can add several if needed
)
# Visualize NN architecture
print(model.summary())
return model
################K2
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import RobustScaler
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, InputLayer, GaussianNoise
from keras.wrappers.scikit_learn import KerasRegressor
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
#
# Data preparation
#
y_train = train['y'].values
id_test = test['ID']
num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)
# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)
# Sscaling features
scaler = RobustScaler()
df_all = scaler.fit_transform(df_all)
train = df_all[:num_train]
test = df_all[num_train:]
# Keep only the most contributing features
sfm = SelectFromModel(LassoCV())
sfm.fit(train, y_train)
train = sfm.transform(train)
test = sfm.transform(test)
print ('Number of features : %d' % train.shape[1])
def r2_keras(y_true, y_pred):
SS_res = K.sum(K.square( y_true - y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
def build_model_fn(neurons=20, noise=0.25):
model = Sequential()
model.add(InputLayer(input_shape=(train.shape[1],)))
model.add(GaussianNoise(noise))
model.add(Dense(neurons, activation='tanh'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mean_squared_error', optimizer='nadam', metrics=[r2_keras])
return model
#
# Tuning model parameters
#
model = KerasRegressor(build_fn=build_model_fn, epochs=75, verbose=0)
gsc = GridSearchCV(
estimator=model,
param_grid={
#'neurons': range(18,31,4),
'noise': [x/20.0 for x in range(3, 7)],
},
#scoring='r2',
scoring='neg_mean_squared_error',
cv=5
)
grid_result = gsc.fit(train, y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
for test_mean, test_stdev, train_mean, train_stdev, param in zip(
grid_result.cv_results_['mean_test_score'],
grid_result.cv_results_['std_test_score'],
grid_result.cv_results_['mean_train_score'],
grid_result.cv_results_['std_train_score'],
grid_result.cv_results_['params']):
print("Train: %f (%f) // Test : %f (%f) with: %r" % (train_mean, train_stdev, test_mean, test_stdev, param))
#
# Train model with best params for submission
#
model = build_model_fn(**grid_result.best_params_)
model.fit(train, y_train, epochs=75, verbose=2)
y_test = model.predict(test).flatten()
df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('mercedes-submission.csv', index=False)
#########################
import pandas as pd
import numpy as np
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.decomposition import PCA, FastICA
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline, Pipeline, _name_estimators
from sklearn.linear_model import ElasticNet, ElasticNetCV
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import r2_score
from sklearn.base import BaseEstimator, TransformerMixin
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train['y'].values
y_mean = np.mean(y_train)
id_test = test['ID']
num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)
# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)
train = df_all[:num_train]
test = df_all[num_train:]
class AddColumns(BaseEstimator, TransformerMixin):
def __init__(self, transform_=None):
self.transform_ = transform_
def fit(self, X, y=None):
self.transform_.fit(X, y)
return self
def transform(self, X, y=None):
xform_data = self.transform_.transform(X, y)
return np.append(X, xform_data, axis=1)
class LogExpPipeline(Pipeline):
def fit(self, X, y):
super(LogExpPipeline, self).fit(X, np.log1p(y))
def predict(self, X):
return np.expm1(super(LogExpPipeline, self).predict(X))
#
# Model/pipeline with scaling,pca,svm
#
svm_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
PCA(),
SVR(kernel='rbf', C=1.0, epsilon=0.05)]))
# results = cross_val_score(svm_pipe, train, y_train, cv=5, scoring='r2')
# print("SVM score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()
#
# Model/pipeline with scaling,pca,ElasticNet
#
en_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
PCA(n_components=125),
ElasticNet(alpha=0.001, l1_ratio=0.1)]))
#
# XGBoost model
#
xgb_model = xgb.sklearn.XGBRegressor(max_depth=4, learning_rate=0.005, subsample=0.921,
objective='reg:linear', n_estimators=1300, base_score=y_mean)
xgb_pipe = Pipeline(_name_estimators([AddColumns(transform_=PCA(n_components=10)),
AddColumns(transform_=FastICA(n_components=10, max_iter=500)),
xgb_model]))
# results = cross_val_score(xgb_model, train, y_train, cv=5, scoring='r2')
# print("XGB score: %.4f (%.4f)" % (results.mean(), results.std()))
#
# Random Forest
#
rf_model = RandomForestRegressor(n_estimators=250, n_jobs=4, min_samples_split=25,
min_samples_leaf=25, max_depth=3)
# results = cross_val_score(rf_model, train, y_train, cv=5, scoring='r2')
# print("RF score: %.4f (%.4f)" % (results.mean(), results.std()))
#
# Now the training and stacking part. In previous version i just tried to train each model and
# find the best combination, that lead to a horrible score (Overfit?). Code below does out-of-fold
# training/predictions and then we combine the final results.
#
# Read here for more explanation (This code was borrowed/adapted) :
#
class Ensemble(object):
def __init__(self, n_splits, stacker, base_models):
self.n_splits = n_splits
self.stacker = stacker
self.base_models = base_models
def fit_predict(self, X, y, T):
X = np.array(X)
y = np.array(y)
T = np.array(T)
folds = list(KFold(n_splits=self.n_splits, shuffle=True, random_state=2016).split(X, y))
S_train = np.zeros((X.shape[0], len(self.base_models)))
S_test = np.zeros((T.shape[0], len(self.base_models)))
for i, clf in enumerate(self.base_models):
S_test_i = np.zeros((T.shape[0], self.n_splits))
for j, (train_idx, test_idx) in enumerate(folds):
X_train = X[train_idx]
y_train = y[train_idx]
X_holdout = X[test_idx]
y_holdout = y[test_idx]
clf.fit(X_train, y_train)
y_pred = clf.predict(X_holdout)[:]
print ("Model %d fold %d score %f" % (i, j, r2_score(y_holdout, y_pred)))
S_train[test_idx, i] = y_pred
S_test_i[:, j] = clf.predict(T)[:]
S_test[:, i] = S_test_i.mean(axis=1)
# results = cross_val_score(self.stacker, S_train, y, cv=5, scoring='r2')
# print("Stacker score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()
self.stacker.fit(S_train, y)
res = self.stacker.predict(S_test)[:]
return res
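# Summary of the scheme above (added note): S_train has shape (n_samples, n_base_models) and holds
# out-of-fold predictions, so the stacker is fit only on predictions from models that never saw the
# corresponding rows; S_test averages each base model's test-set predictions over the folds before
# the stacker predicts on it.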
stack = Ensemble(n_splits=5,
#stacker=ElasticNetCV(l1_ratio=[x/10.0 for x in range(1,10)]),
stacker=ElasticNet(l1_ratio=0.1, alpha=1.4),
base_models=(svm_pipe, en_pipe, xgb_pipe, rf_model))
y_test = stack.fit_predict(train, y_train, test)
df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('submission.csv', index=False)
#############################
'''This example demonstrates the use of Convolution1D for text classification.
Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on an Intel i5 2.4 GHz CPU.
10s/epoch on Tesla K40 GPU.
'''
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
# set parameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 2
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
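# Optional check (not in the original script): report the final test loss/accuracy, which should
# land near the ~0.89 accuracy quoted in the module docstring.
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0)
print('Test loss:', score)
print('Test accuracy:', acc)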
| en | 0.689487 | # define custom R2 metrics for Keras backend # base model architecture definition #input layer # hidden layers # output layer (y_pred) # compile this model # one may use 'mean_absolute_error' as alternative # you can add several if needed # Visualize NN architecture ################K2 # # Data preparation # # One-hot encoding of categorical/strings # Sscaling features # Keep only the most contributing features # # Tuning model parameters # #'neurons': range(18,31,4), #scoring='r2', # # Train model with best params for submission # ######################### # One-hot encoding of categorical/strings # # Model/pipeline with scaling,pca,svm # # results = cross_val_score(svm_pipe, train, y_train, cv=5, scoring='r2') # print("SVM score: %.4f (%.4f)" % (results.mean(), results.std())) # exit() # # Model/pipeline with scaling,pca,ElasticNet # # # XGBoost model # # results = cross_val_score(xgb_model, train, y_train, cv=5, scoring='r2') # print("XGB score: %.4f (%.4f)" % (results.mean(), results.std())) # # Random Forest # # results = cross_val_score(rf_model, train, y_train, cv=5, scoring='r2') # print("RF score: %.4f (%.4f)" % (results.mean(), results.std())) # # Now the training and stacking part. In previous version i just tried to train each model and # find the best combination, that lead to a horrible score (Overfit?). Code below does out-of-fold # training/predictions and then we combine the final results. # # Read here for more explanation (This code was borrowed/adapted) : # # results = cross_val_score(self.stacker, S_train, y, cv=5, scoring='r2') # print("Stacker score: %.4f (%.4f)" % (results.mean(), results.std())) # exit() #stacker=ElasticNetCV(l1_ratio=[x/10.0 for x in range(1,10)]), ############################# This example demonstrates the use of Convolution1D for text classification. Gets to 0.89 test accuracy after 2 epochs. 90s/epoch on Intel i5 2.4Ghz CPU. 10s/epoch on Tesla K40 GPU. # set parameters: # we start off with an efficient embedding layer which maps # our vocab indices into embedding_dims dimensions # we add a Convolution1D, which will learn filters # word group filters of size filter_length: # we use max pooling: # We add a vanilla hidden layer: # We project onto a single unit output layer, and squash it with a sigmoid: | 2.953036 | 3 |
tests/ut/python/parallel/test_manual_gatherv2.py | PowerOlive/mindspore | 3,200 | 7938 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
class Net(Cell):
def __init__(self,
strategy1=None,
strategy2=None,
strategy3=None,
axis=0,
init_flag=True,
split_tuple=(4, 4),
split_string="manual_split",
param_shape=(8, 8)):
super().__init__()
self.gatherv2 = P.Gather().shard(strategy1)
self.gatherv2.add_prim_attr(split_string, split_tuple)
self.mul = P.Mul().shard(strategy2)
self.reshape = P.Reshape()
self.matmul = P.MatMul().shard(strategy3)
self.matmul.add_prim_attr("forward_reduce_scatter", True)
if init_flag:
self.param = Parameter(initializer("ones", param_shape, ms.float32), name="gatherv2_param")
else:
self.param = Parameter(Tensor(np.ones(param_shape), dtype=ms.float32), name="gatherv2_param")
self.mul_weight = Parameter(initializer("ones", (8, 8, 8), ms.float32), name="mul_weight")
self.matmul_weight = Parameter(initializer("ones", (64, 16), ms.float32), name="matmul_weight")
self.axis = axis
def construct(self, x, b):
out = self.gatherv2(self.param, x, self.axis)
out = self.mul(out, self.mul_weight)
out = self.reshape(out, (8, 64))
out = self.matmul(out, self.matmul_weight)
return out
_x = Tensor(np.ones([8, 8]), dtype=ms.int32)
_b = Tensor(np.ones([64, 8]), dtype=ms.float32)
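# Shape walk-through for the default Net (added note): gathering the (8, 8) parameter with the
# (8, 8) index tensor `_x` along axis 0 yields (8, 8, 8), which matches mul_weight; the reshape
# to (8, 64) then feeds the (64, 16) matmul weight, giving an (8, 16) output.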
def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_cell_graph_executor.compile(train_net, _x, _b, auto_parallel_mode=True)
context.reset_auto_parallel_context()
def test_normal_split():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
compile_net(net)
def test_normal_split2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 4, 1), (1, 4, 1))
strategy3 = ((1, 4), (4, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=17)
strategy1 = ((4, 8), (1, 4))
strategy2 = ((1, 4, 8), (1, 4, 8))
strategy3 = ((1, 32), (32, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split_with_offset():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_string="manual_split_with_offset", split_tuple=((4, 0), (4, 4)))
compile_net(net)
def test_auto_parallel_error():
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=2, global_rank=0)
net = Net()
with pytest.raises(RuntimeError):
compile_net(net)
def test_axis_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, axis=1)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (8, 1))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (1, 8))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error4():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 8), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error5():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_split_tuple_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=((5, 0), (5, 5)))
with pytest.raises(RuntimeError):
compile_net(net)
def test_parameter_use_tensor_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, init_flag=False)
with pytest.raises(RuntimeError):
compile_net(net)
| # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
class Net(Cell):
def __init__(self,
strategy1=None,
strategy2=None,
strategy3=None,
axis=0,
init_flag=True,
split_tuple=(4, 4),
split_string="manual_split",
param_shape=(8, 8)):
super().__init__()
self.gatherv2 = P.Gather().shard(strategy1)
self.gatherv2.add_prim_attr(split_string, split_tuple)
self.mul = P.Mul().shard(strategy2)
self.reshape = P.Reshape()
self.matmul = P.MatMul().shard(strategy3)
self.matmul.add_prim_attr("forward_reduce_scatter", True)
if init_flag:
self.param = Parameter(initializer("ones", param_shape, ms.float32), name="gatherv2_param")
else:
self.param = Parameter(Tensor(np.ones(param_shape), dtype=ms.float32), name="gatherv2_param")
self.mul_weight = Parameter(initializer("ones", (8, 8, 8), ms.float32), name="mul_weight")
self.matmul_weight = Parameter(initializer("ones", (64, 16), ms.float32), name="matmul_weight")
self.axis = axis
def construct(self, x, b):
out = self.gatherv2(self.param, x, self.axis)
out = self.mul(out, self.mul_weight)
out = self.reshape(out, (8, 64))
out = self.matmul(out, self.matmul_weight)
return out
_x = Tensor(np.ones([8, 8]), dtype=ms.int32)
_b = Tensor(np.ones([64, 8]), dtype=ms.float32)
def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_cell_graph_executor.compile(train_net, _x, _b, auto_parallel_mode=True)
context.reset_auto_parallel_context()
def test_normal_split():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
compile_net(net)
def test_normal_split2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 4, 1), (1, 4, 1))
strategy3 = ((1, 4), (4, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=17)
strategy1 = ((4, 8), (1, 4))
strategy2 = ((1, 4, 8), (1, 4, 8))
strategy3 = ((1, 32), (32, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split_with_offset():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_string="manual_split_with_offset", split_tuple=((4, 0), (4, 4)))
compile_net(net)
def test_auto_parallel_error():
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=2, global_rank=0)
net = Net()
with pytest.raises(RuntimeError):
compile_net(net)
def test_axis_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, axis=1)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (8, 1))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (1, 8))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error4():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 8), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error5():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_split_tuple_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=((5, 0), (5, 5)))
with pytest.raises(RuntimeError):
compile_net(net)
def test_parameter_use_tensor_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, init_flag=False)
with pytest.raises(RuntimeError):
compile_net(net)
| en | 0.808111 | # Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ | 1.730067 | 2 |
ClemBot.Bot/bot/api/tag_route.py | makayla-moster/ClemBot | 121 | 7939 | <gh_stars>100-1000
from bot.api.api_client import ApiClient
from bot.api.base_route import BaseRoute
import typing as t
from bot.models import Tag
class TagRoute(BaseRoute):
def __init__(self, api_client: ApiClient):
super().__init__(api_client)
async def create_tag(self, name: str, content: str, guild_id: int, user_id: int, **kwargs) -> t.Optional[Tag]:
json = {
'Name': name,
'Content': content,
'GuildId': guild_id,
'UserId': user_id,
}
tag_dict = await self._client.post('tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def edit_tag_content(self, guild_id: int, name: str, content: str, **kwargs) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
'Content': content
}
tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def edit_tag_owner(self, guild_id: int, name: str, user_id: int, **kwargs) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
'UserId': user_id
}
tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def get_tag(self, guild_id: int, name: str) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
}
tag_dict = await self._client.get('bot/tags', data=json)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def get_tag_content(self, guild_id: int, name: str) -> t.Optional[str]:
json = {
'GuildId': guild_id,
'Name': name,
}
resp = await self._client.get('bot/tags', data=json)
return None if resp is None else resp['content']
async def delete_tag(self, guild_id: int, name: str, **kwargs):
"""
        Makes a call to the API to delete a tag with the given GuildId and Name.
        If successful, the API will return a dict with the following values:
- name The name of the tag.
- content The content of the tag.
- guildId The guild id the tag was in.
"""
json = {
'GuildId': guild_id,
'Name': name,
}
return await self._client.delete('bot/tags', data=json, **kwargs)
async def add_tag_use(self, guild_id: int, name: str, channel_id: int, user_id: int):
"""
        Makes a call to the API to record that a tag with the given Name was used.
        If successful, the API will return a dict with the following values:
- name The name of the tag.
- guildId The guild id the tag is in.
"""
json = {
'GuildId': guild_id,
'Name': name,
'ChannelId': channel_id,
'UserId': user_id
}
return await self._client.post('bot/tags/invoke', data=json)
async def get_guilds_tags(self, guild_id: int) -> t.Iterator[Tag]:
resp = await self._client.get(f'guilds/{guild_id}/tags')
if not resp:
return []
return [Tag.from_dict(i) for i in resp['tags']]
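# Example usage (sketch, not part of this module); assumes an initialized ApiClient and would
# normally run inside the bot's async event loop:
#
#   tag_route = TagRoute(api_client)
#   tag = await tag_route.create_tag('greeting', 'Hello!', guild_id, user_id)
#   content = await tag_route.get_tag_content(guild_id, 'greeting')
#   await tag_route.delete_tag(guild_id, 'greeting')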
| from bot.api.api_client import ApiClient
from bot.api.base_route import BaseRoute
import typing as t
from bot.models import Tag
class TagRoute(BaseRoute):
def __init__(self, api_client: ApiClient):
super().__init__(api_client)
async def create_tag(self, name: str, content: str, guild_id: int, user_id: int, **kwargs) -> t.Optional[Tag]:
json = {
'Name': name,
'Content': content,
'GuildId': guild_id,
'UserId': user_id,
}
tag_dict = await self._client.post('tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def edit_tag_content(self, guild_id: int, name: str, content: str, **kwargs) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
'Content': content
}
tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def edit_tag_owner(self, guild_id: int, name: str, user_id: int, **kwargs) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
'UserId': user_id
}
tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def get_tag(self, guild_id: int, name: str) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
}
tag_dict = await self._client.get('bot/tags', data=json)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def get_tag_content(self, guild_id: int, name: str) -> t.Optional[str]:
json = {
'GuildId': guild_id,
'Name': name,
}
resp = await self._client.get('bot/tags', data=json)
return None if resp is None else resp['content']
async def delete_tag(self, guild_id: int, name: str, **kwargs):
"""
Makes a call to the API to delete a tag w/ the given GuildId and Name.
If successful, the API will return a dict with the given values:
- name The name of the tag.
- content The content of the tag.
- guildId The guild id the tag was in.
"""
json = {
'GuildId': guild_id,
'Name': name,
}
return await self._client.delete('bot/tags', data=json, **kwargs)
async def add_tag_use(self, guild_id: int, name: str, channel_id: int, user_id: int):
"""
Makes a call to the API to say a tag w/ the given Name was used.
If successful, the API will return a dict with the given values:
- name The name of the tag.
- guildId The guild id the tag is in.
"""
json = {
'GuildId': guild_id,
'Name': name,
'ChannelId': channel_id,
'UserId': user_id
}
return await self._client.post('bot/tags/invoke', data=json)
async def get_guilds_tags(self, guild_id: int) -> t.Iterator[Tag]:
resp = await self._client.get(f'guilds/{guild_id}/tags')
if not resp:
return []
return [Tag.from_dict(i) for i in resp['tags']] | en | 0.836019 | Makes a call to the API to delete a tag w/ the given GuildId and Name. If successful, the API will return a dict with the given values: - name The name of the tag. - content The content of the tag. - guildId The guild id the tag was in. Makes a call to the API to say a tag w/ the given Name was used. If successful, the API will return a dict with the given values: - name The name of the tag. - guildId The guild id the tag is in. | 2.248383 | 2 |
formfactor_AL.py | kirichoi/PolymerConnectome | 0 | 7940 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 10:59:00 2020
@author: user
"""
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import time
import itertools
import ctypes
def formfactor(args):
# with AL_dist_flat_glo.get_lock:
AL_dist_flat_glo_r = np.frombuffer(AL_dist_flat_glo.get_obj())
AL_dist_flat_glo_s = AL_dist_flat_glo_r.reshape((n_glo.value,m_glo.value))
# ffq = np.sum(np.cos(np.dot(np.logspace(-2,3,100)[args[0]]*np.array([1,0,0]),
# np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T)))
qr = np.logspace(-2,3,100)[args[0]]
rvec = np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T
cosx = np.cos(np.dot(qr*np.array([1,0,0]), rvec))
cosy = np.cos(np.dot(qr*np.array([0,1,0]), rvec))
cosz = np.cos(np.dot(qr*np.array([0,0,1]), rvec))
# cosxy = np.cos(np.dot(qr*np.array([0.707,0.707,0]), rvec))
# cosyz = np.cos(np.dot(qr*np.array([0,0.707,0.707]), rvec))
# cosxz = np.cos(np.dot(qr*np.array([0.707,0,0.707]), rvec))
# cosxyz = np.cos(np.dot(qr*np.array([0.577,0.577,0.577]), rvec))
ffq = np.sum(np.mean(np.array([cosx, cosy, cosz]), axis=0))
return ffq
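# Note (added): for one q value and one starting monomer, this returns the sum over all later
# monomers of cos(q·r_ij) averaged over the x, y and z axes -- a cheap three-direction stand-in
# for a full orientational average of the single-chain form factor.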
def parallelinit(AL_dist_flat_glo_, n_glo_, m_glo_):
global AL_dist_flat_glo, n_glo, m_glo
AL_dist_flat_glo = AL_dist_flat_glo_
n_glo = n_glo_
m_glo = m_glo_
if __name__ == '__main__':
AL_dist_flat = np.load(r'./AL_dist_flat.npy')
n = np.shape(AL_dist_flat)[0]
m = np.shape(AL_dist_flat)[1]
q_range = np.logspace(-2,3,100)
# r_x = np.array([1, 0, 0])
# q_range_glo = mp.Array(ctypes.c_double, q_range)
AL_dist_flat_glo = mp.Array(ctypes.c_double, AL_dist_flat.flatten())
n_glo = mp.Value(ctypes.c_int, n)
m_glo = mp.Value(ctypes.c_int, m)
# r_x_glo = mp.Array(ctypes.c_double, r_x)
paramlist = list(itertools.product(range(100), range(n)))
pool = mp.Pool(20, initializer=parallelinit, initargs=(AL_dist_flat_glo, n_glo, m_glo))
t1 = time.time()
results = pool.map(formfactor, paramlist)
pool.close()
t2 = time.time()
print(t2-t1)
np.save(r'./AL_results.npy', results)
Pq = 2*np.divide(np.sum(np.array(results).reshape(100, n), axis=1), n)
# fig = plt.figure(figsize=(8,6))
# plt.plot(q_range, Pq, lw=3, color='tab:orange')
# plt.xscale('log')
# plt.xlabel('$q$', fontsize=15)
# plt.ylabel('$P(q)$', fontsize=15)
# plt.tight_layout()
# plt.savefig(r'./AL_form_factor.pdf', dpi=300, bbox_inches='tight')
# plt.show()
fig = plt.figure(figsize=(8,6))
plt.plot(q_range, Pq, lw=3, color='tab:orange')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$q$', fontsize=15)
plt.ylabel('$P(q)$', fontsize=15)
plt.tight_layout()
plt.savefig(r'./AL_form_factor_log.pdf', dpi=300, bbox_inches='tight')
plt.show()
| # -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 10:59:00 2020
@author: user
"""
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import time
import itertools
import ctypes
def formfactor(args):
# with AL_dist_flat_glo.get_lock:
AL_dist_flat_glo_r = np.frombuffer(AL_dist_flat_glo.get_obj())
AL_dist_flat_glo_s = AL_dist_flat_glo_r.reshape((n_glo.value,m_glo.value))
# ffq = np.sum(np.cos(np.dot(np.logspace(-2,3,100)[args[0]]*np.array([1,0,0]),
# np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T)))
qr = np.logspace(-2,3,100)[args[0]]
rvec = np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T
cosx = np.cos(np.dot(qr*np.array([1,0,0]), rvec))
cosy = np.cos(np.dot(qr*np.array([0,1,0]), rvec))
cosz = np.cos(np.dot(qr*np.array([0,0,1]), rvec))
# cosxy = np.cos(np.dot(qr*np.array([0.707,0.707,0]), rvec))
# cosyz = np.cos(np.dot(qr*np.array([0,0.707,0.707]), rvec))
# cosxz = np.cos(np.dot(qr*np.array([0.707,0,0.707]), rvec))
# cosxyz = np.cos(np.dot(qr*np.array([0.577,0.577,0.577]), rvec))
ffq = np.sum(np.mean(np.array([cosx, cosy, cosz]), axis=0))
return ffq
def parallelinit(AL_dist_flat_glo_, n_glo_, m_glo_):
global AL_dist_flat_glo, n_glo, m_glo
AL_dist_flat_glo = AL_dist_flat_glo_
n_glo = n_glo_
m_glo = m_glo_
if __name__ == '__main__':
AL_dist_flat = np.load(r'./AL_dist_flat.npy')
n = np.shape(AL_dist_flat)[0]
m = np.shape(AL_dist_flat)[1]
q_range = np.logspace(-2,3,100)
# r_x = np.array([1, 0, 0])
# q_range_glo = mp.Array(ctypes.c_double, q_range)
AL_dist_flat_glo = mp.Array(ctypes.c_double, AL_dist_flat.flatten())
n_glo = mp.Value(ctypes.c_int, n)
m_glo = mp.Value(ctypes.c_int, m)
# r_x_glo = mp.Array(ctypes.c_double, r_x)
paramlist = list(itertools.product(range(100), range(n)))
pool = mp.Pool(20, initializer=parallelinit, initargs=(AL_dist_flat_glo, n_glo, m_glo))
t1 = time.time()
results = pool.map(formfactor, paramlist)
pool.close()
t2 = time.time()
print(t2-t1)
np.save(r'./AL_results.npy', results)
Pq = 2*np.divide(np.sum(np.array(results).reshape(100, n), axis=1), n)
# fig = plt.figure(figsize=(8,6))
# plt.plot(q_range, Pq, lw=3, color='tab:orange')
# plt.xscale('log')
# plt.xlabel('$q$', fontsize=15)
# plt.ylabel('$P(q)$', fontsize=15)
# plt.tight_layout()
# plt.savefig(r'./AL_form_factor.pdf', dpi=300, bbox_inches='tight')
# plt.show()
fig = plt.figure(figsize=(8,6))
plt.plot(q_range, Pq, lw=3, color='tab:orange')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$q$', fontsize=15)
plt.ylabel('$P(q)$', fontsize=15)
plt.tight_layout()
plt.savefig(r'./AL_form_factor_log.pdf', dpi=300, bbox_inches='tight')
plt.show()
| en | 0.361903 | # -*- coding: utf-8 -*- Created on Mon Sep 7 10:59:00 2020 @author: user # with AL_dist_flat_glo.get_lock: # ffq = np.sum(np.cos(np.dot(np.logspace(-2,3,100)[args[0]]*np.array([1,0,0]), # np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T))) # cosxy = np.cos(np.dot(qr*np.array([0.707,0.707,0]), rvec)) # cosyz = np.cos(np.dot(qr*np.array([0,0.707,0.707]), rvec)) # cosxz = np.cos(np.dot(qr*np.array([0.707,0,0.707]), rvec)) # cosxyz = np.cos(np.dot(qr*np.array([0.577,0.577,0.577]), rvec)) # r_x = np.array([1, 0, 0]) # q_range_glo = mp.Array(ctypes.c_double, q_range) # r_x_glo = mp.Array(ctypes.c_double, r_x) # fig = plt.figure(figsize=(8,6)) # plt.plot(q_range, Pq, lw=3, color='tab:orange') # plt.xscale('log') # plt.xlabel('$q$', fontsize=15) # plt.ylabel('$P(q)$', fontsize=15) # plt.tight_layout() # plt.savefig(r'./AL_form_factor.pdf', dpi=300, bbox_inches='tight') # plt.show() | 1.99023 | 2 |
utils/tests.py | nanodude/cairocffi | 0 | 7941 | # coding: utf-8
import io
import cairo # pycairo
import cairocffi
from pycairo_to_cairocffi import _UNSAFE_pycairo_context_to_cairocffi
from cairocffi_to_pycairo import _UNSAFE_cairocffi_context_to_pycairo
import pango_example
def test():
cairocffi_context = cairocffi.Context(cairocffi.PDFSurface(None, 10, 20))
cairocffi_context.scale(2, 3)
pycairo_context = _UNSAFE_cairocffi_context_to_pycairo(cairocffi_context)
cairocffi_context2 = _UNSAFE_pycairo_context_to_cairocffi(pycairo_context)
assert tuple(cairocffi_context.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert tuple(cairocffi_context2.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert tuple(pycairo_context.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert cairocffi_context2._pointer == cairocffi_context._pointer
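    # Added note: both wrappers point at the same underlying cairo_t, which is why the scale set
    # through cairocffi is visible from pycairo and survives the round trip.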
file_obj = io.BytesIO()
# Mostly test that this runs without raising.
pango_example.write_example_pdf(file_obj)
assert file_obj.getvalue().startswith(b'%PDF')
if __name__ == '__main__':
test()
| # coding: utf-8
import io
import cairo # pycairo
import cairocffi
from pycairo_to_cairocffi import _UNSAFE_pycairo_context_to_cairocffi
from cairocffi_to_pycairo import _UNSAFE_cairocffi_context_to_pycairo
import pango_example
def test():
cairocffi_context = cairocffi.Context(cairocffi.PDFSurface(None, 10, 20))
cairocffi_context.scale(2, 3)
pycairo_context = _UNSAFE_cairocffi_context_to_pycairo(cairocffi_context)
cairocffi_context2 = _UNSAFE_pycairo_context_to_cairocffi(pycairo_context)
assert tuple(cairocffi_context.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert tuple(cairocffi_context2.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert tuple(pycairo_context.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert cairocffi_context2._pointer == cairocffi_context._pointer
file_obj = io.BytesIO()
# Mostly test that this runs without raising.
pango_example.write_example_pdf(file_obj)
assert file_obj.getvalue().startswith(b'%PDF')
if __name__ == '__main__':
test()
| en | 0.902625 | # coding: utf-8 # pycairo # Mostly test that this runs without raising. | 2.485461 | 2 |
riddle.py | robertlit/monty-hall-problem | 0 | 7942 | import random
goat1 = random.randint(1, 3)
goat2 = random.randint(1, 3)
while goat1 == goat2:
goat2 = random.randint(1, 3)
success = 0
tries = 1_000_000
for _ in range(tries):
options = [1, 2, 3]
choice = random.randint(1, 3)
options.remove(choice)
if choice == goat1:
options.remove(goat2)
else:
options.remove(goat1)
choice = options[0]
if choice != goat1 and choice != goat2:
success = success + 1
print(success / tries)
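# Added note: this simulates the "always switch" strategy, which wins whenever the first pick was
# one of the two goats, so the printed frequency should approach 2/3 as tries grows.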
| import random
goat1 = random.randint(1, 3)
goat2 = random.randint(1, 3)
while goat1 == goat2:
goat2 = random.randint(1, 3)
success = 0
tries = 1_000_000
for _ in range(tries):
options = [1, 2, 3]
choice = random.randint(1, 3)
options.remove(choice)
if choice == goat1:
options.remove(goat2)
else:
options.remove(goat1)
choice = options[0]
if choice != goat1 and choice != goat2:
success = success + 1
print(success / tries)
| none | 1 | 3.506327 | 4 |
|
gentable/gen_test_cases.py | selavy/studies | 0 | 7943 | #!/usr/bin/env python3
import random
N = 32
M = 64
# NOTE: 0 is a reserved value
randu = lambda x: random.randint(1, 2**x-1)
randU32 = lambda: randu(32)
randU64 = lambda: randu(64)
fmt_by_dtype = {
'u32hex': '0x{:08x}',
'u64hex': '0x{:016x}',
}
cpp_by_dtype = {
'u32hex': 'uint32_t',
'u64hex': 'uint64_t',
}
# key = randU32()
# vals = [(key, randU32(), randU64()) for _ in range(N)]
# keys = [(x[0], x[1]) for x in vals]
# success = [random.choice(vals) for _ in range(M)]
# failure = []
keys = [(randU32(),) for _ in range(M)]
vals = [(randU32(), randU64()) for _ in range(N)]
def genval():
    # The original check `while y in vals` compared an int against the (u32, u64) tuples in
    # vals and could never match; check against the stored values and the keys instead.
    used = {v for row in vals for v in row} | {k[0] for k in keys}
    y = randU32()
    while y in used:
        y = randU32()
    return y
miss = [(genval(),) for _ in range(M)]
def print_vector(vals, name, dtypes, indent=0):
indent = ' ' * indent
tabs = indent + ' '
cpptypes = [cpp_by_dtype[dt] for dt in dtypes]
if len(cpptypes) == 1:
cctype = cpptypes[0]
def fmtrow(vs): return vs
else:
cctype = f"std::tuple<{', '.join(cpptypes)}>"
def fmtrow(vs): return f"{{ {vs} }}"
fmts = [fmt_by_dtype[dt] for dt in dtypes]
print(f"{indent}const std::vector<{cctype}> {name} = {{")
rows = [
tabs + fmtrow(', '.join([fmt.format(v) for v, fmt in zip(vs, fmts)])) + ','
for vs in vals
]
print("\n".join(rows))
print(f"{indent}}};")
print('TEST_CASE("Insert random values and look them up", "[gentbl]")')
print('{')
print_vector(keys, name='keys', dtypes=['u32hex'], indent=4)
print()
print_vector(vals, name='vals', dtypes=['u32hex', 'u64hex'], indent=4)
print()
print_vector(miss, name='miss', dtypes=['u32hex'], indent=4)
print()
print('}')
# print("const std::vector<std::tuple<uint32_t, uint32_t, uint64_t>> vs = {")
# for _ in range(N):
# print(" {{ 0x{:08x}, 0x{:08x}, 0x{:016x} }},".format(
# randU32(), randU32(), randU64()))
# print("};")
| #!/usr/bin/env python3
import random
N = 32
M = 64
# NOTE: 0 is a reserved value
randu = lambda x: random.randint(1, 2**x-1)
randU32 = lambda: randu(32)
randU64 = lambda: randu(64)
fmt_by_dtype = {
'u32hex': '0x{:08x}',
'u64hex': '0x{:016x}',
}
cpp_by_dtype = {
'u32hex': 'uint32_t',
'u64hex': 'uint64_t',
}
# key = randU32()
# vals = [(key, randU32(), randU64()) for _ in range(N)]
# keys = [(x[0], x[1]) for x in vals]
# success = [random.choice(vals) for _ in range(M)]
# failure = []
keys = [(randU32(),) for _ in range(M)]
vals = [(randU32(), randU64()) for _ in range(N)]
def genval():
y = randU32()
while y in vals:
y = randU32()
return y
miss = [(genval(),) for _ in range(M)]
def print_vector(vals, name, dtypes, indent=0):
indent = ' ' * indent
tabs = indent + ' '
cpptypes = [cpp_by_dtype[dt] for dt in dtypes]
if len(cpptypes) == 1:
cctype = cpptypes[0]
def fmtrow(vs): return vs
else:
cctype = f"std::tuple<{', '.join(cpptypes)}>"
def fmtrow(vs): return f"{{ {vs} }}"
fmts = [fmt_by_dtype[dt] for dt in dtypes]
print(f"{indent}const std::vector<{cctype}> {name} = {{")
rows = [
tabs + fmtrow(', '.join([fmt.format(v) for v, fmt in zip(vs, fmts)])) + ','
for vs in vals
]
print("\n".join(rows))
print(f"{indent}}};")
print('TEST_CASE("Insert random values and look them up", "[gentbl]")')
print('{')
print_vector(keys, name='keys', dtypes=['u32hex'], indent=4)
print()
print_vector(vals, name='vals', dtypes=['u32hex', 'u64hex'], indent=4)
print()
print_vector(miss, name='miss', dtypes=['u32hex'], indent=4)
print()
print('}')
# print("const std::vector<std::tuple<uint32_t, uint32_t, uint64_t>> vs = {")
# for _ in range(N):
# print(" {{ 0x{:08x}, 0x{:08x}, 0x{:016x} }},".format(
# randU32(), randU32(), randU64()))
# print("};")
| en | 0.584581 | #!/usr/bin/env python3 # NOTE: 0 is a reserved value # key = randU32() # vals = [(key, randU32(), randU64()) for _ in range(N)] # keys = [(x[0], x[1]) for x in vals] # success = [random.choice(vals) for _ in range(M)] # failure = [] # print("const std::vector<std::tuple<uint32_t, uint32_t, uint64_t>> vs = {") # for _ in range(N): # print(" {{ 0x{:08x}, 0x{:08x}, 0x{:016x} }},".format( # randU32(), randU32(), randU64())) # print("};") | 2.875069 | 3 |
examples/toy_env/run_toy_env.py | aaspeel/deer | 0 | 7944 | <filename>examples/toy_env/run_toy_env.py
"""Toy environment launcher. See the docs for more details about this environment.
"""
import sys
import logging
import numpy as np
from deer.default_parser import process_args
from deer.agent import NeuralAgent
from deer.learning_algos.q_net_keras import MyQNetwork
from Toy_env import MyEnv as Toy_env
import deer.experiment.base_controllers as bc
from deer.policies import EpsilonGreedyPolicy
class Defaults:
# ----------------------
# Experiment Parameters
# ----------------------
STEPS_PER_EPOCH = 1000
EPOCHS = 50
STEPS_PER_TEST = 500
PERIOD_BTW_SUMMARY_PERFS = 1
# ----------------------
# Environment Parameters
# ----------------------
FRAME_SKIP = 1
# ----------------------
# DQN Agent parameters:
# ----------------------
UPDATE_RULE = 'rmsprop'
LEARNING_RATE = 0.005
LEARNING_RATE_DECAY = 1.
DISCOUNT = 0.9
DISCOUNT_INC = 1.
DISCOUNT_MAX = 0.99
RMS_DECAY = 0.9
RMS_EPSILON = 0.0001
MOMENTUM = 0
CLIP_NORM = 1.0
EPSILON_START = 1.0
EPSILON_MIN = .1
EPSILON_DECAY = 10000
UPDATE_FREQUENCY = 1
REPLAY_MEMORY_SIZE = 1000000
BATCH_SIZE = 32
FREEZE_INTERVAL = 1000
DETERMINISTIC = True
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# --- Parse parameters ---
parameters = process_args(sys.argv[1:], Defaults)
if parameters.deterministic:
rng = np.random.RandomState(123456)
else:
rng = np.random.RandomState()
# --- Instantiate environment ---
env = Toy_env(rng)
# --- Instantiate qnetwork ---
qnetwork = MyQNetwork(
env,
parameters.rms_decay,
parameters.rms_epsilon,
parameters.momentum,
parameters.clip_norm,
parameters.freeze_interval,
parameters.batch_size,
parameters.update_rule,
rng)
train_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.1)
test_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.)
# --- Instantiate agent ---
agent = NeuralAgent(
env,
qnetwork,
parameters.replay_memory_size,
max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),
parameters.batch_size,
rng,
train_policy=train_policy,
test_policy=test_policy)
# --- Bind controllers to the agent ---
# Before every training epoch (periodicity=1), we want to print a summary of the agent's epsilon, discount and
# learning rate as well as the training epoch number.
agent.attach(bc.VerboseController(
evaluate_on='epoch',
periodicity=1))
# During training epochs, we want to train the agent after every [parameters.update_frequency] action it takes.
    # Plus, we also want to display, after each training episode (not after every single training step), the average
    # Bellman residual and the average of the V values obtained during the last episode, hence the last two arguments.
agent.attach(bc.TrainerController(
evaluate_on='action',
periodicity=parameters.update_frequency,
show_episode_avg_V_value=True,
show_avg_Bellman_residual=True))
    # At the end of every epoch, one has the possibility to modify the learning rate using a LearningRateController. Here we
# wish to update the learning rate after every training epoch (periodicity=1), according to the parameters given.
agent.attach(bc.LearningRateController(
initial_learning_rate=parameters.learning_rate,
learning_rate_decay=parameters.learning_rate_decay,
periodicity=1))
# Same for the discount factor.
agent.attach(bc.DiscountFactorController(
initial_discount_factor=parameters.discount,
discount_factor_growth=parameters.discount_inc,
discount_factor_max=parameters.discount_max,
periodicity=1))
# As for the discount factor and the learning rate, one can update periodically the parameter of the epsilon-greedy
    # policy implemented by the agent. This controller has a few more capabilities, as it allows one to choose more
    # precisely when to update epsilon: after every X actions, episodes or epochs. Epsilon can also be reset every
    # episode or epoch (or never, hence reset_every='none').
agent.attach(bc.EpsilonController(
initial_e=parameters.epsilon_start,
e_decays=parameters.epsilon_decay,
e_min=parameters.epsilon_min,
evaluate_on='action',
periodicity=1,
reset_every='none'))
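    # Rough arithmetic for the schedule above (added note): epsilon is annealed from
    # EPSILON_START=1.0 towards EPSILON_MIN=0.1 over EPSILON_DECAY=10000 actions, i.e. about
    # (1.0 - 0.1) / 10000 = 9e-5 per action, assuming deer applies a linear per-action decay.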
# All previous controllers control the agent during the epochs it goes through. However, we want to interleave a
# "test epoch" between each training epoch ("one of two epochs", hence the periodicity=2). We do not want these
# test epoch to interfere with the training of the agent, which is well established by the TrainerController,
# EpsilonController and alike. Therefore, we will disable these controllers for the whole duration of the test
# epochs interleaved this way, using the controllersToDisable argument of the InterleavedTestEpochController.
# The value of this argument is a list of the indexes of all controllers to disable, their index reflecting in
# which order they were added. Here, "0" is refering to the firstly attached controller, thus the
# VerboseController; "2" refers to the thirdly attached controller, thus the LearningRateController; etc. The order
# in which the indexes are listed is not important.
# For each test epoch, we want also to display the sum of all rewards obtained, hence the showScore=True.
# Finally, we want to call the summarizePerformance method of Toy_Env every [parameters.period_btw_summary_perfs]
# *test* epochs.
agent.attach(bc.InterleavedTestEpochController(
id=0,
epoch_length=parameters.steps_per_test,
periodicity=1,
show_score=True,
summarize_every=parameters.period_btw_summary_perfs))
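    # Added note: the comment above describes the controllersToDisable mechanism, but this call does
    # not pass it; if the installed deer version supports it, one could add an argument along the
    # lines of controllers_to_disable=[0, 1, 2, 3, 4] to freeze the training-related controllers
    # during test epochs (the exact argument name may differ between versions).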
# --- Run the experiment ---
agent.run(parameters.epochs, parameters.steps_per_epoch)
| <filename>examples/toy_env/run_toy_env.py
"""Toy environment launcher. See the docs for more details about this environment.
"""
import sys
import logging
import numpy as np
from deer.default_parser import process_args
from deer.agent import NeuralAgent
from deer.learning_algos.q_net_keras import MyQNetwork
from Toy_env import MyEnv as Toy_env
import deer.experiment.base_controllers as bc
from deer.policies import EpsilonGreedyPolicy
class Defaults:
# ----------------------
# Experiment Parameters
# ----------------------
STEPS_PER_EPOCH = 1000
EPOCHS = 50
STEPS_PER_TEST = 500
PERIOD_BTW_SUMMARY_PERFS = 1
# ----------------------
# Environment Parameters
# ----------------------
FRAME_SKIP = 1
# ----------------------
# DQN Agent parameters:
# ----------------------
UPDATE_RULE = 'rmsprop'
LEARNING_RATE = 0.005
LEARNING_RATE_DECAY = 1.
DISCOUNT = 0.9
DISCOUNT_INC = 1.
DISCOUNT_MAX = 0.99
RMS_DECAY = 0.9
RMS_EPSILON = 0.0001
MOMENTUM = 0
CLIP_NORM = 1.0
EPSILON_START = 1.0
EPSILON_MIN = .1
EPSILON_DECAY = 10000
UPDATE_FREQUENCY = 1
REPLAY_MEMORY_SIZE = 1000000
BATCH_SIZE = 32
FREEZE_INTERVAL = 1000
DETERMINISTIC = True
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# --- Parse parameters ---
parameters = process_args(sys.argv[1:], Defaults)
if parameters.deterministic:
rng = np.random.RandomState(123456)
else:
rng = np.random.RandomState()
# --- Instantiate environment ---
env = Toy_env(rng)
# --- Instantiate qnetwork ---
qnetwork = MyQNetwork(
env,
parameters.rms_decay,
parameters.rms_epsilon,
parameters.momentum,
parameters.clip_norm,
parameters.freeze_interval,
parameters.batch_size,
parameters.update_rule,
rng)
train_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.1)
test_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.)
# --- Instantiate agent ---
agent = NeuralAgent(
env,
qnetwork,
parameters.replay_memory_size,
max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),
parameters.batch_size,
rng,
train_policy=train_policy,
test_policy=test_policy)
# --- Bind controllers to the agent ---
# Before every training epoch (periodicity=1), we want to print a summary of the agent's epsilon, discount and
# learning rate as well as the training epoch number.
agent.attach(bc.VerboseController(
evaluate_on='epoch',
periodicity=1))
# During training epochs, we want to train the agent after every [parameters.update_frequency] action it takes.
# Plus, we also want to display after each training episode (!= than after every training) the average bellman
# residual and the average of the V values obtained during the last episode, hence the two last arguments.
agent.attach(bc.TrainerController(
evaluate_on='action',
periodicity=parameters.update_frequency,
show_episode_avg_V_value=True,
show_avg_Bellman_residual=True))
# Every epoch end, one has the possibility to modify the learning rate using a LearningRateController. Here we
# wish to update the learning rate after every training epoch (periodicity=1), according to the parameters given.
agent.attach(bc.LearningRateController(
initial_learning_rate=parameters.learning_rate,
learning_rate_decay=parameters.learning_rate_decay,
periodicity=1))
# Same for the discount factor.
agent.attach(bc.DiscountFactorController(
initial_discount_factor=parameters.discount,
discount_factor_growth=parameters.discount_inc,
discount_factor_max=parameters.discount_max,
periodicity=1))
# As for the discount factor and the learning rate, one can update periodically the parameter of the epsilon-greedy
# policy implemented by the agent. This controllers has a bit more capabilities, as it allows one to choose more
# precisely when to update epsilon: after every X action, episode or epoch. This parameter can also be reset every
# episode or epoch (or never, hence the resetEvery='none').
agent.attach(bc.EpsilonController(
initial_e=parameters.epsilon_start,
e_decays=parameters.epsilon_decay,
e_min=parameters.epsilon_min,
evaluate_on='action',
periodicity=1,
reset_every='none'))
# All previous controllers control the agent during the epochs it goes through. However, we want to interleave a
# "test epoch" between each training epoch ("one of two epochs", hence the periodicity=2). We do not want these
# test epoch to interfere with the training of the agent, which is well established by the TrainerController,
# EpsilonController and alike. Therefore, we will disable these controllers for the whole duration of the test
# epochs interleaved this way, using the controllersToDisable argument of the InterleavedTestEpochController.
# The value of this argument is a list of the indexes of all controllers to disable, their index reflecting in
# which order they were added. Here, "0" is refering to the firstly attached controller, thus the
# VerboseController; "2" refers to the thirdly attached controller, thus the LearningRateController; etc. The order
# in which the indexes are listed is not important.
# For each test epoch, we want also to display the sum of all rewards obtained, hence the showScore=True.
# Finally, we want to call the summarizePerformance method of Toy_Env every [parameters.period_btw_summary_perfs]
# *test* epochs.
agent.attach(bc.InterleavedTestEpochController(
id=0,
epoch_length=parameters.steps_per_test,
periodicity=1,
show_score=True,
summarize_every=parameters.period_btw_summary_perfs))
# --- Run the experiment ---
agent.run(parameters.epochs, parameters.steps_per_epoch)
| en | 0.868153 | Toy environment launcher. See the docs for more details about this environment. # ---------------------- # Experiment Parameters # ---------------------- # ---------------------- # Environment Parameters # ---------------------- # ---------------------- # DQN Agent parameters: # ---------------------- # --- Parse parameters --- # --- Instantiate environment --- # --- Instantiate qnetwork --- # --- Instantiate agent --- # --- Bind controllers to the agent --- # Before every training epoch (periodicity=1), we want to print a summary of the agent's epsilon, discount and # learning rate as well as the training epoch number. # During training epochs, we want to train the agent after every [parameters.update_frequency] action it takes. # Plus, we also want to display after each training episode (!= than after every training) the average bellman # residual and the average of the V values obtained during the last episode, hence the two last arguments. # Every epoch end, one has the possibility to modify the learning rate using a LearningRateController. Here we # wish to update the learning rate after every training epoch (periodicity=1), according to the parameters given. # Same for the discount factor. # As for the discount factor and the learning rate, one can update periodically the parameter of the epsilon-greedy # policy implemented by the agent. This controllers has a bit more capabilities, as it allows one to choose more # precisely when to update epsilon: after every X action, episode or epoch. This parameter can also be reset every # episode or epoch (or never, hence the resetEvery='none'). # All previous controllers control the agent during the epochs it goes through. However, we want to interleave a # "test epoch" between each training epoch ("one of two epochs", hence the periodicity=2). We do not want these # test epoch to interfere with the training of the agent, which is well established by the TrainerController, # EpsilonController and alike. Therefore, we will disable these controllers for the whole duration of the test # epochs interleaved this way, using the controllersToDisable argument of the InterleavedTestEpochController. # The value of this argument is a list of the indexes of all controllers to disable, their index reflecting in # which order they were added. Here, "0" is refering to the firstly attached controller, thus the # VerboseController; "2" refers to the thirdly attached controller, thus the LearningRateController; etc. The order # in which the indexes are listed is not important. # For each test epoch, we want also to display the sum of all rewards obtained, hence the showScore=True. # Finally, we want to call the summarizePerformance method of Toy_Env every [parameters.period_btw_summary_perfs] # *test* epochs. # --- Run the experiment --- | 2.086075 | 2 |
equilibration/sodium_models/seed_1/post_processing/rdf_calculations.py | Dynamical-Systems-Laboratory/IPMCsMD | 2 | 7945 | # ------------------------------------------------------------------
#
# RDF and CN related analysis
#
# ------------------------------------------------------------------
import sys
py_path = '../../../../postprocessing/'
sys.path.insert(0, py_path)
py_path = '../../../../postprocessing/io_operations/'
sys.path.insert(0, py_path)
import cn_and_rdf_lmp as crl
import io_module as io
#
# Input
#
# RDF and CN intput file
rdf_file = '../nafion.rdf'
# Output file
out_file = 'rdf_cn_averaged.txt'
# Number of bins
nbins = 300
# Number of columns
ncols = 10
crl.compute_time_average(rdf_file, out_file, nbins, ncols)
| # ------------------------------------------------------------------
#
# RDF and CN related analysis
#
# ------------------------------------------------------------------
import sys
py_path = '../../../../postprocessing/'
sys.path.insert(0, py_path)
py_path = '../../../../postprocessing/io_operations/'
sys.path.insert(0, py_path)
import cn_and_rdf_lmp as crl
import io_module as io
#
# Input
#
# RDF and CN intput file
rdf_file = '../nafion.rdf'
# Output file
out_file = 'rdf_cn_averaged.txt'
# Number of bins
nbins = 300
# Number of columns
ncols = 10
crl.compute_time_average(rdf_file, out_file, nbins, ncols)
| en | 0.358885 | # ------------------------------------------------------------------ # # RDF and CN related analysis # # ------------------------------------------------------------------ # # Input # # RDF and CN intput file # Output file # Number of bins # Number of columns | 2.400128 | 2 |
venv/Lib/site-packages/plotnine/geoms/geom_pointrange.py | EkremBayar/bayar | 0 | 7946 | from ..doctools import document
from .geom import geom
from .geom_path import geom_path
from .geom_point import geom_point
from .geom_linerange import geom_linerange
@document
class geom_pointrange(geom):
"""
Vertical interval represented by a line with a point
{usage}
Parameters
----------
{common_parameters}
    fatten : float, optional (default: 4)
A multiplicative factor used to increase the size of the
point along the line-range.
"""
DEFAULT_AES = {'alpha': 1, 'color': 'black', 'fill': None,
'linetype': 'solid', 'shape': 'o', 'size': 0.5}
REQUIRED_AES = {'x', 'y', 'ymin', 'ymax'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
'na_rm': False, 'fatten': 4}
@staticmethod
def draw_group(data, panel_params, coord, ax, **params):
geom_linerange.draw_group(data.copy(), panel_params,
coord, ax, **params)
data['size'] = data['size'] * params['fatten']
data['stroke'] = geom_point.DEFAULT_AES['stroke']
geom_point.draw_group(data, panel_params, coord, ax, **params)
@staticmethod
def draw_legend(data, da, lyr):
"""
Draw a point in the box
Parameters
----------
data : dataframe
da : DrawingArea
lyr : layer
Returns
-------
out : DrawingArea
"""
geom_path.draw_legend(data, da, lyr)
data['size'] = data['size'] * lyr.geom.params['fatten']
data['stroke'] = geom_point.DEFAULT_AES['stroke']
geom_point.draw_legend(data, da, lyr)
return da
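# Example usage (sketch, not part of this module); assumes a dataframe with precomputed bounds:
#
#   from plotnine import ggplot, aes
#   (ggplot(df, aes('dose', 'mean', ymin='lo', ymax='hi'))
#    + geom_pointrange(fatten=4))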
| from ..doctools import document
from .geom import geom
from .geom_path import geom_path
from .geom_point import geom_point
from .geom_linerange import geom_linerange
@document
class geom_pointrange(geom):
"""
Vertical interval represented by a line with a point
{usage}
Parameters
----------
{common_parameters}
fatten : float, optional (default: 2)
A multiplicative factor used to increase the size of the
point along the line-range.
"""
DEFAULT_AES = {'alpha': 1, 'color': 'black', 'fill': None,
'linetype': 'solid', 'shape': 'o', 'size': 0.5}
REQUIRED_AES = {'x', 'y', 'ymin', 'ymax'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
'na_rm': False, 'fatten': 4}
@staticmethod
def draw_group(data, panel_params, coord, ax, **params):
geom_linerange.draw_group(data.copy(), panel_params,
coord, ax, **params)
data['size'] = data['size'] * params['fatten']
data['stroke'] = geom_point.DEFAULT_AES['stroke']
geom_point.draw_group(data, panel_params, coord, ax, **params)
@staticmethod
def draw_legend(data, da, lyr):
"""
Draw a point in the box
Parameters
----------
data : dataframe
da : DrawingArea
lyr : layer
Returns
-------
out : DrawingArea
"""
geom_path.draw_legend(data, da, lyr)
data['size'] = data['size'] * lyr.geom.params['fatten']
data['stroke'] = geom_point.DEFAULT_AES['stroke']
geom_point.draw_legend(data, da, lyr)
return da
| en | 0.402598 | Vertical interval represented by a line with a point {usage} Parameters ---------- {common_parameters} fatten : float, optional (default: 2) A multiplicative factor used to increase the size of the point along the line-range. Draw a point in the box Parameters ---------- data : dataframe da : DrawingArea lyr : layer Returns ------- out : DrawingArea | 2.544566 | 3 |
app/backend-test/core_models/keras-experiments/run02_try_simple_CNN_generate.py | SummaLabs/DLS | 32 | 7947 | <filename>app/backend-test/core_models/keras-experiments/run02_try_simple_CNN_generate.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import json
import os
import skimage.io as skio
import matplotlib.pyplot as plt
import numpy as np
import keras
from keras.models import Model
from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense
from keras.utils.visualize_util import plot as kplot
##################################
def buildModelCNN(inpShape=(3,128,128), sizFlt = 3, numFltStart=16, numCls=2, numHidden=128, funact='relu'):
inpData = Input(shape=inpShape)
# Conv 1'st
x = Convolution2D(nb_filter=1 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(inpData)
x = MaxPooling2D(pool_size=(2,2))(x)
# Conv 2'nd
x = Convolution2D(nb_filter=2 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2,2))(x)
# Conv 3'rd
x = Convolution2D(nb_filter=3 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv 4'th
x = Convolution2D(nb_filter=4 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv 5'th
x = Convolution2D(nb_filter=5 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
#
x = Flatten()(x)
if numHidden is not None:
x = Dense(output_dim=numHidden, activation=funact)(x)
x = Dense(output_dim=numCls, activation='softmax')(x)
retModel = Model(inpData, x)
return retModel
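# Usage sketch (assumes the old Keras 1.x API used above): the returned model
# still has to be compiled before it can be trained, e.g.
#   model = buildModelCNN()
#   model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])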
##################################
def getBasicModelTemplate(modelName='model_1'):
retTemplate = {
"class_name": "Model",
"keras_version": keras.__version__,
"config": {
"name": "%s" % modelName,
"layers" : [],
"input_layers": [],
"output_layers": [],
}
}
return retTemplate
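# generateModelJsonDict() below fills this template by walking model.layers in
# order and linking each layer to the previous one via 'inbound_nodes', i.e. it
# mirrors the JSON layout Keras emits for a purely linear functional model.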
def generateModelJsonDict(model):
tmpl = getBasicModelTemplate()
tmpLayers = []
for ii,ll in enumerate(model.layers):
tmp = {
'class_name': type(ll).__name__,
'name': ll.name,
'config': ll.get_config(),
}
if ii==0:
tmp['inbound_nodes'] = []
else:
tmp['inbound_nodes'] = [[
[
model.layers[ii-1].name,
0,
0
]
]]
tmpLayers.append(tmp)
tmpl['config']['layers'] = tmpLayers
tmpl['config']['input_layers'] = [
[
model.layers[0].name,
0,
0
]
]
tmpl['config']['output_layers'] = [
[
model.layers[-1].name,
0,
0
]
]
return tmpl
##################################
if __name__ == '__main__':
model = buildModelCNN(inpShape=(3, 128, 128))
fimgModel = 'keras-model-cnn.jpg'
kplot(model, fimgModel, show_shapes=True)
# plt.imshow(skio.imread(fimgModel))
# plt.show()
model.summary()
print ('------')
numLayers = len(model.layers)
for ii,ll in enumerate(model.layers):
print ('[%d/%d] : %s' % (ii, numLayers, ll))
modelJson = generateModelJsonDict(model)
print ('----------------------')
print (json.dumps(modelJson, indent=4))
foutJson = 'test-model-cnn.json'
with open(foutJson, 'w') as f:
json.dump(modelJson, f, indent=4)
# print (json.dumps(modelJson, indent=4))
| <filename>app/backend-test/core_models/keras-experiments/run02_try_simple_CNN_generate.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import json
import os
import skimage.io as skio
import matplotlib.pyplot as plt
import numpy as np
import keras
from keras.models import Model
from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense
from keras.utils.visualize_util import plot as kplot
##################################
def buildModelCNN(inpShape=(3,128,128), sizFlt = 3, numFltStart=16, numCls=2, numHidden=128, funact='relu'):
inpData = Input(shape=inpShape)
# Conv 1'st
x = Convolution2D(nb_filter=1 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(inpData)
x = MaxPooling2D(pool_size=(2,2))(x)
# Conv 2'nd
x = Convolution2D(nb_filter=2 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2,2))(x)
# Conv 3'rd
x = Convolution2D(nb_filter=3 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv 4'th
x = Convolution2D(nb_filter=4 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv 5'th
x = Convolution2D(nb_filter=5 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
#
x = Flatten()(x)
if numHidden is not None:
x = Dense(output_dim=numHidden, activation=funact)(x)
x = Dense(output_dim=numCls, activation='softmax')(x)
retModel = Model(inpData, x)
return retModel
##################################
def getBasicModelTemplate(modelName='model_1'):
retTemplate = {
"class_name": "Model",
"keras_version": keras.__version__,
"config": {
"name": "%s" % modelName,
"layers" : [],
"input_layers": [],
"output_layers": [],
}
}
return retTemplate
def generateModelJsonDict(model):
tmpl = getBasicModelTemplate()
tmpLayers = []
for ii,ll in enumerate(model.layers):
tmp = {
'class_name': type(ll).__name__,
'name': ll.name,
'config': ll.get_config(),
}
if ii==0:
tmp['inbound_nodes'] = []
else:
tmp['inbound_nodes'] = [[
[
model.layers[ii-1].name,
0,
0
]
]]
tmpLayers.append(tmp)
tmpl['config']['layers'] = tmpLayers
tmpl['config']['input_layers'] = [
[
model.layers[0].name,
0,
0
]
]
tmpl['config']['output_layers'] = [
[
model.layers[-1].name,
0,
0
]
]
return tmpl
##################################
if __name__ == '__main__':
model = buildModelCNN(inpShape=(3, 128, 128))
fimgModel = 'keras-model-cnn.jpg'
kplot(model, fimgModel, show_shapes=True)
# plt.imshow(skio.imread(fimgModel))
# plt.show()
model.summary()
print ('------')
numLayers = len(model.layers)
for ii,ll in enumerate(model.layers):
print ('[%d/%d] : %s' % (ii, numLayers, ll))
modelJson = generateModelJsonDict(model)
print ('----------------------')
print (json.dumps(modelJson, indent=4))
foutJson = 'test-model-cnn.json'
with open(foutJson, 'w') as f:
json.dump(modelJson, f, indent=4)
# print (json.dumps(modelJson, indent=4))
| de | 0.385947 | #!/usr/bin/python # -*- coding: utf-8 -*- ################################## # Conv 1'st # Conv 2'nd # Conv 3'rd # Conv 4'th # Conv 5'th # ################################## ################################## # plt.imshow(skio.imread(fimgModel)) # plt.show() # print (json.dumps(modelJson, indent=4)) | 2.738786 | 3 |
tests/integration/test_interface.py | Synodic-Software/CPPython | 0 | 7948 | """
Test the integrations related to the internal interface implementation and the 'Interface' interface itself
"""
import pytest
from cppython_core.schema import InterfaceConfiguration
from pytest_cppython.plugin import InterfaceIntegrationTests
from cppython.console import ConsoleInterface
class TestCLIInterface(InterfaceIntegrationTests):
"""
The tests for our CLI interface
"""
@pytest.fixture(name="interface")
def fixture_interface(self):
"""
Override of the plugin provided interface fixture.
Returns:
ConsoleInterface -- The Interface object to use for the CPPython defined tests
"""
configuration = InterfaceConfiguration()
return ConsoleInterface(configuration)
| """
Test the integrations related to the internal interface implementation and the 'Interface' interface itself
"""
import pytest
from cppython_core.schema import InterfaceConfiguration
from pytest_cppython.plugin import InterfaceIntegrationTests
from cppython.console import ConsoleInterface
class TestCLIInterface(InterfaceIntegrationTests):
"""
The tests for our CLI interface
"""
@pytest.fixture(name="interface")
def fixture_interface(self):
"""
Override of the plugin provided interface fixture.
Returns:
ConsoleInterface -- The Interface object to use for the CPPython defined tests
"""
configuration = InterfaceConfiguration()
return ConsoleInterface(configuration)
| en | 0.669554 | Test the integrations related to the internal interface implementation and the 'Interface' interface itself The tests for our CLI interface Override of the plugin provided interface fixture. Returns: ConsoleInterface -- The Interface object to use for the CPPython defined tests | 2.244931 | 2 |
solutions/python3/894.py | sm2774us/amazon_interview_prep_2021 | 42 | 7949 | class Solution:
def allPossibleFBT(self, N):
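        # Enumerate all full binary trees with N nodes: one node is the root and
        # the remaining N-1 nodes are split into an odd-sized left subtree (i)
        # and an odd-sized right subtree (N-1-i). TreeNode is assumed to be the
        # usual LeetCode binary-tree node class.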
def constr(N):
if N == 1: yield TreeNode(0)
for i in range(1, N, 2):
for l in constr(i):
for r in constr(N - i - 1):
m = TreeNode(0)
m.left = l
m.right = r
yield m
return list(constr(N)) | class Solution:
def allPossibleFBT(self, N):
def constr(N):
if N == 1: yield TreeNode(0)
for i in range(1, N, 2):
for l in constr(i):
for r in constr(N - i - 1):
m = TreeNode(0)
m.left = l
m.right = r
yield m
return list(constr(N)) | none | 1 | 3.175186 | 3 |
|
src/main.py | srijankr/DAIN | 3 | 7950 | #@contact <NAME> (<EMAIL>), Georgia Institute of Technology
#@version 1.0
#@date 2021-08-17
#Influence-guided Data Augmentation for Neural Tensor Completion (DAIN)
#This software is free of charge under research purposes.
#For commercial purposes, please contact the main author.
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import argparse
import numpy as np
from dataset import TensorDataset
import torch.optim as optim
from model import MLP
import pandas as pd
import copy
import random
from sklearn.model_selection import train_test_split
import os
def parse_args():
parser = argparse.ArgumentParser(description="Run DAIN for the MLP architecture")
parser.add_argument('--path', nargs='?', default='data/synthetic_10K.tensor',
help='Input data path.')
parser.add_argument('--epochs', type=int, default=50,
help='Number of epochs.')
parser.add_argument('--batch_size', type=int, default=1024,
help='Batch size.')
parser.add_argument('--layers', nargs='?', default='[150,1024,1024,128]',
help="Size of each layer. Note that the first layer is the concatenation of tensor embeddings. So layers[0]/N (N=order) is the tensor embedding size.")
parser.add_argument('--lr', type=float, default=0.001,
help='Learning rate.')
parser.add_argument('--verbose', type=int, default=5,
help='Show performance per X iterations')
parser.add_argument('--gpu', type=str, default='0',
help='GPU number')
parser.add_argument('--output', type=str, default='demo.txt',
help = 'output name')
parser.add_argument('--train_ratio', type=float, default=0.9,
help = 'Ratio of training data')
return parser.parse_args()
def model_train_and_test(args, model, train_loader, val_loader,test_loader,first):
output_path = 'output/'+args.output
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr = args.lr)
device = model.device
min_val,min_test,min_epoch,final_model = 9999,9999,0,0
for epoch in range(args.epochs):
torch.cuda.empty_cache()
running_loss = 0.0
train_loss,valid_loss = 0,0
for i, data in enumerate(val_loader, 0):
inputs, labels, indices = data[0].to(device), data[1].to(device),data[2]
outputs = model(inputs).flatten()
if first==True:
inter = model.intermediate.cpu().detach().clone()
error = (outputs - labels).reshape(-1,1).cpu().detach().clone()
model.allgrad[epoch,indices,:] = torch.mul(inter,error)
loss = criterion(outputs,labels)
loss.backward()
valid_loss += loss.item()
del inputs,labels,outputs,model.intermediate
valid_loss /= (i+1)
test_loss, test_accuracy = 0,0
for i, data in enumerate(test_loader, 0):
inputs, labels,indices = data[0].to(device), data[1].to(device),data[2]
prediction = model(inputs).flatten()
loss = criterion(prediction,labels)
loss.backward()
test_accuracy += torch.sum(torch.pow((prediction-labels),2)).cpu().item()
del inputs,labels,prediction,model.intermediate
test_accuracy/=len(test_loader.dataset)
for i, data in enumerate(train_loader, 0):
inputs, labels,indices = data[0].to(device), data[1].to(device),data[2]
optimizer.zero_grad()
outputs = model(inputs).flatten()
if first==True:
inter = model.intermediate.cpu().detach().clone()
error = (outputs-labels).reshape(-1,1).cpu().detach().clone()
model.allgrad[epoch,indices,:] = torch.mul(inter,error)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
del inputs, labels, outputs,indices,model.intermediate
train_loss /= (i+1)
if epoch%args.verbose==0:
print('[%d] Train loss: %.3f\tValid loss = %.6f\t(Test RMSE = %.6f)\t' % (epoch + 1, train_loss, valid_loss,test_accuracy))
print('[%d] Train loss: %.3f\tValid loss = %.6f\t(Test RMSE = %.6f)\t' % (epoch + 1, train_loss, valid_loss,test_accuracy),file=open(output_path,"a"),flush=True)
if min_val<=valid_loss and epoch-min_epoch>=10:
break
if min_val>valid_loss:
min_val = valid_loss
min_test = test_accuracy
min_epoch = epoch
final_model = copy.deepcopy(model)
final_model.allgrad = copy.deepcopy(model.allgrad)
final_model.checkpoint = epoch+1
print('Finished Training\nFinal Test RMSE = {} @ (Epoch,validation loss) ({},{})\n'.format(min_test,min_epoch,min_val))
print('Finished Training\nFinal Test RMSE = {} @ (Epoch,validation loss) ({},{})\n'.format(min_test,min_epoch,min_val), file=open(output_path, "a"),flush=True)
del model
return min_test,final_model
def data_augmentation(trainset,new_tensor,new_val,val_loader,test_loader,args,device):
#Step 4: data augmentation
if new_tensor.shape[0]!=0:
cur_trainset = copy.deepcopy(trainset)
new_indices = torch.zeros(new_tensor.shape[0]).long()
cur_trainset.add(new_tensor,new_val,new_indices)
first = False
#Step 1: tensor embedding learning
else:
cur_trainset = copy.deepcopy(trainset)
first = True
layers = eval(args.layers)
train_loader = DataLoader(cur_trainset, batch_size=args.batch_size,shuffle=True)
model = MLP(cur_trainset, device, layers=layers).to(device)
model.allgrad = []
if first==True:
model.allgrad = torch.zeros(int(args.epochs),len(cur_trainset)+len(val_loader.dataset)+len(test_loader.dataset),model.last_size)
test_rmse,final_model = model_train_and_test(args, model, train_loader, val_loader, test_loader,first)
del cur_trainset
if new_tensor.shape[0]!=0:
del new_tensor
if new_val.shape[0]!=0:
del new_val
del model
if first==True:
print('[DONE] Step 1: tensor embedding learning')
#Step 2: cell importance calculation
train_idx,val_idx,test_idx = train_loader.dataset.indices,val_loader.dataset.indices,test_loader.dataset.indices
checkpoint = final_model.checkpoint
val_grad = torch.sum(final_model.allgrad[:checkpoint,val_idx,:],dim=1).squeeze()
maxv,maxp = -9999,0
final_model.importance = np.zeros(len(trainset))
for (i,idx) in enumerate(trainset.indices):
train_grad = final_model.allgrad[:checkpoint,idx,:].squeeze()
contribution = torch.mul(train_grad,val_grad)
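        # Influence-style score: inner product of this training cell's per-epoch
        # gradients with the aggregated validation gradients, summed over epochs.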
final_contribution = torch.sum(torch.sum(contribution,dim=1),dim=0).item()
final_model.importance[i] = final_contribution
final_model.importance = final_model.importance / max(final_model.importance)
return (test_rmse,final_model)
def main():
args = parse_args()
path = args.path
layers = eval(args.layers)
learning_rate = args.lr
batch_size = args.batch_size
epochs = args.epochs
verbose = args.verbose
output_path = 'output/'+args.output
    if not os.path.exists('output/'):
os.mkdir('output/')
dataset = TensorDataset(path)
trainset,valset, testset,indices = copy.deepcopy(dataset),copy.deepcopy(dataset),copy.deepcopy(dataset),np.arange(dataset.num_data)
data_train, data_test, labels_train, labels_test, index_train, index_test = train_test_split(dataset.tensor.numpy(), dataset.val.numpy(), indices, test_size=1-args.train_ratio)
data_train, data_val, labels_train, labels_val, index_train, index_val = train_test_split(data_train, labels_train, index_train, test_size=0.2)
trainset.tensor,trainset.val,trainset.num_data,trainset.indices = torch.from_numpy(data_train).long(),torch.from_numpy(labels_train).float(),data_train.shape[0],torch.from_numpy(index_train).long()
valset.tensor,valset.val,valset.num_data,valset.indices = torch.from_numpy(data_val).long(),torch.from_numpy(labels_val).float(),data_val.shape[0],torch.from_numpy(index_val).long()
testset.tensor, testset.val, testset.num_data,testset.indices = torch.from_numpy(data_test).long(), torch.from_numpy(labels_test).float(), data_test.shape[0],torch.from_numpy(index_test).long()
train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(valset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(testset, batch_size=batch_size, shuffle=True)
print('[DONE] Step 0: Dataset loading & train-val-test split')
print(dataset.dimensionality)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
# CUDA for PyTorch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
#Step 1&2. Train tensor embeddings & calculate cell importance
(rmse,model) = data_augmentation(trainset,torch.empty(0),torch.empty(0),val_loader,test_loader,args,device)
print('Test RMSE before 50% data augmentation = {}'.format(rmse))
print('Test RMSE before 50% data augmentation = {}'.format(rmse),file=open(output_path,"a"))
original = copy.deepcopy(model)
del model
cell_importance = abs(original.importance)
print('[DONE] Step 2: cell importance calculation')
#Step 3. entity importance calculation
entity_importance = [np.zeros(dataset.dimensionality[i]) for i in range(dataset.order)]
for i in range(len(cell_importance)):
for j in range(dataset.order):
entity = int(trainset.tensor[i,j])
entity_importance[j][entity] += cell_importance[i]
for i in range(dataset.order):
cur = entity_importance[i]
entity_importance[i] = cur/sum(cur)
print('[DONE] Step 3: entity importance calculation')
num_aug = int(0.5 * trainset.tensor.shape[0])
print('Number of augmented data = {}\tTotal number of training data = {}'.format(num_aug,num_aug+len(trainset)))
print('Number of augmented data = {}\tTotal number of training data = {}'.format(num_aug,num_aug+len(trainset)), file=open(output_path, "a"),flush=True)
#Step 4. perform data augmentation
indices = np.zeros((num_aug,trainset.order))
for i in range(dataset.order):
indices[:,i] = np.random.choice(list(range(0,dataset.dimensionality[i])),size=num_aug,p = entity_importance[i])
new_tensor = torch.from_numpy(indices).long()
new_val = original.predict(new_tensor)
print('[DONE] Step 4: data augmentation with entity importance')
(rmse,model) = data_augmentation(trainset,new_tensor,new_val,val_loader,test_loader,args,device)
print('Test RMSE after 50% data augmentation = {}'.format(rmse))
print('Test RMSE after 50% data augmentation = {}'.format(rmse),file=open(output_path,"a"))
del model
if __name__ == "__main__":
main()
| #@contact <NAME> (<EMAIL>), Georgia Institute of Technology
#@version 1.0
#@date 2021-08-17
#Influence-guided Data Augmentation for Neural Tensor Completion (DAIN)
#This software is free of charge under research purposes.
#For commercial purposes, please contact the main author.
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import argparse
import numpy as np
from dataset import TensorDataset
import torch.optim as optim
from model import MLP
import pandas as pd
import copy
import random
from sklearn.model_selection import train_test_split
import os
def parse_args():
parser = argparse.ArgumentParser(description="Run DAIN for the MLP architecture")
parser.add_argument('--path', nargs='?', default='data/synthetic_10K.tensor',
help='Input data path.')
parser.add_argument('--epochs', type=int, default=50,
help='Number of epochs.')
parser.add_argument('--batch_size', type=int, default=1024,
help='Batch size.')
parser.add_argument('--layers', nargs='?', default='[150,1024,1024,128]',
help="Size of each layer. Note that the first layer is the concatenation of tensor embeddings. So layers[0]/N (N=order) is the tensor embedding size.")
parser.add_argument('--lr', type=float, default=0.001,
help='Learning rate.')
parser.add_argument('--verbose', type=int, default=5,
help='Show performance per X iterations')
parser.add_argument('--gpu', type=str, default='0',
help='GPU number')
parser.add_argument('--output', type=str, default='demo.txt',
help = 'output name')
parser.add_argument('--train_ratio', type=float, default=0.9,
help = 'Ratio of training data')
return parser.parse_args()
def model_train_and_test(args, model, train_loader, val_loader,test_loader,first):
output_path = 'output/'+args.output
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr = args.lr)
device = model.device
min_val,min_test,min_epoch,final_model = 9999,9999,0,0
for epoch in range(args.epochs):
torch.cuda.empty_cache()
running_loss = 0.0
train_loss,valid_loss = 0,0
for i, data in enumerate(val_loader, 0):
inputs, labels, indices = data[0].to(device), data[1].to(device),data[2]
outputs = model(inputs).flatten()
if first==True:
inter = model.intermediate.cpu().detach().clone()
error = (outputs - labels).reshape(-1,1).cpu().detach().clone()
model.allgrad[epoch,indices,:] = torch.mul(inter,error)
loss = criterion(outputs,labels)
loss.backward()
valid_loss += loss.item()
del inputs,labels,outputs,model.intermediate
valid_loss /= (i+1)
test_loss, test_accuracy = 0,0
for i, data in enumerate(test_loader, 0):
inputs, labels,indices = data[0].to(device), data[1].to(device),data[2]
prediction = model(inputs).flatten()
loss = criterion(prediction,labels)
loss.backward()
test_accuracy += torch.sum(torch.pow((prediction-labels),2)).cpu().item()
del inputs,labels,prediction,model.intermediate
test_accuracy/=len(test_loader.dataset)
for i, data in enumerate(train_loader, 0):
inputs, labels,indices = data[0].to(device), data[1].to(device),data[2]
optimizer.zero_grad()
outputs = model(inputs).flatten()
if first==True:
inter = model.intermediate.cpu().detach().clone()
error = (outputs-labels).reshape(-1,1).cpu().detach().clone()
model.allgrad[epoch,indices,:] = torch.mul(inter,error)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
del inputs, labels, outputs,indices,model.intermediate
train_loss /= (i+1)
if epoch%args.verbose==0:
print('[%d] Train loss: %.3f\tValid loss = %.6f\t(Test RMSE = %.6f)\t' % (epoch + 1, train_loss, valid_loss,test_accuracy))
print('[%d] Train loss: %.3f\tValid loss = %.6f\t(Test RMSE = %.6f)\t' % (epoch + 1, train_loss, valid_loss,test_accuracy),file=open(output_path,"a"),flush=True)
if min_val<=valid_loss and epoch-min_epoch>=10:
break
if min_val>valid_loss:
min_val = valid_loss
min_test = test_accuracy
min_epoch = epoch
final_model = copy.deepcopy(model)
final_model.allgrad = copy.deepcopy(model.allgrad)
final_model.checkpoint = epoch+1
print('Finished Training\nFinal Test RMSE = {} @ (Epoch,validation loss) ({},{})\n'.format(min_test,min_epoch,min_val))
print('Finished Training\nFinal Test RMSE = {} @ (Epoch,validation loss) ({},{})\n'.format(min_test,min_epoch,min_val), file=open(output_path, "a"),flush=True)
del model
return min_test,final_model
def data_augmentation(trainset,new_tensor,new_val,val_loader,test_loader,args,device):
#Step 4: data augmentation
if new_tensor.shape[0]!=0:
cur_trainset = copy.deepcopy(trainset)
new_indices = torch.zeros(new_tensor.shape[0]).long()
cur_trainset.add(new_tensor,new_val,new_indices)
first = False
#Step 1: tensor embedding learning
else:
cur_trainset = copy.deepcopy(trainset)
first = True
layers = eval(args.layers)
train_loader = DataLoader(cur_trainset, batch_size=args.batch_size,shuffle=True)
model = MLP(cur_trainset, device, layers=layers).to(device)
model.allgrad = []
if first==True:
model.allgrad = torch.zeros(int(args.epochs),len(cur_trainset)+len(val_loader.dataset)+len(test_loader.dataset),model.last_size)
test_rmse,final_model = model_train_and_test(args, model, train_loader, val_loader, test_loader,first)
del cur_trainset
if new_tensor.shape[0]!=0:
del new_tensor
if new_val.shape[0]!=0:
del new_val
del model
if first==True:
print('[DONE] Step 1: tensor embedding learning')
#Step 2: cell importance calculation
train_idx,val_idx,test_idx = train_loader.dataset.indices,val_loader.dataset.indices,test_loader.dataset.indices
checkpoint = final_model.checkpoint
val_grad = torch.sum(final_model.allgrad[:checkpoint,val_idx,:],dim=1).squeeze()
maxv,maxp = -9999,0
final_model.importance = np.zeros(len(trainset))
for (i,idx) in enumerate(trainset.indices):
train_grad = final_model.allgrad[:checkpoint,idx,:].squeeze()
contribution = torch.mul(train_grad,val_grad)
final_contribution = torch.sum(torch.sum(contribution,dim=1),dim=0).item()
final_model.importance[i] = final_contribution
final_model.importance = final_model.importance / max(final_model.importance)
return (test_rmse,final_model)
def main():
args = parse_args()
path = args.path
layers = eval(args.layers)
learning_rate = args.lr
batch_size = args.batch_size
epochs = args.epochs
verbose = args.verbose
output_path = 'output/'+args.output
if os.path.exists('output/')==False:
os.mkdir('output/')
dataset = TensorDataset(path)
trainset,valset, testset,indices = copy.deepcopy(dataset),copy.deepcopy(dataset),copy.deepcopy(dataset),np.arange(dataset.num_data)
data_train, data_test, labels_train, labels_test, index_train, index_test = train_test_split(dataset.tensor.numpy(), dataset.val.numpy(), indices, test_size=1-args.train_ratio)
data_train, data_val, labels_train, labels_val, index_train, index_val = train_test_split(data_train, labels_train, index_train, test_size=0.2)
trainset.tensor,trainset.val,trainset.num_data,trainset.indices = torch.from_numpy(data_train).long(),torch.from_numpy(labels_train).float(),data_train.shape[0],torch.from_numpy(index_train).long()
valset.tensor,valset.val,valset.num_data,valset.indices = torch.from_numpy(data_val).long(),torch.from_numpy(labels_val).float(),data_val.shape[0],torch.from_numpy(index_val).long()
testset.tensor, testset.val, testset.num_data,testset.indices = torch.from_numpy(data_test).long(), torch.from_numpy(labels_test).float(), data_test.shape[0],torch.from_numpy(index_test).long()
train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(valset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(testset, batch_size=batch_size, shuffle=True)
print('[DONE] Step 0: Dataset loading & train-val-test split')
print(dataset.dimensionality)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
# CUDA for PyTorch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
#Step 1&2. Train tensor embeddings & calculate cell importance
(rmse,model) = data_augmentation(trainset,torch.empty(0),torch.empty(0),val_loader,test_loader,args,device)
print('Test RMSE before 50% data augmentation = {}'.format(rmse))
print('Test RMSE before 50% data augmentation = {}'.format(rmse),file=open(output_path,"a"))
original = copy.deepcopy(model)
del model
cell_importance = abs(original.importance)
print('[DONE] Step 2: cell importance calculation')
#Step 3. entity importance calculation
entity_importance = [np.zeros(dataset.dimensionality[i]) for i in range(dataset.order)]
for i in range(len(cell_importance)):
for j in range(dataset.order):
entity = int(trainset.tensor[i,j])
entity_importance[j][entity] += cell_importance[i]
for i in range(dataset.order):
cur = entity_importance[i]
entity_importance[i] = cur/sum(cur)
print('[DONE] Step 3: entity importance calculation')
num_aug = int(0.5 * trainset.tensor.shape[0])
print('Number of augmented data = {}\tTotal number of training data = {}'.format(num_aug,num_aug+len(trainset)))
print('Number of augmented data = {}\tTotal number of training data = {}'.format(num_aug,num_aug+len(trainset)), file=open(output_path, "a"),flush=True)
#Step 4. perform data augmentation
indices = np.zeros((num_aug,trainset.order))
for i in range(dataset.order):
indices[:,i] = np.random.choice(list(range(0,dataset.dimensionality[i])),size=num_aug,p = entity_importance[i])
new_tensor = torch.from_numpy(indices).long()
new_val = original.predict(new_tensor)
print('[DONE] Step 4: data augmentation with entity importance')
(rmse,model) = data_augmentation(trainset,new_tensor,new_val,val_loader,test_loader,args,device)
print('Test RMSE after 50% data augmentation = {}'.format(rmse))
print('Test RMSE after 50% data augmentation = {}'.format(rmse),file=open(output_path,"a"))
del model
if __name__ == "__main__":
main()
| en | 0.624461 | #@contact <NAME> (<EMAIL>), Georgia Institute of Technology #@version 1.0 #@date 2021-08-17 #Influence-guided Data Augmentation for Neural Tensor Completion (DAIN) #This software is free of charge under research purposes. #For commercial purposes, please contact the main author. #Step 4: data augmentation #Step 1: tensor embedding learning #Step 2: cell importance calculation # CUDA for PyTorch #Step 1&2. Train tensor embeddings & calculate cell importance #Step 3. entity importance calculation #Step 4. perform data augmentation | 2.499927 | 2 |
pay-api/tests/unit/api/test_fee.py | saravanpa-aot/sbc-pay | 0 | 7951 | <filename>pay-api/tests/unit/api/test_fee.py
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the fees end-point.
Test-Suite to ensure that the /fees endpoint is working as expected.
"""
import json
from datetime import date, timedelta
from pay_api.models import CorpType, FeeCode, FeeSchedule, FilingType
from pay_api.schemas import utils as schema_utils
from pay_api.utils.enums import Role
from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header
def test_fees_with_corp_type_and_filing_type(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_valid_start_date(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now - timedelta(1))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert not schema_utils.validate(rv.json, 'problem')[0]
def test_fees_with_corp_type_and_filing_type_with_invalid_start_date(session, client, jwt, app):
"""Assert that the endpoint returns 400."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now + timedelta(1))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 400
assert schema_utils.validate(rv.json, 'problem')[0]
assert not schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_valid_end_date(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now - timedelta(1),
now)
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_invalid_end_date(session, client, jwt, app):
"""Assert that the endpoint returns 400."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now - timedelta(2),
now - timedelta(1))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 400
assert schema_utils.validate(rv.json, 'problem')[0]
def test_calculate_fees_with_waive_fees(session, client, jwt, app):
"""Assert that the endpoint returns 201."""
token = jwt.create_jwt(get_claims(role='staff'), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?waiveFees=true', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 0
def test_calculate_fees_with_waive_fees_unauthorized(session, client, jwt, app):
"""Assert that the endpoint returns 201."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?waiveFees=true', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 100
def test_fees_with_quantity(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?quantity=10', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
def test_calculate_fees_for_service_fee(session, client, jwt, app):
"""Assert that the endpoint returns 201."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
service_fee = factory_fee_model('SF01', 1.5)
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
service_fee=service_fee)
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 100
assert rv.json.get('serviceFees') == 1.5
def test_calculate_fees_with_zero_service_fee(session, client, jwt, app):
"""Assert that service fee is zero if the filing fee is zero."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 0))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 0
assert rv.json.get('serviceFees') == 0
def test_fee_for_account_fee_settings(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(role=Role.SYSTEM.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
rv = client.post('/api/v1/accounts', data=json.dumps(get_gov_account_payload()),
headers=headers)
account_id = rv.json.get('authAccountId')
# Create account fee details.
token = jwt.create_jwt(get_claims(role=Role.MANAGE_ACCOUNTS.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
client.post(f'/api/v1/accounts/{account_id}/fees', data=json.dumps({'accountFees': [
{
'applyFilingFees': False,
'serviceFeeCode': 'TRF02', # 1.0
'product': 'BUSINESS'
}
]}), headers=headers)
# Get fee for this account.
token = jwt.create_jwt(get_claims(role=Role.EDITOR.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json', 'Account-Id': account_id}
rv = client.get('/api/v1/fees/BEN/BCANN', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
# assert filing fee is not applied and service fee is applied
assert rv.json.get('filingFees') == 0
assert rv.json.get('serviceFees') == 1.0
# Now change the settings to apply filing fees and assert
token = jwt.create_jwt(get_claims(role=Role.MANAGE_ACCOUNTS.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
client.put(f'/api/v1/accounts/{account_id}/fees/BUSINESS', data=json.dumps({
'applyFilingFees': True,
'serviceFeeCode': 'TRF01', # 1.5
'product': 'BUSINESS'
}), headers=headers)
# Get fee for this account.
token = jwt.create_jwt(get_claims(role=Role.EDITOR.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json', 'Account-Id': account_id}
rv = client.get('/api/v1/fees/BEN/BCANN', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
# assert filing fee is applied and service fee is applied
assert rv.json.get('filingFees') > 0
assert rv.json.get('serviceFees') == 1.5
def factory_filing_type_model(
filing_type_code: str,
filing_description: str = 'TEST'):
"""Return the filing type model."""
filing_type = FilingType(code=filing_type_code,
description=filing_description)
filing_type.save()
return filing_type
def factory_fee_model(
fee_code: str,
amount: int):
"""Return the fee code model."""
fee_code_master = FeeCode(code=fee_code,
amount=amount)
fee_code_master.save()
return fee_code_master
def factory_corp_type_model(
corp_type_code: str,
corp_type_description: str):
"""Return the corp type model."""
corp_type = CorpType(code=corp_type_code,
description=corp_type_description)
corp_type.save()
return corp_type
def factory_fee_schedule_model(
filing_type: FilingType,
corp_type: CorpType,
fee_code: FeeCode,
fee_start_date: date = date.today(),
fee_end_date: date = None,
service_fee: FeeCode = None):
"""Return the fee schedule model."""
fee_schedule = FeeSchedule(filing_type_code=filing_type.code,
corp_type_code=corp_type.code,
fee_code=fee_code.code,
fee_start_date=fee_start_date,
fee_end_date=fee_end_date
)
if service_fee:
fee_schedule.service_fee_code = service_fee.code
fee_schedule.save()
return fee_schedule
| <filename>pay-api/tests/unit/api/test_fee.py
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the fees end-point.
Test-Suite to ensure that the /fees endpoint is working as expected.
"""
import json
from datetime import date, timedelta
from pay_api.models import CorpType, FeeCode, FeeSchedule, FilingType
from pay_api.schemas import utils as schema_utils
from pay_api.utils.enums import Role
from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header
def test_fees_with_corp_type_and_filing_type(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_valid_start_date(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now - timedelta(1))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert not schema_utils.validate(rv.json, 'problem')[0]
def test_fees_with_corp_type_and_filing_type_with_invalid_start_date(session, client, jwt, app):
"""Assert that the endpoint returns 400."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now + timedelta(1))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 400
assert schema_utils.validate(rv.json, 'problem')[0]
assert not schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_valid_end_date(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now - timedelta(1),
now)
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_invalid_end_date(session, client, jwt, app):
"""Assert that the endpoint returns 400."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now - timedelta(2),
now - timedelta(1))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 400
assert schema_utils.validate(rv.json, 'problem')[0]
def test_calculate_fees_with_waive_fees(session, client, jwt, app):
"""Assert that the endpoint returns 201."""
token = jwt.create_jwt(get_claims(role='staff'), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?waiveFees=true', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 0
def test_calculate_fees_with_waive_fees_unauthorized(session, client, jwt, app):
"""Assert that the endpoint returns 201."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?waiveFees=true', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 100
def test_fees_with_quantity(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?quantity=10', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
def test_calculate_fees_for_service_fee(session, client, jwt, app):
"""Assert that the endpoint returns 201."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
service_fee = factory_fee_model('SF01', 1.5)
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
service_fee=service_fee)
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 100
assert rv.json.get('serviceFees') == 1.5
def test_calculate_fees_with_zero_service_fee(session, client, jwt, app):
"""Assert that service fee is zero if the filing fee is zero."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 0))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 0
assert rv.json.get('serviceFees') == 0
def test_fee_for_account_fee_settings(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(role=Role.SYSTEM.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
rv = client.post('/api/v1/accounts', data=json.dumps(get_gov_account_payload()),
headers=headers)
account_id = rv.json.get('authAccountId')
# Create account fee details.
token = jwt.create_jwt(get_claims(role=Role.MANAGE_ACCOUNTS.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
client.post(f'/api/v1/accounts/{account_id}/fees', data=json.dumps({'accountFees': [
{
'applyFilingFees': False,
'serviceFeeCode': 'TRF02', # 1.0
'product': 'BUSINESS'
}
]}), headers=headers)
# Get fee for this account.
token = jwt.create_jwt(get_claims(role=Role.EDITOR.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json', 'Account-Id': account_id}
rv = client.get('/api/v1/fees/BEN/BCANN', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
# assert filing fee is not applied and service fee is applied
assert rv.json.get('filingFees') == 0
assert rv.json.get('serviceFees') == 1.0
# Now change the settings to apply filing fees and assert
token = jwt.create_jwt(get_claims(role=Role.MANAGE_ACCOUNTS.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
client.put(f'/api/v1/accounts/{account_id}/fees/BUSINESS', data=json.dumps({
'applyFilingFees': True,
'serviceFeeCode': 'TRF01', # 1.5
'product': 'BUSINESS'
}), headers=headers)
# Get fee for this account.
token = jwt.create_jwt(get_claims(role=Role.EDITOR.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json', 'Account-Id': account_id}
rv = client.get('/api/v1/fees/BEN/BCANN', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
# assert filing fee is applied and service fee is applied
assert rv.json.get('filingFees') > 0
assert rv.json.get('serviceFees') == 1.5
def factory_filing_type_model(
filing_type_code: str,
filing_description: str = 'TEST'):
"""Return the filing type model."""
filing_type = FilingType(code=filing_type_code,
description=filing_description)
filing_type.save()
return filing_type
def factory_fee_model(
fee_code: str,
amount: int):
"""Return the fee code model."""
fee_code_master = FeeCode(code=fee_code,
amount=amount)
fee_code_master.save()
return fee_code_master
def factory_corp_type_model(
corp_type_code: str,
corp_type_description: str):
"""Return the corp type model."""
corp_type = CorpType(code=corp_type_code,
description=corp_type_description)
corp_type.save()
return corp_type
def factory_fee_schedule_model(
filing_type: FilingType,
corp_type: CorpType,
fee_code: FeeCode,
fee_start_date: date = date.today(),
fee_end_date: date = None,
service_fee: FeeCode = None):
"""Return the fee schedule model."""
fee_schedule = FeeSchedule(filing_type_code=filing_type.code,
corp_type_code=corp_type.code,
fee_code=fee_code.code,
fee_start_date=fee_start_date,
fee_end_date=fee_end_date
)
if service_fee:
fee_schedule.service_fee_code = service_fee.code
fee_schedule.save()
return fee_schedule
| en | 0.814648 | # Copyright © 2019 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests to assure the fees end-point. Test-Suite to ensure that the /fees endpoint is working as expected. Assert that the endpoint returns 200. Assert that the endpoint returns 200. # Insert a record first and then query for it Assert that the endpoint returns 400. # Insert a record first and then query for it Assert that the endpoint returns 200. # Insert a record first and then query for it Assert that the endpoint returns 400. # Insert a record first and then query for it Assert that the endpoint returns 201. Assert that the endpoint returns 201. Assert that the endpoint returns 200. Assert that the endpoint returns 201. Assert that service fee is zero if the filing fee is zero. Assert that the endpoint returns 200. # Create account fee details. # 1.0 # Get fee for this account. # assert filing fee is not applied and service fee is applied # Now change the settings to apply filing fees and assert # 1.5 # Get fee for this account. # assert filing fee is applied and service fee is applied Return the filing type model. Return the fee code model. Return the corp type model. Return the fee schedule model. | 2.02236 | 2 |
backend/app/auth/service.py | pers0n4/yoonyaho | 0 | 7952 | from datetime import datetime, timedelta
import jwt
from flask import current_app
from app import db
from app.user.repository import UserRepository
class AuthService:
def __init__(self) -> None:
self._user_repository = UserRepository(db.session)
def create_token(self, data) -> dict:
user = self._user_repository.find_one(user_id=data["user_id"])
if user is None:
# user not found
raise RuntimeError
if not user.check_password(data["password"]):
            # wrong password
raise RuntimeError
access_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(minutes=60),
"user_id": str(user.id),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
refresh_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(hours=4),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
return {"access_token": access_token, "refresh_token": refresh_token}
def validate_token(self, token) -> dict:
return jwt.decode(token, current_app.config["SECRET_KEY"], algorithms=["HS512"])
def refresh_token(self, token) -> dict:
payload = self.validate_token(token)
user = self._user_repository.find_one(id=payload["user_id"])
if user is None:
# user not found
raise RuntimeError
access_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(minutes=60),
"user_id": str(user.id),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
return {"access_token": access_token}
| from datetime import datetime, timedelta
import jwt
from flask import current_app
from app import db
from app.user.repository import UserRepository
class AuthService:
def __init__(self) -> None:
self._user_repository = UserRepository(db.session)
def create_token(self, data) -> dict:
user = self._user_repository.find_one(user_id=data["user_id"])
if user is None:
# user not found
raise RuntimeError
if not user.check_password(data["password"]):
# password
raise RuntimeError
access_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(minutes=60),
"user_id": str(user.id),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
refresh_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(hours=4),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
return {"access_token": access_token, "refresh_token": refresh_token}
def validate_token(self, token) -> dict:
return jwt.decode(token, current_app.config["SECRET_KEY"], algorithms=["HS512"])
def refresh_token(self, token) -> dict:
payload = self.validate_token(token)
user = self._user_repository.find_one(id=payload["user_id"])
if user is None:
# user not found
raise RuntimeError
access_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(minutes=60),
"user_id": str(user.id),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
return {"access_token": access_token}
| en | 0.961371 | # user not found # password # user not found | 2.605165 | 3 |
scripts/qlearn.py | kebaek/minigrid | 5 | 7953 | <filename>scripts/qlearn.py
import _init_paths
import argparse
import random
import time
import utils
import os
from collections import defaultdict
import numpy as np
import csv
from progress.bar import IncrementalBar
from utils.hash import *
def parse_arguments():
parser = argparse.ArgumentParser()
# add arguments
parser.add_argument('--env', type=str, default='../env/maze_2.txt',
help='name of the environment')
parser.add_argument("--dir", type=str, default="",
help="name of the directory to episodes")
parser.add_argument('--num_episode', type=int, default=2000,
help='the number of train episodes')
parser.add_argument('--max_episode_length', type=int, default=200,
help='the maximum of the length of an episode')
parser.add_argument('--lr', type=float, default=0.1,
help='the learning rate of the q learning algorithm')
parser.add_argument('--discount', type=float, default=0.9,
help='the discount factor')
parser.add_argument('--eps', type=float, default=0.8,
help='the value for the eps-greedy strategy')
parser.add_argument('--seed', type=int, default=0,
help='random seed for environment')
# parse arguments
args = parser.parse_args()
return args
def train(maze_env, model_dir, num_episode, max_episode_length, lr,
discount, eps, **kwargs):
# create value function and q value function
q_value_function = {}
visited_actions = {}
visited_states = set()
q_value_function = defaultdict(lambda: 0, q_value_function)
visited_actions = defaultdict(lambda: [False]*maze_env.action_space.n, visited_actions)
# train agent
start = time.time()
episodes_length = []
bar = IncrementalBar('Countdown', max = num_episode)
print("Start to train q value function.")
for _ in range(num_episode):
current_length = 0
is_terminal = 0
obs = maze_env.reset()
state = str(maze_env)
while not is_terminal:
visited_states.add(state)
if random.random() <= eps:
action = random.randint(0, maze_env.action_space.n - 1)
else:
action, value = get_max_action(state, q_value_function, maze_env)
if value == 0:
if False in visited_actions[state]:
action = visited_actions[state].index(False)
else:
action = random.randint(0, maze_env.action_space.n - 1)
visited_actions[state][action] = True
next_obs, reward, is_terminal, info = maze_env.step(action)
next_state = str(maze_env)
current_length += 1
next_action, next_q_value = get_max_action(next_state, q_value_function, maze_env)
max_q_value_target = reward + discount*next_q_value
q_value_function[hash_state_action(state, action)] = (1 - lr) * \
q_value_function[hash_state_action(state, action)] + lr*max_q_value_target
state = next_state
bar.next()
episodes_length.append(current_length)
print("Finish training q value function.")
end = time.time()
bar.finish()
print("[Statistics]: Avg_length {0} and Time {1}s".format(sum(episodes_length) / len(episodes_length), end - start))
# output
print("Start to output q value function and policy to file.")
file = open(model_dir + '/q_value.csv', "w")
fieldnames = ['state', 'action', 'value']
writer = csv.DictWriter(file, fieldnames=fieldnames)
for key, value in q_value_function.items():
state, action = reverse_hashing_state_action(key)
writer.writerow({'state':state, 'action':action, 'value':value})
file.close()
file = open(model_dir + '/policy.csv', "w")
fieldnames = ['state', 'action']
writer = csv.DictWriter(file, fieldnames=fieldnames)
for state in visited_states:
action, value = get_max_action(state, q_value_function, maze_env)
if value == 0:
action = -1
writer.writerow({'state':state, 'action':action})
file.close()
print("Finish outputting q value function to file.")
def main():
# parse arguments
args = parse_arguments()
# create env
maze_env = utils.make_env(args.env, args.seed + 10000)
print('Environment Loaded\n')
model_dir = utils.get_model_dir(args.env + '/' + args.dir + '/aQL/lr%.2f_discount%.2f_eps%.2f/epi%dseed%d'%(args.lr, args.discount, args.eps, args.num_episode, args.seed))
os.makedirs(model_dir, exist_ok=True)
print(model_dir)
# train agent
train(maze_env, model_dir, **vars(args))
if __name__ == '__main__':
    main()
| en | 0.045597 | # add arguments # parse arguments # create value function and q value function # train agent # output # parse arguments # create env # train agent | 2.53133 | 3 |
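# Note for scripts/qlearn.py above -- not part of the original script. A hypothetical
# invocation built from the argparse options defined in parse_arguments():
#
#   python qlearn.py --env ../env/maze_2.txt --dir run1 \
#       --num_episode 2000 --max_episode_length 200 --lr 0.1 --discount 0.9 --eps 0.8 --seed 0
#
# train() writes <model_dir>/q_value.csv and <model_dir>/policy.csv. The tabular update it
# applies is the standard Q-learning rule, restated here as a stand-alone sketch:
def q_learning_update(q_sa, reward, max_q_next, lr=0.1, discount=0.9):
    # Blend the old estimate with the bootstrapped target, exactly as in train().
    return (1 - lr) * q_sa + lr * (reward + discount * max_q_next)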
research/tunnel.py | carrino/FrisPy | 0 | 7954 | <filename>research/tunnel.py<gh_stars>0
import math
from pprint import pprint
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from frispy import Disc
from frispy import Discs
from frispy import Model
model = Discs.roc
mph_to_mps = 0.44704
v = 56 * mph_to_mps
rot = -v / model.diameter
ceiling = 4 # 4 meter ceiling
tunnel_width = 4 # 4 meter wide tunnel
def distance(x):
a, nose_up, hyzer = x
d = Disc(model, {"vx": math.cos(a * math.pi / 180) * v, "dgamma": rot, "vz": math.sin(a * math.pi / 180) * v,
"nose_up": nose_up, "hyzer": hyzer})
r = d.compute_trajectory(15.0, **{"max_step": .2})
rx = r.x[-1]
ry = abs(r.y[-1])
    overCeilingIndex = next(filter(lambda i: r.z[i] > ceiling, range(len(r.z))), None)
    if overCeilingIndex is not None:
        return -r.x[overCeilingIndex]
outsideTunnelIndex = next(filter(lambda i: math.fabs(r.y[i]) > tunnel_width / 2, range(len(r.z))), None)
if outsideTunnelIndex is not None:
return -r.x[outsideTunnelIndex]
return -rx + ry / (rx + ry)
bnds = [(-90, 90)] * 3
x0 = [6, -3, 10]
res = minimize(distance, x0, method='powell', bounds=bnds, options={'xtol': 1e-8, 'disp': True})
pprint(res)
a, nose_up, hyzer = res.x
disc = Disc(model, {"vx": math.cos(a * math.pi / 180) * v, "dgamma": rot, "vz": math.sin(a * math.pi / 180) * v,
"nose_up": nose_up, "hyzer": hyzer})
result = disc.compute_trajectory(15.0, **{"max_step": .2})
times = result.times
t, x, y, z = result.times, result.x, result.y, result.z
#plt.plot(x, y)
#plt.plot(x, z)
#plt.plot(t, x)
plt.plot(t, y)
plt.plot(t, z)
pprint(x[-1] * 3.28084) # feet
plt.show()
| en | 0.15686 | # 4 meter ceiling # 4 meter wide tunnel #plt.plot(x, y) #plt.plot(x, z) #plt.plot(t, x) # feet | 2.253416 | 2 |
openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard_test.py | unpilbaek/OpenFermion-Cirq | 278 | 7955 | <filename>openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard_test.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openfermioncirq.variational.ansatzes import SwapNetworkTrotterHubbardAnsatz
def test_swap_network_trotter_hubbard_ansatz_param_bounds():
ansatz = SwapNetworkTrotterHubbardAnsatz(3, 1, 1.0, 4.0, periodic=False)
assert list(symbol.name for symbol in ansatz.params()) == [
'Th_0', 'V_0',]
assert ansatz.param_bounds() == [
(-2.0, 2.0), (-1.0, 1.0)]
ansatz = SwapNetworkTrotterHubbardAnsatz(1, 4, 1.0, 4.0, periodic=False)
assert list(symbol.name for symbol in ansatz.params()) == [
'Tv_0', 'V_0',]
assert ansatz.param_bounds() == [
(-2.0, 2.0), (-1.0, 1.0)]
ansatz = SwapNetworkTrotterHubbardAnsatz(3, 2, 1.0, 4.0)
assert list(symbol.name for symbol in ansatz.params()) == [
'Th_0', 'Tv_0', 'V_0',]
assert ansatz.param_bounds() == [
        (-2.0, 2.0), (-2.0, 2.0), (-1.0, 1.0)]
| en | 0.859654 | # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.072568 | 2 |
Modules/Phylogenetic.py | DaneshMoradigaravand/PlasmidPerm | 0 | 7956 | import os
from Bio import AlignIO, Phylo
from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor
class Phylogenetic:
def __init__(self, PATH):
self.PATH=PATH
def binary_sequence_generator(self, input_kmer_pattern, label):
string_inp="".join([ 'A' if x==0 else 'C' for x in input_kmer_pattern])
return([">"+label,string_inp])
def multifasta_fille_generator(self, converted_sequences_phyolgenetic):
file_output = open(os.path.join(self.PATH,"binary_presence_absence_kmers.fasta"), "w")
file_output.writelines('\n'.join(converted_sequences_phyolgenetic) + '\n' )
file_output.close()
def distance_matrix_generator(self):
align = AlignIO.read(os.path.join(self.PATH,"binary_presence_absence_kmers.fasta"), "fasta")
calculator = DistanceCalculator('identity')
distMatrix = calculator.get_distance(align)
return(distMatrix)
def distance_tree_file_generator(self,distance_matrix):
constructor = DistanceTreeConstructor()
UPGMATree = constructor.upgma(distance_matrix)
        Phylo.write(UPGMATree, os.path.join(self.PATH,"binary_presence_absence_kmers.tre") , "newick")
|
retrieve_regmod_values.py | cbcommunity/cbapi-examples | 17 | 7957 | #!/usr/bin/env python
#
#The MIT License (MIT)
#
# Copyright (c) 2015 Bit9 + Carbon Black
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# -----------------------------------------------------------------------------
# Extension regmod watcher and grabber
#
# This script listens to the CB messaging bus for registry modification events,
# and when a modification is seen that matches a regular expression from a file
# of registry path regular expressions, it goes and grabs the registry value
# using CB Live Response.
#
# You need to make sure rabbitmq is enabled in cb.conf, and you might need to
# open a firewall rule for port 5004. You also will need to enable regmod
# in the DatastoreBroadcastEventTypes=<values> entry. If anything is changed
# here, you'll have to do service cb-enterprise restart.
#
# TODO: More error handling, more performance improvements
#
# last updated 2016-01-23 by <NAME> <EMAIL> (<EMAIL>)
#
import re
import Queue
import sys
from threading import Thread
import time
import traceback
try:
from cbapi.legacy.util.cli_helpers import main_helper
from cbapi.legacy.util.composite_helpers import MessageSubscriberAndLiveResponseActor
import cbapi.legacy.util.sensor_events_pb2 as cpb
except ImportError:
from cbapi.util.cli_helpers import main_helper
from cbapi.util.composite_helpers import MessageSubscriberAndLiveResponseActor
import cbapi.util.sensor_events_pb2 as cpb
class RegistryModWatcherAndValueGrabber(MessageSubscriberAndLiveResponseActor):
"""
This class subscribes to messages from the CB messaging bus,
looking for regmod events. For each regmod event, it checks
to see if the the registry path matches one of our regexes.
If it does, it goes and grabs it.
"""
def __init__(self, cb_server_url, cb_ext_api, username, password, regmod_regexes, verbose):
self.regmod_regexes = regmod_regexes
self.verbose = verbose
MessageSubscriberAndLiveResponseActor.__init__(self,
cb_server_url,
cb_ext_api,
username,
password,
"ingress.event.regmod")
# Threading so that message queue arrives do not block waiting for live response
self.queue = Queue.Queue()
self.go = True
self.worker_thread = Thread(target=self._worker_thread_loop)
self.worker_thread.start()
def on_stop(self):
self.go = False
self.worker_thread.join(timeout=2)
MessageSubscriberAndLiveResponseActor.on_stop(self)
def consume_message(self, channel, method_frame, header_frame, body):
if "application/protobuf" != header_frame.content_type:
return
try:
# NOTE -- this is not very efficient in PYTHON, and should
# use a C parser to make this much, much faster.
# http://yz.mit.edu/wp/fast-native-c-protocol-buffers-from-python/
x = cpb.CbEventMsg()
x.ParseFromString(body)
if not x.regmod or x.regmod.action != 2:
# Check for MODIFICATION event because we will usually get
# a creation event and a modification event, and might as
# well go with the one that says data has actually been written.
return
regmod_path = None
if x.regmod.utf8_regpath:
if self.verbose:
print "Event arrived: |%s|" % x.regmod.utf8_regpath
for regmod_regex in self.regmod_regexes:
if regmod_regex.match(x.regmod.utf8_regpath):
regmod_path = x.regmod.utf8_regpath
break
if regmod_path:
regmod_path = regmod_path.replace("\\registry\\machine\\", "HKLM\\")
regmod_path = regmod_path.replace("\\registry\\user\\", "HKEY_USERS\\")
regmod_path = regmod_path.strip()
# TODO -- more cleanup here potentially?
self.queue.put((x, regmod_path))
except:
traceback.print_exc()
def _worker_thread_loop(self):
while self.go:
try:
try:
(x, regmod_path) = self.queue.get(timeout=0.5)
except Queue.Empty:
continue
# TODO -- could comment this out if you want CSV data to feed into something
print "--> Attempting for %s" % regmod_path
# Go Grab it if we think we have something!
sensor_id = x.env.endpoint.SensorId
hostname = x.env.endpoint.SensorHostName
# TODO -- this could use some concurrency and work queues because we could wait a while for
# each of these to get established and retrieve the value
# Establish our CBLR session if necessary!
lrh = self._create_lr_session_if_necessary(sensor_id)
data = lrh.get_registry_value(regmod_path)
print "%s,%s,%d,%s,%s,%s" % ( time.asctime(),
hostname,
sensor_id,
x.header.process_path,
regmod_path,
data.get('value_data', "") if data else "<UNKNOWN>")
# TODO -- could *do something* here, like if it is for autoruns keys then go check the signature status
# of the binary at the path pointed to, and see who wrote it out, etc
except:
traceback.print_exc()
def main(cb, args):
username = args.get("username")
password = args.get("password")
regpaths_file = args.get("regpaths_file")
verbose = args.get("verbose", False)
if verbose:
# maybe you want to print out all the regpaths we're using?
print "Regpaths file:", regpaths_file
f = file(regpaths_file, 'rb')
regpaths_data = f.read()
f.close()
regmod_regexes = []
for line in regpaths_data.split('\n'):
line = line.strip()
if len(line) == 0:
continue
regmod_regexes.append(re.compile(line))
listener = RegistryModWatcherAndValueGrabber(args.get('server_url'), cb, username, password, regmod_regexes, verbose)
try:
if verbose:
print "Registry Mod Watcher and Grabber -- started. Watching for:", regpaths_data
else:
print "Registry Mod Watcher and Grabber -- started. Watching for %d regexes" % len(regmod_regexes)
listener.process()
except KeyboardInterrupt:
print >> sys.stderr, "Caught Ctrl-C"
listener.stop()
print "Registry Mod Watcher and Grabber -- stopped."
if __name__ == "__main__":
## YOU CAN USE data/autoruns_regexes.txt to test ##
required_args =[("-i", "--username", "store", None, "username", "CB messaging username"),
("-p", "--password", "store", None, "password", "CB messaging password"),
("-r", "--regpaths_file", "store", None, "regpaths_file", "File of newline delimited regexes for regpaths")]
optional_args = [("-v", "--verbose", "store_true", False, "verbose", "Enable verbose output")]
main_helper("Subscribe to message bus events and for each registry modification that matches one of our supplied regexes, go retrieve value.",
main,
custom_required=required_args,
                custom_optional=optional_args)
| en | 0.832871 | #!/usr/bin/env python # #The MIT License (MIT) # # Copyright (c) 2015 Bit9 + Carbon Black # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # ----------------------------------------------------------------------------- # Extension regmod watcher and grabber # # This script listens to the CB messaging bus for registry modification events, # and when a modification is seen that matches a regular expression from a file # of registry path regular expressions, it goes and grabs the registry value # using CB Live Response. # # You need to make sure rabbitmq is enabled in cb.conf, and you might need to # open a firewall rule for port 5004. You also will need to enable regmod # in the DatastoreBroadcastEventTypes=<values> entry. If anything is changed # here, you'll have to do service cb-enterprise restart. # # TODO: More error handling, more performance improvements # # last updated 2016-01-23 by <NAME> <EMAIL> (<EMAIL>) # This class subscribes to messages from the CB messaging bus, looking for regmod events. For each regmod event, it checks to see if the the registry path matches one of our regexes. If it does, it goes and grabs it. # Threading so that message queue arrives do not block waiting for live response # NOTE -- this is not very efficient in PYTHON, and should # use a C parser to make this much, much faster. # http://yz.mit.edu/wp/fast-native-c-protocol-buffers-from-python/ # Check for MODIFICATION event because we will usually get # a creation event and a modification event, and might as # well go with the one that says data has actually been written. # TODO -- more cleanup here potentially? # TODO -- could comment this out if you want CSV data to feed into something # Go Grab it if we think we have something! # TODO -- this could use some concurrency and work queues because we could wait a while for # each of these to get established and retrieve the value # Establish our CBLR session if necessary! # TODO -- could *do something* here, like if it is for autoruns keys then go check the signature status # of the binary at the path pointed to, and see who wrote it out, etc # maybe you want to print out all the regpaths we're using? ## YOU CAN USE data/autoruns_regexes.txt to test ## | 1.263061 | 1 |
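# Note for retrieve_regmod_values.py above -- not part of the original script. The
# --regpaths_file argument expects newline-delimited regexes; a hypothetical example in
# the spirit of the referenced data/autoruns_regexes.txt:
#
#   \\registry\\machine\\software\\microsoft\\windows\\currentversion\\run\\.*
#   \\registry\\user\\.*\\software\\microsoft\\windows\\currentversion\\run\\.*
#
# A matching (hypothetical) invocation, using only the options this script defines plus
# whatever standard Carbon Black connection options main_helper contributes:
#
#   python retrieve_regmod_values.py -i <bus_username> -p <bus_password> \
#       -r data/autoruns_regexes.txt -v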
h/exceptions.py | ssin122/test-h | 2 | 7958 | <reponame>ssin122/test-h
# -*- coding: utf-8 -*-
"""Exceptions raised by the h application."""
from __future__ import unicode_literals
from h.i18n import TranslationString as _
# N.B. This class **only** covers exceptions thrown by API code provided by
# the h package. memex code has its own base APIError class.
class APIError(Exception):
"""Base exception for problems handling API requests."""
def __init__(self, message, status_code=500):
self.status_code = status_code
super(APIError, self).__init__(message)
class ClientUnauthorized(APIError):
"""
Exception raised if the client credentials provided for an API request
were missing or invalid.
"""
def __init__(self):
message = _('Client credentials are invalid.')
super(ClientUnauthorized, self).__init__(message, status_code=403)
class OAuthTokenError(APIError):
"""
Exception raised when an OAuth token request failed.
This specifically handles OAuth errors which have a type (``message``) and
a description (``description``).
"""
def __init__(self, message, type_, status_code=400):
self.type = type_
        super(OAuthTokenError, self).__init__(message, status_code=status_code)
super(OAuthTokenError, self).__init__(message, status_code=status_code) | en | 0.858972 | # -*- coding: utf-8 -*- Exceptions raised by the h application. # N.B. This class **only** covers exceptions thrown by API code provided by # the h package. memex code has its own base APIError class. Base exception for problems handling API requests. Exception raised if the client credentials provided for an API request were missing or invalid. Exception raised when an OAuth token request failed. This specifically handles OAuth errors which have a type (``message``) and a description (``description``). | 2.6356 | 3 |
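# Illustrative sketch for h/exceptions.py above -- not part of the original file. The
# view function and grant-type check are hypothetical; only the exception API comes from
# the module above.
from h.exceptions import APIError, OAuthTokenError

def exchange_token(grant_type):
    if grant_type != "authorization_code":
        raise OAuthTokenError("unsupported grant type", type_="unsupported_grant_type")

try:
    exchange_token("password")
except OAuthTokenError as exc:
    status, error_type = exc.status_code, exc.type   # 400, "unsupported_grant_type"
except APIError as exc:
    status = exc.status_code                         # generic fallback, defaults to 500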
functest/opnfv_tests/openstack/shaker/shaker.py | opnfv-poc/functest | 0 | 7959 | <filename>functest/opnfv_tests/openstack/shaker/shaker.py
#!/usr/bin/env python
# Copyright (c) 2018 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""
Shaker_ wraps around popular system network testing tools like iperf, iperf3
and netperf (with help of flent). Shaker is able to deploy OpenStack instances
and networks in different topologies. Shaker scenario specifies the deployment
and list of tests to execute.
.. _Shaker: http://pyshaker.readthedocs.io/en/latest/
"""
import logging
import os
import json
import scp
from functest.core import singlevm
from functest.utils import env
class Shaker(singlevm.SingleVm2):
"""Run shaker full+perf l2 and l3"""
# pylint: disable=too-many-instance-attributes
__logger = logging.getLogger(__name__)
filename = '/home/opnfv/functest/images/shaker-image-1.3.0+stretch.qcow2'
flavor_ram = 512
flavor_vcpus = 1
flavor_disk = 3
username = 'debian'
port = 9000
ssh_connect_loops = 12
create_server_timeout = 300
shaker_timeout = '3600'
quota_instances = -1
quota_cores = -1
def __init__(self, **kwargs):
super(Shaker, self).__init__(**kwargs)
self.role = None
def check_requirements(self):
if self.count_hypervisors() < 2:
self.__logger.warning("Shaker requires at least 2 hypervisors")
self.is_skipped = True
self.project.clean()
def prepare(self):
super(Shaker, self).prepare()
self.cloud.create_security_group_rule(
self.sec.id, port_range_min=self.port, port_range_max=self.port,
protocol='tcp', direction='ingress')
def execute(self):
"""
Returns:
- 0 if success
- 1 on operation error
"""
assert self.ssh
endpoint = self.get_public_auth_url(self.orig_cloud)
self.__logger.debug("keystone endpoint: %s", endpoint)
if self.orig_cloud.get_role("admin"):
role_name = "admin"
elif self.orig_cloud.get_role("Admin"):
role_name = "Admin"
else:
raise Exception("Cannot detect neither admin nor Admin")
self.orig_cloud.grant_role(
role_name, user=self.project.user.id,
project=self.project.project.id,
domain=self.project.domain.id)
if not self.orig_cloud.get_role("heat_stack_owner"):
self.role = self.orig_cloud.create_role("heat_stack_owner")
self.orig_cloud.grant_role(
"heat_stack_owner", user=self.project.user.id,
project=self.project.project.id,
domain=self.project.domain.id)
self.orig_cloud.set_compute_quotas(
self.project.project.name,
instances=self.quota_instances,
cores=self.quota_cores)
scpc = scp.SCPClient(self.ssh.get_transport())
scpc.put('/home/opnfv/functest/conf/env_file', remote_path='~/')
if os.environ.get('OS_CACERT'):
scpc.put(os.environ.get('OS_CACERT'), remote_path='~/os_cacert')
(_, stdout, stderr) = self.ssh.exec_command(
'source ~/env_file && '
'export OS_INTERFACE=public && '
'export OS_AUTH_URL={} && '
'export OS_USERNAME={} && '
'export OS_PROJECT_NAME={} && '
'export OS_PROJECT_ID={} && '
'unset OS_TENANT_NAME && '
'unset OS_TENANT_ID && '
'unset OS_ENDPOINT_TYPE && '
'export OS_PASSWORD="{}" && '
'{}'
'env && '
'timeout {} shaker --debug --image-name {} --flavor-name {} '
'--server-endpoint {}:9000 --external-net {} --dns-nameservers {} '
'--scenario openstack/full_l2,'
'openstack/full_l3_east_west,'
'openstack/full_l3_north_south,'
'openstack/perf_l3_north_south '
'--report report.html --output report.json'.format(
endpoint, self.project.user.name, self.project.project.name,
self.project.project.id, self.project.password,
'export OS_CACERT=~/os_cacert && ' if os.environ.get(
'OS_CACERT') else '',
self.shaker_timeout, self.image.name, self.flavor.name,
self.fip.floating_ip_address, self.ext_net.id,
env.get('NAMESERVER')))
self.__logger.info("output:\n%s", stdout.read().decode("utf-8"))
self.__logger.info("error:\n%s", stderr.read().decode("utf-8"))
if not os.path.exists(self.res_dir):
os.makedirs(self.res_dir)
try:
scpc.get('report.json', self.res_dir)
scpc.get('report.html', self.res_dir)
except scp.SCPException:
self.__logger.exception("cannot get report files")
return 1
with open(os.path.join(self.res_dir, 'report.json')) as json_file:
data = json.load(json_file)
for value in data["records"].values():
if value["status"] != "ok":
self.__logger.error(
"%s failed\n%s", value["scenario"], value["stderr"])
return 1
return stdout.channel.recv_exit_status()
def clean(self):
super(Shaker, self).clean()
if self.role:
            self.orig_cloud.delete_role(self.role.id)
| en | 0.815735 | #!/usr/bin/env python # Copyright (c) 2018 Orange and others. # # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 Shaker_ wraps around popular system network testing tools like iperf, iperf3 and netperf (with help of flent). Shaker is able to deploy OpenStack instances and networks in different topologies. Shaker scenario specifies the deployment and list of tests to execute. .. _Shaker: http://pyshaker.readthedocs.io/en/latest/ Run shaker full+perf l2 and l3 # pylint: disable=too-many-instance-attributes Returns: - 0 if success - 1 on operation error | 1.990343 | 2 |
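# Note for shaker.py above -- not part of the original module. After sourcing the copied
# env_file and exporting the freshly created project's credentials, execute() effectively
# runs the following CLI on the test VM (placeholders stand in for the .format() values):
#
#   timeout 3600 shaker --debug --image-name <image> --flavor-name <flavor> \
#       --server-endpoint <floating_ip>:9000 --external-net <ext_net_id> \
#       --dns-nameservers <nameserver> \
#       --scenario openstack/full_l2,openstack/full_l3_east_west,openstack/full_l3_north_south,openstack/perf_l3_north_south \
#       --report report.html --output report.json
#
# and then copies report.json / report.html back over SCP, failing the test if any
# scenario record is not "ok".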
lib/models/bn_helper.py | hongrui16/naic2020_B | 0 | 7960 | import torch
import functools
if torch.__version__.startswith('0'):
from .sync_bn.inplace_abn.bn import InPlaceABNSync
BatchNorm2d = functools.partial(InPlaceABNSync, activation='none')
BatchNorm2d_class = InPlaceABNSync
relu_inplace = False
else:
# BatchNorm2d_class = BatchNorm2d = torch.nn.SyncBatchNorm
BatchNorm2d_class = BatchNorm2d = torch.nn.BatchNorm2d
    relu_inplace = True
relu_inplace = True | en | 0.591816 | # BatchNorm2d_class = BatchNorm2d = torch.nn.SyncBatchNorm | 2.268766 | 2 |
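# Illustrative usage sketch for lib/models/bn_helper.py above -- not part of the original
# module; the import path and layer sizes are assumptions.
import torch.nn as nn
from lib.models.bn_helper import BatchNorm2d, relu_inplace

block = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
    BatchNorm2d(64),                 # nn.BatchNorm2d here, or InPlaceABNSync on torch 0.x
    nn.ReLU(inplace=relu_inplace),
)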
ordered_model/tests/models.py | HiddenClever/django-ordered-model | 0 | 7961 | <reponame>HiddenClever/django-ordered-model
from django.db import models
from ordered_model.models import OrderedModel, OrderedModelBase
class Item(OrderedModel):
name = models.CharField(max_length=100)
class Question(models.Model):
pass
class TestUser(models.Model):
pass
class Answer(OrderedModel):
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='answers')
user = models.ForeignKey(TestUser, on_delete=models.CASCADE, related_name='answers')
order_with_respect_to = ('question', 'user')
class Meta:
ordering = ('question', 'user', 'order')
def __unicode__(self):
return u"Answer #{0:d} of question #{1:d} for user #{2:d}".format(self.order, self.question_id, self.user_id)
class CustomItem(OrderedModel):
id = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
modified = models.DateTimeField(null=True, blank=True)
class CustomOrderFieldModel(OrderedModelBase):
sort_order = models.PositiveIntegerField(editable=False, db_index=True)
name = models.CharField(max_length=100)
order_field_name = 'sort_order'
class Meta:
ordering = ('sort_order',)
class Topping(models.Model):
name = models.CharField(max_length=100)
class Pizza(models.Model):
name = models.CharField(max_length=100)
toppings = models.ManyToManyField(Topping, through='PizzaToppingsThroughModel')
class PizzaToppingsThroughModel(OrderedModel):
pizza = models.ForeignKey(Pizza, on_delete=models.CASCADE)
topping = models.ForeignKey(Topping, on_delete=models.CASCADE)
order_with_respect_to = 'pizza'
class Meta:
ordering = ('pizza', 'order')
class BaseQuestion(OrderedModel):
order_class_path = __module__ + '.BaseQuestion'
question = models.TextField(max_length=100)
class Meta:
ordering = ('order',)
class MultipleChoiceQuestion(BaseQuestion):
good_answer = models.TextField(max_length=100)
wrong_answer1 = models.TextField(max_length=100)
wrong_answer2 = models.TextField(max_length=100)
wrong_answer3 = models.TextField(max_length=100)
class OpenQuestion(BaseQuestion):
    answer = models.TextField(max_length=100)
answer = models.TextField(max_length=100) | en | 0.500986 | #{0:d} of question #{1:d} for user #{2:d}".format(self.order, self.question_id, self.user_id) | 2.431099 | 2 |
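# Illustrative sketch for ordered_model/tests/models.py above -- not part of the original
# file. Assumes a configured Django project with these test models migrated; the
# up()/down()/to() helpers are the ones provided by django-ordered-model's OrderedModel.
from ordered_model.tests.models import Item

first = Item.objects.create(name="first")    # order is assigned automatically (0)
second = Item.objects.create(name="second")  # order == 1
second.up()                                  # swap ordering with the item above it
print(list(Item.objects.values_list("name", "order")))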
library/pandas_utils.py | SACGF/variantgrid | 5 | 7962 | import os
import sys
import numpy as np
import pandas as pd
def get_columns_percent_dataframe(df: pd.DataFrame, totals_column=None, percent_names=True) -> pd.DataFrame:
""" @param totals_column: (default = use sum of columns)
@param percent_names: Rename names from 'col' => 'col %'
Return a dataframe as a percentage of totals_column if provided, or sum of columns """
percent_df = pd.DataFrame(index=df.index)
columns = df.columns
if totals_column:
totals_series = df[totals_column]
        columns = columns.drop(totals_column)  # Index "-" set difference was removed in modern pandas
else:
totals_series = df.sum(axis=1)
for col in columns:
new_col = col
if percent_names:
new_col = f"{new_col} %"
multiplier = 100.0 # to get percent
percent_df[new_col] = multiplier * df[col] / totals_series
return percent_df
def get_rows_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame:
""" Return a dataframe as a percentage of sum of rows """
row_sums = df.sum(axis=0)
return df.multiply(100.0) / row_sums
def get_total_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame:
""" Return a dataframe as a percentage of sum of rows """
total = df.sum(axis=0).sum()
return df.multiply(100.0) / total
def df_handle_below_minimum_floats(df: pd.DataFrame) -> pd.DataFrame:
def handle_if_below_min(series):
if series.dtype == 'd':
too_small_mask = abs(series) < sys.float_info.min
series[too_small_mask] = sys.float_info.min
return series
return df.apply(handle_if_below_min, axis=0)
def nan_to_none(val):
if np.isnan(val):
val = None
return val
def df_nan_to_none(df: pd.DataFrame) -> pd.DataFrame:
return df.where((pd.notnull(df)), None)
def df_replace_nan(df: pd.DataFrame, nan_replace='') -> pd.DataFrame:
return df.where((pd.notnull(df)), nan_replace)
def read_csv_skip_header(fle, header='#', **kwargs) -> pd.DataFrame:
if os.stat(fle).st_size == 0:
raise ValueError("File is empty")
with open(fle) as f:
pos = 0
cur_line = f.readline()
while cur_line.startswith(header):
pos = f.tell()
cur_line = f.readline()
f.seek(pos)
        return pd.read_csv(f, **kwargs)
| en | 0.65167 | @param totals_column: (default = use sum of columns) @param percent_names: Rename names from 'col' => 'col %' Return a dataframe as a percentage of totals_column if provided, or sum of columns # to get percent Return a dataframe as a percentage of sum of rows Return a dataframe as a percentage of sum of rows | 3.702787 | 4 |
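# Illustrative usage example for library/pandas_utils.py above -- not part of the original
# module; the DataFrame and import path are assumptions.
import pandas as pd
from library.pandas_utils import get_columns_percent_dataframe

df = pd.DataFrame({"pass": [8, 2], "fail": [2, 8]}, index=["run_a", "run_b"])
percent_df = get_columns_percent_dataframe(df)
# With no totals_column given, each row is scaled by its own sum:
#          pass %  fail %
# run_a      80.0    20.0
# run_b      20.0    80.0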
app/services/base.py | grace1307/lan_mapper | 0 | 7963 | <reponame>grace1307/lan_mapper<filename>app/services/base.py
from app.db import db
# Ignore it if db can't find the row when updating/deleting
# TODO: do not ignore it; raise an error instead and remove the checkers in the views
class BaseService:
__abstract__ = True
model = None
# Create
def add_one(self, **kwargs):
new_row = self.model(**kwargs)
db.session.add(new_row)
db.session.commit() # sqlalchemy auto flushes so maybe this just need commit ?
return new_row
# Read
def select_one(self, id):
return self.model.query.filter(self.model.id == id).one_or_none()
def select_all(self, conditions: list = None, sort_by=None, is_asc=None):
query = db.session.query(self.model)
if conditions is not None:
for condition in conditions:
query = query.filter(condition)
if sort_by is not None and is_asc is not None:
sort_column = self.model.__table__._columns[sort_by]
is_asc = is_asc == 'true'
if sort_column is not None:
query = query.order_by(sort_column.asc() if is_asc else sort_column.desc())
return query.all()
# Update
def update_one(self, id, updated):
row = self.model.query.filter(self.model.id == id)
row_result = row.one_or_none()
if row_result is not None:
row.update(updated)
db.session.commit()
return row.one_or_none()
# Delete
def delete_one(self, id):
row = self.select_one(id)
if row is not None:
db.session.delete(row)
db.session.commit()
| from app.db import db
# Ignore it if db can't find the row when updating/deleting
# Todo: not ignore it, raise some error, remove checkers in view
class BaseService:
__abstract__ = True
model = None
# Create
def add_one(self, **kwargs):
new_row = self.model(**kwargs)
db.session.add(new_row)
db.session.commit() # sqlalchemy auto flushes so maybe this just need commit ?
return new_row
# Read
def select_one(self, id):
return self.model.query.filter(self.model.id == id).one_or_none()
def select_all(self, conditions: list = None, sort_by=None, is_asc=None):
query = db.session.query(self.model)
if conditions is not None:
for condition in conditions:
query = query.filter(condition)
if sort_by is not None and is_asc is not None:
sort_column = self.model.__table__._columns[sort_by]
is_asc = is_asc == 'true'
if sort_column is not None:
query = query.order_by(sort_column.asc() if is_asc else sort_column.desc())
return query.all()
# Update
def update_one(self, id, updated):
row = self.model.query.filter(self.model.id == id)
row_result = row.one_or_none()
if row_result is not None:
row.update(updated)
db.session.commit()
return row.one_or_none()
# Delete
def delete_one(self, id):
row = self.select_one(id)
if row is not None:
db.session.delete(row)
db.session.commit() | en | 0.816564 | # Ignore it if db can't find the row when updating/deleting # Todo: not ignore it, raise some error, remove checkers in view # Create # sqlalchemy auto flushes so maybe this just need commit ? # Read # Update # Delete | 2.451184 | 2 |
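A sketch of how the generic BaseService above is typically specialised. It assumes a Flask-SQLAlchemy setup equivalent to the project's app.db module; the Node/NodeService names are invented for illustration and only the add_one path is mirrored.
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
db = SQLAlchemy(app)

class Node(db.Model):            # stand-in model, not part of the original project
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))

class NodeService:               # mirrors BaseService.add_one bound to a concrete model
    model = Node
    def add_one(self, **kwargs):
        row = self.model(**kwargs)
        db.session.add(row)
        db.session.commit()
        return row

with app.app_context():
    db.create_all()
    print(NodeService().add_one(name="switch-1").id)   # -> 1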
set.py | QUDUSKUNLE/Python-Flask | 0 | 7964 | """
How to set up virtual environment
pip install virtualenv
pip install virtualenvwrapper
# export WORKON_HOME=~/Envs
source /usr/local/bin/virtualenvwrapper.sh
# To activate virtualenv and set up flask
1. mkvirtualenv my-venv
###2. workon my-venv
3. pip install Flask
4. pip freeze
5. # To put all dependencies in a file
pip freeze > requirements.txt
6. run.py: entry point of the application
7. relational database management system
SQLite, MYSQL, PostgreSQL
SQLAlchemy is an Object Relational Mapper (ORM),
which means that it connects the objects of an application to tables in a
relational database management system.
""" | """
How to set up virtual environment
pip install virtualenv
pip install virtualenvwrapper
# export WORKON_HOME=~/Envs
source /usr/local/bin/virtualenvwrapper.sh
# To activate virtualenv and set up flask
1. mkvirtualenv my-venv
###2. workon my-venv
3. pip install Flask
4. pip freeze
5. # To put all dependencies in a file
pip freeze > requirements.txt
6. run.py: entry point of the application
7. relational database management system
SQLite, MYSQL, PostgreSQL
SQLAlchemy is an Object Relational Mapper (ORM),
which means that it connects the objects of an application to tables in a
relational database management system.
""" | en | 0.639972 | How to set up virtual environment pip install virtualenv pip install virtualenvwrapper # export WORKON_HOME=~/Envs source /usr/local/bin/virtualenvwrapper.sh # To activate virtualenv and set up flask 1. mkvirtualenv my-venv ###2. workon my-venv 3. pip install Flask 4. pip freeze 5. # To put all dependencies in a file pip freeze > requirements.txt 6. run.py: entry point of the application 7. relational database management system SQLite, MYSQL, PostgreSQL SQLAlchemy is an Object Relational Mapper (ORM), which means that it connects the objects of an application to tables in a relational database management system. | 2.855571 | 3 |
test/test_generate_data_coassembly.py | Badboy-16/SemiBin | 0 | 7965 | from SemiBin.main import generate_data_single
import os
import pytest
import logging
import pandas as pd
def test_generate_data_coassembly():
logger = logging.getLogger('SemiBin')
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
logger.addHandler(sh)
os.makedirs('output_coassembly',exist_ok=True)
generate_data_single(bams=['test/coassembly_sample_data/input.sorted1.bam',
'test/coassembly_sample_data/input.sorted2.bam',
'test/coassembly_sample_data/input.sorted3.bam',
'test/coassembly_sample_data/input.sorted4.bam',
'test/coassembly_sample_data/input.sorted5.bam'],
num_process=1,
logger=logger,
output='output_coassembly',
handle='test/coassembly_sample_data/input.fasta',
binned_short=False,
must_link_threshold=4000
)
data = pd.read_csv('output_coassembly/data.csv',index_col=0)
data_split = pd.read_csv('output_coassembly/data_split.csv',index_col=0)
assert data.shape == (40,141)
assert data_split.shape == (80,141) | from SemiBin.main import generate_data_single
import os
import pytest
import logging
import pandas as pd
def test_generate_data_coassembly():
logger = logging.getLogger('SemiBin')
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
logger.addHandler(sh)
os.makedirs('output_coassembly',exist_ok=True)
generate_data_single(bams=['test/coassembly_sample_data/input.sorted1.bam',
'test/coassembly_sample_data/input.sorted2.bam',
'test/coassembly_sample_data/input.sorted3.bam',
'test/coassembly_sample_data/input.sorted4.bam',
'test/coassembly_sample_data/input.sorted5.bam'],
num_process=1,
logger=logger,
output='output_coassembly',
handle='test/coassembly_sample_data/input.fasta',
binned_short=False,
must_link_threshold=4000
)
data = pd.read_csv('output_coassembly/data.csv',index_col=0)
data_split = pd.read_csv('output_coassembly/data_split.csv',index_col=0)
assert data.shape == (40,141)
assert data_split.shape == (80,141) | none | 1 | 2.319043 | 2 |
|
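A small follow-up sketch for the assertions above: once the test has produced output_coassembly/data.csv, the feature matrix can be inspected directly with pandas. The path and expected shape are taken from the test itself; running this obviously requires the generated files to exist.
import pandas as pd

data = pd.read_csv("output_coassembly/data.csv", index_col=0)
print(data.shape)         # expected (40, 141) for the bundled sample data
print(data.iloc[:3, :5])  # peek at the first few contigs and features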
create/create_args_test.py | CarbonROM/android_tools_acloud | 0 | 7966 | # Copyright 2020 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for create."""
import unittest
from unittest import mock
from acloud import errors
from acloud.create import create_args
from acloud.internal import constants
from acloud.internal.lib import driver_test_lib
def _CreateArgs():
"""set default pass in arguments."""
mock_args = mock.MagicMock(
flavor=None,
num=1,
adb_port=None,
hw_property=None,
stable_cheeps_host_image_name=None,
stable_cheeps_host_image_project=None,
username=None,
password=<PASSWORD>,
cheeps_betty_image=None,
local_image=None,
local_kernel_image=None,
local_system_image=None,
system_branch=None,
system_build_id=None,
system_build_target=None,
local_instance=None,
remote_host=None,
host_user=constants.GCE_USER,
host_ssh_private_key_path=None,
avd_type=constants.TYPE_CF,
autoconnect=constants.INS_KEY_VNC)
return mock_args
# pylint: disable=invalid-name,protected-access
class CreateArgsTest(driver_test_lib.BaseDriverTest):
"""Test create_args functions."""
def testVerifyArgs(self):
"""test VerifyArgs."""
mock_args = _CreateArgs()
# Test args default setting shouldn't raise error.
self.assertEqual(None, create_args.VerifyArgs(mock_args))
def testVerifyArgs_ConnectWebRTC(self):
"""test VerifyArgs args.autconnect webrtc.
WebRTC only apply to remote cuttlefish instance
"""
mock_args = _CreateArgs()
mock_args.autoconnect = constants.INS_KEY_WEBRTC
# Test remote instance and avd_type cuttlefish(default)
# Test args.autoconnect webrtc shouldn't raise error.
self.assertEqual(None, create_args.VerifyArgs(mock_args))
# Test pass in none-cuttlefish avd_type should raise error.
mock_args.avd_type = constants.TYPE_GF
self.assertRaises(errors.UnsupportedCreateArgs,
create_args.VerifyArgs, mock_args)
if __name__ == "__main__":
unittest.main()
| # Copyright 2020 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for create."""
import unittest
from unittest import mock
from acloud import errors
from acloud.create import create_args
from acloud.internal import constants
from acloud.internal.lib import driver_test_lib
def _CreateArgs():
"""set default pass in arguments."""
mock_args = mock.MagicMock(
flavor=None,
num=1,
adb_port=None,
hw_property=None,
stable_cheeps_host_image_name=None,
stable_cheeps_host_image_project=None,
username=None,
password=<PASSWORD>,
cheeps_betty_image=None,
local_image=None,
local_kernel_image=None,
local_system_image=None,
system_branch=None,
system_build_id=None,
system_build_target=None,
local_instance=None,
remote_host=None,
host_user=constants.GCE_USER,
host_ssh_private_key_path=None,
avd_type=constants.TYPE_CF,
autoconnect=constants.INS_KEY_VNC)
return mock_args
# pylint: disable=invalid-name,protected-access
class CreateArgsTest(driver_test_lib.BaseDriverTest):
"""Test create_args functions."""
def testVerifyArgs(self):
"""test VerifyArgs."""
mock_args = _CreateArgs()
# Test args default setting shouldn't raise error.
self.assertEqual(None, create_args.VerifyArgs(mock_args))
def testVerifyArgs_ConnectWebRTC(self):
"""test VerifyArgs args.autconnect webrtc.
WebRTC only apply to remote cuttlefish instance
"""
mock_args = _CreateArgs()
mock_args.autoconnect = constants.INS_KEY_WEBRTC
# Test remote instance and avd_type cuttlefish(default)
# Test args.autoconnect webrtc shouldn't raise error.
self.assertEqual(None, create_args.VerifyArgs(mock_args))
# Test pass in none-cuttlefish avd_type should raise error.
mock_args.avd_type = constants.TYPE_GF
self.assertRaises(errors.UnsupportedCreateArgs,
create_args.VerifyArgs, mock_args)
if __name__ == "__main__":
unittest.main()
| en | 0.735465 | # Copyright 2020 - The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for create. set default pass in arguments. # pylint: disable=invalid-name,protected-access Test create_args functions. test VerifyArgs. # Test args default setting shouldn't raise error. test VerifyArgs args.autconnect webrtc. WebRTC only apply to remote cuttlefish instance # Test remote instance and avd_type cuttlefish(default) # Test args.autoconnect webrtc shouldn't raise error. # Test pass in none-cuttlefish avd_type should raise error. | 1.863546 | 2 |
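The test above builds its fake argparse namespace with mock.MagicMock keyword arguments; a stripped-down illustration of that pattern (attribute names here are invented) is:
from unittest import mock

args = mock.MagicMock(num=1, avd_type="cuttlefish", autoconnect="vnc")
print(args.num, args.avd_type, args.autoconnect)  # 1 cuttlefish vnc
# Attributes that were never set are auto-created child mocks, which are truthy -
# which is why tests that branch on optional flags set them to None explicitly, as above.
print(bool(args.some_unset_flag))                 # True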
setup.py | Kannuki-san/msman | 0 | 7967 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from cx_Freeze import setup,Executable
icondata='icon.ico'
base = None
# Enable GUI mode and disable the console (CUI)
if sys.platform == 'win32' : base = 'win32GUI'
exe = Executable(script = 'main.py',
base = base,
#icon=icondata
)
setup(name = 'MSman',
version = '0.1',
description = 'Minecraft Server Manager',
executables = [exe]
) | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from cx_Freeze import setup,Executable
icondata='icon.ico'
base = None
# Enable GUI mode and disable the console (CUI)
if sys.platform == 'win32' : base = 'win32GUI'
exe = Executable(script = 'main.py',
base = base,
#icon=icondata
)
setup(name = 'MSman',
version = '0.1',
description = 'Minecraft Server Manager',
executables = [exe]
) | ja | 0.579002 | #!/usr/bin/env python # -*- coding: utf-8 -*- # GUI=有効, CUI=無効 にする #icon=icondata | 1.950983 | 2 |
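A sketch of how the cx_Freeze script above is normally driven, with the icon re-enabled. It assumes main.py and icon.ico sit next to setup.py; note that current cx_Freeze releases document the GUI base as "Win32GUI" (capital W), so the lower-case spelling in the record may need adjusting.
# Build command (run in the project directory):
#   python setup.py build
import sys
from cx_Freeze import setup, Executable

base = "Win32GUI" if sys.platform == "win32" else None   # hide the console window on Windows
setup(
    name="MSman",
    version="0.1",
    description="Minecraft Server Manager",
    executables=[Executable("main.py", base=base, icon="icon.ico")],
)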
stereotype/roles.py | petee-d/stereotype | 6 | 7968 | <gh_stars>1-10
from __future__ import annotations
from threading import Lock
from typing import List, Set, Optional, Any, Tuple
from stereotype.utils import ConfigurationError
class Role:
__slots__ = ('code', 'name', 'empty_by_default')
def __init__(self, name: str, empty_by_default: bool = False):
self.name = name
self.empty_by_default = empty_by_default
with _roles_lock:
self.code = len(_roles)
_roles.append(self)
def __repr__(self):
return f'<Role {self.name}, empty_by_default={self.empty_by_default}, code={self.code}>'
def __hash__(self):
return self.code
def __eq__(self, other):
return type(self) == type(other) and self.code == other.code
def whitelist(self, *fields, override_parents: bool = False):
return RequestedRoleFields(self, fields, is_whitelist=True, override_parents=override_parents)
def blacklist(self, *fields, override_parents: bool = False):
return RequestedRoleFields(self, fields, is_whitelist=False, override_parents=override_parents)
_roles: List[Role] = []
_roles_lock = Lock()
DEFAULT_ROLE = Role('default')
class FinalizedRoleFields:
__slots__ = ('role', 'fields')
def __init__(self, role: Role, fields: Optional[Set[str]] = None):
self.role = role
self.fields = fields or set()
def update_requested(self, other: RequestedRoleFields, all_field_names: Set[str], field_names: Set[str]):
assert self.role == other.role
if other.override_parents:
initial = set() if other.is_whitelist else all_field_names
else:
initial = self.fields
if other.is_whitelist:
self.fields = initial | other.fields
else:
self.fields = (initial | field_names) - other.fields
class RequestedRoleFields:
__slots__ = ('role', 'fields', 'is_whitelist', 'override_parents')
def __init__(self, role: Role, fields, is_whitelist: bool, override_parents: bool):
self.fields, non_descriptors = self._collect_input_fields(fields)
if non_descriptors:
raise ConfigurationError(f'Role blacklist/whitelist needs member descriptors (e.g. cls.my_field), '
f'got {non_descriptors[0]!r}')
self.role = role
self.is_whitelist = is_whitelist
self.override_parents = override_parents
def _collect_input_fields(self, fields) -> Tuple[Set[str], List[Any]]:
field_names: Set[str] = set()
non_descriptors: List[Any] = []
for field in fields:
if type(field).__name__ == 'member_descriptor':
field_names.add(field.__name__)
elif isinstance(field, property):
field_names.add(field.fget.__name__)
else:
non_descriptors.append(field)
return field_names, non_descriptors
| from __future__ import annotations
from threading import Lock
from typing import List, Set, Optional, Any, Tuple
from stereotype.utils import ConfigurationError
class Role:
__slots__ = ('code', 'name', 'empty_by_default')
def __init__(self, name: str, empty_by_default: bool = False):
self.name = name
self.empty_by_default = empty_by_default
with _roles_lock:
self.code = len(_roles)
_roles.append(self)
def __repr__(self):
return f'<Role {self.name}, empty_by_default={self.empty_by_default}, code={self.code}>'
def __hash__(self):
return self.code
def __eq__(self, other):
return type(self) == type(other) and self.code == other.code
def whitelist(self, *fields, override_parents: bool = False):
return RequestedRoleFields(self, fields, is_whitelist=True, override_parents=override_parents)
def blacklist(self, *fields, override_parents: bool = False):
return RequestedRoleFields(self, fields, is_whitelist=False, override_parents=override_parents)
_roles: List[Role] = []
_roles_lock = Lock()
DEFAULT_ROLE = Role('default')
class FinalizedRoleFields:
__slots__ = ('role', 'fields')
def __init__(self, role: Role, fields: Optional[Set[str]] = None):
self.role = role
self.fields = fields or set()
def update_requested(self, other: RequestedRoleFields, all_field_names: Set[str], field_names: Set[str]):
assert self.role == other.role
if other.override_parents:
initial = set() if other.is_whitelist else all_field_names
else:
initial = self.fields
if other.is_whitelist:
self.fields = initial | other.fields
else:
self.fields = (initial | field_names) - other.fields
class RequestedRoleFields:
__slots__ = ('role', 'fields', 'is_whitelist', 'override_parents')
def __init__(self, role: Role, fields, is_whitelist: bool, override_parents: bool):
self.fields, non_descriptors = self._collect_input_fields(fields)
if non_descriptors:
raise ConfigurationError(f'Role blacklist/whitelist needs member descriptors (e.g. cls.my_field), '
f'got {non_descriptors[0]!r}')
self.role = role
self.is_whitelist = is_whitelist
self.override_parents = override_parents
def _collect_input_fields(self, fields) -> Tuple[Set[str], List[Any]]:
field_names: Set[str] = set()
non_descriptors: List[Any] = []
for field in fields:
if type(field).__name__ == 'member_descriptor':
field_names.add(field.__name__)
elif isinstance(field, property):
field_names.add(field.fget.__name__)
else:
non_descriptors.append(field)
return field_names, non_descriptors | none | 1 | 2.492157 | 2 |
|
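RequestedRoleFields above accepts member descriptors (cls.my_field on a __slots__ class) or properties and extracts their names; a standalone illustration of that Python mechanism, independent of the stereotype package, is:
class Point:
    __slots__ = ("x", "y")

    @property
    def norm(self):
        return (self.x ** 2 + self.y ** 2) ** 0.5

print(type(Point.x).__name__)      # 'member_descriptor'
print(Point.x.__name__)            # 'x'    - the name collected into the whitelist
print(Point.norm.fget.__name__)    # 'norm' - how property names are extracted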
WEB21-1-12/WEB2/power/zvl_test.py | coderdq/vuetest | 0 | 7969 | <gh_stars>0
# coding:utf-8
'''
矢网的测试项,包括增益,带内波动,VSWR
一个曲线最多建10个marker
'''
import os
import logging
from commoninterface.zvlbase import ZVLBase
logger = logging.getLogger('ghost')
class HandleZVL(object):
def __init__(self, ip, offset):
self.zvl = None
self.ip = ip
self.offset = float(offset)
def init_zvl(self, path):
logger.debug('init zvl')
self.zvl = ZVLBase()
self.zvl.init_inst(self.ip)
self.zvl.reset_zvl()
self.path = path # 存储图片的路径
def close_zvl(self):
self.zvl.close_inst()
def set_edge(self, low_edge, up_edge):
'''
:param low_edge: float单位MHz
:param up_edge: float单位MHz
:return:
'''
try:
low = '{}MHz'.format(low_edge)
up = '{}MHz'.format(up_edge)
self.zvl.set_freq(low, up)
return True
except Exception as e:
logger.error('set_edge error {}'.format(e))
return False
def set_trace(self, tracen, form, means):
'''
:param tracen: int
form:str,
means:str,'S11','S12','S21','S22'
:return:
'''
try:
self.zvl.set_trace_form(tracen, form)
self.zvl.change_trace_meas(tracen, means)
if form == 'MLOG':
self.zvl.set_div_value(tracen, 10)
# zvl.set_ref_value(zvlhandler, tracen, -40)
return True
except Exception as e:
logger.error('set_trace error {}'.format(e))
return False
def read_markery(self, tracen, markern, x):
x_str = '{}MHz'.format(x)
self.zvl.set_trace_marker(tracen, markern, x_str) # 设置marker点
_, marker1y = self.zvl.query_marker(tracen, markern)
return marker1y
def read_max_marker(self, tracen, markern):
try:
self.zvl.create_max_marker(tracen, markern) # max marker
# create_max_marker(zvlhandler, tracen, markern + 1) # max marker
marker1x, marker1y = self.zvl.query_marker(tracen, markern)
return float(marker1x) / 1000000.0, marker1y
except Exception as e:
logger.error('get_max_loss error {}'.format(e))
return None
def get_ripple_in_bw(self, tracen, markern):
'''
带内波动
:return:
'''
try:
self.zvl.create_min_marker(tracen, markern) # min marker
self.zvl.create_max_marker(tracen, markern + 1) # max marker
_, marker1y = self.zvl.query_marker(tracen, markern)
_, marker2y = self.zvl.query_marker(tracen, markern + 1)
absy = abs(float(marker1y) - float(marker2y))
return absy
except Exception as e:
logger.error('get_ripple_in_bw error{}'.format(e))
return None
def get_gain(self, *args):
'''
读取增益及带内波动
S21 dBmg
:return:高,中,低点增益,带内波动
'''
logger.debug('zvl get gain')
high, mid, low = args # 高中低
self.zvl.remove_allmarker(1)
self.set_edge(low, high)
tracen = 1
self.set_trace(tracen, 'MLOG', 'S21')
markern = 1
# 读高,中,低点的增益
high_markery = float(self.read_markery(tracen, markern, high))
markern += 1
mid_markery = float(self.read_markery(tracen, markern, mid))
markern += 1
low_markery = float(self.read_markery(tracen, markern, low))
# 带内波动
markern += 1
ripple = self.get_ripple_in_bw(tracen, markern) # 绝对值
ret = [high_markery + self.offset, mid_markery + self.offset,
low_markery + self.offset, ripple]
ret2 = ['%.2f' % float(item) for item in ret]
return ret2
def get_vswr(self, *args):
'''
VSWR S11,SWR
:return:max markerx,max markery
'''
logger.debug('zvl get_vswr')
self.zvl.remove_allmarker(1)
high, mid, low, dl_ul,temp = args # 高中低
tracen = 1
markern = 1
start = float(low) - 2.5
end = float(high) + 2.5
self.set_edge(start, end)
self.set_trace(tracen, 'SWR', 'S11')
marker = self.read_max_marker(tracen, markern)
# 截图
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_{}_VSWR.PNG'.format(temp, dl_ul,end))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
# mstr='@'.join([str(item) for item in marker])
marker2 = ['%.2f' % float(item) for item in marker]
return marker2
def get_gain_vs_freq(self, markerlist,dl_ul, temp):
'''
825~835MHz,870~880,890~915,935~960,1570.42~1585,
1710~1785,1805~1880,1920~1980,2110~2170,
2570~2620,1880~1915,2300~2400,2400~2483.5
截图三张,一张图最多截10个marker
markerlist:[]
:return:
'''
logger.debug('zvl get_gain_vs_freq')
self.zvl.remove_allmarker(1)
tracen = 1
markern = 1
self.set_trace(tracen, 'MLOG', 'S21')
markery_list = [] # 所有点的增益,注意要加上offset
try:
# 第一张图
self.set_edge(700, 1700)
marker_lst = markerlist[:10]
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern) # str
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_1.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
self.zvl.remove_allmarker(1)
# 第二张图
marker_lst = markerlist[10:20]
markern = 1
self.set_edge(1700, 3000)
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern)
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_2.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
self.zvl.remove_allmarker(1)
# 第三张图
marker_lst = markerlist[20:]
markern = 1
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern)
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_3.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
except Exception as e:
logger.error(e)
finally:
# logger.debug(markery_list)
ret = ['%.2f' % (float(item) + self.offset) for item in markery_list]
return ret
| # coding:utf-8
'''
矢网的测试项,包括增益,带内波动,VSWR
一个曲线最多建10个marker
'''
import os
import logging
from commoninterface.zvlbase import ZVLBase
logger = logging.getLogger('ghost')
class HandleZVL(object):
def __init__(self, ip, offset):
self.zvl = None
self.ip = ip
self.offset = float(offset)
def init_zvl(self, path):
logger.debug('init zvl')
self.zvl = ZVLBase()
self.zvl.init_inst(self.ip)
self.zvl.reset_zvl()
self.path = path # 存储图片的路径
def close_zvl(self):
self.zvl.close_inst()
def set_edge(self, low_edge, up_edge):
'''
:param low_edge: float单位MHz
:param up_edge: float单位MHz
:return:
'''
try:
low = '{}MHz'.format(low_edge)
up = '{}MHz'.format(up_edge)
self.zvl.set_freq(low, up)
return True
except Exception as e:
logger.error('set_edge error {}'.format(e))
return False
def set_trace(self, tracen, form, means):
'''
:param tracen: int
form:str,
means:str,'S11','S12','S21','S22'
:return:
'''
try:
self.zvl.set_trace_form(tracen, form)
self.zvl.change_trace_meas(tracen, means)
if form == 'MLOG':
self.zvl.set_div_value(tracen, 10)
# zvl.set_ref_value(zvlhandler, tracen, -40)
return True
except Exception as e:
logger.error('set_trace error {}'.format(e))
return False
def read_markery(self, tracen, markern, x):
x_str = '{}MHz'.format(x)
self.zvl.set_trace_marker(tracen, markern, x_str) # 设置marker点
_, marker1y = self.zvl.query_marker(tracen, markern)
return marker1y
def read_max_marker(self, tracen, markern):
try:
self.zvl.create_max_marker(tracen, markern) # max marker
# create_max_marker(zvlhandler, tracen, markern + 1) # max marker
marker1x, marker1y = self.zvl.query_marker(tracen, markern)
return float(marker1x) / 1000000.0, marker1y
except Exception as e:
logger.error('get_max_loss error {}'.format(e))
return None
def get_ripple_in_bw(self, tracen, markern):
'''
带内波动
:return:
'''
try:
self.zvl.create_min_marker(tracen, markern) # min marker
self.zvl.create_max_marker(tracen, markern + 1) # max marker
_, marker1y = self.zvl.query_marker(tracen, markern)
_, marker2y = self.zvl.query_marker(tracen, markern + 1)
absy = abs(float(marker1y) - float(marker2y))
return absy
except Exception as e:
logger.error('get_ripple_in_bw error{}'.format(e))
return None
def get_gain(self, *args):
'''
读取增益及带内波动
S21 dBmg
:return:高,中,低点增益,带内波动
'''
logger.debug('zvl get gain')
high, mid, low = args # 高中低
self.zvl.remove_allmarker(1)
self.set_edge(low, high)
tracen = 1
self.set_trace(tracen, 'MLOG', 'S21')
markern = 1
# 读高,中,低点的增益
high_markery = float(self.read_markery(tracen, markern, high))
markern += 1
mid_markery = float(self.read_markery(tracen, markern, mid))
markern += 1
low_markery = float(self.read_markery(tracen, markern, low))
# 带内波动
markern += 1
ripple = self.get_ripple_in_bw(tracen, markern) # 绝对值
ret = [high_markery + self.offset, mid_markery + self.offset,
low_markery + self.offset, ripple]
ret2 = ['%.2f' % float(item) for item in ret]
return ret2
def get_vswr(self, *args):
'''
VSWR S11,SWR
:return:max markerx,max markery
'''
logger.debug('zvl get_vswr')
self.zvl.remove_allmarker(1)
high, mid, low, dl_ul,temp = args # 高中低
tracen = 1
markern = 1
start = float(low) - 2.5
end = float(high) + 2.5
self.set_edge(start, end)
self.set_trace(tracen, 'SWR', 'S11')
marker = self.read_max_marker(tracen, markern)
# 截图
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_{}_VSWR.PNG'.format(temp, dl_ul,end))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
# mstr='@'.join([str(item) for item in marker])
marker2 = ['%.2f' % float(item) for item in marker]
return marker2
def get_gain_vs_freq(self, markerlist,dl_ul, temp):
'''
825~835MHz,870~880,890~915,935~960,1570.42~1585,
1710~1785,1805~1880,1920~1980,2110~2170,
2570~2620,1880~1915,2300~2400,2400~2483.5
截图三张,一张图最多截10个marker
markerlist:[]
:return:
'''
logger.debug('zvl get_gain_vs_freq')
self.zvl.remove_allmarker(1)
tracen = 1
markern = 1
self.set_trace(tracen, 'MLOG', 'S21')
markery_list = [] # 所有点的增益,注意要加上offset
try:
# 第一张图
self.set_edge(700, 1700)
marker_lst = markerlist[:10]
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern) # str
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_1.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
self.zvl.remove_allmarker(1)
# 第二张图
marker_lst = markerlist[10:20]
markern = 1
self.set_edge(1700, 3000)
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern)
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_2.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
self.zvl.remove_allmarker(1)
# 第三张图
marker_lst = markerlist[20:]
markern = 1
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern)
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_3.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
except Exception as e:
logger.error(e)
finally:
# logger.debug(markery_list)
ret = ['%.2f' % (float(item) + self.offset) for item in markery_list]
return ret | zh | 0.430545 | # coding:utf-8 矢网的测试项,包括增益,带内波动,VSWR 一个曲线最多建10个marker # 存储图片的路径 :param low_edge: float单位MHz :param up_edge: float单位MHz :return: :param tracen: int form:str, means:str,'S11','S12','S21','S22' :return: # zvl.set_ref_value(zvlhandler, tracen, -40) # 设置marker点 # max marker # create_max_marker(zvlhandler, tracen, markern + 1) # max marker 带内波动 :return: # min marker # max marker 读取增益及带内波动 S21 dBmg :return:高,中,低点增益,带内波动 # 高中低 # 读高,中,低点的增益 # 带内波动 # 绝对值 VSWR S11,SWR :return:max markerx,max markery # 高中低 # 截图 # mstr='@'.join([str(item) for item in marker]) 825~835MHz,870~880,890~915,935~960,1570.42~1585, 1710~1785,1805~1880,1920~1980,2110~2170, 2570~2620,1880~1915,2300~2400,2400~2483.5 截图三张,一张图最多截10个marker markerlist:[] :return: # 所有点的增益,注意要加上offset # 第一张图 # str # 第二张图 # 第三张图 # logger.debug(markery_list) | 2.385178 | 2 |
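The instrument-control calls in the record above depend on the ZVLBase wrapper, but the post-processing in get_gain is plain arithmetic. A standalone sketch with made-up marker readings shows the offset correction and formatting it applies (here the in-band ripple is approximated from the three marker points):
offset = 0.75                                    # example cable/fixture loss in dB, as passed to HandleZVL
high_y, mid_y, low_y = -12.34, -12.56, -12.91    # made-up S21 marker readings in dB
ripple = abs(max(high_y, mid_y, low_y) - min(high_y, mid_y, low_y))
result = ["%.2f" % (v + offset) for v in (high_y, mid_y, low_y)] + ["%.2f" % ripple]
print(result)   # ['-11.59', '-11.81', '-12.16', '0.57'] - ripple is reported without the offset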
kshell/partial_level_density.py | ErlendLima/70Zn | 0 | 7970 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import shellmodelutilities as smutil
# Set bin width and range
bin_width = 0.20
Emax = 14
Nbins = int(np.ceil(Emax/bin_width))
Emax_adjusted = bin_width*Nbins # Trick to get an integer number of bins
bins = np.linspace(0,Emax_adjusted,Nbins+1)
# Define list of calculation input files and corresponding label names
inputfile = "summary_Zn70_jun45.txt"
# Instantiate figure which we will fill
f_rho, ax_rho = plt.subplots(1,1)
# Read energy levels from file
levels = smutil.read_energy_levels(inputfile)
# Choose which [2*J,pi] combinations to include in partial level density plot
Jpi_list = [[0,-1],[2,-1],[4,-1],[6,-1],[8,-1],[10,-1],[12,-1],[14,-1],[16,-1],[18,-1],[20,-1],[22,-1],[24,-1],[26,-1],[28,-1],
[0,+1],[2,+1],[4,+1],[6,+1],[8,+1],[10,+1],[12,+1],[14,+1],[16,+1],[18,+1],[20,+1],[22,+1],[24,+1],[26,+1],[28,+1]]
# Allocate (Ex,Jpi) matrix to store partial level density
rho_ExJpi = np.zeros((Nbins,len(Jpi_list)))
# Count number of levels for each (Ex, J, pi) pixel.
Egs = levels[0,0] # Ground state energy
for i_l in range(len(levels[:,0])):
E, J, pi = levels[i_l]
# Skip if level is outside range:
if E-Egs >= Emax:
continue
i_Ex = int(np.floor((E-Egs)/bin_width))
try:
i_Jpi = Jpi_list.index([J,pi])
except:
continue
rho_ExJpi[i_Ex,i_Jpi] += 1
rho_ExJpi /= bin_width # Normalize to bin width, to get density in MeV^-1
# Plot it
from matplotlib.colors import LogNorm # To get log scaling on the z axis
colorbar_object = ax_rho.pcolormesh(np.linspace(0,len(Jpi_list)-1,len(Jpi_list)), bins, rho_ExJpi, norm=LogNorm())
f_rho.colorbar(colorbar_object) # Add colorbar to plot
# Make the plot nice
ax_rho.set_xlabel(r"$\pi\cdot J\,\mathrm{(\hbar)}$")
ax_rho.set_ylabel(r'$E_x \, \mathrm{(MeV)}$')
# A bit of Python voodoo to get the x labels right:
Jpi_array = np.append(np.linspace(0,-int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2)),np.linspace(0,int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2))) # Array of pi*J for plot
def format_func(value, tick_number):
if value >= 0 and value <= 28:
return int(Jpi_array[int(value)])
else:
return None
ax_rho.set_xlim([0,29])
ax_rho.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax_rho.set_xticks([0,2,4,6,8,10,12,14,15,17,19,21,23,25,27])
# Show plot
plt.show()
| from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import shellmodelutilities as smutil
# Set bin width and range
bin_width = 0.20
Emax = 14
Nbins = int(np.ceil(Emax/bin_width))
Emax_adjusted = bin_width*Nbins # Trick to get an integer number of bins
bins = np.linspace(0,Emax_adjusted,Nbins+1)
# Define list of calculation input files and corresponding label names
inputfile = "summary_Zn70_jun45.txt"
# Instantiate figure which we will fill
f_rho, ax_rho = plt.subplots(1,1)
# Read energy levels from file
levels = smutil.read_energy_levels(inputfile)
# Choose which [2*J,pi] combinations to include in partial level density plot
Jpi_list = [[0,-1],[2,-1],[4,-1],[6,-1],[8,-1],[10,-1],[12,-1],[14,-1],[16,-1],[18,-1],[20,-1],[22,-1],[24,-1],[26,-1],[28,-1],
[0,+1],[2,+1],[4,+1],[6,+1],[8,+1],[10,+1],[12,+1],[14,+1],[16,+1],[18,+1],[20,+1],[22,+1],[24,+1],[26,+1],[28,+1]]
# Allocate (Ex,Jpi) matrix to store partial level density
rho_ExJpi = np.zeros((Nbins,len(Jpi_list)))
# Count number of levels for each (Ex, J, pi) pixel.
Egs = levels[0,0] # Ground state energy
for i_l in range(len(levels[:,0])):
E, J, pi = levels[i_l]
# Skip if level is outside range:
if E-Egs >= Emax:
continue
i_Ex = int(np.floor((E-Egs)/bin_width))
try:
i_Jpi = Jpi_list.index([J,pi])
except:
continue
rho_ExJpi[i_Ex,i_Jpi] += 1
rho_ExJpi /= bin_width # Normalize to bin width, to get density in MeV^-1
# Plot it
from matplotlib.colors import LogNorm # To get log scaling on the z axis
colorbar_object = ax_rho.pcolormesh(np.linspace(0,len(Jpi_list)-1,len(Jpi_list)), bins, rho_ExJpi, norm=LogNorm())
f_rho.colorbar(colorbar_object) # Add colorbar to plot
# Make the plot nice
ax_rho.set_xlabel(r"$\pi\cdot J\,\mathrm{(\hbar)}$")
ax_rho.set_ylabel(r'$E_x \, \mathrm{(MeV)}$')
# A bit of Python voodoo to get the x labels right:
Jpi_array = np.append(np.linspace(0,-int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2)),np.linspace(0,int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2))) # Array of pi*J for plot
def format_func(value, tick_number):
if value >= 0 and value <= 28:
return int(Jpi_array[int(value)])
else:
return None
ax_rho.set_xlim([0,29])
ax_rho.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax_rho.set_xticks([0,2,4,6,8,10,12,14,15,17,19,21,23,25,27])
# Show plot
plt.show()
| en | 0.803474 | # Set bin width and range # Trick to get an integer number of bins # Define list of calculation input files and corresponding label names # Instantiate figure which we will fill # Read energy levels from file # Choose which [2*J,pi] combinations to include in partial level density plot # Allocate (Ex,Jpi) matrix to store partial level density # Count number of levels for each (Ex, J, pi) pixel. # Ground state energy # Skip if level is outside range: # Normalize to bin width, to get density in MeV^-1 # Plot it # To get log scaling on the z axis # Add colorbar to plot # Make the plot nice # A bit of Python voodoo to get the x labels right: # Array of pi*J for plot # Show plot | 2.068034 | 2 |
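The script above histograms shell-model levels into (Ex, 2J, pi) bins; the core binning step can be reproduced with a toy level list and plain numpy (the level values are invented):
import numpy as np

levels = np.array([[0.0, 0, +1], [1.2, 2, +1], [1.3, 2, +1], [2.7, 4, -1]])  # columns: E, 2J, pi
bin_width, Emax = 0.5, 3.0
Ex = levels[:, 0] - levels[0, 0]                       # excitation energy relative to the ground state
counts, edges = np.histogram(Ex, bins=int(Emax / bin_width), range=(0.0, Emax))
print(counts / bin_width)                              # total level density in MeV^-1, summed over J and pi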
tests/integration/test_provider_base.py | neuro-inc/platform-buckets-api | 0 | 7971 | <filename>tests/integration/test_provider_base.py
import abc
import secrets
from collections.abc import AsyncIterator, Awaitable, Callable, Mapping
from contextlib import AbstractAsyncContextManager, asynccontextmanager
from dataclasses import dataclass
from datetime import datetime, timezone
import pytest
from aiohttp import ClientSession
from yarl import URL
from platform_buckets_api.providers import (
BucketExistsError,
BucketNotExistsError,
BucketPermission,
BucketProvider,
RoleExistsError,
UserBucketOperations,
)
from platform_buckets_api.storage import ImportedBucket, ProviderBucket
BUCKET_NAME_PREFIX = "integration-tests-"
ROLE_NAME_PREFIX = "integration-tests-"
def _make_bucket_name() -> str:
return BUCKET_NAME_PREFIX + secrets.token_hex(5)
def _make_role_name() -> str:
return ROLE_NAME_PREFIX + secrets.token_hex(5)
class BasicBucketClient(abc.ABC):
@abc.abstractmethod
async def put_object(self, key: str, data: bytes) -> None:
pass
@abc.abstractmethod
async def read_object(self, key: str) -> bytes:
pass
@abc.abstractmethod
async def list_objects(self) -> list[str]:
pass
@abc.abstractmethod
async def delete_object(self, key: str) -> None:
pass
@dataclass()
class ProviderTestOption:
type: str
provider: BucketProvider
bucket_exists: Callable[[str], Awaitable[bool]]
make_client: Callable[
[ProviderBucket, Mapping[str, str]],
AbstractAsyncContextManager[BasicBucketClient],
]
get_admin: Callable[
[ProviderBucket], AbstractAsyncContextManager[BasicBucketClient]
]
role_exists: Callable[[str], Awaitable[bool]]
get_public_url: Callable[[str, str], URL]
credentials_for_imported: Mapping[str, str]
def as_admin_cm(
creator_func: Callable[[ProviderBucket], BasicBucketClient]
) -> Callable[[ProviderBucket], AbstractAsyncContextManager[BasicBucketClient]]:
@asynccontextmanager
async def creator(bucket: ProviderBucket) -> AsyncIterator[BasicBucketClient]:
yield creator_func(bucket)
return creator
# Access checkers
async def _test_no_access(
admin_client: BasicBucketClient,
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = secrets.token_hex(8)
with pytest.raises(Exception):
await user_client.put_object(key, data)
await admin_client.put_object(key, data)
with pytest.raises(Exception):
await user_client.read_object(key)
with pytest.raises(Exception):
await user_client.list_objects()
with pytest.raises(Exception):
await user_client.delete_object(key)
async def _test_read_access(
admin_client: BasicBucketClient,
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = "foo"
with pytest.raises(Exception):
await user_client.put_object(key, data)
await admin_client.put_object(key, data)
assert await user_client.read_object(key) == data
assert key in await user_client.list_objects()
with pytest.raises(Exception):
await user_client.delete_object(key)
async def _test_write_access(
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = "foo"
await user_client.put_object(key, data)
assert await user_client.read_object(key) == data
assert key in await user_client.list_objects()
await user_client.delete_object(key)
assert key not in await user_client.list_objects()
class TestProviderBase:
__test__ = False
async def test_bucket_create(self, provider_option: ProviderTestOption) -> None:
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
assert bucket.name == name
assert await provider_option.bucket_exists(name)
async def test_bucket_duplicate_create(
self,
provider_option: ProviderTestOption,
) -> None:
name = _make_bucket_name()
await provider_option.provider.create_bucket(name)
with pytest.raises(BucketExistsError):
await provider_option.provider.create_bucket(name)
async def test_bucket_delete(self, provider_option: ProviderTestOption) -> None:
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
await provider_option.provider.delete_bucket(bucket.name)
assert not await provider_option.bucket_exists(name)
async def test_bucket_delete_unknown(
self, provider_option: ProviderTestOption
) -> None:
with pytest.raises(BucketNotExistsError):
await provider_option.provider.delete_bucket(_make_bucket_name())
async def test_bucket_credentials_write_access(
self, provider_option: ProviderTestOption
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
credentials = await provider_option.provider.get_bucket_credentials(
bucket, write=True, requester="testing"
)
async with provider_option.make_client(bucket, credentials) as user_client:
await _test_write_access(user_client)
async def test_bucket_credentials_read_access(
self, provider_option: ProviderTestOption
) -> None:
return
if provider_option.type == "aws":
pytest.skip("Moto do not support embedding policies into token")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
credentials = await provider_option.provider.get_bucket_credentials(
bucket, write=False, requester="testing"
)
async with provider_option.make_client(
bucket, credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
async def test_signed_url_for_blob(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto fails for signed url with 500")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("foo/bar", b"test data")
url = await provider_option.provider.sign_url_for_blob(bucket, "foo/bar")
async with ClientSession() as session:
async with session.get(url) as resp:
data = await resp.read()
assert data == b"test data"
async def test_public_access_to_bucket(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto has bad support of this operation")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("blob1", b"blob data 1")
await admin_client.put_object("blob2", b"blob data 2")
await provider_option.provider.set_public_access(bucket.name, True)
async with ClientSession() as session:
url = provider_option.get_public_url(bucket.name, "blob1")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 1"
url = provider_option.get_public_url(bucket.name, "blob2")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 2"
async def test_bucket_make_public_for_imported_bucket(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto fails with 500")
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("blob1", b"blob data 1")
await admin_client.put_object("blob2", b"blob data 2")
async with UserBucketOperations.get_for_imported_bucket(
ImportedBucket(
id="not-important",
created_at=datetime.now(timezone.utc),
owner="user",
name="not-important",
org_name=None,
public=False,
provider_bucket=bucket,
credentials=provider_option.credentials_for_imported,
)
) as operations:
await operations.set_public_access(bucket.name, True)
async with ClientSession() as session:
url = provider_option.get_public_url(bucket.name, "blob1")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 1"
url = provider_option.get_public_url(bucket.name, "blob2")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 2"
@pytest.fixture()
async def sample_role_permissions(
self, provider_option: ProviderTestOption
) -> list[BucketPermission]:
bucket_name = _make_bucket_name()
await provider_option.provider.create_bucket(bucket_name)
return [
BucketPermission(
bucket_name=bucket_name,
write=True,
)
]
async def test_role_create(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
role = await provider_option.provider.create_role(name, sample_role_permissions)
assert name in role.name
assert await provider_option.role_exists(role.name)
async def test_role_create_multiple(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name1, name2 = _make_role_name(), _make_role_name()
role1 = await provider_option.provider.create_role(
name1, sample_role_permissions
)
role2 = await provider_option.provider.create_role(
name2, sample_role_permissions
)
assert await provider_option.role_exists(role1.name)
assert await provider_option.role_exists(role2.name)
async def test_role_duplicate(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
await provider_option.provider.create_role(name, sample_role_permissions)
with pytest.raises(RoleExistsError):
await provider_option.provider.create_role(name, sample_role_permissions)
async def test_role_delete(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
role = await provider_option.provider.create_role(name, sample_role_permissions)
await provider_option.provider.delete_role(role)
assert not await provider_option.role_exists(role.name)
async def test_role_grant_bucket_write_access(
self,
provider_option: ProviderTestOption,
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(bucket, role.credentials) as user_client:
await _test_write_access(user_client)
async def test_role_grant_bucket_read_only_access(
self,
provider_option: ProviderTestOption,
) -> None:
return
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=False,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
async def test_role_grant_access_multiple_buckets(
self,
provider_option: ProviderTestOption,
) -> None:
if provider_option.type == "azure":
pytest.skip("Azure provider do not support multiple buckets roles")
bucket1 = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket1.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(
bucket1, role.credentials
) as user_client:
await _test_write_access(user_client)
bucket2 = await provider_option.provider.create_bucket(_make_bucket_name())
await provider_option.provider.set_role_permissions(
role,
[
BucketPermission(
bucket_name=bucket1.name,
write=True,
),
BucketPermission(
bucket_name=bucket2.name,
write=True,
),
],
)
async with provider_option.make_client(
bucket1, role.credentials
) as user_client:
await _test_write_access(user_client)
async with provider_option.make_client(
bucket2, role.credentials
) as user_client:
await _test_write_access(user_client)
async def test_role_downgrade_access(
self,
provider_option: ProviderTestOption,
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(bucket, role.credentials) as user_client:
await _test_write_access(user_client)
await provider_option.provider.set_role_permissions(
role,
[
BucketPermission(
bucket_name=bucket.name,
write=False,
),
],
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
await provider_option.provider.set_role_permissions(
role,
[],
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_no_access(admin, user_client)
| <filename>tests/integration/test_provider_base.py
import abc
import secrets
from collections.abc import AsyncIterator, Awaitable, Callable, Mapping
from contextlib import AbstractAsyncContextManager, asynccontextmanager
from dataclasses import dataclass
from datetime import datetime, timezone
import pytest
from aiohttp import ClientSession
from yarl import URL
from platform_buckets_api.providers import (
BucketExistsError,
BucketNotExistsError,
BucketPermission,
BucketProvider,
RoleExistsError,
UserBucketOperations,
)
from platform_buckets_api.storage import ImportedBucket, ProviderBucket
BUCKET_NAME_PREFIX = "integration-tests-"
ROLE_NAME_PREFIX = "integration-tests-"
def _make_bucket_name() -> str:
return BUCKET_NAME_PREFIX + secrets.token_hex(5)
def _make_role_name() -> str:
return ROLE_NAME_PREFIX + secrets.token_hex(5)
class BasicBucketClient(abc.ABC):
@abc.abstractmethod
async def put_object(self, key: str, data: bytes) -> None:
pass
@abc.abstractmethod
async def read_object(self, key: str) -> bytes:
pass
@abc.abstractmethod
async def list_objects(self) -> list[str]:
pass
@abc.abstractmethod
async def delete_object(self, key: str) -> None:
pass
@dataclass()
class ProviderTestOption:
type: str
provider: BucketProvider
bucket_exists: Callable[[str], Awaitable[bool]]
make_client: Callable[
[ProviderBucket, Mapping[str, str]],
AbstractAsyncContextManager[BasicBucketClient],
]
get_admin: Callable[
[ProviderBucket], AbstractAsyncContextManager[BasicBucketClient]
]
role_exists: Callable[[str], Awaitable[bool]]
get_public_url: Callable[[str, str], URL]
credentials_for_imported: Mapping[str, str]
def as_admin_cm(
creator_func: Callable[[ProviderBucket], BasicBucketClient]
) -> Callable[[ProviderBucket], AbstractAsyncContextManager[BasicBucketClient]]:
@asynccontextmanager
async def creator(bucket: ProviderBucket) -> AsyncIterator[BasicBucketClient]:
yield creator_func(bucket)
return creator
# Access checkers
async def _test_no_access(
admin_client: BasicBucketClient,
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = secrets.token_hex(8)
with pytest.raises(Exception):
await user_client.put_object(key, data)
await admin_client.put_object(key, data)
with pytest.raises(Exception):
await user_client.read_object(key)
with pytest.raises(Exception):
await user_client.list_objects()
with pytest.raises(Exception):
await user_client.delete_object(key)
async def _test_read_access(
admin_client: BasicBucketClient,
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = "foo"
with pytest.raises(Exception):
await user_client.put_object(key, data)
await admin_client.put_object(key, data)
assert await user_client.read_object(key) == data
assert key in await user_client.list_objects()
with pytest.raises(Exception):
await user_client.delete_object(key)
async def _test_write_access(
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = "foo"
await user_client.put_object(key, data)
assert await user_client.read_object(key) == data
assert key in await user_client.list_objects()
await user_client.delete_object(key)
assert key not in await user_client.list_objects()
class TestProviderBase:
__test__ = False
async def test_bucket_create(self, provider_option: ProviderTestOption) -> None:
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
assert bucket.name == name
assert await provider_option.bucket_exists(name)
async def test_bucket_duplicate_create(
self,
provider_option: ProviderTestOption,
) -> None:
name = _make_bucket_name()
await provider_option.provider.create_bucket(name)
with pytest.raises(BucketExistsError):
await provider_option.provider.create_bucket(name)
async def test_bucket_delete(self, provider_option: ProviderTestOption) -> None:
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
await provider_option.provider.delete_bucket(bucket.name)
assert not await provider_option.bucket_exists(name)
async def test_bucket_delete_unknown(
self, provider_option: ProviderTestOption
) -> None:
with pytest.raises(BucketNotExistsError):
await provider_option.provider.delete_bucket(_make_bucket_name())
async def test_bucket_credentials_write_access(
self, provider_option: ProviderTestOption
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
credentials = await provider_option.provider.get_bucket_credentials(
bucket, write=True, requester="testing"
)
async with provider_option.make_client(bucket, credentials) as user_client:
await _test_write_access(user_client)
async def test_bucket_credentials_read_access(
self, provider_option: ProviderTestOption
) -> None:
return
if provider_option.type == "aws":
pytest.skip("Moto do not support embedding policies into token")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
credentials = await provider_option.provider.get_bucket_credentials(
bucket, write=False, requester="testing"
)
async with provider_option.make_client(
bucket, credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
async def test_signed_url_for_blob(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto fails for signed url with 500")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("foo/bar", b"test data")
url = await provider_option.provider.sign_url_for_blob(bucket, "foo/bar")
async with ClientSession() as session:
async with session.get(url) as resp:
data = await resp.read()
assert data == b"test data"
async def test_public_access_to_bucket(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto has bad support of this operation")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("blob1", b"blob data 1")
await admin_client.put_object("blob2", b"blob data 2")
await provider_option.provider.set_public_access(bucket.name, True)
async with ClientSession() as session:
url = provider_option.get_public_url(bucket.name, "blob1")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 1"
url = provider_option.get_public_url(bucket.name, "blob2")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 2"
async def test_bucket_make_public_for_imported_bucket(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto fails with 500")
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("blob1", b"blob data 1")
await admin_client.put_object("blob2", b"blob data 2")
async with UserBucketOperations.get_for_imported_bucket(
ImportedBucket(
id="not-important",
created_at=datetime.now(timezone.utc),
owner="user",
name="not-important",
org_name=None,
public=False,
provider_bucket=bucket,
credentials=provider_option.credentials_for_imported,
)
) as operations:
await operations.set_public_access(bucket.name, True)
async with ClientSession() as session:
url = provider_option.get_public_url(bucket.name, "blob1")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 1"
url = provider_option.get_public_url(bucket.name, "blob2")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 2"
@pytest.fixture()
async def sample_role_permissions(
self, provider_option: ProviderTestOption
) -> list[BucketPermission]:
bucket_name = _make_bucket_name()
await provider_option.provider.create_bucket(bucket_name)
return [
BucketPermission(
bucket_name=bucket_name,
write=True,
)
]
async def test_role_create(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
role = await provider_option.provider.create_role(name, sample_role_permissions)
assert name in role.name
assert await provider_option.role_exists(role.name)
async def test_role_create_multiple(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name1, name2 = _make_role_name(), _make_role_name()
role1 = await provider_option.provider.create_role(
name1, sample_role_permissions
)
role2 = await provider_option.provider.create_role(
name2, sample_role_permissions
)
assert await provider_option.role_exists(role1.name)
assert await provider_option.role_exists(role2.name)
async def test_role_duplicate(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
await provider_option.provider.create_role(name, sample_role_permissions)
with pytest.raises(RoleExistsError):
await provider_option.provider.create_role(name, sample_role_permissions)
async def test_role_delete(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
role = await provider_option.provider.create_role(name, sample_role_permissions)
await provider_option.provider.delete_role(role)
assert not await provider_option.role_exists(role.name)
async def test_role_grant_bucket_write_access(
self,
provider_option: ProviderTestOption,
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(bucket, role.credentials) as user_client:
await _test_write_access(user_client)
async def test_role_grant_bucket_read_only_access(
self,
provider_option: ProviderTestOption,
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=False,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
async def test_role_grant_access_multiple_buckets(
self,
provider_option: ProviderTestOption,
) -> None:
if provider_option.type == "azure":
pytest.skip("Azure provider do not support multiple buckets roles")
bucket1 = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket1.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(
bucket1, role.credentials
) as user_client:
await _test_write_access(user_client)
bucket2 = await provider_option.provider.create_bucket(_make_bucket_name())
await provider_option.provider.set_role_permissions(
role,
[
BucketPermission(
bucket_name=bucket1.name,
write=True,
),
BucketPermission(
bucket_name=bucket2.name,
write=True,
),
],
)
async with provider_option.make_client(
bucket1, role.credentials
) as user_client:
await _test_write_access(user_client)
async with provider_option.make_client(
bucket2, role.credentials
) as user_client:
await _test_write_access(user_client)
async def test_role_downgrade_access(
self,
provider_option: ProviderTestOption,
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(bucket, role.credentials) as user_client:
await _test_write_access(user_client)
await provider_option.provider.set_role_permissions(
role,
[
BucketPermission(
bucket_name=bucket.name,
write=False,
),
],
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
await provider_option.provider.set_role_permissions(
role,
[],
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_no_access(admin, user_client)
| en | 0.440272 | # Access checkers | 1.965176 | 2 |
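The bucket-provider tests above repeatedly call _test_write_access, _test_read_access and _test_no_access, which are defined elsewhere in the test module. A minimal sketch of the shape such a write-access check could take is below; the helper body, the object key and the assumption that the user client exposes put_object/get_object coroutines are illustrative guesses, not the project's actual implementation.

# Hypothetical sketch only: the real helper lives elsewhere in the test module.
async def _test_write_access(user_client) -> None:
    # A writable client should be able to create a blob and read it back.
    await user_client.put_object("write-check", b"payload")
    assert await user_client.get_object("write-check") == b"payload"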
sbpy/photometry/bandpass.py | jianyangli/sbpy | 1 | 7972 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
sbpy bandpass Module
"""
__all__ = [
'bandpass'
]
import os
from astropy.utils.data import get_pkg_data_filename
def bandpass(name):
"""Retrieve bandpass transmission spectrum from sbpy.
Parameters
----------
name : string
Name of the bandpass, case insensitive. See notes for
available filters.
Returns
-------
bp : `~synphot.SpectralElement`
Notes
-----
Available filters:
+-------------+---------------------------+
| Name | Source |
+=============+===========================+
| 2MASS J | Cohen et al. 2003 |
+-------------+---------------------------+
| 2MASS H | Cohen et al. 2003 |
+-------------+---------------------------+
| 2MASS Ks | Cohen et al. 2003 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| <NAME> | STScI CDBS, v4 |
+-------------+---------------------------+
| PS1 g | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 r | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 i | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 w | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 y | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 z | Tonry et al. 2012 |
+-------------+---------------------------+
| SDSS u | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS g | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS r | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS i | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS z | SDSS, dated 2001 |
+-------------+---------------------------+
| WFC3 F438W | HST/WFC3 UVIS, v4 |
+-------------+---------------------------+
| WFC3 F606W | HST/WFC3 UVIS, v4 |
+-------------+---------------------------+
| WISE W1 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W2 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W3 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W4 | Jarrett et al. 2011 |
+-------------+---------------------------+
References
----------
.. [CDBS] Space Telescope Science Institute. HST Calibration Reference
Data System. https://hst-crds.stsci.edu/ .
.. [COH03] <NAME>. et al. 2003. Spectral Irradiance Calibration
in the Infrared. XIV. The Absolute Calibration of 2MASS. AJ
126, 1090.
.. [JAR11] <NAME>. et al. 2011. The Spitzer-WISE Survey of
the Ecliptic Poles. ApJ 735, 112.
.. [SDSS] Sloan Digital Sky Survey. Camera.
www.sdss.org/instruments/camera .
.. [TON12] <NAME>. et al. 2012. The Pan-STARRS1 Photometric
System. ApJ 750, 99.
"""
try:
import synphot
except ImportError:
raise ImportError('synphot is required.')
name2file = {
'2mass j': '2mass-j-rsr.txt',
'2mass h': '2mass-h-rsr.txt',
'2mass ks': '2mass-ks-rsr.txt',
'cousins r': 'cousins_r_004_syn.fits',
'cousins i': 'cousins_i_004_syn.fits',
'johnson u': 'johnson_u_004_syn.fits',
'johnson b': 'johnson_b_004_syn.fits',
'johnson v': 'johnson_v_004_syn.fits',
'ps1 g': 'ps1-gp1.txt',
'ps1 r': 'ps1-rp1.txt',
'ps1 i': 'ps1-ip1.txt',
'ps1 w': 'ps1-wp1.txt',
'ps1 y': 'ps1-yp1.txt',
'ps1 z': 'ps1-zp1.txt',
'sdss u': 'sdss-u.fits',
'sdss g': 'sdss-g.fits',
'sdss r': 'sdss-r.fits',
'sdss i': 'sdss-i.fits',
'sdss z': 'sdss-z.fits',
'wfc3 f438w': 'wfc3_uvis_f438w_004_syn.fits',
'wfc3 f606w': 'wfc3_uvis_f606w_004_syn.fits',
'wise w1': 'WISE-RSR-W1.EE.txt',
'wise w2': 'WISE-RSR-W2.EE.txt',
'wise w3': 'WISE-RSR-W3.EE.txt',
'wise w4': 'WISE-RSR-W4.EE.txt',
}
fn = get_pkg_data_filename(os.path.join(
'..', 'photometry', 'data', name2file[name.lower()]))
bp = synphot.SpectralElement.from_file(fn)
return bp
| en | 0.452316 | # Licensed under a 3-clause BSD style license - see LICENSE.rst sbpy bandpass Module Retrieve bandpass transmission spectrum from sbpy. Parameters ---------- name : string Name of the bandpass, case insensitive. See notes for available filters. Returns ------- bp : `~synphot.SpectralElement` Notes ----- Available filters: +-------------+---------------------------+ | Name | Source | +=============+===========================+ | 2MASS J | Cohen et al. 2003 | +-------------+---------------------------+ | 2MASS H | Cohen et al. 2003 | +-------------+---------------------------+ | 2MASS Ks | Cohen et al. 2003 | +-------------+---------------------------+ | <NAME> | STScI CDBS, v4 | +-------------+---------------------------+ | <NAME> | STScI CDBS, v4 | +-------------+---------------------------+ | <NAME> | STScI CDBS, v4 | +-------------+---------------------------+ | <NAME> | STScI CDBS, v4 | +-------------+---------------------------+ | <NAME> | STScI CDBS, v4 | +-------------+---------------------------+ | PS1 g | Tonry et al. 2012 | +-------------+---------------------------+ | PS1 r | Tonry et al. 2012 | +-------------+---------------------------+ | PS1 i | Tonry et al. 2012 | +-------------+---------------------------+ | PS1 w | Tonry et al. 2012 | +-------------+---------------------------+ | PS1 y | Tonry et al. 2012 | +-------------+---------------------------+ | PS1 z | Tonry et al. 2012 | +-------------+---------------------------+ | SDSS u | SDSS, dated 2001 | +-------------+---------------------------+ | SDSS g | SDSS, dated 2001 | +-------------+---------------------------+ | SDSS r | SDSS, dated 2001 | +-------------+---------------------------+ | SDSS i | SDSS, dated 2001 | +-------------+---------------------------+ | SDSS z | SDSS, dated 2001 | +-------------+---------------------------+ | WFC3 F438W | HST/WFC3 UVIS, v4 | +-------------+---------------------------+ | WFC3 F606W | HST/WFC3 UVIS, v4 | +-------------+---------------------------+ | WISE W1 | Jarrett et al. 2011 | +-------------+---------------------------+ | WISE W2 | Jarrett et al. 2011 | +-------------+---------------------------+ | WISE W3 | Jarrett et al. 2011 | +-------------+---------------------------+ | WISE W4 | Jarrett et al. 2011 | +-------------+---------------------------+ References ---------- .. [CDBS] Space Telescope Science Institute. HST Calibration Reference Data System. https://hst-crds.stsci.edu/ . .. [COH03] <NAME>. et al. 2003. Spectral Irradiance Calibration in the Infrared. XIV. The Absolute Calibration of 2MASS. AJ 126, 1090. .. [JAR11] <NAME>. et al. 2011. The Spitzer-WISE Survey of the Ecliptic Poles. ApJ 735, 112. .. [SDSS] Sloan Digital Sky Survey. Camera. www.sdss.org/instruments/camera . .. [TON12] <NAME>. et al. 2012. The Pan-STARRS1 Photometric System. ApJ 750, 99. | 1.866113 | 2 |
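A short usage sketch for the bandpass helper above. It assumes synphot is installed and that the function is importable as shown (the import path is inferred from the file location); the filter name and wavelength are arbitrary examples.

import astropy.units as u
from sbpy.photometry import bandpass

bp = bandpass('2MASS J')      # filter names are case insensitive
print(bp.avgwave())           # average wavelength of the transmission curve
print(bp(1.25 * u.um))        # throughput at an (arbitrary) wavelength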
appserver/search/views.py | sinag/SWE574-Horuscope | 0 | 7973 | from django.http import HttpResponse
from django.shortcuts import render, redirect
from community.models import Community
# Create your views here.
def search_basic(request):
    communities = None
    if request.method == 'POST':
        community_query = request.POST.get('community_search', '')
        communities = Community.objects.filter(city__icontains=community_query)
    return render(request, 'search/search_basic.html', {'communities': communities})
| en | 0.968116 | # Create your views here. | 2.081141 | 2 |
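A minimal sketch of how search_basic might be wired into a URL configuration; the URL pattern and name are assumptions, not taken from the repository.

# urls.py (sketch)
from django.urls import path
from search import views

urlpatterns = [
    path('search/', views.search_basic, name='search_basic'),
]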
teams/migrations/0001_initial.py | Sudani-Coder/teammanager | 0 | 7974 | <filename>teams/migrations/0001_initial.py
# Generated by Django 3.1.2 on 2020-10-18 17:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='GameScore',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_team', models.CharField(max_length=200)),
('second_team', models.CharField(max_length=200)),
('first_team_score', models.IntegerField(default=0)),
('second_team_score', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('number', models.IntegerField()),
('age', models.IntegerField()),
('position_in_field', models.CharField(choices=[('1', 'حارس'), ('2', 'دفاع'), ('3', 'وسط'), ('4', 'هجوم')], max_length=200)),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('details', models.TextField()),
],
),
]
| en | 0.823185 | # Generated by Django 3.1.2 on 2020-10-18 17:19 | 1.858458 | 2 |
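For reference, a models.py sketch that would generate roughly this initial migration; field names, types and choices mirror the CreateModel operations above, while class ordering and any Meta options are guesses.

from django.db import models

class GameScore(models.Model):
    first_team = models.CharField(max_length=200)
    second_team = models.CharField(max_length=200)
    first_team_score = models.IntegerField(default=0)
    second_team_score = models.IntegerField(default=0)

class Player(models.Model):
    # Arabic labels: goalkeeper, defense, midfield, attack
    POSITION_CHOICES = [('1', 'حارس'), ('2', 'دفاع'), ('3', 'وسط'), ('4', 'هجوم')]
    name = models.CharField(max_length=200)
    number = models.IntegerField()
    age = models.IntegerField()
    position_in_field = models.CharField(choices=POSITION_CHOICES, max_length=200)

class Team(models.Model):
    name = models.CharField(max_length=200, unique=True)
    details = models.TextField()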
qcodes_contrib_drivers/drivers/Oxford/ILM200.py | jenshnielsen/Qcodes_contrib_drivers | 0 | 7975 | # OxfordInstruments_ILM200.py class, to perform the communication between the Wrapper and the device
# Copyright (c) 2017 QuTech (Delft)
# Code is available under the available under the `MIT open-source license <https://opensource.org/licenses/MIT>`__
#
# <NAME> <<EMAIL>>, 2017
# <NAME> <<EMAIL>>, 2016
# <NAME> <<EMAIL>>, 2009
# <NAME> <<EMAIL>>, 2009
from time import sleep
import visa
import logging
from qcodes import VisaInstrument
class OxfordInstruments_ILM200(VisaInstrument):
"""
This is the qcodes driver for the Oxford Instruments ILM 200 Helium Level Meter.
Usage:
Initialize with
<name> = instruments.create('name', 'OxfordInstruments_ILM200', address='<Instrument address>')
<Instrument address> = ASRL4::INSTR
Note: Since the ISOBUS allows for several instruments to be managed in parallel, the command
which is sent to the device starts with '@n', where n is the ISOBUS instrument number.
"""
def __init__(self, name, address, number=1, **kwargs):
"""
Initializes the Oxford Instruments ILM 200 Helium Level Meter.
Args:
name (str): name of the instrument
address (str): instrument address
number (int): ISOBUS instrument number (number=1 is specific to the ILM in F008)
Returns:
None
"""
logging.debug(__name__ + ' : Initializing instrument')
super().__init__(name, address, **kwargs)
self.visa_handle.set_visa_attribute(visa.constants.VI_ATTR_ASRL_STOP_BITS,
visa.constants.VI_ASRL_STOP_TWO)
self._address = address
self._number = number
self._values = {}
self.add_parameter('level',
label='level',
get_cmd=self._do_get_level,
unit='%')
self.add_parameter('status',
get_cmd=self._do_get_status)
self.add_parameter('rate',
get_cmd=self._do_get_rate,
set_cmd=self._do_set_rate)
# a dummy command to avoid the initial error
try:
self.get_idn()
sleep(70e-3) # wait for the device to be able to respond
self._read() # to flush the buffer
except Exception as ex:
logging.debug(ex)
def _execute(self, message):
"""
Write a command to the device and read answer. This function writes to
the buffer by adding the device number at the front, instead of 'ask'.
Args:
message (str) : write command for the device
Returns:
None
"""
logging.info(
__name__ + ' : Send the following command to the device: %s' % message)
self.visa_handle.write('@%s%s' % (self._number, message))
sleep(70e-3) # wait for the device to be able to respond
result = self._read()
if result.find('?') >= 0:
print("Error: Command %s not recognized" % message)
else:
return result
def _read(self):
"""
Reads the total bytes in the buffer and outputs as a string.
Args:
None
Returns:
message (str)
"""
# because protocol has no termination chars the read reads the number
# of bytes in the buffer
bytes_in_buffer = self.visa_handle.bytes_in_buffer
# a workaround for a timeout error in the pyvsia read_raw() function
with(self.visa_handle.ignore_warning(visa.constants.VI_SUCCESS_MAX_CNT)):
mes = self.visa_handle.visalib.read(
self.visa_handle.session, bytes_in_buffer)
# cannot be done on same line for some reason
mes = str(mes[0].decode())
return mes
def get_idn(self):
"""
Overrides the function of Instrument since ILM does not support `*IDN?`
This string is supposed to be a
comma-separated list of vendor, model, serial, and firmware, but
semicolon and colon are also common separators so we accept them here
as well.
Returns:
A dict containing vendor, model, serial, and firmware.
"""
try:
idstr = '' # in case self.ask fails
idstr = self._get_version().split()
# form is supposed to be comma-separated, but we've seen
# other separators occasionally
idparts = [idstr[3] + ' ' + idstr[4], idstr[0], idstr[5],
idstr[1] + ' ' + idstr[2]]
# in case parts at the end are missing, fill in None
if len(idparts) < 4:
idparts += [None] * (4 - len(idparts))
except Exception as ex:
            logging.warning('Error getting or interpreting *IDN?: ' + repr(idstr))
logging.debug(ex)
idparts = [None, None, None, None]
return dict(zip(('vendor', 'model', 'serial', 'firmware'), idparts))
def get_all(self):
"""
Reads all implemented parameters from the instrument,
and updates the wrapper.
"""
logging.info(__name__ + ' : reading all settings from instrument')
self.level.get()
self.status.get()
self.rate.get()
def close(self):
"""
Safely close connection
"""
logging.info(__name__ + ' : Closing ILM200 connection')
self.local()
super().close()
# Functions: Monitor commands
def _get_version(self):
"""
Identify the device
Args:
None
Returns:
identification (str): should be 'ILM200 Version 1.08 (c) OXFORD 1994\r'
"""
logging.info(__name__ + ' : Identify the device')
return self._execute('V')
def _do_get_level(self):
"""
Get Helium level of channel 1.
Args:
None
Returns:
result (float) : Helium level
"""
logging.info(__name__ + ' : Read level of channel 1')
result = self._execute('R1')
return float(result.replace("R", "")) / 10
def _do_get_status(self):
"""
Get status of the device.
"""
logging.info(__name__ + ' : Get status of the device.')
result = self._execute('X')
usage = {
0: "Channel not in use",
1: "Channel used for Nitrogen level",
2: "Channel used for Helium Level (Normal pulsed operation)",
3: "Channel used for Helium Level (Continuous measurement)",
9: "Error on channel (Usually means probe unplugged)"
}
# current_flowing = {
# 0 : "Curent not flowing in Helium Probe Wire",
# 1 : "Curent not flowing in Helium Probe Wire"
# }
# auto_fill_status = {
# 00 : "End Fill (Level > FULL)",
# 01 : "Not Filling (Level < FULL, Level > FILL)",
# 10 : "Filling (Level < FULL, Level > FILL)",
# 11 : "Start Filling (Level < FILL)"
# }
return usage.get(int(result[1]), "Unknown")
def _do_get_rate(self):
"""
Get helium meter channel 1 probe rate
Input:
None
Output:
rate(int) :
0 : "SLOW"
1 : "FAST"
"""
rate = {
1: "1 : Helium Probe in FAST rate",
0: "0 : Helium Probe in SLOW rate"
}
result = self._execute('X')
return rate.get(int(format(int(result[5:7]), '08b')[6]), "Unknown")
def remote(self):
"""
Set control to remote & locked
"""
logging.info(__name__ + ' : Set control to remote & locked')
self.set_remote_status(1)
def local(self):
"""
Set control to local & locked
"""
logging.info(__name__ + ' : Set control to local & locked')
self.set_remote_status(0)
def set_remote_status(self, mode):
"""
Set remote control status.
Args:
mode(int) :
0 : "Local and locked",
1 : "Remote and locked",
2 : "Local and unlocked",
3 : "Remote and unlocked",
Returns:
None
"""
status = {
0: "Local and locked",
1: "Remote and locked",
2: "Local and unlocked",
3: "Remote and unlocked",
}
logging.info(__name__ + ' : Setting remote control status to %s' %
status.get(mode, "Unknown"))
self._execute('C%s' % mode)
# Functions: Control commands (only recognised when in REMOTE control)
def set_to_slow(self):
"""
Set helium meter channel 1 to slow mode.
"""
self.set_remote_status(1)
logging.info(__name__ + ' : Setting Helium Probe in SLOW rate')
self._execute('S1')
self.set_remote_status(3)
def set_to_fast(self):
"""
Set helium meter channel 1 to fast mode.
"""
self.set_remote_status(1)
logging.info(__name__ + ' : Setting Helium Probe in FAST rate')
self._execute('T1')
self.set_remote_status(3)
def _do_set_rate(self, rate):
"""
Set helium meter channel 1 probe rate
Args:
rate(int) :
0 : "SLOW"
1 : "FAST"
"""
self.set_remote_status(1)
if rate == 0:
self.set_to_slow()
elif rate == 1:
self.set_to_fast()
self.set_remote_status(3)
logging.info(self._do_get_rate())
| en | 0.783209 | # OxfordInstruments_ILM200.py class, to perform the communication between the Wrapper and the device # Copyright (c) 2017 QuTech (Delft) # Code is available under the available under the `MIT open-source license <https://opensource.org/licenses/MIT>`__ # # <NAME> <<EMAIL>>, 2017 # <NAME> <<EMAIL>>, 2016 # <NAME> <<EMAIL>>, 2009 # <NAME> <<EMAIL>>, 2009 This is the qcodes driver for the Oxford Instruments ILM 200 Helium Level Meter. Usage: Initialize with <name> = instruments.create('name', 'OxfordInstruments_ILM200', address='<Instrument address>') <Instrument address> = ASRL4::INSTR Note: Since the ISOBUS allows for several instruments to be managed in parallel, the command which is sent to the device starts with '@n', where n is the ISOBUS instrument number. Initializes the Oxford Instruments ILM 200 Helium Level Meter. Args: name (str): name of the instrument address (str): instrument address number (int): ISOBUS instrument number (number=1 is specific to the ILM in F008) Returns: None # a dummy command to avoid the initial error # wait for the device to be able to respond # to flush the buffer Write a command to the device and read answer. This function writes to the buffer by adding the device number at the front, instead of 'ask'. Args: message (str) : write command for the device Returns: None # wait for the device to be able to respond Reads the total bytes in the buffer and outputs as a string. Args: None Returns: message (str) # because protocol has no termination chars the read reads the number # of bytes in the buffer # a workaround for a timeout error in the pyvsia read_raw() function # cannot be done on same line for some reason Overrides the function of Instrument since ILM does not support `*IDN?` This string is supposed to be a comma-separated list of vendor, model, serial, and firmware, but semicolon and colon are also common separators so we accept them here as well. Returns: A dict containing vendor, model, serial, and firmware. # in case self.ask fails # form is supposed to be comma-separated, but we've seen # other separators occasionally # in case parts at the end are missing, fill in None Reads all implemented parameters from the instrument, and updates the wrapper. Safely close connection # Functions: Monitor commands Identify the device Args: None Returns: identification (str): should be 'ILM200 Version 1.08 (c) OXFORD 1994\r' Get Helium level of channel 1. Args: None Returns: result (float) : Helium level Get status of the device. # current_flowing = { # 0 : "Curent not flowing in Helium Probe Wire", # 1 : "Curent not flowing in Helium Probe Wire" # } # auto_fill_status = { # 00 : "End Fill (Level > FULL)", # 01 : "Not Filling (Level < FULL, Level > FILL)", # 10 : "Filling (Level < FULL, Level > FILL)", # 11 : "Start Filling (Level < FILL)" # } Get helium meter channel 1 probe rate Input: None Output: rate(int) : 0 : "SLOW" 1 : "FAST" Set control to remote & locked Set control to local & locked Set remote control status. Args: mode(int) : 0 : "Local and locked", 1 : "Remote and locked", 2 : "Local and unlocked", 3 : "Remote and unlocked", Returns: None # Functions: Control commands (only recognised when in REMOTE control) Set helium meter channel 1 to slow mode. Set helium meter channel 1 to fast mode. Set helium meter channel 1 probe rate Args: rate(int) : 0 : "SLOW" 1 : "FAST" | 2.264951 | 2 |
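A usage sketch for the ILM200 driver above, following the address format given in its class docstring; the import path is inferred from the file location and the station/ISOBUS details are assumptions.

from qcodes_contrib_drivers.drivers.Oxford.ILM200 import OxfordInstruments_ILM200

# ASRL4::INSTR and ISOBUS number 1 follow the defaults mentioned in the docstring.
ilm = OxfordInstruments_ILM200('ilm', 'ASRL4::INSTR', number=1)
print(ilm.level())   # helium level in %
print(ilm.status())  # channel usage description
print(ilm.rate())    # probe rate (SLOW/FAST)
ilm.close()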
load_cifar_10.py | xgxofdream/CNN-Using-Local-CIFAR-10-dataset | 0 | 7976 | <reponame>xgxofdream/CNN-Using-Local-CIFAR-10-dataset
import numpy as np
import matplotlib.pyplot as plt
import pickle
"""
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000
training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains
exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random
order, but some training batches may contain more images from one class than another. Between them, the training
batches contain exactly 5000 images from each class.
"""
def unpickle(file):
"""load the cifar-10 data"""
with open(file, 'rb') as fo:
data = pickle.load(fo, encoding='bytes')
return data
def load_cifar_10_data(data_dir, negatives=False):
"""
Return train_data, train_filenames, train_labels, test_data, test_filenames, test_labels
"""
# get the meta_data_dict
# num_cases_per_batch: 1000
# label_names: ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# num_vis: :3072
meta_data_dict = unpickle(data_dir + "/batches.meta")
cifar_label_names = meta_data_dict[b'label_names']
cifar_label_names = np.array(cifar_label_names)
# training data
cifar_train_data = None
cifar_train_filenames = []
cifar_train_labels = []
# cifar_train_data_dict
# 'batch_label': 'training batch 5 of 5'
# 'data': ndarray
# 'filenames': list
# 'labels': list
for i in range(1, 6):
cifar_train_data_dict = unpickle(data_dir + "/data_batch_{}".format(i))
if i == 1:
cifar_train_data = cifar_train_data_dict[b'data']
else:
cifar_train_data = np.vstack((cifar_train_data, cifar_train_data_dict[b'data']))
cifar_train_filenames += cifar_train_data_dict[b'filenames']
cifar_train_labels += cifar_train_data_dict[b'labels']
cifar_train_data = cifar_train_data.reshape((len(cifar_train_data), 3, 32, 32))
if negatives:
cifar_train_data = cifar_train_data.transpose(0, 2, 3, 1).astype(np.float32)
else:
cifar_train_data = np.rollaxis(cifar_train_data, 1, 4)
cifar_train_filenames = np.array(cifar_train_filenames)
cifar_train_labels = np.array(cifar_train_labels)
# test data
# cifar_test_data_dict
# 'batch_label': 'testing batch 1 of 1'
# 'data': ndarray
# 'filenames': list
# 'labels': list
cifar_test_data_dict = unpickle(data_dir + "/test_batch")
cifar_test_data = cifar_test_data_dict[b'data']
cifar_test_filenames = cifar_test_data_dict[b'filenames']
cifar_test_labels = cifar_test_data_dict[b'labels']
cifar_test_data = cifar_test_data.reshape((len(cifar_test_data), 3, 32, 32))
if negatives:
cifar_test_data = cifar_test_data.transpose(0, 2, 3, 1).astype(np.float32)
else:
cifar_test_data = np.rollaxis(cifar_test_data, 1, 4)
cifar_test_filenames = np.array(cifar_test_filenames)
cifar_test_labels = np.array(cifar_test_labels)
return cifar_train_data, cifar_train_filenames, cifar_train_labels, \
cifar_test_data, cifar_test_filenames, cifar_test_labels, cifar_label_names
if __name__ == "__main__":
"""show it works"""
    cifar_10_dir = './cifar10-dataset'
train_data, train_filenames, train_labels, test_data, test_filenames, test_labels, label_names = \
load_cifar_10_data(cifar_10_dir)
print("Train data: ", train_data.shape)
print("Train filenames: ", train_filenames.shape)
print("Train labels: ", train_labels.shape)
print("Test data: ", test_data.shape)
print("Test filenames: ", test_filenames.shape)
print("Test labels: ", test_labels.shape)
print("Label names: ", label_names.shape)
    # Don't forget that the label_names and filenames are in binary and need conversion if used.
    # display 25 random training images in a 5x5 (num_plot x num_plot) grid
num_plot = 5
f, ax = plt.subplots(num_plot, num_plot)
for m in range(num_plot):
for n in range(num_plot):
idx = np.random.randint(0, train_data.shape[0])
ax[m, n].imshow(train_data[idx])
ax[m, n].get_xaxis().set_visible(False)
ax[m, n].get_yaxis().set_visible(False)
f.subplots_adjust(hspace=0.1)
f.subplots_adjust(wspace=0)
plt.show() | en | 0.669486 | The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random order, but some training batches may contain more images from one class than another. Between them, the training batches contain exactly 5000 images from each class. load the cifar-10 data Return train_data, train_filenames, train_labels, test_data, test_filenames, test_labels # get the meta_data_dict # num_cases_per_batch: 1000 # label_names: ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] # num_vis: :3072 # training data # cifar_train_data_dict # 'batch_label': 'training batch 5 of 5' # 'data': ndarray # 'filenames': list # 'labels': list # test data # cifar_test_data_dict # 'batch_label': 'testing batch 1 of 1' # 'data': ndarray # 'filenames': list # 'labels': list show it works # Don't forget that the label_names and filesnames are in binary and need conversion if used. # display some random training images in a 25x25 grid | 3.208987 | 3 |
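A follow-on sketch showing one way the loader's output could be prepared for training; the [0, 1] scaling and one-hot encoding are common conventions, not part of the original script.

import numpy as np
from load_cifar_10 import load_cifar_10_data

train_data, _, train_labels, test_data, _, test_labels, label_names = \
    load_cifar_10_data('./cifar10-dataset')

x_train = train_data.astype(np.float32) / 255.0   # scale pixels to [0, 1]
x_test = test_data.astype(np.float32) / 255.0
y_train = np.eye(10)[train_labels]                # one-hot encode the 10 classes
y_test = np.eye(10)[test_labels]
print(x_train.shape, y_train.shape)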
volatility3/framework/plugins/mac/lsmod.py | leohearts/volatility3 | 0 | 7977 | # This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
#
"""A module containing a collection of plugins that produce data typically
found in Mac's lsmod command."""
from volatility3.framework import exceptions, renderers, interfaces, contexts
from volatility3.framework.configuration import requirements
from volatility3.framework.interfaces import plugins
from volatility3.framework.objects import utility
from volatility3.framework.renderers import format_hints
class Lsmod(plugins.PluginInterface):
"""Lists loaded kernel modules."""
_required_framework_version = (1, 0, 0)
_version = (1, 0, 0)
@classmethod
def get_requirements(cls):
return [
requirements.TranslationLayerRequirement(name = 'primary',
description = 'Memory layer for the kernel',
architectures = ["Intel32", "Intel64"]),
requirements.SymbolTableRequirement(name = "darwin", description = "Mac kernel")
]
@classmethod
def list_modules(cls, context: interfaces.context.ContextInterface, layer_name: str, darwin_symbols: str):
"""Lists all the modules in the primary layer.
Args:
context: The context to retrieve required elements (layers, symbol tables) from
layer_name: The name of the layer on which to operate
darwin_symbols: The name of the table containing the kernel symbols
Returns:
A list of modules from the `layer_name` layer
"""
kernel = contexts.Module(context, darwin_symbols, layer_name, 0)
kernel_layer = context.layers[layer_name]
kmod_ptr = kernel.object_from_symbol(symbol_name = "kmod")
try:
kmod = kmod_ptr.dereference().cast("kmod_info")
except exceptions.InvalidAddressException:
return []
yield kmod
try:
kmod = kmod.next
except exceptions.InvalidAddressException:
return []
seen = set()
while kmod != 0 and \
kmod not in seen and \
len(seen) < 1024:
kmod_obj = kmod.dereference()
if not kernel_layer.is_valid(kmod_obj.vol.offset, kmod_obj.vol.size):
break
seen.add(kmod)
yield kmod
try:
kmod = kmod.next
except exceptions.InvalidAddressException:
return
def _generator(self):
for module in self.list_modules(self.context, self.config['primary'], self.config['darwin']):
mod_name = utility.array_to_string(module.name)
mod_size = module.size
yield 0, (format_hints.Hex(module.vol.offset), mod_name, mod_size)
def run(self):
return renderers.TreeGrid([("Offset", format_hints.Hex), ("Name", str), ("Size", int)], self._generator())
| en | 0.800769 | # This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0 # which is available at https://www.volatilityfoundation.org/license/vsl-v1.0 # A module containing a collection of plugins that produce data typically found in Mac's lsmod command. Lists loaded kernel modules. Lists all the modules in the primary layer. Args: context: The context to retrieve required elements (layers, symbol tables) from layer_name: The name of the layer on which to operate darwin_symbols: The name of the table containing the kernel symbols Returns: A list of modules from the `layer_name` layer | 2.140901 | 2 |
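A sketch of consuming Lsmod.list_modules directly, mirroring what _generator does; it assumes a Volatility 3 context, kernel memory layer and Mac symbol table have already been set up by the caller.

from volatility3.framework.objects import utility
from volatility3.framework.plugins.mac.lsmod import Lsmod

def print_kernel_modules(context, layer_name: str, darwin_symbols: str) -> None:
    # Same traversal as Lsmod._generator, but printed instead of rendered as a TreeGrid.
    for kmod in Lsmod.list_modules(context, layer_name, darwin_symbols):
        name = utility.array_to_string(kmod.name)
        print(hex(kmod.vol.offset), name, kmod.size)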
instahunter.py | Araekiel/instahunter | 17 | 7978 | <gh_stars>10-100
'''
instahunter.py
Author: Araekiel
Copyright: Copyright © 2019, Araekiel
License: MIT
Version: 1.6.3
'''
import click
import requests
import json
from datetime import datetime
@click.group()
def cli():
"""Made by Araekiel | v1.6.3"""
headers = { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0"}
@click.command()
@click.option('-tag', prompt="Hashtag", help="The hashtag you want to search the posts with")
@click.option('--post-type', default="latest", help="latest: Get latest posts | top: Get top posts")
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def getposts(tag, post_type, create_file, file_type):
"""This command will fetch latest or top public posts with a Hashtag"""
try:
# Creating file if required, creating array json_data to store data if the file type is json
if(create_file == "true"):
if(file_type == "json"):
file = open(tag+"_posts.json", "w+")
json_data = []
else:
file = open(tag+"_posts.txt", "w+", encoding="utf-8")
counter = 0
api_url = "https://www.instagram.com/explore/tags/%s/?__a=1" % tag
req = requests.get(url=api_url, headers=headers)
data = req.json()
if(post_type == "top"):
edges = data["graphql"]["hashtag"]["edge_hashtag_to_top_posts"]["edges"]
else:
edges = data["graphql"]["hashtag"]["edge_hashtag_to_media"]["edges"]
# Looping through 'edges' in the data acquired
for edge in edges:
counter = counter + 1
# Collecting necessary data from each edge
try:
caption = edge["node"]["edge_media_to_caption"]["edges"][0]["node"]["text"]
except:
caption = "No Caption"
scraped_data = {
"id": counter,
"post_id": edge["node"]["id"],
"shortcode": edge["node"]["shortcode"],
"owner_id": edge["node"]["owner"]["id"],
"display_url": edge["node"]["display_url"],
"caption": caption,
"time": str(datetime.fromtimestamp(
edge["node"]["taken_at_timestamp"])),
"n_likes": edge["node"]["edge_liked_by"]["count"],
"n_comments": edge["node"]["edge_media_to_comment"]["count"],
"is_video": edge["node"]["is_video"]
}
if(create_file == "true"):
# If the file type is json then appending the data to json_data array instead of writing it to the file right away
if(file_type == "json"):
json_data.append(scraped_data)
else:
file.write("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nOwner ID: %s \nDisplay URL: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nNumber of comments: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
str(counter), str(scraped_data["post_id"]), str(scraped_data["shortcode"]), str(scraped_data["owner_id"]), str(scraped_data["display_url"]), str(scraped_data["caption"]), str(scraped_data["time"]), str(scraped_data["n_likes"]), str(scraped_data["n_comments"]), str(scraped_data["is_video"])))
else:
click.echo("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nOwner ID: %s \nDisplay URL: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nNumber of comments: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
counter, scraped_data["post_id"], scraped_data["shortcode"], scraped_data["owner_id"], scraped_data["display_url"], scraped_data["caption"], scraped_data["time"], scraped_data["n_likes"], scraped_data["n_comments"], scraped_data["is_video"]))
if(create_file == "true"):
# Closing the file and dumping the data before closing if the file type is json
if(file_type == "json"):
json.dump(json_data, file)
click.echo("File Created, name: '%s_posts.json'" % tag)
else:
click.echo("File Created, name: '%s_posts.txt" % tag)
file.close()
else:
click.echo("Done!")
except:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
@click.command()
@click.option('-username', prompt="Username", help="Username you want to search the user with")
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def getuser(username, create_file, file_type):
"""This command will fetch user data with a Username"""
api_url = "https://www.instagram.com/%s/?__a=1" % username
try:
req = requests.get(url=api_url, headers=headers)
data = req.json()
# Collecting necessary data
user = data["graphql"]["user"]
if(user["highlight_reel_count"] > 0):
has_highlights = True
else:
has_highlights = False
scraped_data = {
"user_id": user["id"],
"username": user["username"],
"full_name": user["full_name"],
"profile_pic_url": user["profile_pic_url_hd"],
"bio": user["biography"],
"n_uploads": user["edge_owner_to_timeline_media"]["count"],
"n_followers": user["edge_followed_by"]["count"],
"n_following": user["edge_follow"]["count"],
"is_private": user["is_private"],
"is_verified": user["is_verified"],
"external_url": user["external_url"],
"igtv_videos": user["edge_felix_video_timeline"]["count"],
"has_highlights": has_highlights
}
if(create_file == "true"):
if(file_type == "json"):
file = open(username+"_user.json", "w+")
json.dump(scraped_data, file)
file.close()
click.echo("File Created, name: '%s_user.json'" % str(username))
else:
file = open(username+"_user.txt", "w+", encoding="utf-8")
file.write("User ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nBio: %s \nUploads: %s \nFollowers: %s \nFollowing: %s \nPrivate ID: %s \nVerified ID: %s \nExternal URL: %s \nIGTV videos: %s \nHas highlights: %s" % (
str(scraped_data["user_id"]), scraped_data["username"], scraped_data["full_name"], scraped_data["profile_pic_url"], scraped_data["bio"], str(scraped_data["n_uploads"]), str(scraped_data["n_followers"]), str(scraped_data["n_following"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"]), scraped_data["external_url"], str(scraped_data["igtv_videos"]), str(scraped_data["has_highlights"])))
file.close()
click.echo("File Created, name: '%s_user.txt'" % str(username))
else:
click.echo("User ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nBio: %s \nUploads: %s \nFollowers: %s \nFollowing: %s \nPrivate ID: %s \nVerified ID: %s \nExternal URL: %s \nIGTV videos: %s \nHas highlights: %s" % (
str(scraped_data["user_id"]), scraped_data["username"], scraped_data["full_name"], scraped_data["profile_pic_url"], scraped_data["bio"], str(scraped_data["n_uploads"]), str(scraped_data["n_followers"]), str(scraped_data["n_following"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"]), scraped_data["external_url"], str(scraped_data["igtv_videos"]), str(scraped_data["has_highlights"])))
click.echo('Done!')
except:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
@click.command()
@click.option('-username', prompt="Username", help='The username of the user you want to search the user id of')
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def getuserposts(username, create_file, file_type):
"""This command will fetch recent posts of a user with a Username"""
try:
# Creating file if required, creating array json_data to store data if the file type is json
if(create_file == "true"):
if(file_type == "json"):
file = open(username+"_posts.json", "w+")
json_data = []
else:
file = open(username+"_posts.txt", "w+", encoding="utf-8")
counter = 0
api_url = "https://www.instagram.com/%s/?__a=1" % username
req = requests.get(url=api_url, headers=headers)
data = req.json()
posts = data["graphql"]["user"]["edge_owner_to_timeline_media"]["edges"]
# Looping through posts
for post in posts:
counter = counter + 1
node = post["node"]
# Collecting necessary data
try:
caption = node["edge_media_to_caption"]["edges"][0]["node"]["text"]
except:
caption = ""
try:
location = node["location"]["name"]
except:
location = "No Location"
scraped_data = {
"id": counter,
"post_id": node["id"],
"shortcode": node["shortcode"],
"display_url": node["display_url"],
"height": node["dimensions"]["height"],
"width": node["dimensions"]["width"],
"caption": caption,
"time": str(datetime.fromtimestamp(node["taken_at_timestamp"])),
"n_likes": node["edge_liked_by"]["count"],
"comments_disabled": node["comments_disabled"],
"n_comments": node["edge_media_to_comment"]["count"],
"location": location,
"is_video": node["is_video"]
}
if(create_file == "true"):
if(file_type == "json"):
# If the file type is json then appending the data to json_data array instead of writing it to the file right away
json_data.append(scraped_data)
else:
file.write("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nDisplay URL: %s \nImage Height: %s \nImage Width: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nComments Disabled: %s \nNumber of comments: %s \nLocation: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
str(counter), str(scraped_data["post_id"]), str(scraped_data["shortcode"]), str(scraped_data["display_url"]), str(scraped_data["height"]), str(scraped_data["width"]), str(scraped_data["caption"]), str(scraped_data["time"]), str(scraped_data["n_likes"]), str(scraped_data["comments_disabled"]), str(scraped_data["n_comments"]), str(scraped_data["location"]), str(scraped_data["is_video"])))
else:
click.echo("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nDisplay URL: %s \nImage Height: %s \nImage Width: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nComments Disabled: %s \nNumber of comments: %s \nLocation: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
str(counter), str(scraped_data["post_id"]), str(scraped_data["shortcode"]), str(scraped_data["display_url"]), str(scraped_data["height"]), str(scraped_data["width"]), str(scraped_data["caption"]), str(scraped_data["time"]), str(scraped_data["n_likes"]), str(scraped_data["comments_disabled"]), str(scraped_data["n_comments"]), str(scraped_data["location"]), str(scraped_data["is_video"])))
if(create_file == "true"):
# Closing the file and dumping the data before closing if the file type is json
if(file_type == "json"):
json.dump(json_data, file)
click.echo("File Created, name: '%s_posts.json'" % username)
else:
click.echo("File Created, name: '%s_posts.txt" % username)
file.close()
else:
click.echo("Done!")
except:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
@click.command()
@click.option('-query', prompt="Query", help="The term you want to search users with")
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def search(query, create_file, file_type):
"""This command searches for users on instagram"""
try:
if(create_file == "true"):
if(file_type == "json"):
file = open(query+"_users.json", "w+")
json_data = []
else:
file = open(query+"_users.text",
"w+", encoding="utf-8")
counter = 0
api_url = "https://www.instagram.com/web/search/topsearch/?query=%s" % query
req = requests.get(api_url, headers=headers)
data = req.json()
users = data["users"]
for user in users:
counter = counter + 1
scraped_data = {
"id": counter,
"user_id": user["user"]["pk"],
"username": user["user"]["username"],
"full_name": user["user"]["full_name"],
"profile_pic_url": user["user"]["profile_pic_url"],
"is_private": user["user"]["is_private"],
"is_verified": user["user"]["is_verified"],
}
if(create_file == "true"):
# If the file type is json then appending the data to json_data array instead of writing it to the file right away
if(file_type == "json"):
json_data.append(scraped_data)
else:
file.write("###############################\nID: %s \nUser ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nPrivate ID: %s \nVerified ID: %s \n###############################\n\n\n\n\n" % (str(counter), str(
scraped_data["user_id"]), str(scraped_data["username"]), str(scraped_data["full_name"]), str(scraped_data["profile_pic_url"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"])))
else:
click.echo("###############################\nID: %s \nUser ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nPrivate ID: %s \nVerified ID: %s \n###############################\n\n\n\n\n" % (str(counter), str(
scraped_data["user_id"]), str(scraped_data["username"]), str(scraped_data["full_name"]), str(scraped_data["profile_pic_url"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"])))
if(create_file == "true"):
# Closing the file and dumping the data before closing if the file type is json
if(file_type == "json"):
json.dump(json_data, file)
click.echo("File Created, name: '%s_users.json'" %
query)
else:
click.echo("File Created, name: '%s_users.txt'" %
query)
file.close()
else:
click.echo("Done!")
except:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
cli.add_command(getposts)
cli.add_command(getuser)
cli.add_command(getuserposts)
cli.add_command(search)
if __name__ == "__main__":
cli()
| '''
instahunter.py
Author: Araekiel
Copyright: Copyright © 2019, Araekiel
License: MIT
Version: 1.6.3
'''
import click
import requests
import json
from datetime import datetime
@click.group()
def cli():
"""Made by Araekiel | v1.6.3"""
headers = { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0"}
@click.command()
@click.option('-tag', prompt="Hashtag", help="The hashtag you want to search the posts with")
@click.option('--post-type', default="latest", help="latest: Get latest posts | top: Get top posts")
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def getposts(tag, post_type, create_file, file_type):
"""This command will fetch latest or top public posts with a Hashtag"""
try:
# Creating file if required, creating array json_data to store data if the file type is json
if(create_file == "true"):
if(file_type == "json"):
file = open(tag+"_posts.json", "w+")
json_data = []
else:
file = open(tag+"_posts.txt", "w+", encoding="utf-8")
counter = 0
api_url = "https://www.instagram.com/explore/tags/%s/?__a=1" % tag
req = requests.get(url=api_url, headers=headers)
data = req.json()
if(post_type == "top"):
edges = data["graphql"]["hashtag"]["edge_hashtag_to_top_posts"]["edges"]
else:
edges = data["graphql"]["hashtag"]["edge_hashtag_to_media"]["edges"]
# Looping through 'edges' in the data acquired
for edge in edges:
counter = counter + 1
# Collecting necessary data from each edge
try:
caption = edge["node"]["edge_media_to_caption"]["edges"][0]["node"]["text"]
except:
caption = "No Caption"
scraped_data = {
"id": counter,
"post_id": edge["node"]["id"],
"shortcode": edge["node"]["shortcode"],
"owner_id": edge["node"]["owner"]["id"],
"display_url": edge["node"]["display_url"],
"caption": caption,
"time": str(datetime.fromtimestamp(
edge["node"]["taken_at_timestamp"])),
"n_likes": edge["node"]["edge_liked_by"]["count"],
"n_comments": edge["node"]["edge_media_to_comment"]["count"],
"is_video": edge["node"]["is_video"]
}
if(create_file == "true"):
# If the file type is json then appending the data to json_data array instead of writing it to the file right away
if(file_type == "json"):
json_data.append(scraped_data)
else:
file.write("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nOwner ID: %s \nDisplay URL: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nNumber of comments: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
str(counter), str(scraped_data["post_id"]), str(scraped_data["shortcode"]), str(scraped_data["owner_id"]), str(scraped_data["display_url"]), str(scraped_data["caption"]), str(scraped_data["time"]), str(scraped_data["n_likes"]), str(scraped_data["n_comments"]), str(scraped_data["is_video"])))
else:
click.echo("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nOwner ID: %s \nDisplay URL: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nNumber of comments: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
counter, scraped_data["post_id"], scraped_data["shortcode"], scraped_data["owner_id"], scraped_data["display_url"], scraped_data["caption"], scraped_data["time"], scraped_data["n_likes"], scraped_data["n_comments"], scraped_data["is_video"]))
if(create_file == "true"):
# Closing the file and dumping the data before closing if the file type is json
if(file_type == "json"):
json.dump(json_data, file)
click.echo("File Created, name: '%s_posts.json'" % tag)
else:
click.echo("File Created, name: '%s_posts.txt" % tag)
file.close()
else:
click.echo("Done!")
except:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
@click.command()
@click.option('-username', prompt="Username", help="Username you want to search the user with")
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def getuser(username, create_file, file_type):
"""This command will fetch user data with a Username"""
api_url = "https://www.instagram.com/%s/?__a=1" % username
try:
req = requests.get(url=api_url, headers=headers)
data = req.json()
# Collecting necessary data
user = data["graphql"]["user"]
if(user["highlight_reel_count"] > 0):
has_highlights = True
else:
has_highlights = False
scraped_data = {
"user_id": user["id"],
"username": user["username"],
"full_name": user["full_name"],
"profile_pic_url": user["profile_pic_url_hd"],
"bio": user["biography"],
"n_uploads": user["edge_owner_to_timeline_media"]["count"],
"n_followers": user["edge_followed_by"]["count"],
"n_following": user["edge_follow"]["count"],
"is_private": user["is_private"],
"is_verified": user["is_verified"],
"external_url": user["external_url"],
"igtv_videos": user["edge_felix_video_timeline"]["count"],
"has_highlights": has_highlights
}
if(create_file == "true"):
if(file_type == "json"):
file = open(username+"_user.json", "w+")
json.dump(scraped_data, file)
file.close()
click.echo("File Created, name: '%s_user.json'" % str(username))
else:
file = open(username+"_user.txt", "w+", encoding="utf-8")
file.write("User ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nBio: %s \nUploads: %s \nFollowers: %s \nFollowing: %s \nPrivate ID: %s \nVerified ID: %s \nExternal URL: %s \nIGTV videos: %s \nHas highlights: %s" % (
str(scraped_data["user_id"]), scraped_data["username"], scraped_data["full_name"], scraped_data["profile_pic_url"], scraped_data["bio"], str(scraped_data["n_uploads"]), str(scraped_data["n_followers"]), str(scraped_data["n_following"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"]), scraped_data["external_url"], str(scraped_data["igtv_videos"]), str(scraped_data["has_highlights"])))
file.close()
click.echo("File Created, name: '%s_user.txt'" % str(username))
else:
click.echo("User ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nBio: %s \nUploads: %s \nFollowers: %s \nFollowing: %s \nPrivate ID: %s \nVerified ID: %s \nExternal URL: %s \nIGTV videos: %s \nHas highlights: %s" % (
str(scraped_data["user_id"]), scraped_data["username"], scraped_data["full_name"], scraped_data["profile_pic_url"], scraped_data["bio"], str(scraped_data["n_uploads"]), str(scraped_data["n_followers"]), str(scraped_data["n_following"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"]), scraped_data["external_url"], str(scraped_data["igtv_videos"]), str(scraped_data["has_highlights"])))
click.echo('Done!')
except:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
@click.command()
@click.option('-username', prompt="Username", help='The username of the user you want to search the user id of')
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def getuserposts(username, create_file, file_type):
"""This command will fetch recent posts of a user with a Username"""
try:
# Creating file if required, creating array json_data to store data if the file type is json
if(create_file == "true"):
if(file_type == "json"):
file = open(username+"_posts.json", "w+")
json_data = []
else:
file = open(username+"_posts.txt", "w+", encoding="utf-8")
counter = 0
api_url = "https://www.instagram.com/%s/?__a=1" % username
req = requests.get(url=api_url, headers=headers)
data = req.json()
posts = data["graphql"]["user"]["edge_owner_to_timeline_media"]["edges"]
# Looping through posts
for post in posts:
counter = counter + 1
node = post["node"]
# Collecting necessary data
try:
caption = node["edge_media_to_caption"]["edges"][0]["node"]["text"]
except:
caption = ""
try:
location = node["location"]["name"]
except:
location = "No Location"
scraped_data = {
"id": counter,
"post_id": node["id"],
"shortcode": node["shortcode"],
"display_url": node["display_url"],
"height": node["dimensions"]["height"],
"width": node["dimensions"]["width"],
"caption": caption,
"time": str(datetime.fromtimestamp(node["taken_at_timestamp"])),
"n_likes": node["edge_liked_by"]["count"],
"comments_disabled": node["comments_disabled"],
"n_comments": node["edge_media_to_comment"]["count"],
"location": location,
"is_video": node["is_video"]
}
if(create_file == "true"):
if(file_type == "json"):
# If the file type is json then appending the data to json_data array instead of writing it to the file right away
json_data.append(scraped_data)
else:
file.write("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nDisplay URL: %s \nImage Height: %s \nImage Width: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nComments Disabled: %s \nNumber of comments: %s \nLocation: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
str(counter), str(scraped_data["post_id"]), str(scraped_data["shortcode"]), str(scraped_data["display_url"]), str(scraped_data["height"]), str(scraped_data["width"]), str(scraped_data["caption"]), str(scraped_data["time"]), str(scraped_data["n_likes"]), str(scraped_data["comments_disabled"]), str(scraped_data["n_comments"]), str(scraped_data["location"]), str(scraped_data["is_video"])))
else:
click.echo("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nDisplay URL: %s \nImage Height: %s \nImage Width: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nComments Disabled: %s \nNumber of comments: %s \nLocation: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
str(counter), str(scraped_data["post_id"]), str(scraped_data["shortcode"]), str(scraped_data["display_url"]), str(scraped_data["height"]), str(scraped_data["width"]), str(scraped_data["caption"]), str(scraped_data["time"]), str(scraped_data["n_likes"]), str(scraped_data["comments_disabled"]), str(scraped_data["n_comments"]), str(scraped_data["location"]), str(scraped_data["is_video"])))
if(create_file == "true"):
# Closing the file and dumping the data before closing if the file type is json
if(file_type == "json"):
json.dump(json_data, file)
click.echo("File Created, name: '%s_posts.json'" % username)
else:
click.echo("File Created, name: '%s_posts.txt" % username)
file.close()
else:
click.echo("Done!")
except:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
@click.command()
@click.option('-query', prompt="Query", help="The term you want to search users with")
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def search(query, create_file, file_type):
"""This command searches for users on instagram"""
try:
if(create_file == "true"):
if(file_type == "json"):
file = open(query+"_users.json", "w+")
json_data = []
else:
file = open(query+"_users.text",
"w+", encoding="utf-8")
counter = 0
api_url = "https://www.instagram.com/web/search/topsearch/?query=%s" % query
req = requests.get(api_url, headers=headers)
data = req.json()
users = data["users"]
for user in users:
counter = counter + 1
scraped_data = {
"id": counter,
"user_id": user["user"]["pk"],
"username": user["user"]["username"],
"full_name": user["user"]["full_name"],
"profile_pic_url": user["user"]["profile_pic_url"],
"is_private": user["user"]["is_private"],
"is_verified": user["user"]["is_verified"],
}
if(create_file == "true"):
# If the file type is json then appending the data to json_data array instead of writing it to the file right away
if(file_type == "json"):
json_data.append(scraped_data)
else:
file.write("###############################\nID: %s \nUser ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nPrivate ID: %s \nVerified ID: %s \n###############################\n\n\n\n\n" % (str(counter), str(
scraped_data["user_id"]), str(scraped_data["username"]), str(scraped_data["full_name"]), str(scraped_data["profile_pic_url"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"])))
else:
click.echo("###############################\nID: %s \nUser ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nPrivate ID: %s \nVerified ID: %s \n###############################\n\n\n\n\n" % (str(counter), str(
scraped_data["user_id"]), str(scraped_data["username"]), str(scraped_data["full_name"]), str(scraped_data["profile_pic_url"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"])))
if(create_file == "true"):
# Closing the file and dumping the data before closing if the file type is json
if(file_type == "json"):
json.dump(json_data, file)
click.echo("File Created, name: '%s_users.json'" %
query)
else:
click.echo("File Created, name: '%s_users.txt'" %
query)
file.close()
else:
click.echo("Done!")
except:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
cli.add_command(getposts)
cli.add_command(getuser)
cli.add_command(getuserposts)
cli.add_command(search)
if __name__ == "__main__":
cli() | en | 0.460898 | instahunter.py Author: Araekiel Copyright: Copyright © 2019, Araekiel License: MIT Version: 1.6.3 Made by Araekiel | v1.6.3 This command will fetch latest or top public posts with a Hashtag # Creating file if required, creating array json_data to store data if the file type is json # Looping through 'edges' in the data acquired # Collecting necessary data from each edge # If the file type is json then appending the data to json_data array instead of writing it to the file right away ##############################\nID: %s \nPost ID: %s \nShortcode: %s \nOwner ID: %s \nDisplay URL: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nNumber of comments: %s \nIs Video: %s \n###############################\n\n\n\n\n" % ( ##############################\nID: %s \nPost ID: %s \nShortcode: %s \nOwner ID: %s \nDisplay URL: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nNumber of comments: %s \nIs Video: %s \n###############################\n\n\n\n\n" % ( # Closing the file and dumping the data before closing if the file type is json This command will fetch user data with a Username # Collecting necessary data This command will fetch recent posts of a user with a Username # Creating file if required, creating array json_data to store data if the file type is json # Looping through posts # Collecting necessary data # If the file type is json then appending the data to json_data array instead of writing it to the file right away ##############################\nID: %s \nPost ID: %s \nShortcode: %s \nDisplay URL: %s \nImage Height: %s \nImage Width: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nComments Disabled: %s \nNumber of comments: %s \nLocation: %s \nIs Video: %s \n###############################\n\n\n\n\n" % ( ##############################\nID: %s \nPost ID: %s \nShortcode: %s \nDisplay URL: %s \nImage Height: %s \nImage Width: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nComments Disabled: %s \nNumber of comments: %s \nLocation: %s \nIs Video: %s \n###############################\n\n\n\n\n" % ( # Closing the file and dumping the data before closing if the file type is json This command searches for users on instagram # If the file type is json then appending the data to json_data array instead of writing it to the file right away ##############################\nID: %s \nUser ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nPrivate ID: %s \nVerified ID: %s \n###############################\n\n\n\n\n" % (str(counter), str( ##############################\nID: %s \nUser ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nPrivate ID: %s \nVerified ID: %s \n###############################\n\n\n\n\n" % (str(counter), str( # Closing the file and dumping the data before closing if the file type is json | 2.770931 | 3 |
pyscf/prop/esr/uks.py | azag0/pyscf | 2 | 7979 | <reponame>azag0/pyscf<gh_stars>1-10
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
Non-relativistic unrestricted Kohn-Sham electron spin-rotation coupling
(In testing)
Refs:
J. Phys. Chem. A. 114, 9246, 2010
Mole. Phys. 9, 6, 585, 1964
'''
from functools import reduce
import numpy, sys
from pyscf import lib
from pyscf.lib import logger
from pyscf.dft import numint
from pyscf.prop.nmr import uks as uks_nmr
from pyscf.prop.esr import uhf as uhf_esr
from pyscf.prop.esr.uhf import _write, align
from pyscf.data import nist
from pyscf.grad import rks as rks_grad
# Note mo10 is the imaginary part of MO^1
def para(obj, mo10, mo_coeff, mo_occ, qed_fac=1):
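    # One-electron paramagnetic (SOC/OZ) term: contract the first-order density response built from mo10 with the one-electron SOC integrals, scaled by 1/(effective spin * Bohr magneton).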
mol = obj.mol
effspin = mol.spin * .5
muB = .5 # Bohr magneton
#qed_fac = (nist.G_ELECTRON - 1)
orboa = mo_coeff[0][:,mo_occ[0]>0]
orbob = mo_coeff[1][:,mo_occ[1]>0]
dm0a = numpy.dot(orboa, orboa.T)
dm0b = numpy.dot(orbob, orbob.T)
dm10a = [reduce(numpy.dot, (mo_coeff[0], x, orboa.T)) for x in mo10[0]]
dm10b = [reduce(numpy.dot, (mo_coeff[1], x, orbob.T)) for x in mo10[1]]
dm10a = numpy.asarray([x-x.T for x in dm10a])
dm10b = numpy.asarray([x-x.T for x in dm10b])
hso1e = uhf_esr.make_h01_soc1e(obj, mo_coeff, mo_occ, qed_fac)
para1e =-numpy.einsum('xji,yij->xy', dm10a, hso1e)
para1e+= numpy.einsum('xji,yij->xy', dm10b, hso1e)
para1e *= 1./effspin / muB
#_write(obj, align(para1e)[0], 'SOC(1e)/OZ')
if obj.para_soc2e:
raise NotImplementedError('dia_soc2e = %s' % obj.dia_soc2e)
para = para1e
return para
# Treat Vxc as one-particle operator Vnuc
def get_vxc_soc(ni, mol, grids, xc_code, dms, max_memory=2000, verbose=None):
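    # Numerically integrate the SOC-type contribution of the XC potential on the DFT grid; returns a (2, 3, nao, nao) array over (spin, Cartesian component). Only LDA and GGA are handled.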
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi=1)
ngrids = len(grids.weights)
BLKSIZE = numint.BLKSIZE
blksize = min(int(max_memory/12*1e6/8/nao/BLKSIZE)*BLKSIZE, ngrids)
shls_slice = (0, mol.nbas)
ao_loc = mol.ao_loc_nr()
vmat = numpy.zeros((2,3,nao,nao))
if xctype == 'LDA':
buf = numpy.empty((4,blksize,nao))
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,
blksize=blksize, buf=buf):
rho_a = make_rho(0, ao[0], mask, 'LDA')
rho_b = make_rho(1, ao[0], mask, 'LDA')
vxc = ni.eval_xc(xc_code, (rho_a, rho_b), 1, deriv=1)[1]
vrho = vxc[0]
aow = numpy.einsum('xpi,p->xpi', ao[1:], weight*vrho[:,0])
_cross3x3_(vmat[0], mol, aow, ao[1:], mask, shls_slice, ao_loc)
aow = numpy.einsum('xpi,p->xpi', ao[1:], weight*vrho[:,1])
_cross3x3_(vmat[1], mol, aow, ao[1:], mask, shls_slice, ao_loc)
rho = vxc = vrho = aow = None
elif xctype == 'GGA':
buf = numpy.empty((10,blksize,nao))
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,
blksize=blksize, buf=buf):
rho_a = make_rho(0, ao, mask, 'GGA')
rho_b = make_rho(1, ao, mask, 'GGA')
vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, deriv=1)[1]
wva, wvb = numint._uks_gga_wv0((rho_a, rho_b), vxc, weight)
ip_ao = ao[1:4]
ipip_ao = ao[4:]
aow = rks_grad._make_dR_dao_w(ao, wva)
_cross3x3_(vmat[0], mol, aow, ip_ao, mask, shls_slice, ao_loc)
aow = rks_grad._make_dR_dao_w(ao, wvb)
_cross3x3_(vmat[1], mol, aow, ip_ao, mask, shls_slice, ao_loc)
rho = vxc = vrho = vsigma = wv = aow = None
vmat = vmat - vmat.transpose(0,1,3,2)
else:
raise NotImplementedError('meta-GGA')
return vmat
def _cross3x3_(out, mol, ao1, ao2, mask, shls_slice, ao_loc):
out[0] += numint._dot_ao_ao(mol, ao1[1], ao2[2], mask, shls_slice, ao_loc)
out[0] -= numint._dot_ao_ao(mol, ao1[2], ao2[1], mask, shls_slice, ao_loc)
out[1] += numint._dot_ao_ao(mol, ao1[2], ao2[0], mask, shls_slice, ao_loc)
out[1] -= numint._dot_ao_ao(mol, ao1[0], ao2[2], mask, shls_slice, ao_loc)
out[2] += numint._dot_ao_ao(mol, ao1[0], ao2[1], mask, shls_slice, ao_loc)
out[2] -= numint._dot_ao_ao(mol, ao1[1], ao2[0], mask, shls_slice, ao_loc)
return out
# Jia, start to work here
class ESR(uhf_esr.ESR):
'''dE = B dot gtensor dot s'''
def __init__(self, scf_method):
uhf_esr.ESR.__init__(self, scf_method)
self.dia_soc2e = False
self.para_soc2e = False
def para(self, mo10=None, mo_coeff=None, mo_occ=None):
if mo_coeff is None: mo_coeff = self._scf.mo_coeff
if mo_occ is None: mo_occ = self._scf.mo_occ
if mo10 is None:
self.mo10, self.mo_e10 = self.solve_mo1()
mo10 = self.mo10
return para(self, mo10, mo_coeff, mo_occ)
#make_para_soc2e = make_para_soc2e
get_fock = uks_nmr.get_fock
if __name__ == '__main__':
from pyscf import gto, scf
mol = gto.M(atom='H 0 0.1 0; H 0 0 1.',
basis='ccpvdz', spin=1, charge=-1, verbose=3)
mf = scf.UKS(mol).set(xc='bp86').run()
esr_obj = ESR(mf)
esr_obj.gauge_orig = (0,0,0)
esr_obj.para_soc2e = False
esr_obj.so_eff_charge = True
print(esr_obj.kernel())
mol = gto.M(atom='''
H 0 0 1
H 1.2 0 1
H .1 1.1 0.3
H .8 .7 .6
''',
basis='ccpvdz', spin=1, charge=1, verbose=3)
mf = scf.UKS(mol).set(xc='bp86').run()
    gobj = ESR(mf)
#print(gobj.kernel())
gobj.para_soc2e = 'SSO'
gobj.dia_soc2e = None
gobj.so_eff_charge = False
nao, nmo = mf.mo_coeff[0].shape
nelec = mol.nelec
numpy.random.seed(1)
mo10 =[numpy.random.random((3,nmo,nelec[0])),
numpy.random.random((3,nmo,nelec[1]))]
print(lib.finger(para(gobj, mo10, mf.mo_coeff, mf.mo_occ)) - -2.1813250579863279e-05)
numpy.random.seed(1)
dm0 = numpy.random.random((2,nao,nao))
dm0 = dm0 + dm0.transpose(0,2,1)
dm10 = numpy.random.random((2,3,nao,nao))
dm10 = dm10 - dm10.transpose(0,1,3,2)
print(lib.finger(make_para_soc2e(gobj, dm0, dm10)) - 0.0036073897889263721)
| #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
Non-relativistic unrestricted Kohn-Sham electron spin-rotation coupling
(In testing)
Refs:
J. Phys. Chem. A. 114, 9246, 2010
Mole. Phys. 9, 6, 585, 1964
'''
from functools import reduce
import numpy, sys
from pyscf import lib
from pyscf.lib import logger
from pyscf.dft import numint
from pyscf.prop.nmr import uks as uks_nmr
from pyscf.prop.esr import uhf as uhf_esr
from pyscf.prop.esr.uhf import _write, align
from pyscf.data import nist
from pyscf.grad import rks as rks_grad
# Note mo10 is the imaginary part of MO^1
def para(obj, mo10, mo_coeff, mo_occ, qed_fac=1):
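    # One-electron paramagnetic (SOC/OZ) term: contract the first-order density response built from mo10 with the one-electron SOC integrals, scaled by 1/(effective spin * Bohr magneton).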
mol = obj.mol
effspin = mol.spin * .5
muB = .5 # Bohr magneton
#qed_fac = (nist.G_ELECTRON - 1)
orboa = mo_coeff[0][:,mo_occ[0]>0]
orbob = mo_coeff[1][:,mo_occ[1]>0]
dm0a = numpy.dot(orboa, orboa.T)
dm0b = numpy.dot(orbob, orbob.T)
dm10a = [reduce(numpy.dot, (mo_coeff[0], x, orboa.T)) for x in mo10[0]]
dm10b = [reduce(numpy.dot, (mo_coeff[1], x, orbob.T)) for x in mo10[1]]
dm10a = numpy.asarray([x-x.T for x in dm10a])
dm10b = numpy.asarray([x-x.T for x in dm10b])
hso1e = uhf_esr.make_h01_soc1e(obj, mo_coeff, mo_occ, qed_fac)
para1e =-numpy.einsum('xji,yij->xy', dm10a, hso1e)
para1e+= numpy.einsum('xji,yij->xy', dm10b, hso1e)
para1e *= 1./effspin / muB
#_write(obj, align(para1e)[0], 'SOC(1e)/OZ')
if obj.para_soc2e:
raise NotImplementedError('dia_soc2e = %s' % obj.dia_soc2e)
para = para1e
return para
# Treat Vxc as one-particle operator Vnuc
def get_vxc_soc(ni, mol, grids, xc_code, dms, max_memory=2000, verbose=None):
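    # Numerically integrate the SOC-type contribution of the XC potential on the DFT grid; returns a (2, 3, nao, nao) array over (spin, Cartesian component). Only LDA and GGA are handled.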
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi=1)
ngrids = len(grids.weights)
BLKSIZE = numint.BLKSIZE
blksize = min(int(max_memory/12*1e6/8/nao/BLKSIZE)*BLKSIZE, ngrids)
shls_slice = (0, mol.nbas)
ao_loc = mol.ao_loc_nr()
vmat = numpy.zeros((2,3,nao,nao))
if xctype == 'LDA':
buf = numpy.empty((4,blksize,nao))
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,
blksize=blksize, buf=buf):
rho_a = make_rho(0, ao[0], mask, 'LDA')
rho_b = make_rho(1, ao[0], mask, 'LDA')
vxc = ni.eval_xc(xc_code, (rho_a, rho_b), 1, deriv=1)[1]
vrho = vxc[0]
aow = numpy.einsum('xpi,p->xpi', ao[1:], weight*vrho[:,0])
_cross3x3_(vmat[0], mol, aow, ao[1:], mask, shls_slice, ao_loc)
aow = numpy.einsum('xpi,p->xpi', ao[1:], weight*vrho[:,1])
_cross3x3_(vmat[1], mol, aow, ao[1:], mask, shls_slice, ao_loc)
rho = vxc = vrho = aow = None
elif xctype == 'GGA':
buf = numpy.empty((10,blksize,nao))
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,
blksize=blksize, buf=buf):
rho_a = make_rho(0, ao, mask, 'GGA')
rho_b = make_rho(1, ao, mask, 'GGA')
vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, deriv=1)[1]
wva, wvb = numint._uks_gga_wv0((rho_a, rho_b), vxc, weight)
ip_ao = ao[1:4]
ipip_ao = ao[4:]
aow = rks_grad._make_dR_dao_w(ao, wva)
_cross3x3_(vmat[0], mol, aow, ip_ao, mask, shls_slice, ao_loc)
aow = rks_grad._make_dR_dao_w(ao, wvb)
_cross3x3_(vmat[1], mol, aow, ip_ao, mask, shls_slice, ao_loc)
rho = vxc = vrho = vsigma = wv = aow = None
vmat = vmat - vmat.transpose(0,1,3,2)
else:
raise NotImplementedError('meta-GGA')
return vmat
def _cross3x3_(out, mol, ao1, ao2, mask, shls_slice, ao_loc):
out[0] += numint._dot_ao_ao(mol, ao1[1], ao2[2], mask, shls_slice, ao_loc)
out[0] -= numint._dot_ao_ao(mol, ao1[2], ao2[1], mask, shls_slice, ao_loc)
out[1] += numint._dot_ao_ao(mol, ao1[2], ao2[0], mask, shls_slice, ao_loc)
out[1] -= numint._dot_ao_ao(mol, ao1[0], ao2[2], mask, shls_slice, ao_loc)
out[2] += numint._dot_ao_ao(mol, ao1[0], ao2[1], mask, shls_slice, ao_loc)
out[2] -= numint._dot_ao_ao(mol, ao1[1], ao2[0], mask, shls_slice, ao_loc)
return out
# Jia, start to work here
class ESR(uhf_esr.ESR):
'''dE = B dot gtensor dot s'''
def __init__(self, scf_method):
uhf_esr.ESR.__init__(self, scf_method)
self.dia_soc2e = False
self.para_soc2e = False
def para(self, mo10=None, mo_coeff=None, mo_occ=None):
if mo_coeff is None: mo_coeff = self._scf.mo_coeff
if mo_occ is None: mo_occ = self._scf.mo_occ
if mo10 is None:
self.mo10, self.mo_e10 = self.solve_mo1()
mo10 = self.mo10
return para(self, mo10, mo_coeff, mo_occ)
#make_para_soc2e = make_para_soc2e
get_fock = uks_nmr.get_fock
if __name__ == '__main__':
from pyscf import gto, scf
mol = gto.M(atom='H 0 0.1 0; H 0 0 1.',
basis='ccpvdz', spin=1, charge=-1, verbose=3)
mf = scf.UKS(mol).set(xc='bp86').run()
esr_obj = ESR(mf)
esr_obj.gauge_orig = (0,0,0)
esr_obj.para_soc2e = False
esr_obj.so_eff_charge = True
print(esr_obj.kernel())
mol = gto.M(atom='''
H 0 0 1
H 1.2 0 1
H .1 1.1 0.3
H .8 .7 .6
''',
basis='ccpvdz', spin=1, charge=1, verbose=3)
mf = scf.UKS(mol).set(xc='bp86').run()
    gobj = ESR(mf)
#print(gobj.kernel())
gobj.para_soc2e = 'SSO'
gobj.dia_soc2e = None
gobj.so_eff_charge = False
nao, nmo = mf.mo_coeff[0].shape
nelec = mol.nelec
numpy.random.seed(1)
mo10 =[numpy.random.random((3,nmo,nelec[0])),
numpy.random.random((3,nmo,nelec[1]))]
print(lib.finger(para(gobj, mo10, mf.mo_coeff, mf.mo_occ)) - -2.1813250579863279e-05)
numpy.random.seed(1)
dm0 = numpy.random.random((2,nao,nao))
dm0 = dm0 + dm0.transpose(0,2,1)
dm10 = numpy.random.random((2,3,nao,nao))
dm10 = dm10 - dm10.transpose(0,1,3,2)
print(lib.finger(make_para_soc2e(gobj, dm0, dm10)) - 0.0036073897889263721) | en | 0.745577 | #!/usr/bin/env python # Copyright 2014-2019 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: <NAME> <<EMAIL>> # Non-relativistic unrestricted Kohn-Sham electron spin-rotation coupling (In testing) Refs: J. Phys. Chem. A. 114, 9246, 2010 Mole. Phys. 9, 6, 585, 1964 # Note mo10 is the imaginary part of MO^1 # Bohr magneton #qed_fac = (nist.G_ELECTRON - 1) #_write(obj, align(para1e)[0], 'SOC(1e)/OZ') # Treat Vxc as one-particle operator Vnuc # Jia, start to work here dE = B dot gtensor dot s #make_para_soc2e = make_para_soc2e H 0 0 1 H 1.2 0 1 H .1 1.1 0.3 H .8 .7 .6 #print(gobj.kernel()) | 1.878816 | 2 |
examples/gather_demo.py | mununum/MAgent | 1 | 7980 | <reponame>mununum/MAgent
import random
import magent
from magent.builtin.rule_model import RandomActor
import numpy as np
def init_food(env, food_handle):
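    # Place plus-shaped clusters of food at nine evenly spaced positions across the map.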
tree = np.asarray([[-1,0], [0,0], [0,-1], [0,1], [1,0]])
third = map_size//4 # mapsize includes walls
for i in range(1, 4):
for j in range(1, 4):
base = np.asarray([third*i, third*j])
env.add_agents(food_handle, method="custom", pos=tree+base)
def neighbor_regen_food(env, food_handle, p=0.003):
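    # With probability p, spawn one new food item in a random orthogonal neighbour cell of each existing food item.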
coords = env.get_pos(food_handle)
rands = np.random.random(len(coords))
for i, pos in enumerate(coords):
if rands[i] > p:
continue
neighbor = np.asarray([[-1,0],[0,-1], [0,1], [1,0]])
regen_pos = [pos+neighbor[np.random.randint(0,4)]]
env.add_agents(food_handle, method="custom",
pos=regen_pos)
if __name__ == "__main__":
gw = magent.gridworld
cfg = gw.Config()
map_size = 25
cfg.set({"map_width": map_size, "map_height": map_size})
agent_group = cfg.add_group(
cfg.register_agent_type(
name="agent",
attr={
'width': 1,
'length': 1,
'view_range': gw.CircleRange(4),
'can_gather': True}))
food_group = cfg.add_group(
cfg.register_agent_type(
"food",
attr={'width': 1,
'length': 1,
'can_be_gathered': True}))
# add reward rule
a = gw.AgentSymbol(agent_group, index='any')
b = gw.AgentSymbol(food_group, index='any')
e = gw.Event(a, 'collide', b)
cfg.add_reward_rule(e, receiver=a, value=1)
# cfg.add_reward_rule(e2, receiver=b, value=1, die=True)
# cfg.add_reward_rule(e3, receiver=[a,b], value=[-1,-1])
env = magent.GridWorld(cfg)
agent_handle, food_handle = env.get_handles()
model1 = RandomActor(env, agent_handle, "up")
env.set_render_dir("build/render")
env.reset()
upstart = [(map_size//2 - 2, map_size//2 - 2), (map_size//2 + 2, map_size//2 - 2),
(map_size//2, map_size//2), (map_size//2 - 2, map_size//2 + 2),
(map_size//2 + 2, map_size//2 + 2)]
# spawnrate = 0.1
env.add_agents(agent_handle, method="custom", pos=upstart)
# env.add_agents(rightgroup, method="custom", pos=rightstart)
init_food(env, food_handle)
k = env.get_observation(agent_handle)
    print(env.get_pos(agent_handle))
    print(len(env.get_pos(food_handle)))
done = False
step_ct = 0
r_sum = 0
while not done:
obs_1 = env.get_observation(agent_handle)
ids_1 = env.get_agent_id(agent_handle)
acts_1 = model1.infer_action(obs_1, ids_1)
env.set_action(agent_handle, acts_1)
# simulate one step
done = env.step()
# render
env.render()
# get reward
reward = sum(env.get_reward(agent_handle))
r_sum += reward
# clear dead agents
env.clear_dead()
        neighbor_regen_food(env, food_handle)
# print info
# if step_ct % 10 == 0:
# print("step %d" % step_ct)
step_ct += 1
if step_ct > 250:
break
    print(r_sum) | import random
import magent
from magent.builtin.rule_model import RandomActor
import numpy as np
def init_food(env, food_handle):
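    # Place plus-shaped clusters of food at nine evenly spaced positions across the map.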
tree = np.asarray([[-1,0], [0,0], [0,-1], [0,1], [1,0]])
third = map_size//4 # mapsize includes walls
for i in range(1, 4):
for j in range(1, 4):
base = np.asarray([third*i, third*j])
env.add_agents(food_handle, method="custom", pos=tree+base)
def neighbor_regen_food(env, food_handle, p=0.003):
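    # With probability p, spawn one new food item in a random orthogonal neighbour cell of each existing food item.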
coords = env.get_pos(food_handle)
rands = np.random.random(len(coords))
for i, pos in enumerate(coords):
if rands[i] > p:
continue
neighbor = np.asarray([[-1,0],[0,-1], [0,1], [1,0]])
regen_pos = [pos+neighbor[np.random.randint(0,4)]]
env.add_agents(food_handle, method="custom",
pos=regen_pos)
if __name__ == "__main__":
gw = magent.gridworld
cfg = gw.Config()
map_size = 25
cfg.set({"map_width": map_size, "map_height": map_size})
agent_group = cfg.add_group(
cfg.register_agent_type(
name="agent",
attr={
'width': 1,
'length': 1,
'view_range': gw.CircleRange(4),
'can_gather': True}))
food_group = cfg.add_group(
cfg.register_agent_type(
"food",
attr={'width': 1,
'length': 1,
'can_be_gathered': True}))
# add reward rule
a = gw.AgentSymbol(agent_group, index='any')
b = gw.AgentSymbol(food_group, index='any')
e = gw.Event(a, 'collide', b)
cfg.add_reward_rule(e, receiver=a, value=1)
# cfg.add_reward_rule(e2, receiver=b, value=1, die=True)
# cfg.add_reward_rule(e3, receiver=[a,b], value=[-1,-1])
env = magent.GridWorld(cfg)
agent_handle, food_handle = env.get_handles()
model1 = RandomActor(env, agent_handle, "up")
env.set_render_dir("build/render")
env.reset()
upstart = [(map_size//2 - 2, map_size//2 - 2), (map_size//2 + 2, map_size//2 - 2),
(map_size//2, map_size//2), (map_size//2 - 2, map_size//2 + 2),
(map_size//2 + 2, map_size//2 + 2)]
# spawnrate = 0.1
env.add_agents(agent_handle, method="custom", pos=upstart)
# env.add_agents(rightgroup, method="custom", pos=rightstart)
init_food(env, food_handle)
k = env.get_observation(agent_handle)
    print(env.get_pos(agent_handle))
    print(len(env.get_pos(food_handle)))
done = False
step_ct = 0
r_sum = 0
while not done:
obs_1 = env.get_observation(agent_handle)
ids_1 = env.get_agent_id(agent_handle)
acts_1 = model1.infer_action(obs_1, ids_1)
env.set_action(agent_handle, acts_1)
# simulate one step
done = env.step()
# render
env.render()
# get reward
reward = sum(env.get_reward(agent_handle))
r_sum += reward
# clear dead agents
env.clear_dead()
        neighbor_regen_food(env, food_handle)
# print info
# if step_ct % 10 == 0:
# print("step %d" % step_ct)
step_ct += 1
if step_ct > 250:
break
    print(r_sum) | en | 0.513848 | # mapsize includes walls # add reward rule # cfg.add_reward_rule(e2, receiver=b, value=1, die=True) # cfg.add_reward_rule(e3, receiver=[a,b], value=[-1,-1]) # spawnrate = 0.1 # env.add_agents(rightgroup, method="custom", pos=rightstart) # simulate one step # render # get reward # clear dead agents # print info # if step_ct % 10 == 0: # print("step %d" % step_ct) | 2.203338 | 2 |
corehq/apps/domain/deletion.py | shyamkumarlchauhan/commcare-hq | 0 | 7981 | <reponame>shyamkumarlchauhan/commcare-hq
import itertools
import logging
from datetime import date
from django.apps import apps
from django.conf import settings
from django.db import connection, transaction
from django.db.models import Q
from dimagi.utils.chunked import chunked
from corehq.apps.accounting.models import Subscription
from corehq.apps.accounting.utils import get_change_status
from corehq.apps.custom_data_fields.dbaccessors import get_by_domain_and_type
from corehq.apps.domain.utils import silence_during_tests
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.products.views import ProductFieldsView
from corehq.apps.userreports.dbaccessors import (
delete_all_ucr_tables_for_domain,
)
from corehq.apps.users.views.mobile import UserFieldsView
from corehq.blobs import CODES, get_blob_db
from corehq.blobs.models import BlobMeta
from corehq.form_processor.backends.sql.dbaccessors import doc_type_to_state
from corehq.form_processor.interfaces.dbaccessors import (
CaseAccessors,
FormAccessors,
)
from corehq.util.log import with_progress_bar
logger = logging.getLogger(__name__)
class BaseDeletion(object):
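    # Base class for domain deletion operations: stores the Django app label and lets subclasses skip execution when that app is not installed.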
def __init__(self, app_label):
self.app_label = app_label
def is_app_installed(self):
try:
return bool(apps.get_app_config(self.app_label))
except LookupError:
return False
class CustomDeletion(BaseDeletion):
def __init__(self, app_label, deletion_fn):
super(CustomDeletion, self).__init__(app_label)
self.deletion_fn = deletion_fn
def execute(self, domain_name):
if self.is_app_installed():
self.deletion_fn(domain_name)
class RawDeletion(BaseDeletion):
def __init__(self, app_label, raw_query):
super(RawDeletion, self).__init__(app_label)
self.raw_query = raw_query
def execute(self, cursor, domain_name):
if self.is_app_installed():
cursor.execute(self.raw_query, [domain_name])
class ModelDeletion(BaseDeletion):
def __init__(self, app_label, model_name, domain_filter_kwarg):
super(ModelDeletion, self).__init__(app_label)
self.domain_filter_kwarg = domain_filter_kwarg
self.model_name = model_name
def get_model_class(self):
return apps.get_model(self.app_label, self.model_name)
def execute(self, domain_name):
if not domain_name:
# The Django orm will properly turn a None domain_name to a
# IS NULL filter. We don't want to allow deleting records for
# NULL domain names since they might have special meaning (like
# in some of the SMS models).
raise RuntimeError("Expected a valid domain name")
if self.is_app_installed():
model = self.get_model_class()
model.objects.filter(**{self.domain_filter_kwarg: domain_name}).delete()
def _delete_domain_backend_mappings(domain_name):
model = apps.get_model('sms', 'SQLMobileBackendMapping')
model.objects.filter(is_global=False, domain=domain_name).delete()
def _delete_domain_backends(domain_name):
model = apps.get_model('sms', 'SQLMobileBackend')
model.objects.filter(is_global=False, domain=domain_name).delete()
def _delete_web_user_membership(domain_name):
from corehq.apps.users.models import WebUser
active_web_users = WebUser.by_domain(domain_name)
inactive_web_users = WebUser.by_domain(domain_name, is_active=False)
for web_user in list(active_web_users) + list(inactive_web_users):
web_user.delete_domain_membership(domain_name)
if settings.UNIT_TESTING and not web_user.domain_memberships:
web_user.delete()
else:
web_user.save()
def _terminate_subscriptions(domain_name):
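    # End the domain's active subscription as of today, transfer its credits, deactivate the subscriber, and hide any not-yet-started subscriptions from ops.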
today = date.today()
with transaction.atomic():
current_subscription = Subscription.get_active_subscription_by_domain(domain_name)
if current_subscription:
current_subscription.date_end = today
current_subscription.is_active = False
current_subscription.save()
current_subscription.transfer_credits()
_, downgraded_privs, upgraded_privs = get_change_status(current_subscription.plan_version, None)
current_subscription.subscriber.deactivate_subscription(
downgraded_privileges=downgraded_privs,
upgraded_privileges=upgraded_privs,
old_subscription=current_subscription,
new_subscription=None,
)
Subscription.visible_objects.filter(
Q(date_start__gt=today) | Q(date_start=today, is_active=False),
subscriber__domain=domain_name,
).update(is_hidden_to_ops=True)
def _delete_all_cases(domain_name):
logger.info('Deleting cases...')
case_accessor = CaseAccessors(domain_name)
case_ids = case_accessor.get_case_ids_in_domain()
for case_id_chunk in chunked(with_progress_bar(case_ids, stream=silence_during_tests()), 500):
case_accessor.soft_delete_cases(list(case_id_chunk))
logger.info('Deleting cases complete.')
def _delete_all_forms(domain_name):
logger.info('Deleting forms...')
form_accessor = FormAccessors(domain_name)
form_ids = list(itertools.chain(*[
form_accessor.get_all_form_ids_in_domain(doc_type=doc_type)
for doc_type in doc_type_to_state
]))
for form_id_chunk in chunked(with_progress_bar(form_ids, stream=silence_during_tests()), 500):
form_accessor.soft_delete_forms(list(form_id_chunk))
logger.info('Deleting forms complete.')
def _delete_data_files(domain_name):
get_blob_db().bulk_delete(metas=list(BlobMeta.objects.partitioned_query(domain_name).filter(
parent_id=domain_name,
type_code=CODES.data_file,
)))
def _delete_custom_data_fields(domain_name):
# The CustomDataFieldsDefinition instances are cleaned up as part of the
# bulk couch delete, but we also need to clear the cache
logger.info('Deleting custom data fields...')
for field_view in [LocationFieldsView, ProductFieldsView, UserFieldsView]:
get_by_domain_and_type.clear(domain_name, field_view.field_type)
logger.info('Deleting custom data fields complete.')
# We use raw queries instead of ORM because Django queryset delete needs to
# fetch objects into memory to send signals and handle cascades. It makes deletion very slow
# if we have a millions of rows in stock data tables.
DOMAIN_DELETE_OPERATIONS = [
RawDeletion('stock', """
DELETE FROM stock_stocktransaction
WHERE report_id IN (SELECT id FROM stock_stockreport WHERE domain=%s)
"""),
RawDeletion('stock', "DELETE FROM stock_stockreport WHERE domain=%s"),
RawDeletion('stock', """
DELETE FROM commtrack_stockstate
WHERE product_id IN (SELECT product_id FROM products_sqlproduct WHERE domain=%s)
"""),
ModelDeletion('products', 'SQLProduct', 'domain'),
ModelDeletion('locations', 'SQLLocation', 'domain'),
ModelDeletion('locations', 'LocationType', 'domain'),
ModelDeletion('stock', 'DocDomainMapping', 'domain_name'),
ModelDeletion('domain_migration_flags', 'DomainMigrationProgress', 'domain'),
ModelDeletion('sms', 'DailyOutboundSMSLimitReached', 'domain'),
ModelDeletion('sms', 'SMS', 'domain'),
ModelDeletion('sms', 'SQLLastReadMessage', 'domain'),
ModelDeletion('sms', 'ExpectedCallback', 'domain'),
ModelDeletion('ivr', 'Call', 'domain'),
ModelDeletion('sms', 'Keyword', 'domain'),
ModelDeletion('sms', 'PhoneNumber', 'domain'),
ModelDeletion('sms', 'MessagingSubEvent', 'parent__domain'),
ModelDeletion('sms', 'MessagingEvent', 'domain'),
ModelDeletion('sms', 'QueuedSMS', 'domain'),
ModelDeletion('sms', 'SelfRegistrationInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backend_mappings),
ModelDeletion('sms', 'MobileBackendInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backends),
CustomDeletion('users', _delete_web_user_membership),
CustomDeletion('accounting', _terminate_subscriptions),
CustomDeletion('form_processor', _delete_all_cases),
CustomDeletion('form_processor', _delete_all_forms),
ModelDeletion('aggregate_ucrs', 'AggregateTableDefinition', 'domain'),
ModelDeletion('app_manager', 'AppReleaseByLocation', 'domain'),
ModelDeletion('app_manager', 'LatestEnabledBuildProfiles', 'domain'),
ModelDeletion('app_manager', 'ResourceOverride', 'domain'),
ModelDeletion('app_manager', 'GlobalAppConfig', 'domain'),
ModelDeletion('case_importer', 'CaseUploadRecord', 'domain'),
ModelDeletion('case_search', 'CaseSearchConfig', 'domain'),
ModelDeletion('case_search', 'CaseSearchQueryAddition', 'domain'),
ModelDeletion('case_search', 'FuzzyProperties', 'domain'),
ModelDeletion('case_search', 'IgnorePatterns', 'domain'),
ModelDeletion('cloudcare', 'ApplicationAccess', 'domain'),
ModelDeletion('consumption', 'DefaultConsumption', 'domain'),
ModelDeletion('data_analytics', 'GIRRow', 'domain_name'),
ModelDeletion('data_analytics', 'MALTRow', 'domain_name'),
ModelDeletion('data_dictionary', 'CaseType', 'domain'),
ModelDeletion('data_interfaces', 'CaseRuleAction', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleCriteria', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'domain'), # TODO
ModelDeletion('data_interfaces', 'AutomaticUpdateRule', 'domain'),
ModelDeletion('data_interfaces', 'DomainCaseRuleRun', 'domain'),
ModelDeletion('domain', 'TransferDomainRequest', 'domain'),
ModelDeletion('export', 'EmailExportWhenDoneRequest', 'domain'),
CustomDeletion('export', _delete_data_files),
ModelDeletion('locations', 'LocationFixtureConfiguration', 'domain'),
ModelDeletion('ota', 'MobileRecoveryMeasure', 'domain'),
ModelDeletion('ota', 'SerialIdBucket', 'domain'),
ModelDeletion('phone', 'OwnershipCleanlinessFlag', 'domain'),
ModelDeletion('phone', 'SyncLogSQL', 'domain'),
ModelDeletion('registration', 'RegistrationRequest', 'domain'),
ModelDeletion('reminders', 'EmailUsage', 'domain'),
ModelDeletion('reports', 'ReportsSidebarOrdering', 'domain'),
ModelDeletion('smsforms', 'SQLXFormsSession', 'domain'),
ModelDeletion('translations', 'SMSTranslations', 'domain'),
ModelDeletion('translations', 'TransifexBlacklist', 'domain'),
ModelDeletion('userreports', 'AsyncIndicator', 'domain'),
ModelDeletion('users', 'DomainRequest', 'domain'),
ModelDeletion('users', 'Invitation', 'domain'),
ModelDeletion('users', 'DomainPermissionsMirror', 'source'),
ModelDeletion('zapier', 'ZapierSubscription', 'domain'),
ModelDeletion('dhis2', 'Dhis2Connection', 'domain'),
ModelDeletion('motech', 'RequestLog', 'domain'),
ModelDeletion('couchforms', 'UnfinishedSubmissionStub', 'domain'),
CustomDeletion('custom_data_fields', _delete_custom_data_fields),
CustomDeletion('ucr', delete_all_ucr_tables_for_domain),
]
def apply_deletion_operations(domain_name):
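    # Run the raw SQL deletions on a single cursor first, then the ORM and custom deletions.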
raw_ops, model_ops = _split_ops_by_type(DOMAIN_DELETE_OPERATIONS)
with connection.cursor() as cursor:
for op in raw_ops:
op.execute(cursor, domain_name)
for op in model_ops:
op.execute(domain_name)
def _split_ops_by_type(ops):
raw_ops = []
model_ops = []
for op in ops:
if isinstance(op, RawDeletion):
raw_ops.append(op)
else:
model_ops.append(op)
return raw_ops, model_ops
| import itertools
import logging
from datetime import date
from django.apps import apps
from django.conf import settings
from django.db import connection, transaction
from django.db.models import Q
from dimagi.utils.chunked import chunked
from corehq.apps.accounting.models import Subscription
from corehq.apps.accounting.utils import get_change_status
from corehq.apps.custom_data_fields.dbaccessors import get_by_domain_and_type
from corehq.apps.domain.utils import silence_during_tests
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.products.views import ProductFieldsView
from corehq.apps.userreports.dbaccessors import (
delete_all_ucr_tables_for_domain,
)
from corehq.apps.users.views.mobile import UserFieldsView
from corehq.blobs import CODES, get_blob_db
from corehq.blobs.models import BlobMeta
from corehq.form_processor.backends.sql.dbaccessors import doc_type_to_state
from corehq.form_processor.interfaces.dbaccessors import (
CaseAccessors,
FormAccessors,
)
from corehq.util.log import with_progress_bar
logger = logging.getLogger(__name__)
class BaseDeletion(object):
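    # Base class for domain deletion operations: stores the Django app label and lets subclasses skip execution when that app is not installed.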
def __init__(self, app_label):
self.app_label = app_label
def is_app_installed(self):
try:
return bool(apps.get_app_config(self.app_label))
except LookupError:
return False
class CustomDeletion(BaseDeletion):
def __init__(self, app_label, deletion_fn):
super(CustomDeletion, self).__init__(app_label)
self.deletion_fn = deletion_fn
def execute(self, domain_name):
if self.is_app_installed():
self.deletion_fn(domain_name)
class RawDeletion(BaseDeletion):
def __init__(self, app_label, raw_query):
super(RawDeletion, self).__init__(app_label)
self.raw_query = raw_query
def execute(self, cursor, domain_name):
if self.is_app_installed():
cursor.execute(self.raw_query, [domain_name])
class ModelDeletion(BaseDeletion):
def __init__(self, app_label, model_name, domain_filter_kwarg):
super(ModelDeletion, self).__init__(app_label)
self.domain_filter_kwarg = domain_filter_kwarg
self.model_name = model_name
def get_model_class(self):
return apps.get_model(self.app_label, self.model_name)
def execute(self, domain_name):
if not domain_name:
# The Django orm will properly turn a None domain_name to a
# IS NULL filter. We don't want to allow deleting records for
# NULL domain names since they might have special meaning (like
# in some of the SMS models).
raise RuntimeError("Expected a valid domain name")
if self.is_app_installed():
model = self.get_model_class()
model.objects.filter(**{self.domain_filter_kwarg: domain_name}).delete()
def _delete_domain_backend_mappings(domain_name):
model = apps.get_model('sms', 'SQLMobileBackendMapping')
model.objects.filter(is_global=False, domain=domain_name).delete()
def _delete_domain_backends(domain_name):
model = apps.get_model('sms', 'SQLMobileBackend')
model.objects.filter(is_global=False, domain=domain_name).delete()
def _delete_web_user_membership(domain_name):
from corehq.apps.users.models import WebUser
active_web_users = WebUser.by_domain(domain_name)
inactive_web_users = WebUser.by_domain(domain_name, is_active=False)
for web_user in list(active_web_users) + list(inactive_web_users):
web_user.delete_domain_membership(domain_name)
if settings.UNIT_TESTING and not web_user.domain_memberships:
web_user.delete()
else:
web_user.save()
def _terminate_subscriptions(domain_name):
today = date.today()
with transaction.atomic():
current_subscription = Subscription.get_active_subscription_by_domain(domain_name)
if current_subscription:
current_subscription.date_end = today
current_subscription.is_active = False
current_subscription.save()
current_subscription.transfer_credits()
_, downgraded_privs, upgraded_privs = get_change_status(current_subscription.plan_version, None)
current_subscription.subscriber.deactivate_subscription(
downgraded_privileges=downgraded_privs,
upgraded_privileges=upgraded_privs,
old_subscription=current_subscription,
new_subscription=None,
)
Subscription.visible_objects.filter(
Q(date_start__gt=today) | Q(date_start=today, is_active=False),
subscriber__domain=domain_name,
).update(is_hidden_to_ops=True)
def _delete_all_cases(domain_name):
logger.info('Deleting cases...')
case_accessor = CaseAccessors(domain_name)
case_ids = case_accessor.get_case_ids_in_domain()
for case_id_chunk in chunked(with_progress_bar(case_ids, stream=silence_during_tests()), 500):
case_accessor.soft_delete_cases(list(case_id_chunk))
logger.info('Deleting cases complete.')
def _delete_all_forms(domain_name):
logger.info('Deleting forms...')
form_accessor = FormAccessors(domain_name)
form_ids = list(itertools.chain(*[
form_accessor.get_all_form_ids_in_domain(doc_type=doc_type)
for doc_type in doc_type_to_state
]))
for form_id_chunk in chunked(with_progress_bar(form_ids, stream=silence_during_tests()), 500):
form_accessor.soft_delete_forms(list(form_id_chunk))
logger.info('Deleting forms complete.')
def _delete_data_files(domain_name):
get_blob_db().bulk_delete(metas=list(BlobMeta.objects.partitioned_query(domain_name).filter(
parent_id=domain_name,
type_code=CODES.data_file,
)))
def _delete_custom_data_fields(domain_name):
# The CustomDataFieldsDefinition instances are cleaned up as part of the
# bulk couch delete, but we also need to clear the cache
logger.info('Deleting custom data fields...')
for field_view in [LocationFieldsView, ProductFieldsView, UserFieldsView]:
get_by_domain_and_type.clear(domain_name, field_view.field_type)
logger.info('Deleting custom data fields complete.')
# We use raw queries instead of ORM because Django queryset delete needs to
# fetch objects into memory to send signals and handle cascades. It makes deletion very slow
# if we have millions of rows in stock data tables.
DOMAIN_DELETE_OPERATIONS = [
RawDeletion('stock', """
DELETE FROM stock_stocktransaction
WHERE report_id IN (SELECT id FROM stock_stockreport WHERE domain=%s)
"""),
RawDeletion('stock', "DELETE FROM stock_stockreport WHERE domain=%s"),
RawDeletion('stock', """
DELETE FROM commtrack_stockstate
WHERE product_id IN (SELECT product_id FROM products_sqlproduct WHERE domain=%s)
"""),
ModelDeletion('products', 'SQLProduct', 'domain'),
ModelDeletion('locations', 'SQLLocation', 'domain'),
ModelDeletion('locations', 'LocationType', 'domain'),
ModelDeletion('stock', 'DocDomainMapping', 'domain_name'),
ModelDeletion('domain_migration_flags', 'DomainMigrationProgress', 'domain'),
ModelDeletion('sms', 'DailyOutboundSMSLimitReached', 'domain'),
ModelDeletion('sms', 'SMS', 'domain'),
ModelDeletion('sms', 'SQLLastReadMessage', 'domain'),
ModelDeletion('sms', 'ExpectedCallback', 'domain'),
ModelDeletion('ivr', 'Call', 'domain'),
ModelDeletion('sms', 'Keyword', 'domain'),
ModelDeletion('sms', 'PhoneNumber', 'domain'),
ModelDeletion('sms', 'MessagingSubEvent', 'parent__domain'),
ModelDeletion('sms', 'MessagingEvent', 'domain'),
ModelDeletion('sms', 'QueuedSMS', 'domain'),
ModelDeletion('sms', 'SelfRegistrationInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backend_mappings),
ModelDeletion('sms', 'MobileBackendInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backends),
CustomDeletion('users', _delete_web_user_membership),
CustomDeletion('accounting', _terminate_subscriptions),
CustomDeletion('form_processor', _delete_all_cases),
CustomDeletion('form_processor', _delete_all_forms),
ModelDeletion('aggregate_ucrs', 'AggregateTableDefinition', 'domain'),
ModelDeletion('app_manager', 'AppReleaseByLocation', 'domain'),
ModelDeletion('app_manager', 'LatestEnabledBuildProfiles', 'domain'),
ModelDeletion('app_manager', 'ResourceOverride', 'domain'),
ModelDeletion('app_manager', 'GlobalAppConfig', 'domain'),
ModelDeletion('case_importer', 'CaseUploadRecord', 'domain'),
ModelDeletion('case_search', 'CaseSearchConfig', 'domain'),
ModelDeletion('case_search', 'CaseSearchQueryAddition', 'domain'),
ModelDeletion('case_search', 'FuzzyProperties', 'domain'),
ModelDeletion('case_search', 'IgnorePatterns', 'domain'),
ModelDeletion('cloudcare', 'ApplicationAccess', 'domain'),
ModelDeletion('consumption', 'DefaultConsumption', 'domain'),
ModelDeletion('data_analytics', 'GIRRow', 'domain_name'),
ModelDeletion('data_analytics', 'MALTRow', 'domain_name'),
ModelDeletion('data_dictionary', 'CaseType', 'domain'),
ModelDeletion('data_interfaces', 'CaseRuleAction', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleCriteria', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'domain'), # TODO
ModelDeletion('data_interfaces', 'AutomaticUpdateRule', 'domain'),
ModelDeletion('data_interfaces', 'DomainCaseRuleRun', 'domain'),
ModelDeletion('domain', 'TransferDomainRequest', 'domain'),
ModelDeletion('export', 'EmailExportWhenDoneRequest', 'domain'),
CustomDeletion('export', _delete_data_files),
ModelDeletion('locations', 'LocationFixtureConfiguration', 'domain'),
ModelDeletion('ota', 'MobileRecoveryMeasure', 'domain'),
ModelDeletion('ota', 'SerialIdBucket', 'domain'),
ModelDeletion('phone', 'OwnershipCleanlinessFlag', 'domain'),
ModelDeletion('phone', 'SyncLogSQL', 'domain'),
ModelDeletion('registration', 'RegistrationRequest', 'domain'),
ModelDeletion('reminders', 'EmailUsage', 'domain'),
ModelDeletion('reports', 'ReportsSidebarOrdering', 'domain'),
ModelDeletion('smsforms', 'SQLXFormsSession', 'domain'),
ModelDeletion('translations', 'SMSTranslations', 'domain'),
ModelDeletion('translations', 'TransifexBlacklist', 'domain'),
ModelDeletion('userreports', 'AsyncIndicator', 'domain'),
ModelDeletion('users', 'DomainRequest', 'domain'),
ModelDeletion('users', 'Invitation', 'domain'),
ModelDeletion('users', 'DomainPermissionsMirror', 'source'),
ModelDeletion('zapier', 'ZapierSubscription', 'domain'),
ModelDeletion('dhis2', 'Dhis2Connection', 'domain'),
ModelDeletion('motech', 'RequestLog', 'domain'),
ModelDeletion('couchforms', 'UnfinishedSubmissionStub', 'domain'),
CustomDeletion('custom_data_fields', _delete_custom_data_fields),
CustomDeletion('ucr', delete_all_ucr_tables_for_domain),
]
def apply_deletion_operations(domain_name):
raw_ops, model_ops = _split_ops_by_type(DOMAIN_DELETE_OPERATIONS)
with connection.cursor() as cursor:
for op in raw_ops:
op.execute(cursor, domain_name)
for op in model_ops:
op.execute(domain_name)
def _split_ops_by_type(ops):
raw_ops = []
model_ops = []
for op in ops:
if isinstance(op, RawDeletion):
raw_ops.append(op)
else:
model_ops.append(op)
return raw_ops, model_ops | en | 0.867128 | # The Django orm will properly turn a None domain_name to a # IS NULL filter. We don't want to allow deleting records for # NULL domain names since they might have special meaning (like # in some of the SMS models). # The CustomDataFieldsDefinition instances are cleaned up as part of the # bulk couch delete, but we also need to clear the cache # We use raw queries instead of ORM because Django queryset delete needs to # fetch objects into memory to send signals and handle cascades. It makes deletion very slow # if we have a millions of rows in stock data tables. DELETE FROM stock_stocktransaction WHERE report_id IN (SELECT id FROM stock_stockreport WHERE domain=%s) DELETE FROM commtrack_stockstate WHERE product_id IN (SELECT product_id FROM products_sqlproduct WHERE domain=%s) # TODO | 1.741329 | 2 |
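# Illustrative sketch, separate from the module above: DOMAIN_DELETE_OPERATIONS mixes raw-SQL
# and ORM deletions, and apply_deletion_operations() splits them because each kind needs
# different arguments (a cursor vs. just the domain name). The stand-in classes below are
# hypothetical and need no Django; they only demonstrate that dispatch pattern.
class DemoRawOp:
    def __init__(self, query):
        self.query = query
    def execute(self, cursor, domain):
        cursor.append((self.query, domain))  # plays the role of cursor.execute(query, [domain])

class DemoModelOp:
    def __init__(self, label):
        self.label = label
    def execute(self, domain):
        print(f"delete {self.label} rows where domain={domain!r}")

def demo_apply(ops, domain):
    raw_ops = [op for op in ops if isinstance(op, DemoRawOp)]
    model_ops = [op for op in ops if not isinstance(op, DemoRawOp)]
    cursor = []  # stands in for a real database cursor
    for op in raw_ops:
        op.execute(cursor, domain)
    for op in model_ops:
        op.execute(domain)
    return cursor

demo_apply([DemoRawOp("DELETE FROM stock_stockreport WHERE domain=%s"), DemoModelOp("sms.SMS")], "demo-domain")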
icosphere/icosphere.py | JackWalpole/icosahedron | 2 | 7982 | """Subdivided icosahedral mesh generation"""
from __future__ import print_function
import numpy as np
# following: http://blog.andreaskahler.com/2009/06/creating-icosphere-mesh-in-code.html
# hierarchy:
# Icosphere -> Triangle -> Point
class IcoSphere:
"""
Usage: IcoSphere(level)
Maximum supported level = 8
get started with:
>>> A = IcoSphere(3)
... A.plot3d()
"""
# maximum level for subdivision of the icosahedron
maxlevel = 8
def __init__(self, level):
if type(level) is not int:
raise TypeError('level must be an integer')
elif level < 0:
raise Exception('level must be no less than 0')
elif level > self.maxlevel:
raise Exception('level larger than ' + str(self.maxlevel) + ' not supported')
self.level = level
self.points = []
self.triangles = []
self.npts = 0
################################
# initialise level 1 icosahedron
################################
        # golden ratio
t = (1.0 + np.sqrt(5.0)) / 2.0
# add vertices
self._addPoint(np.array([-1, t, 0]))
self._addPoint(np.array([ 1, t, 0]))
self._addPoint(np.array([-1,-t, 0]))
self._addPoint(np.array([ 1,-t, 0]))
self._addPoint(np.array([ 0,-1, t]))
self._addPoint(np.array([ 0, 1, t]))
self._addPoint(np.array([ 0,-1,-t]))
self._addPoint(np.array([ 0, 1,-t]))
self._addPoint(np.array([ t, 0,-1]))
self._addPoint(np.array([ t, 0, 1]))
self._addPoint(np.array([-t, 0,-1]))
self._addPoint(np.array([-t, 0, 1]))
# make triangles
tris = self.triangles
verts = self.points
# 5 faces around point 0
tris.append(Triangle([ verts[0],verts[11], verts[5]]))
tris.append(Triangle([ verts[0], verts[5], verts[1]]))
tris.append(Triangle([ verts[0], verts[1], verts[7]]))
tris.append(Triangle([ verts[0], verts[7],verts[10]]))
tris.append(Triangle([ verts[0],verts[10],verts[11]]))
# 5 adjacent faces
tris.append(Triangle([ verts[1], verts[5], verts[9]]))
tris.append(Triangle([ verts[5],verts[11], verts[4]]))
tris.append(Triangle([verts[11],verts[10], verts[2]]))
tris.append(Triangle([verts[10], verts[7], verts[6]]))
tris.append(Triangle([ verts[7], verts[1], verts[8]]))
# 5 faces around point 3
tris.append(Triangle([ verts[3], verts[9], verts[4]]))
tris.append(Triangle([ verts[3], verts[4], verts[2]]))
tris.append(Triangle([ verts[3], verts[2], verts[6]]))
tris.append(Triangle([ verts[3], verts[6], verts[8]]))
tris.append(Triangle([ verts[3], verts[8], verts[9]]))
# 5 adjacent faces
tris.append(Triangle([ verts[4], verts[9], verts[5]]))
tris.append(Triangle([ verts[2], verts[4],verts[11]]))
tris.append(Triangle([ verts[6], verts[2],verts[10]]))
tris.append(Triangle([ verts[8], verts[6], verts[7]]))
tris.append(Triangle([ verts[9], verts[8], verts[1]]))
########################################
# refine triangles to desired mesh level
########################################
for l in range(self.level):
midPointDict = {}
faces = []
for tri in self.triangles:
# replace triangle by 4 triangles
p = tri.pts
a = self._getMiddlePoint(p[0], p[1], midPointDict)
b = self._getMiddlePoint(p[1], p[2], midPointDict)
c = self._getMiddlePoint(p[2], p[0], midPointDict)
faces.append(Triangle([p[0], a, c]))
faces.append(Triangle([p[1], b, a]))
faces.append(Triangle([p[2], c, b]))
faces.append(Triangle([a, b, c]))
# once looped thru all triangles overwrite self.triangles
self.triangles = faces
self.nfaces = len(self.triangles)
# check that npts and nfaces are as expected
expected_npts = calculate_npts(self.level)
expected_nfaces = calculate_nfaces(self.level)
if self.npts != calculate_npts(self.level):
raise Exception('npts '+str(self.npts)+' not as expected '+str(expected_npts))
elif self.nfaces != calculate_nfaces(self.level):
raise Exception('nfaces '+str(self.nfaces)+' not as expected '+str(expected_nfaces))
def _addPoint(self, xyz):
"""Add point to self.points"""
self.points.append(Point(self.npts, xyz))
self.npts += 1
def _getMiddlePoint(self, p1, p2, midPointDict):
"""return Point"""
if not isinstance(p1, Point) or not isinstance(p2, Point):
raise TypeError('p1 and p2 must be Points')
# does point already exist?
key = tuple(sorted([p1.idx, p2.idx]))
if key in midPointDict:
# point exists
pass
else:
# point is new
self._addPoint((p1.xyz + p2.xyz)/2)
midPointDict[key] = self.points[-1]
return midPointDict[key]
def plot3d(self):
"""Matplotlib 3D plot of mesh"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xyz = np.asarray([ pt.xyz for pt in self.points ])
x = xyz[:,0]
y = xyz[:,1]
z = xyz[:,2]
ts = np.asarray([ [ p.idx for p in t.pts ] for t in self.triangles ])
ax.plot_trisurf(x,y,ts,z)
plt.show()
def dump_xyz(self):
[ print(*pt.xyz) for pt in self.points ]
def dump_latlonr(self):
[ print(*cart2geo(*pt.xyz)) for pt in self.points ]
class Triangle:
"""A triangle adjoining three adjacent points"""
def __init__(self, pts):
if not isinstance(pts, list):
raise TypeError('pts must be a list')
elif len(pts) !=3:
raise Exception('pts must be of length 3')
else:
self.pts = pts
class Point:
"""A 3D point on the mesh"""
def __init__(self, idx, xyz):
if type(idx) is not int:
raise TypeError('idx must be an integer')
elif not isinstance(xyz,np.ndarray):
raise TypeError('xyz must be a numpy array')
elif xyz.size != 3:
raise Exception('xyz must be of size 3')
else:
# ensure length equals 1 and add to list of points
self.xyz = (xyz/np.linalg.norm(xyz))
self.idx = idx
def calculate_npts(level):
n = 2**level
return 2 + 10 * n**2
def calculate_nfaces(level):
n = 2**level
return 20 * n**2
def cart2geo(x, y, z):
"""convert x y z cartesian coordinates to latitude longitude radius
xyz is a numpy array, a right handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
r = np.sqrt(x**2 + y**2 + z**2)
lon = np.rad2deg(np.arctan2(y,x))
lat = np.rad2deg(np.arcsin(z/r))
return lat, lon, r
def geo2cart(lat, lon, r):
"""convert latitude longitude radius to x y z cartesian coordinates
xyz is a numpy array, a right handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
    # cart2geo above returns degrees, so convert back to radians before applying cos/sin
    lat, lon = np.deg2rad(lat), np.deg2rad(lon)
    x = r * np.cos(lon) * np.cos(lat)
    y = r * np.sin(lon) * np.cos(lat)
    z = r * np.sin(lat)
return x, y, z
# def xyzToLatLonR(xyz):
# trans = np.array([np.])
| """Subdivided icosahedral mesh generation"""
from __future__ import print_function
import numpy as np
# following: http://blog.andreaskahler.com/2009/06/creating-icosphere-mesh-in-code.html
# hierarchy:
# Icosphere -> Triangle -> Point
class IcoSphere:
"""
Usage: IcoSphere(level)
Maximum supported level = 8
get started with:
>>> A = IcoSphere(3)
... A.plot3d()
"""
# maximum level for subdivision of the icosahedron
maxlevel = 8
def __init__(self, level):
if type(level) is not int:
raise TypeError('level must be an integer')
elif level < 0:
raise Exception('level must be no less than 0')
elif level > self.maxlevel:
raise Exception('level larger than ' + str(self.maxlevel) + ' not supported')
self.level = level
self.points = []
self.triangles = []
self.npts = 0
################################
# initialise level 1 icosahedron
################################
# golden ration
t = (1.0 + np.sqrt(5.0)) / 2.0
# add vertices
self._addPoint(np.array([-1, t, 0]))
self._addPoint(np.array([ 1, t, 0]))
self._addPoint(np.array([-1,-t, 0]))
self._addPoint(np.array([ 1,-t, 0]))
self._addPoint(np.array([ 0,-1, t]))
self._addPoint(np.array([ 0, 1, t]))
self._addPoint(np.array([ 0,-1,-t]))
self._addPoint(np.array([ 0, 1,-t]))
self._addPoint(np.array([ t, 0,-1]))
self._addPoint(np.array([ t, 0, 1]))
self._addPoint(np.array([-t, 0,-1]))
self._addPoint(np.array([-t, 0, 1]))
# make triangles
tris = self.triangles
verts = self.points
# 5 faces around point 0
tris.append(Triangle([ verts[0],verts[11], verts[5]]))
tris.append(Triangle([ verts[0], verts[5], verts[1]]))
tris.append(Triangle([ verts[0], verts[1], verts[7]]))
tris.append(Triangle([ verts[0], verts[7],verts[10]]))
tris.append(Triangle([ verts[0],verts[10],verts[11]]))
# 5 adjacent faces
tris.append(Triangle([ verts[1], verts[5], verts[9]]))
tris.append(Triangle([ verts[5],verts[11], verts[4]]))
tris.append(Triangle([verts[11],verts[10], verts[2]]))
tris.append(Triangle([verts[10], verts[7], verts[6]]))
tris.append(Triangle([ verts[7], verts[1], verts[8]]))
# 5 faces around point 3
tris.append(Triangle([ verts[3], verts[9], verts[4]]))
tris.append(Triangle([ verts[3], verts[4], verts[2]]))
tris.append(Triangle([ verts[3], verts[2], verts[6]]))
tris.append(Triangle([ verts[3], verts[6], verts[8]]))
tris.append(Triangle([ verts[3], verts[8], verts[9]]))
# 5 adjacent faces
tris.append(Triangle([ verts[4], verts[9], verts[5]]))
tris.append(Triangle([ verts[2], verts[4],verts[11]]))
tris.append(Triangle([ verts[6], verts[2],verts[10]]))
tris.append(Triangle([ verts[8], verts[6], verts[7]]))
tris.append(Triangle([ verts[9], verts[8], verts[1]]))
########################################
# refine triangles to desired mesh level
########################################
for l in range(self.level):
midPointDict = {}
faces = []
for tri in self.triangles:
# replace triangle by 4 triangles
p = tri.pts
a = self._getMiddlePoint(p[0], p[1], midPointDict)
b = self._getMiddlePoint(p[1], p[2], midPointDict)
c = self._getMiddlePoint(p[2], p[0], midPointDict)
faces.append(Triangle([p[0], a, c]))
faces.append(Triangle([p[1], b, a]))
faces.append(Triangle([p[2], c, b]))
faces.append(Triangle([a, b, c]))
# once looped thru all triangles overwrite self.triangles
self.triangles = faces
self.nfaces = len(self.triangles)
# check that npts and nfaces are as expected
expected_npts = calculate_npts(self.level)
expected_nfaces = calculate_nfaces(self.level)
if self.npts != calculate_npts(self.level):
raise Exception('npts '+str(self.npts)+' not as expected '+str(expected_npts))
elif self.nfaces != calculate_nfaces(self.level):
raise Exception('nfaces '+str(self.nfaces)+' not as expected '+str(expected_nfaces))
def _addPoint(self, xyz):
"""Add point to self.points"""
self.points.append(Point(self.npts, xyz))
self.npts += 1
def _getMiddlePoint(self, p1, p2, midPointDict):
"""return Point"""
if not isinstance(p1, Point) or not isinstance(p2, Point):
raise TypeError('p1 and p2 must be Points')
# does point already exist?
key = tuple(sorted([p1.idx, p2.idx]))
if key in midPointDict:
# point exists
pass
else:
# point is new
self._addPoint((p1.xyz + p2.xyz)/2)
midPointDict[key] = self.points[-1]
return midPointDict[key]
def plot3d(self):
"""Matplotlib 3D plot of mesh"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xyz = np.asarray([ pt.xyz for pt in self.points ])
x = xyz[:,0]
y = xyz[:,1]
z = xyz[:,2]
ts = np.asarray([ [ p.idx for p in t.pts ] for t in self.triangles ])
ax.plot_trisurf(x,y,ts,z)
plt.show()
def dump_xyz(self):
[ print(*pt.xyz) for pt in self.points ]
def dump_latlonr(self):
[ print(*cart2geo(*pt.xyz)) for pt in self.points ]
class Triangle:
"""A triangle adjoining three adjacent points"""
def __init__(self, pts):
if not isinstance(pts, list):
raise TypeError('pts must be a list')
elif len(pts) !=3:
raise Exception('pts must be of length 3')
else:
self.pts = pts
class Point:
"""A 3D point on the mesh"""
def __init__(self, idx, xyz):
if type(idx) is not int:
raise TypeError('idx must be an integer')
elif not isinstance(xyz,np.ndarray):
raise TypeError('xyz must be a numpy array')
elif xyz.size != 3:
raise Exception('xyz must be of size 3')
else:
# ensure length equals 1 and add to list of points
self.xyz = (xyz/np.linalg.norm(xyz))
self.idx = idx
def calculate_npts(level):
n = 2**level
return 2 + 10 * n**2
def calculate_nfaces(level):
n = 2**level
return 20 * n**2
def cart2geo(x, y, z):
"""convert x y z cartesian coordinates to latitude longitude radius
xyz is a numpy array, a right handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
r = np.sqrt(x**2 + y**2 + z**2)
lon = np.rad2deg(np.arctan2(y,x))
lat = np.rad2deg(np.arcsin(z/r))
return lat, lon, r
def geo2cart(lat, lon, r):
"""convert latitude longitude radius to x y z cartesian coordinates
xyz is a numpy array, a right handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
x = r * np.cos(lon) * np.cos(lat)
y = r * np.sin(lon) * np.cos(lat)
z = r * np.sin(lat)
return x, y, z
# def xyzToLatLonR(xyz):
# trans = np.array([np.])
| en | 0.615121 | Subdivided icosahedral mesh generation # following: http://blog.andreaskahler.com/2009/06/creating-icosphere-mesh-in-code.html # hierarchy: # Icosphere -> Triangle -> Point Usage: IcoSphere(level) Maximum supported level = 8 get started with: >>> A = IcoSphere(3) ... A.plot3d() # maximum level for subdivision of the icosahedron ################################ # initialise level 1 icosahedron ################################ # golden ration # add vertices # make triangles # 5 faces around point 0 # 5 adjacent faces # 5 faces around point 3 # 5 adjacent faces ######################################## # refine triangles to desired mesh level ######################################## # replace triangle by 4 triangles # once looped thru all triangles overwrite self.triangles # check that npts and nfaces are as expected Add point to self.points return Point # does point already exist? # point exists # point is new Matplotlib 3D plot of mesh A triangle adjoining three adjacent points A 3D point on the mesh # ensure length equals 1 and add to list of points convert x y z cartesian coordinates to latitude longitude radius xyz is a numpy array, a right handed co-ordinate system is assumed with -- x-axis going through the equator at 0 degrees longitude -- y-axis going through the equator at 90 degrees longitude -- z-axis going through the north pole. convert latitude longitude radius to x y z cartesian coordinates xyz is a numpy array, a right handed co-ordinate system is assumed with -- x-axis going through the equator at 0 degrees longitude -- y-axis going through the equator at 90 degrees longitude -- z-axis going through the north pole. # def xyzToLatLonR(xyz): # trans = np.array([np.]) | 3.419049 | 3 |
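# Sketch, independent of the IcoSphere class above: calculate_npts/calculate_nfaces encode
# V = 2 + 10*(2**level)**2 vertices and F = 20*(2**level)**2 faces. For a closed triangle mesh
# E = 3*F/2, so Euler's formula V - E + F = 2 should hold at every subdivision level.
for level in range(5):
    n = 2 ** level
    V = 2 + 10 * n ** 2
    F = 20 * n ** 2
    E = 3 * F // 2
    assert V - E + F == 2, (level, V, E, F)
    print(f"level {level}: {V} vertices, {E} edges, {F} faces")
# Typical use of the class itself, as its docstring shows: A = IcoSphere(3); A.plot3d()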
src/main.py | Lidenbrock-ed/challenge-prework-backend-python | 0 | 7983 | <gh_stars>0
# Resolve the problem!!
import string
import random
SYMBOLS = list('!"#$%&\'()*+,-./:;?@[]^_`{|}~')
def generate_password():
# Start coding here
    letters_min = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
    letters_may = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
numbers = ['1','2', '3','4','5','6','7','8','9','0']
safe_password = letters_min + letters_may + numbers + SYMBOLS
final_password = []
for i in range(15):
generate_caracter = random.choice(safe_password)
final_password.append(generate_caracter)
final_password = "".join(final_password)
print(final_password)
return final_password
def validate(password):
if len(password) >= 8 and len(password) <= 16:
has_lowercase_letters = False
has_numbers = False
has_uppercase_letters = False
has_symbols = False
for char in password:
if char in string.ascii_lowercase:
has_lowercase_letters = True
break
for char in password:
if char in string.ascii_uppercase:
has_uppercase_letters = True
break
for char in password:
if char in string.digits:
has_numbers = True
break
for char in password:
if char in SYMBOLS:
has_symbols = True
break
if has_symbols and has_numbers and has_lowercase_letters and has_uppercase_letters:
return True
return False
def run():
password = generate_password()
if validate(password):
print('Secure Password')
else:
print('Insecure Password')
if __name__ == '__main__':
run()
| en | 0.676035 | # Resolve the problem!! # Start coding here | 3.699143 | 4 |
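# Sketch, separate from the exercise above: validate() accepts a password only if it is 8-16
# characters long and contains at least one lowercase letter, uppercase letter, digit and
# symbol. The same rule can be written compactly with any(); SYMBOLS mirrors the list above.
import string
SYMBOLS = list('!"#$%&\'()*+,-./:;?@[]^_`{|}~')
def is_secure(password):
    return (8 <= len(password) <= 16
            and any(c in string.ascii_lowercase for c in password)
            and any(c in string.ascii_uppercase for c in password)
            and any(c in string.digits for c in password)
            and any(c in SYMBOLS for c in password))
print(is_secure("aB3$efgh"), is_secure("short"))  # True False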
targets/baremetal-sdk/curie-bsp/setup.py | ideas-detoxes/jerryscript | 4,324 | 7984 | #!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fnmatch
import os
def build_soft_links(project_path, jerry_path):
""" Creates soft links into the @project_path. """
if not os.path.exists(project_path):
os.makedirs(project_path)
links = [
{ # arc
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'arc'),
'link_name': 'arc'
},
{ # include
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'include'),
'link_name': 'include'
},
{ # quark
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'quark'),
'link_name': 'quark'
},
{ # quark/jerryscript
'src': jerry_path,
'link_name': os.path.join('quark', 'jerryscript')
}
]
for link in links:
src = os.path.join(jerry_path, link['src'])
link_name = os.path.join(project_path, link['link_name'])
if not os.path.islink(link_name):
os.symlink(src, link_name)
print("Created symlink '{link_name}' -> '{src}'".format(src=src, link_name=link_name))
def find_sources(root_dir, sub_dir):
"""
Find .c and .S files inside the @root_dir/@sub_dir directory.
Note: the returned paths will be relative to the @root_dir directory.
"""
src_dir = os.path.join(root_dir, sub_dir)
matches = []
for root, dirnames, filenames in os.walk(src_dir):
for filename in fnmatch.filter(filenames, '*.[c|S]'):
file_path = os.path.join(root, filename)
relative_path = os.path.relpath(file_path, root_dir)
matches.append(relative_path)
return matches
def build_jerry_data(jerry_path):
"""
Build up a dictionary which contains the following items:
- sources: list of JerryScript sources which should be built.
- dirs: list of JerryScript dirs used.
- cflags: CFLAGS for the build.
"""
jerry_sources = []
jerry_dirs = set()
for sub_dir in ['jerry-core', 'jerry-math', os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'source')]:
for file in find_sources(os.path.normpath(jerry_path), sub_dir):
path = os.path.join('jerryscript', file)
jerry_sources.append(path)
jerry_dirs.add(os.path.split(path)[0])
jerry_cflags = [
'-DJERRY_GLOBAL_HEAP_SIZE=10',
'-DJERRY_NDEBUG',
'-DJERRY_DISABLE_HEAVY_DEBUG',
'-DJERRY_BUILTIN_NUMBER=0',
'-DJERRY_BUILTIN_STRING=0',
'-DJERRY_BUILTIN_BOOLEAN=0',
#'-DJERRY_BUILTIN_ERRORS=0',
'-DJERRY_BUILTIN_ARRAY=0',
'-DJERRY_BUILTIN_MATH=0',
'-DJERRY_BUILTIN_JSON=0',
'-DJERRY_BUILTIN_DATE=0',
'-DJERRY_BUILTIN_REGEXP=0',
'-DJERRY_BUILTIN_ANNEXB=0',
'-DJERRY_ESNEXT=0',
'-DJERRY_LCACHE=0',
'-DJERRY_PROPERTY_HASHMAP=0',
]
return {
'sources': jerry_sources,
'dirs': jerry_dirs,
'cflags': jerry_cflags,
}
def write_file(path, content):
""" Writes @content into the file at specified by the @path. """
norm_path = os.path.normpath(path)
with open(norm_path, "w+") as f:
f.write(content)
print("Wrote file '{0}'".format(norm_path))
def build_obj_y(source_list):
"""
Build obj-y additions from the @source_list.
Note: the input sources should have their file extensions.
"""
return '\n'.join(['obj-y += {0}.o'.format(os.path.splitext(fname)[0]) for fname in source_list])
def build_cflags_y(cflags_list):
"""
Build cflags-y additions from the @cflags_list.
Note: the input sources should have their file extensions.
"""
return '\n'.join(['cflags-y += {0}'.format(cflag) for cflag in cflags_list])
def build_mkdir(dir_list):
""" Build mkdir calls for each dir in the @dir_list. """
return '\n'.join(['\t$(AT)mkdir -p {0}'.format(os.path.join('$(OUT_SRC)', path)) for path in dir_list])
def create_root_kbuild(project_path):
""" Creates @project_path/Kbuild.mk file. """
root_kbuild_path = os.path.join(project_path, 'Kbuild.mk')
root_kbuild_content = '''
obj-$(CONFIG_QUARK_SE_ARC) += arc/
obj-$(CONFIG_QUARK_SE_QUARK) += quark/
'''
write_file(root_kbuild_path, root_kbuild_content)
def create_root_makefile(project_path):
""" Creates @project_path/Makefile file. """
root_makefile_path = os.path.join(project_path, 'Makefile')
root_makefile_content = '''
THIS_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
T := $(abspath $(THIS_DIR)/../..)
PROJECT := {project_name}
BOARD := curie_101
ifeq ($(filter curie_101, $(BOARD)),)
$(error The curie jerry sample application can only run on the curie_101 Board)
endif
BUILDVARIANT ?= debug
quark_DEFCONFIG = $(PROJECT_PATH)/quark/defconfig
arc_DEFCONFIG = $(PROJECT_PATH)/arc/defconfig
# Optional: set the default version
VERSION_MAJOR := 1
VERSION_MINOR := 0
VERSION_PATCH := 0
include $(T)/build/project.mk
'''.format(project_name=project_name)
write_file(root_makefile_path, root_makefile_content)
def create_arc_kbuild(project_path):
""" Creates @project_path/arc/Kbuild.mk file. """
arc_path = os.path.join(project_path, 'arc')
arc_kbuild_path = os.path.join(arc_path, 'Kbuild.mk')
arc_sources = find_sources(arc_path, '.')
arc_kbuild_content = build_obj_y(arc_sources)
write_file(arc_kbuild_path, arc_kbuild_content)
def create_quark_kbuild(project_path, jerry_path):
""" Creates @project_path/quark/Kbuild.mk file. """
quark_kbuild_path = os.path.join(project_path, 'quark', 'Kbuild.mk')
# Extract a few JerryScript related data
jerry_data = build_jerry_data(jerry_path)
jerry_objects = build_obj_y(jerry_data['sources'])
jerry_defines = jerry_data['cflags']
jerry_build_dirs = build_mkdir(jerry_data['dirs'])
quark_include_paths = [
'include',
'jerryscript',
os.path.join('jerryscript', 'jerry-math', 'include'),
os.path.join('jerryscript', 'targets', 'baremetal-sdk', 'curie-bsp', 'include')
] + list(jerry_data['dirs'])
quark_includes = [
'-Wno-error',
] + ['-I%s' % os.path.join(project_path, 'quark', path) for path in quark_include_paths]
quark_cflags = build_cflags_y(jerry_defines + quark_includes)
quark_kbuild_content = '''
{cflags}
obj-y += main.o
{objects}
build_dirs:
{dirs}
$(OUT_SRC): build_dirs
'''.format(objects=jerry_objects, cflags=quark_cflags, dirs=jerry_build_dirs)
write_file(quark_kbuild_path, quark_kbuild_content)
def main(curie_path, project_name, jerry_path):
project_path = os.path.join(curie_path, 'wearable_device_sw', 'projects', project_name)
build_soft_links(project_path, jerry_path)
create_root_kbuild(project_path)
create_root_makefile(project_path)
create_arc_kbuild(project_path)
create_quark_kbuild(project_path, jerry_path)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage:')
print('{script_name} [full or relative path of Curie_BSP]'.format(script_name=sys.argv[0]))
sys.exit(1)
project_name = 'curie_bsp_jerry'
file_dir = os.path.dirname(os.path.abspath(__file__))
jerry_path = os.path.join(file_dir, "..", "..", "..")
curie_path = os.path.join(os.getcwd(), sys.argv[1])
main(curie_path, project_name, jerry_path)
| en | 0.741096 | #!/usr/bin/env python # Copyright JS Foundation and other contributors, http://js.foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Creates soft links into the @project_path. # arc # include # quark # quark/jerryscript Find .c and .S files inside the @root_dir/@sub_dir directory. Note: the returned paths will be relative to the @root_dir directory. Build up a dictionary which contains the following items: - sources: list of JerryScript sources which should be built. - dirs: list of JerryScript dirs used. - cflags: CFLAGS for the build. #'-DJERRY_BUILTIN_ERRORS=0', Writes @content into the file at specified by the @path. Build obj-y additions from the @source_list. Note: the input sources should have their file extensions. Build cflags-y additions from the @cflags_list. Note: the input sources should have their file extensions. Build mkdir calls for each dir in the @dir_list. Creates @project_path/Kbuild.mk file. obj-$(CONFIG_QUARK_SE_ARC) += arc/ obj-$(CONFIG_QUARK_SE_QUARK) += quark/ Creates @project_path/Makefile file. THIS_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) T := $(abspath $(THIS_DIR)/../..) PROJECT := {project_name} BOARD := curie_101 ifeq ($(filter curie_101, $(BOARD)),) $(error The curie jerry sample application can only run on the curie_101 Board) endif BUILDVARIANT ?= debug quark_DEFCONFIG = $(PROJECT_PATH)/quark/defconfig arc_DEFCONFIG = $(PROJECT_PATH)/arc/defconfig # Optional: set the default version VERSION_MAJOR := 1 VERSION_MINOR := 0 VERSION_PATCH := 0 include $(T)/build/project.mk Creates @project_path/arc/Kbuild.mk file. Creates @project_path/quark/Kbuild.mk file. # Extract a few JerryScript related data {cflags} obj-y += main.o {objects} build_dirs: {dirs} $(OUT_SRC): build_dirs | 2.049077 | 2 |
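# Sketch of what the Kbuild helpers above generate (standalone copies for illustration only;
# the real functions in setup.py receive the JerryScript sources discovered by find_sources).
import os
def demo_obj_y(source_list):
    return '\n'.join('obj-y += {0}.o'.format(os.path.splitext(f)[0]) for f in source_list)
def demo_cflags_y(cflags_list):
    return '\n'.join('cflags-y += {0}'.format(flag) for flag in cflags_list)
print(demo_obj_y(['jerryscript/jerry-core/api/jerry.c']))  # obj-y += jerryscript/jerry-core/api/jerry.o
print(demo_cflags_y(['-DJERRY_NDEBUG']))                   # cflags-y += -DJERRY_NDEBUG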
pythonteste/aula08a.py | genisyskernel/cursoemvideo-python | 1 | 7985 | from math import sqrt
import emoji
num = int(input("Digite um número: "))
raiz = sqrt(num)
print("A raiz do número {0} é {1:.2f}.".format(num, raiz))
print(emoji.emojize("Hello World! :earth_americas:", use_aliases=True))
| none | 1 | 4.239592 | 4 |
|
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/17_features/numtrees_30/rule_20.py | apcarrik/kaggle | 0 | 7986 | <reponame>apcarrik/kaggle
| def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Maritalstatus", "instances": 34, "metric_value": 0.99, "depth": 1}
if obj[7]>0:
# {"feature": "Age", "instances": 25, "metric_value": 0.9896, "depth": 2}
if obj[6]<=5:
# {"feature": "Time", "instances": 21, "metric_value": 0.9984, "depth": 3}
if obj[2]<=1:
# {"feature": "Occupation", "instances": 13, "metric_value": 0.8905, "depth": 4}
if obj[10]<=13:
# {"feature": "Coupon", "instances": 11, "metric_value": 0.684, "depth": 5}
if obj[3]>0:
# {"feature": "Distance", "instances": 10, "metric_value": 0.469, "depth": 6}
if obj[16]<=2:
return 'False'
elif obj[16]>2:
# {"feature": "Coupon_validity", "instances": 2, "metric_value": 1.0, "depth": 7}
if obj[4]<=0:
return 'True'
elif obj[4]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[10]>13:
return 'True'
else: return 'True'
elif obj[2]>1:
# {"feature": "Occupation", "instances": 8, "metric_value": 0.8113, "depth": 4}
if obj[10]<=7:
return 'True'
elif obj[10]>7:
# {"feature": "Weather", "instances": 3, "metric_value": 0.9183, "depth": 5}
if obj[1]<=0:
return 'False'
elif obj[1]>0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[6]>5:
return 'True'
else: return 'True'
elif obj[7]<=0:
# {"feature": "Age", "instances": 9, "metric_value": 0.5033, "depth": 2}
if obj[6]>0:
return 'False'
elif obj[6]<=0:
return 'True'
else: return 'True'
else: return 'False' | en | 0.484483 | #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance # {"feature": "Maritalstatus", "instances": 34, "metric_value": 0.99, "depth": 1} # {"feature": "Age", "instances": 25, "metric_value": 0.9896, "depth": 2} # {"feature": "Time", "instances": 21, "metric_value": 0.9984, "depth": 3} # {"feature": "Occupation", "instances": 13, "metric_value": 0.8905, "depth": 4} # {"feature": "Coupon", "instances": 11, "metric_value": 0.684, "depth": 5} # {"feature": "Distance", "instances": 10, "metric_value": 0.469, "depth": 6} # {"feature": "Coupon_validity", "instances": 2, "metric_value": 1.0, "depth": 7} # {"feature": "Occupation", "instances": 8, "metric_value": 0.8113, "depth": 4} # {"feature": "Weather", "instances": 3, "metric_value": 0.9183, "depth": 5} # {"feature": "Age", "instances": 9, "metric_value": 0.5033, "depth": 2} | 2.743604 | 3 |
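# Usage sketch (not part of the generated rule file): findDecision expects a 17-element,
# label-encoded feature vector laid out as documented in the comment on its first line
# (obj[0]=Passanger ... obj[16]=Distance). The encoded values below are made up.
sample = [1, 1, 2, 3, 1, 0, 3, 1, 0, 2, 10, 4, 1, 2, 1, 0, 1]
print(findDecision(sample))  # prints the predicted label, 'True' or 'False'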
main.py | AdrienCourtois/DexiNed | 0 | 7987 |
from __future__ import print_function
import argparse
import os
import time, platform
import cv2
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info
from losses import *
from model import DexiNed
# from model0C import DexiNed
from utils import (image_normalization, save_image_batch_to_disk,
visualize_result)
IS_LINUX = True if platform.system()=="Linux" else False
def train_one_epoch(epoch, dataloader, model, criterion, optimizer, device,
log_interval_vis, tb_writer, args=None):
imgs_res_folder = os.path.join(args.output_dir, 'current_res')
os.makedirs(imgs_res_folder,exist_ok=True)
# Put model in training mode
model.train()
# l_weight = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.1] # for bdcn ori loss
# before [0.6,0.6,1.1,1.1,0.4,0.4,1.3] [0.4,0.4,1.1,1.1,0.6,0.6,1.3],[0.4,0.4,1.1,1.1,0.8,0.8,1.3]
l_weight = [0.7,0.7,1.1,1.1,0.3,0.3,1.3] # for bdcn loss theory 3 before the last 1.3 0.6-0..5
# l_weight = [[0.05, 2.], [0.05, 2.], [0.05, 2.],
# [0.1, 1.], [0.1, 1.], [0.1, 1.],
# [0.01, 4.]] # for cats loss
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device) # BxCxHxW
labels = sample_batched['labels'].to(device) # BxHxW
preds_list = model(images)
# loss = sum([criterion(preds, labels, l_w, device) for preds, l_w in zip(preds_list, l_weight)]) # cats_loss
loss = sum([criterion(preds, labels,l_w)/args.batch_size for preds, l_w in zip(preds_list,l_weight)]) # bdcn_loss
# loss = sum([criterion(preds, labels) for preds in preds_list]) #HED loss, rcf_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if tb_writer is not None:
tb_writer.add_scalar('loss',
loss.detach(),
(len(dataloader) * epoch + batch_id))
if batch_id % 5 == 0:
print(time.ctime(), 'Epoch: {0} Sample {1}/{2} Loss: {3}'
.format(epoch, batch_id, len(dataloader), loss.item()))
if batch_id % log_interval_vis == 0:
res_data = []
img = images.cpu().numpy()
res_data.append(img[2])
ed_gt = labels.cpu().numpy()
res_data.append(ed_gt[2])
# tmp_pred = tmp_preds[2,...]
for i in range(len(preds_list)):
tmp = preds_list[i]
tmp = tmp[2]
# print(tmp.shape)
tmp = torch.sigmoid(tmp).unsqueeze(dim=0)
tmp = tmp.cpu().detach().numpy()
res_data.append(tmp)
vis_imgs = visualize_result(res_data, arg=args)
del tmp, res_data
vis_imgs = cv2.resize(vis_imgs,
(int(vis_imgs.shape[1]*0.8), int(vis_imgs.shape[0]*0.8)))
img_test = 'Epoch: {0} Sample {1}/{2} Loss: {3}' \
.format(epoch, batch_id, len(dataloader), loss.item())
BLACK = (0, 0, 255)
font = cv2.FONT_HERSHEY_SIMPLEX
font_size = 1.1
font_color = BLACK
font_thickness = 2
x, y = 30, 30
vis_imgs = cv2.putText(vis_imgs,
img_test,
(x, y),
font, font_size, font_color, font_thickness, cv2.LINE_AA)
cv2.imwrite(os.path.join(imgs_res_folder, 'results.png'), vis_imgs)
def validate_one_epoch(epoch, dataloader, model, device, output_dir, arg=None):
# XXX This is not really validation, but testing
# Put model in eval mode
model.eval()
with torch.no_grad():
for _, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
# labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
preds = model(images)
# print('pred shape', preds[0].shape)
save_image_batch_to_disk(preds[-1],
output_dir,
file_names,img_shape=image_shape,
arg=arg)
def test(checkpoint_path, dataloader, model, device, output_dir, args):
if not os.path.isfile(checkpoint_path):
raise FileNotFoundError(
f"Checkpoint filte note found: {checkpoint_path}")
print(f"Restoring weights from: {checkpoint_path}")
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
# Put model in evaluation mode
model.eval()
with torch.no_grad():
total_duration = []
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
if not args.test_data == "CLASSIC":
labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
print(f"input tensor shape: {images.shape}")
# images = images[:, [2, 1, 0], :, :]
start_time = time.time()
preds = model(images)
tmp_duration = time.time() - start_time
total_duration.append(tmp_duration)
save_image_batch_to_disk(preds,
output_dir,
file_names,
image_shape,
arg=args)
torch.cuda.empty_cache()
total_duration = np.array(total_duration)
print("******** Testing finished in", args.test_data, "dataset. *****")
print("Average time per image: %f.4" % total_duration.mean(), "seconds")
print("Time spend in the Dataset: %f.4" % total_duration.sum(), "seconds")
def testPich(checkpoint_path, dataloader, model, device, output_dir, args):
# a test model plus the interganged channels
if not os.path.isfile(checkpoint_path):
raise FileNotFoundError(
f"Checkpoint filte note found: {checkpoint_path}")
print(f"Restoring weights from: {checkpoint_path}")
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
# Put model in evaluation mode
model.eval()
with torch.no_grad():
total_duration = []
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
if not args.test_data == "CLASSIC":
labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
print(f"input tensor shape: {images.shape}")
start_time = time.time()
# images2 = images[:, [1, 0, 2], :, :] #GBR
images2 = images[:, [2, 1, 0], :, :] # RGB
preds = model(images)
preds2 = model(images2)
tmp_duration = time.time() - start_time
total_duration.append(tmp_duration)
save_image_batch_to_disk([preds,preds2],
output_dir,
file_names,
image_shape,
arg=args, is_inchannel=True)
torch.cuda.empty_cache()
total_duration = np.array(total_duration)
print("******** Testing finished in", args.test_data, "dataset. *****")
print("Average time per image: %f.4" % total_duration.mean(), "seconds")
print("Time spend in the Dataset: %f.4" % total_duration.sum(), "seconds")
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description='DexiNed trainer.')
parser.add_argument('--choose_test_data',
type=int,
default=3,
help='Already set the dataset for testing choice: 0 - 8')
# ----------- test -------0--
TEST_DATA = DATASET_NAMES[parser.parse_args().choose_test_data] # max 8
test_inf = dataset_info(TEST_DATA, is_linux=IS_LINUX)
test_dir = test_inf['data_dir']
is_testing = True # current test _bdcnlossNew256-sd7-1.10.4p5
# Training settings
TRAIN_DATA = DATASET_NAMES[0] # BIPED=0
train_inf = dataset_info(TRAIN_DATA, is_linux=IS_LINUX)
train_dir = train_inf['data_dir']
# Data parameters
parser.add_argument('--input_dir',
type=str,
default=train_dir,
help='the path to the directory with the input data.')
parser.add_argument('--input_val_dir',
type=str,
default=test_inf['data_dir'],
help='the path to the directory with the input data for validation.')
parser.add_argument('--output_dir',
type=str,
default='checkpoints',
help='the path to output the results.')
parser.add_argument('--train_data',
type=str,
choices=DATASET_NAMES,
default=TRAIN_DATA,
help='Name of the dataset.')
parser.add_argument('--test_data',
type=str,
choices=DATASET_NAMES,
default=TEST_DATA,
help='Name of the dataset.')
parser.add_argument('--test_list',
type=str,
default=test_inf['test_list'],
help='Dataset sample indices list.')
parser.add_argument('--train_list',
type=str,
default=train_inf['train_list'],
help='Dataset sample indices list.')
parser.add_argument('--is_testing',type=bool,
default=is_testing,
help='Script in testing mode.')
parser.add_argument('--double_img',
type=bool,
default=True,
help='True: use same 2 imgs changing channels') # Just for test
parser.add_argument('--resume',
type=bool,
default=False,
help='use previous trained data') # Just for test
parser.add_argument('--checkpoint_data',
type=str,
default='14/14_model.pth',
                        help='Checkpoint path from which to restore model weights.')
parser.add_argument('--test_img_width',
type=int,
default=test_inf['img_width'],
help='Image width for testing.')
parser.add_argument('--test_img_height',
type=int,
default=test_inf['img_height'],
help='Image height for testing.')
parser.add_argument('--res_dir',
type=str,
default='result',
help='Result directory')
parser.add_argument('--log_interval_vis',
type=int,
default=50,
                        help='The number of batches to wait before logging training visualizations.')
parser.add_argument('--epochs',
type=int,
default=22,
metavar='N',
                        help='Number of training epochs (default: 22).')
parser.add_argument('--lr',
default=1e-4,
type=float,
help='Initial learning rate.')
parser.add_argument('--wd',
type=float,
default=1e-4,
metavar='WD',
help='weight decay (default: 1e-4)')
# parser.add_argument('--lr_stepsize',
# default=1e4,
# type=int,
# help='Learning rate step size.')
parser.add_argument('--batch_size',
type=int,
default=8,
metavar='B',
help='the mini-batch size (default: 8)')
parser.add_argument('--workers',
default=8,
type=int,
help='The number of workers for the dataloaders.')
parser.add_argument('--tensorboard',type=bool,
default=True,
help='Use Tensorboard for logging.'),
parser.add_argument('--img_width',
type=int,
default=480,
help='Image width for training.') # BIPED 400 BSDS 352 MDBD 480
parser.add_argument('--img_height',
type=int,
default=480,
help='Image height for training.') # BIPED 400 BSDS 352
parser.add_argument('--channel_swap',
default=[2, 1, 0],
type=int)
parser.add_argument('--crop_img',
default=True,
type=bool,
help='If true crop training images, else resize images to match image width and height.')
parser.add_argument('--mean_pixel_values',
default=[103.939,116.779,123.68, 137.86],
type=float) # [103.939,116.779,123.68] [104.00699, 116.66877, 122.67892]
args = parser.parse_args()
return args
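# Example invocation (illustrative; the script filename is an assumption, the flags are
# the ones defined above):
#
#   python main.py --choose_test_data 3 --double_img True
#
# Note that argparse's type=bool converts any non-empty string (including "False") to
# True, so these boolean flags cannot be switched off from the command line in the
# usual way; in practice their defaults in this file are what takes effect.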
def main(args):
"""Main function."""
print(f"Number of GPU's available: {torch.cuda.device_count()}")
print(f"Pytorch version: {torch.__version__}")
# Tensorboard summary writer
tb_writer = None
training_dir = os.path.join(args.output_dir,args.train_data)
os.makedirs(training_dir,exist_ok=True)
checkpoint_path = os.path.join(args.output_dir, args.train_data, args.checkpoint_data)
if args.tensorboard and not args.is_testing:
# from tensorboardX import SummaryWriter # previous torch version
        from torch.utils.tensorboard import SummaryWriter # for torch 1.4 or greater
tb_writer = SummaryWriter(log_dir=training_dir)
# Get computing device
device = torch.device('cpu' if torch.cuda.device_count() == 0
else 'cuda')
# Instantiate model and move it to the computing device
model = DexiNed().to(device)
# model = nn.DataParallel(model)
ini_epoch =0
if not args.is_testing:
if args.resume:
ini_epoch=17
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
dataset_train = BipedDataset(args.input_dir,
img_width=args.img_width,
img_height=args.img_height,
mean_bgr=args.mean_pixel_values[0:3] if len(
args.mean_pixel_values) == 4 else args.mean_pixel_values,
train_mode='train',
arg=args
)
dataloader_train = DataLoader(dataset_train,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers)
dataset_val = TestDataset(args.input_val_dir,
test_data=args.test_data,
img_width=args.test_img_width,
img_height=args.test_img_height,
mean_bgr=args.mean_pixel_values[0:3] if len(
args.mean_pixel_values) == 4 else args.mean_pixel_values,
test_list=args.test_list, arg=args
)
dataloader_val = DataLoader(dataset_val,
batch_size=1,
shuffle=False,
num_workers=args.workers)
# Testing
if args.is_testing:
output_dir = os.path.join(args.res_dir, args.train_data+"2"+ args.test_data)
print(f"output_dir: {output_dir}")
if args.double_img:
# predict twice an image changing channels, then mix those results
testPich(checkpoint_path, dataloader_val, model, device, output_dir, args)
else:
test(checkpoint_path, dataloader_val, model, device, output_dir, args)
return
criterion = bdcn_loss2
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.wd)
# lr_schd = lr_scheduler.StepLR(optimizer, step_size=args.lr_stepsize,
# gamma=args.lr_gamma)
# Main training loop
seed=1021
for epoch in range(ini_epoch,args.epochs):
if epoch%7==0:
seed = seed+1000
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
print("------ Random seed applied-------------")
# Create output directories
output_dir_epoch = os.path.join(args.output_dir,args.train_data, str(epoch))
img_test_dir = os.path.join(output_dir_epoch, args.test_data + '_res')
os.makedirs(output_dir_epoch,exist_ok=True)
os.makedirs(img_test_dir,exist_ok=True)
train_one_epoch(epoch,
dataloader_train,
model,
criterion,
optimizer,
device,
args.log_interval_vis,
tb_writer,
args=args)
validate_one_epoch(epoch,
dataloader_val,
model,
device,
img_test_dir,
arg=args)
# Save model after end of every epoch
torch.save(model.module.state_dict() if hasattr(model, "module") else model.state_dict(),
os.path.join(output_dir_epoch, '{0}_model.pth'.format(epoch)))
if __name__ == '__main__':
args = parse_args()
main(args)
|
from __future__ import print_function
import argparse
import os
import time, platform
import cv2
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info
from losses import *
from model import DexiNed
# from model0C import DexiNed
from utils import (image_normalization, save_image_batch_to_disk,
visualize_result)
IS_LINUX = True if platform.system()=="Linux" else False
def train_one_epoch(epoch, dataloader, model, criterion, optimizer, device,
log_interval_vis, tb_writer, args=None):
imgs_res_folder = os.path.join(args.output_dir, 'current_res')
os.makedirs(imgs_res_folder,exist_ok=True)
# Put model in training mode
model.train()
# l_weight = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.1] # for bdcn ori loss
# before [0.6,0.6,1.1,1.1,0.4,0.4,1.3] [0.4,0.4,1.1,1.1,0.6,0.6,1.3],[0.4,0.4,1.1,1.1,0.8,0.8,1.3]
l_weight = [0.7,0.7,1.1,1.1,0.3,0.3,1.3] # for bdcn loss theory 3 before the last 1.3 0.6-0..5
# l_weight = [[0.05, 2.], [0.05, 2.], [0.05, 2.],
# [0.1, 1.], [0.1, 1.], [0.1, 1.],
# [0.01, 4.]] # for cats loss
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device) # BxCxHxW
labels = sample_batched['labels'].to(device) # BxHxW
preds_list = model(images)
# loss = sum([criterion(preds, labels, l_w, device) for preds, l_w in zip(preds_list, l_weight)]) # cats_loss
loss = sum([criterion(preds, labels,l_w)/args.batch_size for preds, l_w in zip(preds_list,l_weight)]) # bdcn_loss
# loss = sum([criterion(preds, labels) for preds in preds_list]) #HED loss, rcf_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if tb_writer is not None:
tb_writer.add_scalar('loss',
loss.detach(),
(len(dataloader) * epoch + batch_id))
if batch_id % 5 == 0:
print(time.ctime(), 'Epoch: {0} Sample {1}/{2} Loss: {3}'
.format(epoch, batch_id, len(dataloader), loss.item()))
if batch_id % log_interval_vis == 0:
res_data = []
img = images.cpu().numpy()
res_data.append(img[2])
ed_gt = labels.cpu().numpy()
res_data.append(ed_gt[2])
# tmp_pred = tmp_preds[2,...]
for i in range(len(preds_list)):
tmp = preds_list[i]
tmp = tmp[2]
# print(tmp.shape)
tmp = torch.sigmoid(tmp).unsqueeze(dim=0)
tmp = tmp.cpu().detach().numpy()
res_data.append(tmp)
vis_imgs = visualize_result(res_data, arg=args)
del tmp, res_data
vis_imgs = cv2.resize(vis_imgs,
(int(vis_imgs.shape[1]*0.8), int(vis_imgs.shape[0]*0.8)))
img_test = 'Epoch: {0} Sample {1}/{2} Loss: {3}' \
.format(epoch, batch_id, len(dataloader), loss.item())
BLACK = (0, 0, 255)
font = cv2.FONT_HERSHEY_SIMPLEX
font_size = 1.1
font_color = BLACK
font_thickness = 2
x, y = 30, 30
vis_imgs = cv2.putText(vis_imgs,
img_test,
(x, y),
font, font_size, font_color, font_thickness, cv2.LINE_AA)
cv2.imwrite(os.path.join(imgs_res_folder, 'results.png'), vis_imgs)
def validate_one_epoch(epoch, dataloader, model, device, output_dir, arg=None):
# XXX This is not really validation, but testing
# Put model in eval mode
model.eval()
with torch.no_grad():
for _, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
# labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
preds = model(images)
# print('pred shape', preds[0].shape)
save_image_batch_to_disk(preds[-1],
output_dir,
file_names,img_shape=image_shape,
arg=arg)
def test(checkpoint_path, dataloader, model, device, output_dir, args):
if not os.path.isfile(checkpoint_path):
raise FileNotFoundError(
f"Checkpoint filte note found: {checkpoint_path}")
print(f"Restoring weights from: {checkpoint_path}")
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
# Put model in evaluation mode
model.eval()
with torch.no_grad():
total_duration = []
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
if not args.test_data == "CLASSIC":
labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
print(f"input tensor shape: {images.shape}")
# images = images[:, [2, 1, 0], :, :]
start_time = time.time()
preds = model(images)
tmp_duration = time.time() - start_time
total_duration.append(tmp_duration)
save_image_batch_to_disk(preds,
output_dir,
file_names,
image_shape,
arg=args)
torch.cuda.empty_cache()
total_duration = np.array(total_duration)
print("******** Testing finished in", args.test_data, "dataset. *****")
print("Average time per image: %f.4" % total_duration.mean(), "seconds")
print("Time spend in the Dataset: %f.4" % total_duration.sum(), "seconds")
def testPich(checkpoint_path, dataloader, model, device, output_dir, args):
    # Test the model with both the original and channel-interchanged inputs
if not os.path.isfile(checkpoint_path):
raise FileNotFoundError(
f"Checkpoint filte note found: {checkpoint_path}")
print(f"Restoring weights from: {checkpoint_path}")
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
# Put model in evaluation mode
model.eval()
with torch.no_grad():
total_duration = []
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
if not args.test_data == "CLASSIC":
labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
print(f"input tensor shape: {images.shape}")
start_time = time.time()
# images2 = images[:, [1, 0, 2], :, :] #GBR
images2 = images[:, [2, 1, 0], :, :] # RGB
preds = model(images)
preds2 = model(images2)
tmp_duration = time.time() - start_time
total_duration.append(tmp_duration)
save_image_batch_to_disk([preds,preds2],
output_dir,
file_names,
image_shape,
arg=args, is_inchannel=True)
torch.cuda.empty_cache()
total_duration = np.array(total_duration)
print("******** Testing finished in", args.test_data, "dataset. *****")
print("Average time per image: %f.4" % total_duration.mean(), "seconds")
print("Time spend in the Dataset: %f.4" % total_duration.sum(), "seconds")
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description='DexiNed trainer.')
parser.add_argument('--choose_test_data',
type=int,
default=3,
                        help='Index of the dataset to use for testing (choices: 0-8).')
# ----------- test -------0--
TEST_DATA = DATASET_NAMES[parser.parse_args().choose_test_data] # max 8
test_inf = dataset_info(TEST_DATA, is_linux=IS_LINUX)
test_dir = test_inf['data_dir']
is_testing = True # current test _bdcnlossNew256-sd7-1.10.4p5
# Training settings
TRAIN_DATA = DATASET_NAMES[0] # BIPED=0
train_inf = dataset_info(TRAIN_DATA, is_linux=IS_LINUX)
train_dir = train_inf['data_dir']
# Data parameters
parser.add_argument('--input_dir',
type=str,
default=train_dir,
help='the path to the directory with the input data.')
parser.add_argument('--input_val_dir',
type=str,
default=test_inf['data_dir'],
help='the path to the directory with the input data for validation.')
parser.add_argument('--output_dir',
type=str,
default='checkpoints',
help='the path to output the results.')
parser.add_argument('--train_data',
type=str,
choices=DATASET_NAMES,
default=TRAIN_DATA,
help='Name of the dataset.')
parser.add_argument('--test_data',
type=str,
choices=DATASET_NAMES,
default=TEST_DATA,
help='Name of the dataset.')
parser.add_argument('--test_list',
type=str,
default=test_inf['test_list'],
help='Dataset sample indices list.')
parser.add_argument('--train_list',
type=str,
default=train_inf['train_list'],
help='Dataset sample indices list.')
parser.add_argument('--is_testing',type=bool,
default=is_testing,
help='Script in testing mode.')
parser.add_argument('--double_img',
type=bool,
default=True,
                        help='If True, run inference twice per image with swapped channels and fuse the results.') # Just for test
parser.add_argument('--resume',
type=bool,
default=False,
                        help='Resume training from previously saved weights.') # Just for test
parser.add_argument('--checkpoint_data',
type=str,
default='14/14_model.pth',
                        help='Checkpoint path from which to restore the model weights.')
parser.add_argument('--test_img_width',
type=int,
default=test_inf['img_width'],
help='Image width for testing.')
parser.add_argument('--test_img_height',
type=int,
default=test_inf['img_height'],
help='Image height for testing.')
parser.add_argument('--res_dir',
type=str,
default='result',
help='Result directory')
parser.add_argument('--log_interval_vis',
type=int,
default=50,
                        help='The number of batches to wait before logging training visualizations.')
parser.add_argument('--epochs',
type=int,
default=22,
metavar='N',
                        help='Number of training epochs (default: 22).')
parser.add_argument('--lr',
default=1e-4,
type=float,
help='Initial learning rate.')
parser.add_argument('--wd',
type=float,
default=1e-4,
metavar='WD',
help='weight decay (default: 1e-4)')
# parser.add_argument('--lr_stepsize',
# default=1e4,
# type=int,
# help='Learning rate step size.')
parser.add_argument('--batch_size',
type=int,
default=8,
metavar='B',
help='the mini-batch size (default: 8)')
parser.add_argument('--workers',
default=8,
type=int,
help='The number of workers for the dataloaders.')
parser.add_argument('--tensorboard',type=bool,
default=True,
help='Use Tensorboard for logging.'),
parser.add_argument('--img_width',
type=int,
default=480,
help='Image width for training.') # BIPED 400 BSDS 352 MDBD 480
parser.add_argument('--img_height',
type=int,
default=480,
help='Image height for training.') # BIPED 400 BSDS 352
parser.add_argument('--channel_swap',
default=[2, 1, 0],
type=int)
parser.add_argument('--crop_img',
default=True,
type=bool,
help='If true crop training images, else resize images to match image width and height.')
parser.add_argument('--mean_pixel_values',
default=[103.939,116.779,123.68, 137.86],
type=float) # [103.939,116.779,123.68] [104.00699, 116.66877, 122.67892]
args = parser.parse_args()
return args
def main(args):
"""Main function."""
print(f"Number of GPU's available: {torch.cuda.device_count()}")
print(f"Pytorch version: {torch.__version__}")
# Tensorboard summary writer
tb_writer = None
training_dir = os.path.join(args.output_dir,args.train_data)
os.makedirs(training_dir,exist_ok=True)
checkpoint_path = os.path.join(args.output_dir, args.train_data, args.checkpoint_data)
if args.tensorboard and not args.is_testing:
# from tensorboardX import SummaryWriter # previous torch version
        from torch.utils.tensorboard import SummaryWriter # for torch 1.4 or greater
tb_writer = SummaryWriter(log_dir=training_dir)
# Get computing device
device = torch.device('cpu' if torch.cuda.device_count() == 0
else 'cuda')
# Instantiate model and move it to the computing device
model = DexiNed().to(device)
# model = nn.DataParallel(model)
ini_epoch =0
if not args.is_testing:
if args.resume:
ini_epoch=17
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
dataset_train = BipedDataset(args.input_dir,
img_width=args.img_width,
img_height=args.img_height,
mean_bgr=args.mean_pixel_values[0:3] if len(
args.mean_pixel_values) == 4 else args.mean_pixel_values,
train_mode='train',
arg=args
)
dataloader_train = DataLoader(dataset_train,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers)
dataset_val = TestDataset(args.input_val_dir,
test_data=args.test_data,
img_width=args.test_img_width,
img_height=args.test_img_height,
mean_bgr=args.mean_pixel_values[0:3] if len(
args.mean_pixel_values) == 4 else args.mean_pixel_values,
test_list=args.test_list, arg=args
)
dataloader_val = DataLoader(dataset_val,
batch_size=1,
shuffle=False,
num_workers=args.workers)
# Testing
if args.is_testing:
output_dir = os.path.join(args.res_dir, args.train_data+"2"+ args.test_data)
print(f"output_dir: {output_dir}")
if args.double_img:
# predict twice an image changing channels, then mix those results
testPich(checkpoint_path, dataloader_val, model, device, output_dir, args)
else:
test(checkpoint_path, dataloader_val, model, device, output_dir, args)
return
criterion = bdcn_loss2
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.wd)
# lr_schd = lr_scheduler.StepLR(optimizer, step_size=args.lr_stepsize,
# gamma=args.lr_gamma)
# Main training loop
seed=1021
for epoch in range(ini_epoch,args.epochs):
if epoch%7==0:
seed = seed+1000
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
print("------ Random seed applied-------------")
# Create output directories
output_dir_epoch = os.path.join(args.output_dir,args.train_data, str(epoch))
img_test_dir = os.path.join(output_dir_epoch, args.test_data + '_res')
os.makedirs(output_dir_epoch,exist_ok=True)
os.makedirs(img_test_dir,exist_ok=True)
train_one_epoch(epoch,
dataloader_train,
model,
criterion,
optimizer,
device,
args.log_interval_vis,
tb_writer,
args=args)
validate_one_epoch(epoch,
dataloader_val,
model,
device,
img_test_dir,
arg=args)
# Save model after end of every epoch
torch.save(model.module.state_dict() if hasattr(model, "module") else model.state_dict(),
os.path.join(output_dir_epoch, '{0}_model.pth'.format(epoch)))
if __name__ == '__main__':
args = parse_args()
main(args)
| en | 0.662844 | # from model0C import DexiNed # Put model in training mode # l_weight = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.1] # for bdcn ori loss # before [0.6,0.6,1.1,1.1,0.4,0.4,1.3] [0.4,0.4,1.1,1.1,0.6,0.6,1.3],[0.4,0.4,1.1,1.1,0.8,0.8,1.3] # for bdcn loss theory 3 before the last 1.3 0.6-0..5 # l_weight = [[0.05, 2.], [0.05, 2.], [0.05, 2.], # [0.1, 1.], [0.1, 1.], [0.1, 1.], # [0.01, 4.]] # for cats loss # BxCxHxW # BxHxW # loss = sum([criterion(preds, labels, l_w, device) for preds, l_w in zip(preds_list, l_weight)]) # cats_loss # bdcn_loss # loss = sum([criterion(preds, labels) for preds in preds_list]) #HED loss, rcf_loss # tmp_pred = tmp_preds[2,...] # print(tmp.shape) # XXX This is not really validation, but testing # Put model in eval mode # labels = sample_batched['labels'].to(device) # print('pred shape', preds[0].shape) # Put model in evaluation mode # images = images[:, [2, 1, 0], :, :] # a test model plus the interganged channels # Put model in evaluation mode # images2 = images[:, [1, 0, 2], :, :] #GBR # RGB Parse command line arguments. # ----------- test -------0-- # max 8 # current test _bdcnlossNew256-sd7-1.10.4p5 # Training settings # BIPED=0 # Data parameters # Just for test # Just for test # parser.add_argument('--lr_stepsize', # default=1e4, # type=int, # help='Learning rate step size.') # BIPED 400 BSDS 352 MDBD 480 # BIPED 400 BSDS 352 # [103.939,116.779,123.68] [104.00699, 116.66877, 122.67892] Main function. # Tensorboard summary writer # from tensorboardX import SummaryWriter # previous torch version # for torch 1.4 or greather # Get computing device # Instantiate model and move it to the computing device # model = nn.DataParallel(model) # Testing # predict twice an image changing channels, then mix those results # lr_schd = lr_scheduler.StepLR(optimizer, step_size=args.lr_stepsize, # gamma=args.lr_gamma) # Main training loop # Create output directories # Save model after end of every epoch | 2.191823 | 2 |
src/core/build/pretreat_targets.py | chaoyangcui/test_developertest | 0 | 7988 | <gh_stars>0
#!/usr/bin/env python3
# coding=utf-8
#
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import shutil
from core.constants import JsTestConst
from xdevice import platform_logger
LOG = platform_logger("PretreatTargets")
##############################################################################
##############################################################################
class PretreatTargets(object):
def __init__(self, target_list):
self.path_list = []
self.name_list = []
self.target_list = target_list
def pretreat_targets_from_list(self):
path_list, name_list = self._parse_target_info()
self._pretreat_by_target_name(path_list, name_list)
def disassemble_targets_from_list(self):
self._disassemble_by_target_name(self.path_list, self.name_list)
def _parse_target_info(self):
path_list = []
name_list = []
for line in self.target_list:
path = line.split(':')[0][2:]
name = line.split(':')[1].split('(')[0]
path_list.append(path)
name_list.append(name)
return path_list, name_list
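    # Illustration of the parsing above (the GN label shown is hypothetical): an entry
    # such as "//test/example/calc:CalcJsTest(//build/toolchain:host)" yields
    # path "test/example/calc" (leading "//" stripped) and name "CalcJsTest"
    # (everything before the first "(").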
def _pretreat_by_target_name(self, path_list, name_list):
for name, path in zip(name_list, path_list):
if name.endswith("JsTest"):
if self._pretreat_js_target(path, name):
self.path_list.append(path)
self.name_list.append(name)
LOG.info("js test %s pretreat success" % name)
def _pretreat_js_target(self, path, name):
template_path = os.path.join(sys.framework_root_dir, "libs",
"js_template", "src")
target_path = os.path.join(sys.source_code_root_path, path)
config_path = os.path.join(target_path, "config.json")
gn_path = os.path.join(target_path, "BUILD.gn")
gn_bak_path = os.path.join(target_path, "BuildBak")
test_path = os.path.join(target_path, "src", "main", "js",
"default", "test")
if not os.path.exists(config_path):
LOG.error("js test needs config.json file")
return False
if not os.path.exists(gn_path):
LOG.error("js test needs BUILD.gn file")
return False
LOG.info("target_path: %s" % target_path)
#modify BUILD.gn file to compile hap
output_path = self._parse_output_path_in_gn(gn_path)
if output_path == "":
LOG.error(" BUILD.gn needs 'module_output_path'")
return
os.rename(gn_path, gn_bak_path)
template_args = {'output_path': output_path, 'suite_name': name}
with open(gn_path, 'w') as filehandle:
filehandle.write(JsTestConst.BUILD_GN_FILE_TEMPLATE %
template_args)
#copy js hap template to target path
shutil.copytree(template_path, os.path.join(target_path, "src"))
shutil.copy(config_path, os.path.join(target_path, "src", "main"))
file_name = os.listdir(target_path)
for file in file_name:
if file.endswith(".js"):
LOG.info("file: %s" % file)
shutil.copy(os.path.join(target_path, file), test_path)
with open(os.path.join(test_path, "List.test.js"), 'a') \
as list_data:
list_data.write("require('./%s')" % file)
#modify i18n json file
i18n_path = os.path.join(target_path, "src", "main", "js",
"default", "i18n", "en-US.json")
json_data = ""
with open(i18n_path, 'r') as i18n_file:
lines = i18n_file.readlines()
for line in lines:
if "TargetName" in line:
line = line.replace("TargetName", name)
json_data += line
with open(i18n_path, 'w') as i18n_file:
i18n_file.write(json_data)
return True
def _parse_output_path_in_gn(self, gn_path):
output_path = ""
with open(gn_path, 'r') as gn_file:
for line in gn_file.readlines():
if line.startswith("module_output_path"):
output_path = line.split()[2].strip('"')
break
return output_path
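    # Example of the BUILD.gn line this method scans for (the value is illustrative):
    #     module_output_path = "developertest/calc"
    # line.split()[2] selects the quoted value and strip('"') drops the quotes, so
    # output_path becomes developertest/calc.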
def _disassemble_by_target_name(self, path_list, name_list):
for name, path in zip(name_list, path_list):
LOG.info("name: %s path: %s" % (name, path))
if name.endswith("JsTest"):
self._disassemble_js_target(path, name)
LOG.info("js test %s disassemble success" % name)
def _disassemble_js_target(self, path, name):
target_path = os.path.join(sys.source_code_root_path, path)
src_path = os.path.join(target_path, "src")
gn_path = os.path.join(target_path, "BUILD.gn")
gn_bak_path = os.path.join(target_path, "BuildBak")
if os.path.exists(src_path):
shutil.rmtree(src_path)
if os.path.exists(gn_path) and os.path.exists(gn_bak_path):
os.remove(gn_path)
os.rename(gn_bak_path, gn_path)
##############################################################################
##############################################################################
| #!/usr/bin/env python3
# coding=utf-8
#
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import shutil
from core.constants import JsTestConst
from xdevice import platform_logger
LOG = platform_logger("PretreatTargets")
##############################################################################
##############################################################################
class PretreatTargets(object):
def __init__(self, target_list):
self.path_list = []
self.name_list = []
self.target_list = target_list
def pretreat_targets_from_list(self):
path_list, name_list = self._parse_target_info()
self._pretreat_by_target_name(path_list, name_list)
def disassemble_targets_from_list(self):
self._disassemble_by_target_name(self.path_list, self.name_list)
def _parse_target_info(self):
path_list = []
name_list = []
for line in self.target_list:
path = line.split(':')[0][2:]
name = line.split(':')[1].split('(')[0]
path_list.append(path)
name_list.append(name)
return path_list, name_list
def _pretreat_by_target_name(self, path_list, name_list):
for name, path in zip(name_list, path_list):
if name.endswith("JsTest"):
if self._pretreat_js_target(path, name):
self.path_list.append(path)
self.name_list.append(name)
LOG.info("js test %s pretreat success" % name)
def _pretreat_js_target(self, path, name):
template_path = os.path.join(sys.framework_root_dir, "libs",
"js_template", "src")
target_path = os.path.join(sys.source_code_root_path, path)
config_path = os.path.join(target_path, "config.json")
gn_path = os.path.join(target_path, "BUILD.gn")
gn_bak_path = os.path.join(target_path, "BuildBak")
test_path = os.path.join(target_path, "src", "main", "js",
"default", "test")
if not os.path.exists(config_path):
LOG.error("js test needs config.json file")
return False
if not os.path.exists(gn_path):
LOG.error("js test needs BUILD.gn file")
return False
LOG.info("target_path: %s" % target_path)
#modify BUILD.gn file to compile hap
output_path = self._parse_output_path_in_gn(gn_path)
if output_path == "":
LOG.error(" BUILD.gn needs 'module_output_path'")
return
os.rename(gn_path, gn_bak_path)
template_args = {'output_path': output_path, 'suite_name': name}
with open(gn_path, 'w') as filehandle:
filehandle.write(JsTestConst.BUILD_GN_FILE_TEMPLATE %
template_args)
#copy js hap template to target path
shutil.copytree(template_path, os.path.join(target_path, "src"))
shutil.copy(config_path, os.path.join(target_path, "src", "main"))
file_name = os.listdir(target_path)
for file in file_name:
if file.endswith(".js"):
LOG.info("file: %s" % file)
shutil.copy(os.path.join(target_path, file), test_path)
with open(os.path.join(test_path, "List.test.js"), 'a') \
as list_data:
list_data.write("require('./%s')" % file)
#modify i18n json file
i18n_path = os.path.join(target_path, "src", "main", "js",
"default", "i18n", "en-US.json")
json_data = ""
with open(i18n_path, 'r') as i18n_file:
lines = i18n_file.readlines()
for line in lines:
if "TargetName" in line:
line = line.replace("TargetName", name)
json_data += line
with open(i18n_path, 'w') as i18n_file:
i18n_file.write(json_data)
return True
def _parse_output_path_in_gn(self, gn_path):
output_path = ""
with open(gn_path, 'r') as gn_file:
for line in gn_file.readlines():
if line.startswith("module_output_path"):
output_path = line.split()[2].strip('"')
break
return output_path
def _disassemble_by_target_name(self, path_list, name_list):
for name, path in zip(name_list, path_list):
LOG.info("name: %s path: %s" % (name, path))
if name.endswith("JsTest"):
self._disassemble_js_target(path, name)
LOG.info("js test %s disassemble success" % name)
def _disassemble_js_target(self, path, name):
target_path = os.path.join(sys.source_code_root_path, path)
src_path = os.path.join(target_path, "src")
gn_path = os.path.join(target_path, "BUILD.gn")
gn_bak_path = os.path.join(target_path, "BuildBak")
if os.path.exists(src_path):
shutil.rmtree(src_path)
if os.path.exists(gn_path) and os.path.exists(gn_bak_path):
os.remove(gn_path)
os.rename(gn_bak_path, gn_path)
##############################################################################
############################################################################## | en | 0.343732 | #!/usr/bin/env python3 # coding=utf-8 # # Copyright (c) 2021 Huawei Device Co., Ltd. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ############################################################################## ############################################################################## #modify BUILD.gn file to compile hap #copy js hap template to target path #modify i18n json file ############################################################################## ############################################################################## | 2.132674 | 2 |
tests/testapp/urls.py | lukaszbanasiak/django-contrib-comments | 1 | 7989 | from __future__ import absolute_import
from django.conf.urls import patterns, url
from django_comments.feeds import LatestCommentFeed
from custom_comments import views
feeds = {
'comments': LatestCommentFeed,
}
urlpatterns = patterns('',
url(r'^post/$', views.custom_submit_comment),
url(r'^flag/(\d+)/$', views.custom_flag_comment),
url(r'^delete/(\d+)/$', views.custom_delete_comment),
url(r'^approve/(\d+)/$', views.custom_approve_comment),
url(r'^cr/(\d+)/(.+)/$', 'django.contrib.contenttypes.views.shortcut', name='comments-url-redirect'),
)
urlpatterns += patterns('',
(r'^rss/comments/$', LatestCommentFeed()),
)
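# Resulting URL space (paths shown for illustration): /post/, /flag/<id>/, /delete/<id>/,
# /approve/<id>/, the contenttypes shortcut /cr/<content_type_id>/<object_id>/ (named
# "comments-url-redirect"), and the RSS feed at /rss/comments/. The patterns()/string-view
# style used here targets the older Django releases this test app supports.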
| from __future__ import absolute_import
from django.conf.urls import patterns, url
from django_comments.feeds import LatestCommentFeed
from custom_comments import views
feeds = {
'comments': LatestCommentFeed,
}
urlpatterns = patterns('',
url(r'^post/$', views.custom_submit_comment),
url(r'^flag/(\d+)/$', views.custom_flag_comment),
url(r'^delete/(\d+)/$', views.custom_delete_comment),
url(r'^approve/(\d+)/$', views.custom_approve_comment),
url(r'^cr/(\d+)/(.+)/$', 'django.contrib.contenttypes.views.shortcut', name='comments-url-redirect'),
)
urlpatterns += patterns('',
(r'^rss/comments/$', LatestCommentFeed()),
)
| none | 1 | 1.829459 | 2 |
|
pyTorch/utils.py | rajasekar-venkatesan/Deep_Learning | 0 | 7990 | <reponame>rajasekar-venkatesan/Deep_Learning
import pandas as pd, numpy as np
from sklearn.preprocessing import OneHotEncoder
author_int_dict = {'EAP':0,'HPL':1,'MWS':2}
def load_train_test_data (num_samples=None):
train_data = pd.read_csv('../data/train.csv')
train_data['author'] = [author_int_dict[a] for a in train_data['author'].tolist()]
test_data = pd.read_csv('../data/test.csv')
return train_data[:num_samples],test_data[:num_samples]
def categorical_labeler (labels):
labels = labels.reshape(-1, 1)
#labels = OneHotEncoder().fit_transform(labels).todense()
labels = np.array(labels, dtype=np.int64)
return labels
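# Example (illustrative): categorical_labeler(np.array([0, 2, 1])) returns a column
# vector of shape (3, 1) and dtype int64, i.e. [[0], [2], [1]]; the commented-out
# OneHotEncoder line would instead yield a dense one-hot matrix of shape (3, 3).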
if __name__ == '__main__':
pass | import pandas as pd, numpy as np
from sklearn.preprocessing import OneHotEncoder
author_int_dict = {'EAP':0,'HPL':1,'MWS':2}
def load_train_test_data (num_samples=None):
train_data = pd.read_csv('../data/train.csv')
train_data['author'] = [author_int_dict[a] for a in train_data['author'].tolist()]
test_data = pd.read_csv('../data/test.csv')
return train_data[:num_samples],test_data[:num_samples]
def categorical_labeler (labels):
labels = labels.reshape(-1, 1)
#labels = OneHotEncoder().fit_transform(labels).todense()
labels = np.array(labels, dtype=np.int64)
return labels
if __name__ == '__main__':
pass | en | 0.208363 | #labels = OneHotEncoder().fit_transform(labels).todense() | 3.165597 | 3 |
example/dec/dec.py | TheBurningCrusade/A_mxnet | 159 | 7991 | # pylint: skip-file
import sys
import os
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path
import mxnet as mx
import numpy as np
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
def cluster_acc(Y_pred, Y):
from sklearn.utils.linear_assignment_ import linear_assignment
assert Y_pred.size == Y.size
D = max(Y_pred.max(), Y.max())+1
w = np.zeros((D,D), dtype=np.int64)
for i in range(Y_pred.size):
w[Y_pred[i], Y[i]] += 1
ind = linear_assignment(w.max() - w)
return sum([w[i,j] for i,j in ind])*1.0/Y_pred.size, w
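# Worked example (illustrative): with Y_pred = [0, 0, 1, 1] and Y = [1, 1, 0, 0] the
# contingency matrix w is [[0, 2], [2, 0]]; the Hungarian assignment maps predicted
# cluster 0 -> true label 1 and 1 -> 0, so the returned accuracy is (2 + 2) / 4 = 1.0
# even though the raw label ids disagree.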
class DECModel(model.MXModel):
class DECLoss(mx.operator.NumpyOp):
def __init__(self, num_centers, alpha):
super(DECModel.DECLoss, self).__init__(need_top_grad=False)
self.num_centers = num_centers
self.alpha = alpha
def forward(self, in_data, out_data):
z = in_data[0]
mu = in_data[1]
q = out_data[0]
self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
q[:] = self.mask**((self.alpha+1.0)/2.0)
q[:] = (q.T/q.sum(axis=1)).T
def backward(self, out_grad, in_data, out_data, in_grad):
q = out_data[0]
z = in_data[0]
mu = in_data[1]
p = in_data[2]
dz = in_grad[0]
dmu = in_grad[1]
self.mask *= (self.alpha+1.0)/self.alpha*(p-q)
dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu)
dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z)
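        # After the in-place update above, self.mask holds
        #     (alpha + 1) / alpha * (p_ij - q_ij) / (1 + ||z_i - mu_j||^2 / alpha),
        # so dz_i = sum_j mask_ij * (z_i - mu_j) and dmu_j = sum_i mask_ij * (mu_j - z_i),
        # i.e. the KL(P||Q) gradients of the DEC objective (Xie et al., 2016).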
def infer_shape(self, in_shape):
assert len(in_shape) == 3
assert len(in_shape[0]) == 2
input_shape = in_shape[0]
label_shape = (input_shape[0], self.num_centers)
mu_shape = (self.num_centers, input_shape[1])
out_shape = (input_shape[0], self.num_centers)
return [input_shape, mu_shape, label_shape], [out_shape]
def list_arguments(self):
return ['data', 'mu', 'label']
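    # The DECLoss forward pass computes the Student's-t soft assignment used by DEC:
    #     q_ij = (1 + ||z_i - mu_j||^2 / alpha)^(-(alpha + 1) / 2) / normalisation,
    # where self.mask stores 1 / (1 + d_ij^2 / alpha) and each row of q is normalised
    # so a sample's assignments over the num_centers clusters sum to 1.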
def setup(self, X, num_centers, alpha, save_to='dec_model'):
sep = X.shape[0]*9/10
X_train = X[:sep]
X_val = X[sep:]
ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
if not os.path.exists(save_to+'_pt.arg'):
ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.save(save_to+'_pt.arg')
logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
else:
ae_model.load(save_to+'_pt.arg')
self.ae_model = ae_model
self.dec_op = DECModel.DECLoss(num_centers, alpha)
label = mx.sym.Variable('label')
self.feature = self.ae_model.encoder
self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
self.num_centers = num_centers
def cluster(self, X, y=None, update_interval=None):
N = X.shape[0]
if not update_interval:
update_interval = N
batch_size = 256
test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
last_batch_handle='pad')
args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
z = model.extract_feature(self.feature, args, test_iter, N, self.xpu).values()[0]
kmeans = KMeans(self.num_centers, n_init=20)
kmeans.fit(z)
args['dec_mu'][:] = kmeans.cluster_centers_
solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
solver.set_metric(mx.metric.CustomMetric(ce))
label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
shuffle=False, last_batch_handle='roll_over')
self.y_pred = np.zeros((X.shape[0]))
def refresh(i):
if i%update_interval == 0:
z = model.extract_feature(self.feature, args, test_iter, N, self.xpu).values()[0]
p = np.zeros((z.shape[0], self.num_centers))
self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
y_pred = p.argmax(axis=1)
print np.std(np.bincount(y_pred)), np.bincount(y_pred)
print np.std(np.bincount(y.astype(np.int))), np.bincount(y.astype(np.int))
if y is not None:
print(cluster_acc(y_pred, y)[0])
weight = 1.0/p.sum(axis=0)
weight *= self.num_centers/weight.sum()
p = (p**2)*weight
train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
print np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0]
if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
self.y_pred = y_pred
return True
self.y_pred = y_pred
solver.set_iter_start_callback(refresh)
solver.set_monitor(Monitor(50))
solver.solve(self.xpu, self.loss, args, self.args_grad,
train_iter, 0, 1000000000, {}, False)
self.end_args = args
if y is not None:
return cluster_acc(self.y_pred, y)[0]
else:
return -1
def mnist_exp(xpu):
X, Y = data.get_mnist()
dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
acc = []
for i in [10*(2**j) for j in range(9)]:
acc.append(dec_model.cluster(X, Y, i))
logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
logging.info(str(acc))
logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
mnist_exp(mx.gpu(0))
| # pylint: skip-file
import sys
import os
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path
import mxnet as mx
import numpy as np
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
def cluster_acc(Y_pred, Y):
from sklearn.utils.linear_assignment_ import linear_assignment
assert Y_pred.size == Y.size
D = max(Y_pred.max(), Y.max())+1
w = np.zeros((D,D), dtype=np.int64)
for i in range(Y_pred.size):
w[Y_pred[i], Y[i]] += 1
ind = linear_assignment(w.max() - w)
return sum([w[i,j] for i,j in ind])*1.0/Y_pred.size, w
class DECModel(model.MXModel):
class DECLoss(mx.operator.NumpyOp):
def __init__(self, num_centers, alpha):
super(DECModel.DECLoss, self).__init__(need_top_grad=False)
self.num_centers = num_centers
self.alpha = alpha
def forward(self, in_data, out_data):
z = in_data[0]
mu = in_data[1]
q = out_data[0]
self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
q[:] = self.mask**((self.alpha+1.0)/2.0)
q[:] = (q.T/q.sum(axis=1)).T
def backward(self, out_grad, in_data, out_data, in_grad):
q = out_data[0]
z = in_data[0]
mu = in_data[1]
p = in_data[2]
dz = in_grad[0]
dmu = in_grad[1]
self.mask *= (self.alpha+1.0)/self.alpha*(p-q)
dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu)
dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z)
def infer_shape(self, in_shape):
assert len(in_shape) == 3
assert len(in_shape[0]) == 2
input_shape = in_shape[0]
label_shape = (input_shape[0], self.num_centers)
mu_shape = (self.num_centers, input_shape[1])
out_shape = (input_shape[0], self.num_centers)
return [input_shape, mu_shape, label_shape], [out_shape]
def list_arguments(self):
return ['data', 'mu', 'label']
def setup(self, X, num_centers, alpha, save_to='dec_model'):
sep = X.shape[0]*9/10
X_train = X[:sep]
X_val = X[sep:]
ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
if not os.path.exists(save_to+'_pt.arg'):
ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.save(save_to+'_pt.arg')
logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
else:
ae_model.load(save_to+'_pt.arg')
self.ae_model = ae_model
self.dec_op = DECModel.DECLoss(num_centers, alpha)
label = mx.sym.Variable('label')
self.feature = self.ae_model.encoder
self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
self.num_centers = num_centers
def cluster(self, X, y=None, update_interval=None):
N = X.shape[0]
if not update_interval:
update_interval = N
batch_size = 256
test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
last_batch_handle='pad')
args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
z = model.extract_feature(self.feature, args, test_iter, N, self.xpu).values()[0]
kmeans = KMeans(self.num_centers, n_init=20)
kmeans.fit(z)
args['dec_mu'][:] = kmeans.cluster_centers_
solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
solver.set_metric(mx.metric.CustomMetric(ce))
label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
shuffle=False, last_batch_handle='roll_over')
self.y_pred = np.zeros((X.shape[0]))
def refresh(i):
if i%update_interval == 0:
z = model.extract_feature(self.feature, args, test_iter, N, self.xpu).values()[0]
p = np.zeros((z.shape[0], self.num_centers))
self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
y_pred = p.argmax(axis=1)
print np.std(np.bincount(y_pred)), np.bincount(y_pred)
print np.std(np.bincount(y.astype(np.int))), np.bincount(y.astype(np.int))
if y is not None:
print(cluster_acc(y_pred, y)[0])
weight = 1.0/p.sum(axis=0)
weight *= self.num_centers/weight.sum()
p = (p**2)*weight
train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
print np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0]
if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
self.y_pred = y_pred
return True
self.y_pred = y_pred
solver.set_iter_start_callback(refresh)
solver.set_monitor(Monitor(50))
solver.solve(self.xpu, self.loss, args, self.args_grad,
train_iter, 0, 1000000000, {}, False)
self.end_args = args
if y is not None:
return cluster_acc(self.y_pred, y)[0]
else:
return -1
def mnist_exp(xpu):
X, Y = data.get_mnist()
dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
acc = []
for i in [10*(2**j) for j in range(9)]:
acc.append(dec_model.cluster(X, Y, i))
logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
logging.info(str(acc))
logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
mnist_exp(mx.gpu(0))
| en | 0.447345 | # pylint: skip-file # code to automatically download dataset | 2.471548 | 2 |
cctbx/maptbx/tst_target_and_gradients.py | rimmartin/cctbx_project | 0 | 7992 | from __future__ import division
from cctbx.array_family import flex
from cctbx import xray
from cctbx import crystal
from cctbx import maptbx
from cctbx.maptbx import minimization
from libtbx.test_utils import approx_equal
import random
from cctbx.development import random_structure
from cctbx import sgtbx
if (1):
random.seed(0)
flex.set_random_seed(0)
def get_xrs():
crystal_symmetry = crystal.symmetry(
unit_cell=(10,10,10,90,90,90),
space_group_symbol="P 1")
return xray.structure(
crystal_symmetry=crystal_symmetry,
scatterers=flex.xray_scatterer([
xray.scatterer(label="C", site=(0,0,0))]))
def get_map(xrs, d_min=1.):
f_calc = xrs.structure_factors(d_min=d_min).f_calc()
fft_map = f_calc.fft_map()
fft_map.apply_sigma_scaling()
return fft_map.real_map_unpadded(), f_calc
def exercise_00():
"""
Exercise maptbx.target_and_gradients_diffmap .
"""
xrs = get_xrs()
map_data, f_calc = get_map(xrs=xrs)
tg = maptbx.target_and_gradients_diffmap(
unit_cell = xrs.unit_cell(),
map_target = map_data,
map_current = map_data,
step = 0.3,
sites_frac = xrs.sites_frac())
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
assert approx_equal(tg.target(), 0)
assert approx_equal(list(tg.gradients()), [[0,0,0]])
xrs = xrs.translate(x=0.3, y=-0.5, z=0.7)
assert approx_equal(xrs.sites_cart(), [[0.3,-0.5,0.7]])
map_current, f_calc = get_map(xrs=xrs)
tg = maptbx.target_and_gradients_diffmap(
unit_cell = xrs.unit_cell(),
map_target = map_data,
map_current = map_current,
step = 0.3,
sites_frac = xrs.sites_frac())
assert tg.target() > 0
for g in tg.gradients():
for g_ in g:
assert abs(g_)>0.
def exercise_01(d_min=1.0):
"""
Exercise maptbx.target_and_gradients_diffmap in action: minimization.
"""
xrs = get_xrs()
map_target, f_calc = get_map(xrs=xrs)
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
for sx in [-1,0,1]:
for sy in [-1,0,1]:
for sz in [-1,0,1]:
xrs_cp = xrs.deep_copy_scatterers()
xrs_cp = xrs_cp.translate(x=0.3*sx, y=0.5*sy, z=0.7*sz)
assert approx_equal(xrs_cp.sites_cart(), [[0.3*sx,0.5*sy,0.7*sz]],1.e-6)
crystal_gridding = maptbx.crystal_gridding(
unit_cell = xrs_cp.unit_cell(),
space_group_info = xrs_cp.space_group_info(),
pre_determined_n_real = map_target.accessor().all())
o = minimization.run(
xray_structure = xrs_cp,
miller_array = f_calc,
crystal_gridding = crystal_gridding,
map_target = map_target,
step = d_min/4,
target_type = "diffmap")
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
def exercise_02():
"""
Exercise maptbx.target_and_gradients_diffmap in action: minimization
(bigger model).
"""
def compute_map(xray_structure, d_min=1.5, resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 50)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.8)
start_error = flex.mean(xrs.distances(other = xrs_sh))
assert start_error>0.7
map_current, miller_array, crystal_gridding = compute_map(
xray_structure = xrs_sh)
for step in [miller_array.d_min()/4]*5:
minimized = minimization.run(
xray_structure = xrs_sh,
miller_array = miller_array,
crystal_gridding = crystal_gridding,
map_target = map_target,
max_iterations = 500,
min_iterations = 25,
step = step,
geometry_restraints_manager = None,
target_type = "diffmap")
xrs_sh = minimized.xray_structure
map_current = minimized.map_current
final_error = flex.mean(xrs.distances(other = minimized.xray_structure))
assert approx_equal(start_error, 0.8, 1.e-3)
assert final_error < 1.e-4
def exercise_03():
"""
Exercise maptbx.target_and_gradients_simple.
"""
def compute_map(xray_structure, d_min=1.5, resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 50)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.8)
#
t1 = maptbx.real_space_target_simple(
unit_cell = xrs.unit_cell(),
density_map = map_target,
sites_cart = xrs_sh.sites_cart(),
selection = flex.bool(xrs_sh.scatterers().size(), True))
g1 = maptbx.real_space_gradients_simple(
unit_cell = xrs.unit_cell(),
density_map = map_target,
sites_cart = xrs_sh.sites_cart(),
delta = 0.25,
selection = flex.bool(xrs_sh.scatterers().size(), True))
o = maptbx.target_and_gradients_simple(
unit_cell = xrs.unit_cell(),
map_target = map_target,
sites_cart = xrs_sh.sites_cart(),
delta = 0.25,
selection = flex.bool(xrs_sh.scatterers().size(), True))
assert approx_equal(t1, o.target())
for gi,gj in zip(g1, o.gradients()):
assert approx_equal(gi, gj)
def exercise_04():
"""
Exercise maptbx.target_and_gradients_simple in action: minimization
(bigger model).
"""
def compute_map(xray_structure, d_min=1., resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 150)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.3)
start_error = flex.mean(xrs.distances(other = xrs_sh))
assert start_error > 0.29
map_current, miller_array, crystal_gridding = compute_map(
xray_structure = xrs_sh)
xrs_sh_ = xrs_sh.deep_copy_scatterers()
minimized = minimization.run(
xray_structure = xrs_sh_,
miller_array = miller_array,
crystal_gridding = crystal_gridding,
map_target = map_target,
max_iterations = 500,
min_iterations = 25,
step = 0.5,
geometry_restraints_manager = None,
target_type = "simple")
xrs_sh_ = xrs_sh_.replace_sites_cart(minimized.sites_cart)
final_error = flex.mean(xrs.distances(other = xrs_sh_))
assert final_error < 0.015
if (__name__ == "__main__"):
exercise_00()
exercise_01()
exercise_02()
exercise_03()
exercise_04()
| from __future__ import division
from cctbx.array_family import flex
from cctbx import xray
from cctbx import crystal
from cctbx import maptbx
from cctbx.maptbx import minimization
from libtbx.test_utils import approx_equal
import random
from cctbx.development import random_structure
from cctbx import sgtbx
if (1):
random.seed(0)
flex.set_random_seed(0)
def get_xrs():
crystal_symmetry = crystal.symmetry(
unit_cell=(10,10,10,90,90,90),
space_group_symbol="P 1")
return xray.structure(
crystal_symmetry=crystal_symmetry,
scatterers=flex.xray_scatterer([
xray.scatterer(label="C", site=(0,0,0))]))
def get_map(xrs, d_min=1.):
f_calc = xrs.structure_factors(d_min=d_min).f_calc()
fft_map = f_calc.fft_map()
fft_map.apply_sigma_scaling()
return fft_map.real_map_unpadded(), f_calc
def exercise_00():
"""
Exercise maptbx.target_and_gradients_diffmap .
"""
xrs = get_xrs()
map_data, f_calc = get_map(xrs=xrs)
tg = maptbx.target_and_gradients_diffmap(
unit_cell = xrs.unit_cell(),
map_target = map_data,
map_current = map_data,
step = 0.3,
sites_frac = xrs.sites_frac())
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
assert approx_equal(tg.target(), 0)
assert approx_equal(list(tg.gradients()), [[0,0,0]])
xrs = xrs.translate(x=0.3, y=-0.5, z=0.7)
assert approx_equal(xrs.sites_cart(), [[0.3,-0.5,0.7]])
map_current, f_calc = get_map(xrs=xrs)
tg = maptbx.target_and_gradients_diffmap(
unit_cell = xrs.unit_cell(),
map_target = map_data,
map_current = map_current,
step = 0.3,
sites_frac = xrs.sites_frac())
assert tg.target() > 0
for g in tg.gradients():
for g_ in g:
assert abs(g_)>0.
def exercise_01(d_min=1.0):
"""
Exercise maptbx.target_and_gradients_diffmap in action: minimization.
"""
xrs = get_xrs()
map_target, f_calc = get_map(xrs=xrs)
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
for sx in [-1,0,1]:
for sy in [-1,0,1]:
for sz in [-1,0,1]:
xrs_cp = xrs.deep_copy_scatterers()
xrs_cp = xrs_cp.translate(x=0.3*sx, y=0.5*sy, z=0.7*sz)
assert approx_equal(xrs_cp.sites_cart(), [[0.3*sx,0.5*sy,0.7*sz]],1.e-6)
crystal_gridding = maptbx.crystal_gridding(
unit_cell = xrs_cp.unit_cell(),
space_group_info = xrs_cp.space_group_info(),
pre_determined_n_real = map_target.accessor().all())
o = minimization.run(
xray_structure = xrs_cp,
miller_array = f_calc,
crystal_gridding = crystal_gridding,
map_target = map_target,
step = d_min/4,
target_type = "diffmap")
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
def exercise_02():
"""
Exercise maptbx.target_and_gradients_diffmap in action: minimization
(bigger model).
"""
def compute_map(xray_structure, d_min=1.5, resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 50)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.8)
start_error = flex.mean(xrs.distances(other = xrs_sh))
assert start_error>0.7
map_current, miller_array, crystal_gridding = compute_map(
xray_structure = xrs_sh)
for step in [miller_array.d_min()/4]*5:
minimized = minimization.run(
xray_structure = xrs_sh,
miller_array = miller_array,
crystal_gridding = crystal_gridding,
map_target = map_target,
max_iterations = 500,
min_iterations = 25,
step = step,
geometry_restraints_manager = None,
target_type = "diffmap")
xrs_sh = minimized.xray_structure
map_current = minimized.map_current
final_error = flex.mean(xrs.distances(other = minimized.xray_structure))
assert approx_equal(start_error, 0.8, 1.e-3)
assert final_error < 1.e-4
def exercise_03():
"""
Exercise maptbx.target_and_gradients_simple.
"""
def compute_map(xray_structure, d_min=1.5, resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 50)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.8)
#
t1 = maptbx.real_space_target_simple(
unit_cell = xrs.unit_cell(),
density_map = map_target,
sites_cart = xrs_sh.sites_cart(),
selection = flex.bool(xrs_sh.scatterers().size(), True))
g1 = maptbx.real_space_gradients_simple(
unit_cell = xrs.unit_cell(),
density_map = map_target,
sites_cart = xrs_sh.sites_cart(),
delta = 0.25,
selection = flex.bool(xrs_sh.scatterers().size(), True))
o = maptbx.target_and_gradients_simple(
unit_cell = xrs.unit_cell(),
map_target = map_target,
sites_cart = xrs_sh.sites_cart(),
delta = 0.25,
selection = flex.bool(xrs_sh.scatterers().size(), True))
assert approx_equal(t1, o.target())
for gi,gj in zip(g1, o.gradients()):
assert approx_equal(gi, gj)
def exercise_04():
"""
Exercise maptbx.target_and_gradients_simple in action: minimization
(bigger model).
"""
def compute_map(xray_structure, d_min=1., resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 150)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.3)
start_error = flex.mean(xrs.distances(other = xrs_sh))
assert start_error > 0.29
map_current, miller_array, crystal_gridding = compute_map(
xray_structure = xrs_sh)
xrs_sh_ = xrs_sh.deep_copy_scatterers()
minimized = minimization.run(
xray_structure = xrs_sh_,
miller_array = miller_array,
crystal_gridding = crystal_gridding,
map_target = map_target,
max_iterations = 500,
min_iterations = 25,
step = 0.5,
geometry_restraints_manager = None,
target_type = "simple")
xrs_sh_ = xrs_sh_.replace_sites_cart(minimized.sites_cart)
final_error = flex.mean(xrs.distances(other = xrs_sh_))
assert final_error < 0.015
if (__name__ == "__main__"):
exercise_00()
exercise_01()
exercise_02()
exercise_03()
exercise_04()
| en | 0.320213 | Exercise maptbx.target_and_gradients_diffmap . Exercise maptbx.target_and_gradients_diffmap in action: minimization. Exercise maptbx.target_and_gradients_diffmap in action: minimization (bigger model). Exercise maptbx.target_and_gradients_simple. # Exercise maptbx.target_and_gradients_simple in action: minimization (bigger model). | 1.747131 | 2 |
open_imagilib/matrix.py | viktor-ferenczi/open-imagilib | 2 | 7993 | <gh_stars>1-10
""" LED matrix
"""
__all__ = ['Matrix']
from .colors import Color, on, off
from .fonts import font_6x8
class Matrix(list):
def __init__(self, source=None) -> None:
if source is None:
row_iter = ([off for _ in range(8)] for _ in range(8))
elif isinstance(source, list):
row_iter = (list(row) for row in source)
else:
raise TypeError('Unknown source to build a Matrix from')
super().__init__(row_iter)
def background(self, color: Color) -> None:
for i in range(8):
for j in range(8):
self[i][j] = color
def character(self, char: str, char_color: Color = on, *, x_offset: int = 1) -> None:
if x_offset <= -8 or x_offset >= 8:
return
if len(char) > 1:
char = char[0]
if not char:
char = ' '
if char < ' ' or char > '\x7f':
char = '\x7f'
bitmap = font_6x8[ord(char) - 32]
for i, row in enumerate(bitmap):
for j, c in enumerate(row):
if c != ' ':
x = x_offset + j
if 0 <= x < 8:
self[i][x] = char_color
| """ LED matrix
"""
__all__ = ['Matrix']
from .colors import Color, on, off
from .fonts import font_6x8
class Matrix(list):
def __init__(self, source=None) -> None:
if source is None:
row_iter = ([off for _ in range(8)] for _ in range(8))
elif isinstance(source, list):
row_iter = (list(row) for row in source)
else:
raise TypeError('Unknown source to build a Matrix from')
super().__init__(row_iter)
def background(self, color: Color) -> None:
for i in range(8):
for j in range(8):
self[i][j] = color
def character(self, char: str, char_color: Color = on, *, x_offset: int = 1) -> None:
if x_offset <= -8 or x_offset >= 8:
return
if len(char) > 1:
char = char[0]
if not char:
char = ' '
if char < ' ' or char > '\x7f':
char = '\x7f'
bitmap = font_6x8[ord(char) - 32]
for i, row in enumerate(bitmap):
for j, c in enumerate(row):
if c != ' ':
x = x_offset + j
if 0 <= x < 8:
self[i][x] = char_color | en | 0.844907 | LED matrix | 3.275832 | 3 |
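As an aside, a minimal usage sketch for the Matrix class above. It assumes the package layout implied by the relative imports (open_imagilib.matrix and open_imagilib.colors); nothing beyond the methods shown in the row is used.

from open_imagilib.colors import off, on
from open_imagilib.matrix import Matrix

frame = Matrix()                      # 8x8 grid, every pixel initialised to `off`
frame.character('A', on, x_offset=1)  # draw 'A' with the 6x8 font, shifted one column right
snapshot = Matrix(frame)              # copy-construct from an existing 8x8 list of rows
print(snapshot[0])                    # first row of Color values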
prml/linear/_classifier.py | alexandru-dinu/PRML | 0 | 7994 | <filename>prml/linear/_classifier.py
class Classifier(object):
"""Base class for classifiers."""
| <filename>prml/linear/_classifier.py
class Classifier(object):
"""Base class for classifiers."""
| en | 0.714258 | Base class for classifiers. | 1.32588 | 1 |
tests/env_config/test_base.py | DAtek/datek-app-utils | 0 | 7995 | from pytest import raises
from datek_app_utils.env_config.base import BaseConfig
from datek_app_utils.env_config.errors import InstantiationForbiddenError
class SomeOtherMixinWhichDoesntRelateToEnvConfig:
color = "red"
class TestConfig:
def test_iter(self, monkeypatch, key_volume, base_config_class):
volume = 5
monkeypatch.setenv(key_volume, str(volume))
class Config(SomeOtherMixinWhichDoesntRelateToEnvConfig, base_config_class):
TYPE: str
items = [item for item in Config]
assert len(items) == 5
assert Config.color == "red"
assert items[0].name == "TYPE"
assert items[0].value is None
assert items[0].type == str
assert items[1].name == "FIELD_WITH_DEFAULT_VALUE"
assert items[1].value == "C"
assert items[1].type == str
assert items[2].name == "NON_MANDATORY_FIELD"
assert items[2].value is None
assert items[2].type == str
assert items[3].name == "TYPED_NON_MANDATORY_FIELD"
assert items[3].value is None
assert items[3].type == str
assert items[4].name == "VOLUME"
assert items[4].value == volume
assert items[4].type == int
def test_get(self, monkeypatch, key_volume, base_config_class):
volume = 10
monkeypatch.setenv(key_volume, str(volume))
assert getattr(base_config_class, "VOLUME") == volume
def test_constructor_is_forbidden(self):
class Config(BaseConfig):
pass
with raises(InstantiationForbiddenError):
Config()
| from pytest import raises
from datek_app_utils.env_config.base import BaseConfig
from datek_app_utils.env_config.errors import InstantiationForbiddenError
class SomeOtherMixinWhichDoesntRelateToEnvConfig:
color = "red"
class TestConfig:
def test_iter(self, monkeypatch, key_volume, base_config_class):
volume = 5
monkeypatch.setenv(key_volume, str(volume))
class Config(SomeOtherMixinWhichDoesntRelateToEnvConfig, base_config_class):
TYPE: str
items = [item for item in Config]
assert len(items) == 5
assert Config.color == "red"
assert items[0].name == "TYPE"
assert items[0].value is None
assert items[0].type == str
assert items[1].name == "FIELD_WITH_DEFAULT_VALUE"
assert items[1].value == "C"
assert items[1].type == str
assert items[2].name == "NON_MANDATORY_FIELD"
assert items[2].value is None
assert items[2].type == str
assert items[3].name == "TYPED_NON_MANDATORY_FIELD"
assert items[3].value is None
assert items[3].type == str
assert items[4].name == "VOLUME"
assert items[4].value == volume
assert items[4].type == int
def test_get(self, monkeypatch, key_volume, base_config_class):
volume = 10
monkeypatch.setenv(key_volume, str(volume))
assert getattr(base_config_class, "VOLUME") == volume
def test_constructor_is_forbidden(self):
class Config(BaseConfig):
pass
with raises(InstantiationForbiddenError):
Config()
| none | 1 | 2.185494 | 2 |
|
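For orientation, a hedged sketch of the kind of configuration class these tests exercise. The field names here are hypothetical, and the casting/default behaviour is inferred from the assertions above rather than from the library's documentation.

import os
from datek_app_utils.env_config.base import BaseConfig

os.environ["VOLUME"] = "5"        # values are read from the process environment

class AppConfig(BaseConfig):
    VOLUME: int                   # cast to int on access, per test_get above
    TYPE: str                     # unset variables appear with value None, per test_iter

for item in AppConfig:            # iteration yields objects with .name, .value and .type
    print(item.name, item.value, item.type)

print(AppConfig.VOLUME)           # -> 5 (as int), assuming the inferred casting behaviour
# AppConfig() would raise InstantiationForbiddenError, per test_constructor_is_forbidden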
comprehend.py | korniichuk/cvr-features | 0 | 7996 | # -*- coding: utf-8 -*-
# Name: comprehend
# Version: 0.1a2
# Owner: <NAME>
# Maintainer(s):
import boto3
def get_sentiment(text, language_code='en'):
"""Get sentiment.
Inspects text and returns an inference of the prevailing sentiment
(positive, neutral, mixed, or negative).
Args:
text: UTF-8 text string. Each string must contain fewer that
5,000 bytes of UTF-8 encoded characters (required | type: str).
language_code: language of text (not required | type: str |
default: 'en').
Returns:
sentiment: sentiment: positive, neutral, mixed, or negative
(type: str).
"""
def prepare_text(text):
while len(bytes(text, 'utf-8')) > 4999:
text = text[:-1]
return text
comprehend = boto3.client('comprehend')
text = prepare_text(text)
try:
r = comprehend.detect_sentiment(Text=text, LanguageCode=language_code)
except Exception as e:
raise e
sentiment = r['Sentiment'].lower()
return sentiment
# Example. Get sentiment of text below:
# "I ordered a small and expected it to fit just right but it was a little bit
# more like a medium-large. It was great quality. It's a lighter brown than
# pictured but fairly close. Would be ten times better if it was lined with
# cotton or wool on the inside."
# text = "I ordered a small and expected it to fit just right but it was a \
# little bit more like a medium-large. It was great quality. It's a \
# lighter brown than pictured but fairly close. Would be ten times \
# better if it was lined with cotton or wool on the inside."
# get_sentiment(text)
| # -*- coding: utf-8 -*-
# Name: comprehend
# Version: 0.1a2
# Owner: <NAME>
# Maintainer(s):
import boto3
def get_sentiment(text, language_code='en'):
"""Get sentiment.
Inspects text and returns an inference of the prevailing sentiment
(positive, neutral, mixed, or negative).
Args:
text: UTF-8 text string. Each string must contain fewer that
5,000 bytes of UTF-8 encoded characters (required | type: str).
language_code: language of text (not required | type: str |
default: 'en').
Returns:
sentiment: sentiment: positive, neutral, mixed, or negative
(type: str).
"""
def prepare_text(text):
while len(bytes(text, 'utf-8')) > 4999:
text = text[:-1]
return text
comprehend = boto3.client('comprehend')
text = prepare_text(text)
try:
r = comprehend.detect_sentiment(Text=text, LanguageCode=language_code)
except Exception as e:
raise e
sentiment = r['Sentiment'].lower()
return sentiment
# Example. Get sentiment of text below:
# "I ordered a small and expected it to fit just right but it was a little bit
# more like a medium-large. It was great quality. It's a lighter brown than
# pictured but fairly close. Would be ten times better if it was lined with
# cotton or wool on the inside."
# text = "I ordered a small and expected it to fit just right but it was a \
# little bit more like a medium-large. It was great quality. It's a \
# lighter brown than pictured but fairly close. Would be ten times \
# better if it was lined with cotton or wool on the inside."
# get_sentiment(text)
| en | 0.929776 | # -*- coding: utf-8 -*- # Name: comprehend # Version: 0.1a2 # Owner: <NAME> # Maintainer(s): Get sentiment. Inspects text and returns an inference of the prevailing sentiment (positive, neutral, mixed, or negative). Args: text: UTF-8 text string. Each string must contain fewer that 5,000 bytes of UTF-8 encoded characters (required | type: str). language_code: language of text (not required | type: str | default: 'en'). Returns: sentiment: sentiment: positive, neutral, mixed, or negative (type: str). # Example. Get sentiment of text below: # "I ordered a small and expected it to fit just right but it was a little bit # more like a medium-large. It was great quality. It's a lighter brown than # pictured but fairly close. Would be ten times better if it was lined with # cotton or wool on the inside." # text = "I ordered a small and expected it to fit just right but it was a \ # little bit more like a medium-large. It was great quality. It's a \ # lighter brown than pictured but fairly close. Would be ten times \ # better if it was lined with cotton or wool on the inside." # get_sentiment(text) | 3.325627 | 3 |
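A short usage sketch for the get_sentiment helper above. It assumes boto3 credentials with Comprehend access are configured in the environment; the review text is only an illustration.

review = (
    "I ordered a small and expected it to fit just right, "
    "but it was closer to a medium-large. Great quality though."
)
label = get_sentiment(review)   # one of 'positive', 'negative', 'neutral', 'mixed'
print(label)
# Inputs longer than 5,000 bytes are silently truncated by prepare_text() before the API call.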
mapclientplugins/argonsceneexporterstep/ui_configuredialog.py | Kayvv/mapclientplugins.argonsceneexporterstep | 0 | 7997 | <gh_stars>0
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'configuredialog.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_ConfigureDialog(object):
def setupUi(self, ConfigureDialog):
if not ConfigureDialog.objectName():
ConfigureDialog.setObjectName(u"ConfigureDialog")
ConfigureDialog.resize(510, 342)
self.gridLayout = QGridLayout(ConfigureDialog)
self.gridLayout.setObjectName(u"gridLayout")
self.configGroupBox = QGroupBox(ConfigureDialog)
self.configGroupBox.setObjectName(u"configGroupBox")
self.formLayout = QFormLayout(self.configGroupBox)
self.formLayout.setObjectName(u"formLayout")
self.label0 = QLabel(self.configGroupBox)
self.label0.setObjectName(u"label0")
self.formLayout.setWidget(0, QFormLayout.LabelRole, self.label0)
self.lineEditIdentifier = QLineEdit(self.configGroupBox)
self.lineEditIdentifier.setObjectName(u"lineEditIdentifier")
self.formLayout.setWidget(0, QFormLayout.FieldRole, self.lineEditIdentifier)
self.label_3 = QLabel(self.configGroupBox)
self.label_3.setObjectName(u"label_3")
self.formLayout.setWidget(1, QFormLayout.LabelRole, self.label_3)
self.prefix_lineEdit = QLineEdit(self.configGroupBox)
self.prefix_lineEdit.setObjectName(u"prefix_lineEdit")
self.formLayout.setWidget(1, QFormLayout.FieldRole, self.prefix_lineEdit)
self.label_4 = QLabel(self.configGroupBox)
self.label_4.setObjectName(u"label_4")
self.formLayout.setWidget(3, QFormLayout.LabelRole, self.label_4)
self.timeSteps_lineEdit = QLineEdit(self.configGroupBox)
self.timeSteps_lineEdit.setObjectName(u"timeSteps_lineEdit")
self.formLayout.setWidget(3, QFormLayout.FieldRole, self.timeSteps_lineEdit)
self.label = QLabel(self.configGroupBox)
self.label.setObjectName(u"label")
self.formLayout.setWidget(4, QFormLayout.LabelRole, self.label)
self.initialTime_lineEdit = QLineEdit(self.configGroupBox)
self.initialTime_lineEdit.setObjectName(u"initialTime_lineEdit")
self.formLayout.setWidget(4, QFormLayout.FieldRole, self.initialTime_lineEdit)
self.label_2 = QLabel(self.configGroupBox)
self.label_2.setObjectName(u"label_2")
self.formLayout.setWidget(5, QFormLayout.LabelRole, self.label_2)
self.finishTime_lineEdit = QLineEdit(self.configGroupBox)
self.finishTime_lineEdit.setObjectName(u"finishTime_lineEdit")
self.formLayout.setWidget(5, QFormLayout.FieldRole, self.finishTime_lineEdit)
self.label1 = QLabel(self.configGroupBox)
self.label1.setObjectName(u"label1")
self.formLayout.setWidget(6, QFormLayout.LabelRole, self.label1)
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.lineEditOutputDirectory = QLineEdit(self.configGroupBox)
self.lineEditOutputDirectory.setObjectName(u"lineEditOutputDirectory")
self.horizontalLayout.addWidget(self.lineEditOutputDirectory)
self.pushButtonOutputDirectory = QPushButton(self.configGroupBox)
self.pushButtonOutputDirectory.setObjectName(u"pushButtonOutputDirectory")
self.horizontalLayout.addWidget(self.pushButtonOutputDirectory)
self.formLayout.setLayout(6, QFormLayout.FieldRole, self.horizontalLayout)
self.label_5 = QLabel(self.configGroupBox)
self.label_5.setObjectName(u"label_5")
self.formLayout.setWidget(2, QFormLayout.LabelRole, self.label_5)
self.comboBoxExportType = QComboBox(self.configGroupBox)
self.comboBoxExportType.addItem("")
self.comboBoxExportType.addItem("")
self.comboBoxExportType.setObjectName(u"comboBoxExportType")
self.formLayout.setWidget(2, QFormLayout.FieldRole, self.comboBoxExportType)
self.gridLayout.addWidget(self.configGroupBox, 0, 0, 1, 1)
self.buttonBox = QDialogButtonBox(ConfigureDialog)
self.buttonBox.setObjectName(u"buttonBox")
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
QWidget.setTabOrder(self.lineEditIdentifier, self.prefix_lineEdit)
QWidget.setTabOrder(self.prefix_lineEdit, self.comboBoxExportType)
QWidget.setTabOrder(self.comboBoxExportType, self.timeSteps_lineEdit)
QWidget.setTabOrder(self.timeSteps_lineEdit, self.initialTime_lineEdit)
QWidget.setTabOrder(self.initialTime_lineEdit, self.finishTime_lineEdit)
QWidget.setTabOrder(self.finishTime_lineEdit, self.lineEditOutputDirectory)
QWidget.setTabOrder(self.lineEditOutputDirectory, self.pushButtonOutputDirectory)
self.retranslateUi(ConfigureDialog)
self.buttonBox.accepted.connect(ConfigureDialog.accept)
self.buttonBox.rejected.connect(ConfigureDialog.reject)
self.comboBoxExportType.setCurrentIndex(0)
QMetaObject.connectSlotsByName(ConfigureDialog)
# setupUi
def retranslateUi(self, ConfigureDialog):
ConfigureDialog.setWindowTitle(QCoreApplication.translate("ConfigureDialog", u"Configure Step", None))
self.configGroupBox.setTitle("")
self.label0.setText(QCoreApplication.translate("ConfigureDialog", u"identifier: ", None))
self.label_3.setText(QCoreApplication.translate("ConfigureDialog", u"Prefix : ", None))
self.label_4.setText(QCoreApplication.translate("ConfigureDialog", u"Time Steps : ", None))
self.label.setText(QCoreApplication.translate("ConfigureDialog", u"Initial Time : ", None))
self.label_2.setText(QCoreApplication.translate("ConfigureDialog", u"Finish Time : ", None))
self.label1.setText(QCoreApplication.translate("ConfigureDialog", u"Output directory:", None))
self.pushButtonOutputDirectory.setText(QCoreApplication.translate("ConfigureDialog", u"...", None))
self.label_5.setText(QCoreApplication.translate("ConfigureDialog", u"Export type:", None))
self.comboBoxExportType.setItemText(0, QCoreApplication.translate("ConfigureDialog", u"webgl", None))
self.comboBoxExportType.setItemText(1, QCoreApplication.translate("ConfigureDialog", u"thumbnail", None))
# retranslateUi
| # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'configuredialog.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_ConfigureDialog(object):
def setupUi(self, ConfigureDialog):
if not ConfigureDialog.objectName():
ConfigureDialog.setObjectName(u"ConfigureDialog")
ConfigureDialog.resize(510, 342)
self.gridLayout = QGridLayout(ConfigureDialog)
self.gridLayout.setObjectName(u"gridLayout")
self.configGroupBox = QGroupBox(ConfigureDialog)
self.configGroupBox.setObjectName(u"configGroupBox")
self.formLayout = QFormLayout(self.configGroupBox)
self.formLayout.setObjectName(u"formLayout")
self.label0 = QLabel(self.configGroupBox)
self.label0.setObjectName(u"label0")
self.formLayout.setWidget(0, QFormLayout.LabelRole, self.label0)
self.lineEditIdentifier = QLineEdit(self.configGroupBox)
self.lineEditIdentifier.setObjectName(u"lineEditIdentifier")
self.formLayout.setWidget(0, QFormLayout.FieldRole, self.lineEditIdentifier)
self.label_3 = QLabel(self.configGroupBox)
self.label_3.setObjectName(u"label_3")
self.formLayout.setWidget(1, QFormLayout.LabelRole, self.label_3)
self.prefix_lineEdit = QLineEdit(self.configGroupBox)
self.prefix_lineEdit.setObjectName(u"prefix_lineEdit")
self.formLayout.setWidget(1, QFormLayout.FieldRole, self.prefix_lineEdit)
self.label_4 = QLabel(self.configGroupBox)
self.label_4.setObjectName(u"label_4")
self.formLayout.setWidget(3, QFormLayout.LabelRole, self.label_4)
self.timeSteps_lineEdit = QLineEdit(self.configGroupBox)
self.timeSteps_lineEdit.setObjectName(u"timeSteps_lineEdit")
self.formLayout.setWidget(3, QFormLayout.FieldRole, self.timeSteps_lineEdit)
self.label = QLabel(self.configGroupBox)
self.label.setObjectName(u"label")
self.formLayout.setWidget(4, QFormLayout.LabelRole, self.label)
self.initialTime_lineEdit = QLineEdit(self.configGroupBox)
self.initialTime_lineEdit.setObjectName(u"initialTime_lineEdit")
self.formLayout.setWidget(4, QFormLayout.FieldRole, self.initialTime_lineEdit)
self.label_2 = QLabel(self.configGroupBox)
self.label_2.setObjectName(u"label_2")
self.formLayout.setWidget(5, QFormLayout.LabelRole, self.label_2)
self.finishTime_lineEdit = QLineEdit(self.configGroupBox)
self.finishTime_lineEdit.setObjectName(u"finishTime_lineEdit")
self.formLayout.setWidget(5, QFormLayout.FieldRole, self.finishTime_lineEdit)
self.label1 = QLabel(self.configGroupBox)
self.label1.setObjectName(u"label1")
self.formLayout.setWidget(6, QFormLayout.LabelRole, self.label1)
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.lineEditOutputDirectory = QLineEdit(self.configGroupBox)
self.lineEditOutputDirectory.setObjectName(u"lineEditOutputDirectory")
self.horizontalLayout.addWidget(self.lineEditOutputDirectory)
self.pushButtonOutputDirectory = QPushButton(self.configGroupBox)
self.pushButtonOutputDirectory.setObjectName(u"pushButtonOutputDirectory")
self.horizontalLayout.addWidget(self.pushButtonOutputDirectory)
self.formLayout.setLayout(6, QFormLayout.FieldRole, self.horizontalLayout)
self.label_5 = QLabel(self.configGroupBox)
self.label_5.setObjectName(u"label_5")
self.formLayout.setWidget(2, QFormLayout.LabelRole, self.label_5)
self.comboBoxExportType = QComboBox(self.configGroupBox)
self.comboBoxExportType.addItem("")
self.comboBoxExportType.addItem("")
self.comboBoxExportType.setObjectName(u"comboBoxExportType")
self.formLayout.setWidget(2, QFormLayout.FieldRole, self.comboBoxExportType)
self.gridLayout.addWidget(self.configGroupBox, 0, 0, 1, 1)
self.buttonBox = QDialogButtonBox(ConfigureDialog)
self.buttonBox.setObjectName(u"buttonBox")
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
QWidget.setTabOrder(self.lineEditIdentifier, self.prefix_lineEdit)
QWidget.setTabOrder(self.prefix_lineEdit, self.comboBoxExportType)
QWidget.setTabOrder(self.comboBoxExportType, self.timeSteps_lineEdit)
QWidget.setTabOrder(self.timeSteps_lineEdit, self.initialTime_lineEdit)
QWidget.setTabOrder(self.initialTime_lineEdit, self.finishTime_lineEdit)
QWidget.setTabOrder(self.finishTime_lineEdit, self.lineEditOutputDirectory)
QWidget.setTabOrder(self.lineEditOutputDirectory, self.pushButtonOutputDirectory)
self.retranslateUi(ConfigureDialog)
self.buttonBox.accepted.connect(ConfigureDialog.accept)
self.buttonBox.rejected.connect(ConfigureDialog.reject)
self.comboBoxExportType.setCurrentIndex(0)
QMetaObject.connectSlotsByName(ConfigureDialog)
# setupUi
def retranslateUi(self, ConfigureDialog):
ConfigureDialog.setWindowTitle(QCoreApplication.translate("ConfigureDialog", u"Configure Step", None))
self.configGroupBox.setTitle("")
self.label0.setText(QCoreApplication.translate("ConfigureDialog", u"identifier: ", None))
self.label_3.setText(QCoreApplication.translate("ConfigureDialog", u"Prefix : ", None))
self.label_4.setText(QCoreApplication.translate("ConfigureDialog", u"Time Steps : ", None))
self.label.setText(QCoreApplication.translate("ConfigureDialog", u"Initial Time : ", None))
self.label_2.setText(QCoreApplication.translate("ConfigureDialog", u"Finish Time : ", None))
self.label1.setText(QCoreApplication.translate("ConfigureDialog", u"Output directory:", None))
self.pushButtonOutputDirectory.setText(QCoreApplication.translate("ConfigureDialog", u"...", None))
self.label_5.setText(QCoreApplication.translate("ConfigureDialog", u"Export type:", None))
self.comboBoxExportType.setItemText(0, QCoreApplication.translate("ConfigureDialog", u"webgl", None))
self.comboBoxExportType.setItemText(1, QCoreApplication.translate("ConfigureDialog", u"thumbnail", None))
# retranslateUi | de | 0.413164 | # -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'configuredialog.ui' ## ## Created by: Qt User Interface Compiler version 5.15.2 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ # setupUi # retranslateUi | 2.110821 | 2 |
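The generated Ui_ConfigureDialog above is meant to be composed into a QDialog rather than used directly. A minimal sketch of the usual PySide2 pattern follows; it assumes the generated class is importable from this module, and the real step dialog would add its own validation and signal wiring.

from PySide2.QtWidgets import QApplication, QDialog

class ConfigureDialog(QDialog):
    def __init__(self, parent=None):
        super().__init__(parent)
        self._ui = Ui_ConfigureDialog()
        self._ui.setupUi(self)            # builds the widgets onto this dialog

if __name__ == "__main__":
    app = QApplication([])
    dlg = ConfigureDialog()
    if dlg.exec_() == QDialog.Accepted:   # OK/Cancel are wired to accept/reject in setupUi
        print(dlg._ui.lineEditOutputDirectory.text())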
pbx_gs_python_utils/lambdas/utils/puml_to_slack.py | owasp-sbot/pbx-gs-python-utils | 3 | 7998 | <reponame>owasp-sbot/pbx-gs-python-utils<filename>pbx_gs_python_utils/lambdas/utils/puml_to_slack.py<gh_stars>1-10
import base64
import tempfile
import requests
from osbot_aws.apis import Secrets
from osbot_aws.apis.Lambda import Lambda
def upload_png_file(channel_id, file):
bot_token = Secrets('slack-gs-bot').value()
my_file = {
'file': ('/tmp/myfile.png', open(file, 'rb'), 'png')
}
payload = {
"filename" : 'image.png',
"token" : bot_token,
"channels" : [channel_id],
}
requests.post("https://slack.com/api/files.upload", params=payload, files=my_file)
return 'image sent .... '
def run(event, context):
channel = event['channel']
puml = event['puml']
puml = puml.replace('&lt;', '<').replace('&gt;', '>')
(fd, tmp_file) = tempfile.mkstemp('.png')
puml_to_png = Lambda('utils.puml_to_png').invoke
result = puml_to_png({"puml": puml })
with open(tmp_file, "wb") as fh:
fh.write(base64.decodebytes(result['png_base64'].encode()))
return upload_png_file(channel, tmp_file)
| import base64
import tempfile
import requests
from osbot_aws.apis import Secrets
from osbot_aws.apis.Lambda import Lambda
def upload_png_file(channel_id, file):
bot_token = Secrets('slack-gs-bot').value()
my_file = {
'file': ('/tmp/myfile.png', open(file, 'rb'), 'png')
}
payload = {
"filename" : 'image.png',
"token" : bot_token,
"channels" : [channel_id],
}
requests.post("https://slack.com/api/files.upload", params=payload, files=my_file)
return 'image sent .... '
def run(event, context):
channel = event['channel']
puml = event['puml']
puml = puml.replace('&lt;', '<').replace('&gt;', '>')
(fd, tmp_file) = tempfile.mkstemp('.png')
puml_to_png = Lambda('utils.puml_to_png').invoke
result = puml_to_png({"puml": puml })
with open(tmp_file, "wb") as fh:
fh.write(base64.decodebytes(result['png_base64'].encode()))
return upload_png_file(channel, tmp_file) | none | 1 | 2.301006 | 2 |
|
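For completeness, a hedged sketch of calling the handler above outside AWS. The channel id and diagram are placeholders; it assumes the 'slack-gs-bot' secret and a deployed utils.puml_to_png Lambda are reachable with the local AWS credentials.

sample_event = {
    "channel": "C0123456789",                        # hypothetical Slack channel id
    "puml": "@startuml\nAlice -> Bob: hello\n@enduml",
}
print(run(sample_event, context=None))               # renders the PUML, then uploads the PNG to Slack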
src/system_io/input.py | DeseineClement/bigdata-housing-classifier | 0 | 7999 | from sys import argv
from getopt import getopt
from os import R_OK, access
from string import Template
DEFAULT_DATASET_FILE_PATH = "dataset/data.csv"
DEFAULT_DATASET_COLUMNS = ['surface (m2)', 'height (m)', 'latitude', 'housing_type', 'longitude', 'country_code',
'city']
DEFAULT_VISU = ["scatter_plot", "histogram"]
DEFAULT_RANGE = [0, 1000]
def arguments():
options, *_ = getopt(argv[1:], 'd:c:v:r:', ['dataset-file=', 'columns=', 'visus=', 'range='])
dataset_file = DEFAULT_DATASET_FILE_PATH
dataset_columns = DEFAULT_DATASET_COLUMNS
dataset_visus = DEFAULT_VISU
dataset_range = DEFAULT_RANGE
for opt, arg in options:
if opt in ('-d', '--dataset-file'):
dataset_file = arg
elif opt in ('-c', '--columns'):
dataset_columns = arg.split(',')
elif opt in ('-v', '--visus'):
dataset_visus = arg.split(',')
elif opt in ('-r', '--range'):
dataset_range = arg.split(',')
dataset_range = list(map(lambda x: int(x), dataset_range))
if len(dataset_range) == 1 :
dataset_range.append(DEFAULT_RANGE[1])
if not access(dataset_file, R_OK):
raise RuntimeError(Template("the file $file does not exists or is not readable.").substitute(file=dataset_file))
for column in dataset_columns:
if column not in DEFAULT_DATASET_COLUMNS:
raise RuntimeError(Template("Invalid column $column must be one of $columns.").
substitute(column=column, columns=','.join(DEFAULT_DATASET_COLUMNS)))
for visu in dataset_visus:
if visu not in DEFAULT_VISU:
raise RuntimeError(Template("Invalid visu $column must be one of $columns.").
substitute(column=visu, columns=','.join(DEFAULT_VISU)))
for range_num in dataset_range:
if range_num not in range(0, 1001):
raise RuntimeError(Template("Invalid range $column must be between 0 and 999.").
substitute(column=range_num))
return dataset_file, dataset_columns, dataset_visus, dataset_range
| from sys import argv
from getopt import getopt
from os import R_OK, access
from string import Template
DEFAULT_DATASET_FILE_PATH = "dataset/data.csv"
DEFAULT_DATASET_COLUMNS = ['surface (m2)', 'height (m)', 'latitude', 'housing_type', 'longitude', 'country_code',
'city']
DEFAULT_VISU = ["scatter_plot", "histogram"]
DEFAULT_RANGE = [0, 1000]
def arguments():
options, *_ = getopt(argv[1:], 'd:c:v:r:', ['dataset-file=', 'columns=', 'visus=', 'range='])
dataset_file = DEFAULT_DATASET_FILE_PATH
dataset_columns = DEFAULT_DATASET_COLUMNS
dataset_visus = DEFAULT_VISU
dataset_range = DEFAULT_RANGE
for opt, arg in options:
if opt in ('-d', '--dataset-file'):
dataset_file = arg
elif opt in ('-c', '--columns'):
dataset_columns = arg.split(',')
elif opt in ('-v', '--visus'):
dataset_visus = arg.split(',')
elif opt in ('-r', '--range'):
dataset_range = arg.split(',')
dataset_range = list(map(lambda x: int(x), dataset_range))
if len(dataset_range) == 1 :
dataset_range.append(DEFAULT_RANGE[1])
if not access(dataset_file, R_OK):
raise RuntimeError(Template("the file $file does not exists or is not readable.").substitute(file=dataset_file))
for column in dataset_columns:
if column not in DEFAULT_DATASET_COLUMNS:
raise RuntimeError(Template("Invalid column $column must be one of $columns.").
substitute(column=column, columns=','.join(DEFAULT_DATASET_COLUMNS)))
for visu in dataset_visus:
if visu not in DEFAULT_VISU:
raise RuntimeError(Template("Invalid visu $column must be one of $columns.").
substitute(column=visu, columns=','.join(DEFAULT_VISU)))
for range_num in dataset_range:
if range_num not in range(0, 1001):
raise RuntimeError(Template("Invalid range $column must be between 0 and 999.").
substitute(column=range_num))
return dataset_file, dataset_columns, dataset_visus, dataset_range
| none | 1 | 2.956936 | 3 |