# -*- coding:utf-8 -*-
import sqlite3
import json
import datetime
class Windows_Timeline_Information:
par_id = ''
case_id = ''
evd_id = ''
program_name = ''
display_name = ''
content = ''
activity_type = ''
focus_seconds = ''
start_time = ''
end_time = ''
activity_id = ''
platform = ''
created_time = ''
created_in_cloud_time = ''
last_modified_time = ''
last_modified_on_client_time = ''
original_last_modified_on_client_time = ''
local_only_flag = ''
group = ''
clipboardpayload = ''
timezone = ''
"""
TEST Environment
Windows 10 1909 (OS Build 18363.720)
program_name : AppId -> application
display_name : Payload -> display Text, app display
content : AppActivityId
activity_type : ActivityType
focus_seconds : if EndTime-StartTime > 0 -> EndTime-StartTime
start_time : StartTime
end_time : EndTime
activity_id : Id(Mixed GUID)
platform : AppId -> platform
created_time : ?? needs further analysis; NULL for now
created_in_cloud_time : CreatedInCloud
last_modified_time : LastModifiedTime
last_modified_on_client_time : LastModifiedOnClient
original_last_modified_on_client_time : OriginalLastModifiedOnClient
local_only_flag : IsLocalOnly
"""
# column names for our output DB
our_db_column_name = ['program_name', 'display_name', 'content', 'activity_type', 'focus_seconds', 'start_time', \
'end_time', 'activity_id', 'platform', 'created_time', 'created_in_cloud_time', 'last_modified_time', \
'last_modified_on_client_time', 'original_last_modified_on_client_time', 'local_only_flag', 'group', 'clipboardpayload(base64)', 'timezone']
known_path_dict = dict()
known_path_column = ["6D809377-6AF0-444B-8957-A3773F02200E", "7C5A40EF-A0FB-4BFC-874A-C0F2E0B9FA8E",
"1AC14E77-02E7-4E5D-B744-2EB1AE5198B7", "F38BF404-1D43-42F2-9305-67DE0B28FC23",
"D65231B0-B2F1-4857-A4CE-A8E7C6EA7D27"]
known_path_data = ["%ProgramFiles% (%SystemDrive%\\Program Files)", "%SystemDrive%\\Program Files (x86)", "%SystemRoot%\\System", "%SystemRoot%",
"%SystemRoot%\\system32"]
for i in range(0, len(known_path_column)):
known_path_dict[known_path_column[i]] = known_path_data[i]
parsing_column_name_list = ['AppId', 'Payload', 'AppActivityId', 'ActivityType', 'StartTime', 'EndTime', 'Id',
'CreatedInCloud', 'LastModifiedTime', 'LastModifiedOnClient', 'OriginalLastModifiedOnClient', 'IsLocalOnly', 'Group', 'ClipboardPayload']
# Kept as lists in case the DB column names differ between Windows versions.
app_id_list = ["AppId"]
payload_list = ["Payload"]
app_activity_id_list = ["AppActivityId"]
activity_type_list = ["ActivityType"]
start_time_list = ["StartTime"]
end_time_list = ["EndTime"]
id_list = ["Id"]
created_in_cloud_list = ["CreatedInCloud"]
last_modified_time_list = ["LastModifiedTime"]
last_modified_on_client_list = ["LastModifiedOnClient"]
original_last_modified_on_client_list = ["OriginalLastModifiedOnClient"]
is_local_list = ["IsLocalOnly"]
clipboardpayload_list = ["ClipboardPayload"]
group_list = ["Group"]
def convertTime(unixtime):
if unixtime is not None:
temp_time = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=unixtime)
date = temp_time.isoformat()
date += 'Z'
return date
else:
return None
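# Quick sanity check of the epoch-seconds conversion (illustrative value, not taken
# from a real ActivitiesCache.db):
# >>> convertTime(1584000000)
# '2020-03-12T08:00:00Z'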
def convertbe(bytes):
result = bytes
return result
def convertle(bytes):
result = bytes[::-1]
return result
def parseAppActivityId(data):
known_string = 'ECB32AF3-1440-4086-94E3-5311F97F89C4'
# str.replace removes the well-known GUID prefix; str.strip() strips characters, not a substring
if data.find(known_string) >= 0:
data = data.replace(known_string, '')
return data
def parseType(data):
# Reference: https://github.com/kacos2000/WindowsTimeline/blob/master/WindowsTimeline.sql
type = ""
if data == 2:
type = "Notification"
elif data == 3:
type = "Mobile Backup"
elif data == 5:
type = "Open App/File/Page"
elif data == 6:
type = "App In Use/Focus"
elif data == 10:
type = "Clipboard"
elif data == 16:
type = "Copy/Paste"
elif data in (11, 12, 15):
type = "System"
return type
def parseLocal(data):
local_only_flag = "False"
if data == 1:
local_only_flag = "True"
return local_only_flag
def parseId(data):
# convert the mixed-endian GUID stored in the Id column
le1 = data[0:4]
le2 = data[4:6]
le3 = data[6:8]
be1 = data[8:10]
be2 = data[10:]
activity_id = convertle(le1).hex() + "-" + convertle(le2).hex() + "-" + convertle(le3).hex() + "-" + convertbe(be1).hex() + "-" + convertbe(be2).hex()
return activity_id
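# parseId reorders the first three GUID fields from little-endian to their textual form.
# Illustrative example with a made-up 16-byte value (not from a real database):
# >>> parseId(bytes.fromhex('33221100554477668899aabbccddeeff'))
# '00112233-4455-6677-8899-aabbccddeeff'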
def parseAppId (data) :
#AppId parsing
#Appid structure : {'application': '....', 'platform' : '...'}, ...
result = list()
_json_data = json.loads(data)
json_data = _json_data[0]
if json_data['platform'] == 'afs_crossplatform':
json_data = _json_data[1]
platform = json_data['platform']
application = json_data['application']
else :
platform = json_data['platform']
application = json_data['application']
for i in range (0, len(known_path_column)):
if known_path_column[i] in application:
replaced_path = "{" + known_path_column[i] + "}"
application = application.replace(replaced_path, known_path_dict[known_path_column[i]])
result.append(application)
result.append(platform)
return result
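# parseAppId takes the AppId JSON (a list of {'application': ..., 'platform': ...} entries),
# skips a leading 'afs_crossplatform' entry, and expands known-folder GUIDs such as
# {6D809377-6AF0-444B-8957-A3773F02200E} into '%ProgramFiles% (%SystemDrive%\Program Files)'.
# Hypothetical example: an application of '{6D809377-6AF0-444B-8957-A3773F02200E}\notepad.exe'
# with platform 'windows_win32' is returned as
# ['%ProgramFiles% (%SystemDrive%\Program Files)\notepad.exe', 'windows_win32'].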
def parseClipBoard(data):
#print(data)
if data is not None:
json_data = json.loads(data)
keys = [key for key in json_data]
if len(keys) == 0 :
return None
elif len(keys) !=0 and 'content' in keys[0] :
encoded_text = keys[0]['content']
return encoded_text
else:
return None
def parseGroup(data):
return data
def parsePayload(data) :
result = list() # display Text, app display
display_name = ""
timezone = ""
focus_seconds = 0
try:
json_data = json.loads(data)
keys = [key for key in json_data]
if "displayText" in keys:
displayText = json_data['displayText']
result.append(displayText)
if "appDisplayName" in keys:
displayname = json_data['appDisplayName']
result.append(displayname)
if "activeDurationSeconds" in keys :
focus_seconds = json_data["activeDurationSeconds"]
# if "clipboardDataId" in keys:
# clipboard_id = json_data["clipboardDataId"]
if "userTimezone" in keys:
timezone = json_data["userTimezone"]
if len(result) >= 2 :
display_name = result[0] + " (" + result[1] + ")"
else :
display_name = result[0]
except:
pass
return display_name, focus_seconds, timezone
def saveDataInDict(our_db, output_column_name, data):
our_db[output_column_name] = data
def parsecolumn(our_db, data, column_name):
if column_name in parsing_column_name_list:
saveDataInDict(our_db, 'created_time', '')
if column_name in app_id_list:
(program_name, platform) = parseAppId(data)
saveDataInDict(our_db, 'program_name', program_name)
saveDataInDict(our_db, 'platform', platform)
elif column_name in payload_list:
(display_name, focus_seconds, timezone) = parsePayload(data)
saveDataInDict(our_db, 'display_name', display_name)
saveDataInDict(our_db, 'focus_seconds', focus_seconds)
saveDataInDict(our_db, 'timezone', timezone)
elif column_name in app_activity_id_list:
saveDataInDict(our_db, 'content', parseAppActivityId(data))
elif column_name in activity_type_list :
saveDataInDict(our_db, 'activity_type', parseType(data))
elif column_name in start_time_list:
saveDataInDict(our_db, 'start_time', convertTime(data))
elif column_name in end_time_list :
saveDataInDict(our_db, 'end_time', convertTime(data))
elif column_name in id_list :
saveDataInDict(our_db, 'activity_id', parseId(data))
elif column_name in created_in_cloud_list :
saveDataInDict(our_db, 'created_in_cloud_time', convertTime(data))
elif column_name in last_modified_time_list :
saveDataInDict(our_db, 'last_modified_time', convertTime(data))
elif column_name in last_modified_on_client_list:
saveDataInDict(our_db, 'last_modified_on_client_time', convertTime(data))
elif column_name in original_last_modified_on_client_list :
saveDataInDict(our_db, 'original_last_modified_on_client_time', convertTime(data))
elif column_name in is_local_list:
saveDataInDict(our_db, 'local_only_flag', parseLocal(data))
elif column_name in clipboardpayload_list :
saveDataInDict(our_db, 'clipboardpayload(base64)', parseClipBoard(data))
elif column_name in group_list:
saveDataInDict(our_db, 'group', parseGroup(data))
else :
pass
def convertDictionaryToList(dict):
result = list()
for output_column_name in our_db_column_name:
result.append(dict[output_column_name])
return result
def divide2column(row, column_name_list) :
our_db = dict()
for i in range(0, len(column_name_list)):
parsecolumn(our_db, row[i], column_name_list[i])
result = convertDictionaryToList(our_db)
return result
def WINDOWSTIMELINE(filename):
result = []
column_name_list = []
timeline_count = 0
targetDB = filename
conn = sqlite3.connect(targetDB)
cur = conn.cursor()
# Column layout differs between Windows versions, so rebuild column_name_list from the table schema.
sql_command = "SELECT sql FROM sqlite_master WHERE tbl_name='Activity' AND name = 'Activity'"
cur.execute(sql_command)
first_row = None
for row in cur:
first_row = str(row)
if not first_row:
# TODO: figure out why this happens
return False
first_column = first_row.split('(')
column_list = first_column[2]
start = '['
end = ']'
index1 = -1
index2 = -1
while True:
index1 = column_list.find(start, index1 + 1)
index2 = column_list.find(end, index2+1)
if index1 == -1 or index2 ==-1:
break
column_name_list.append(column_list[index1+1:index2])
sql_command = "SELECT *"
sql_command += " FROM Activity"
cur.execute(sql_command)
for row in cur:
rs = divide2column(row,column_name_list)
windows_timeline_information = Windows_Timeline_Information()
result.append(windows_timeline_information)
result[timeline_count].program_name = rs[0]
result[timeline_count].display_name = rs[1]
if len(rs[2]) == 0:
result[timeline_count].content = rs[2]
else:
if rs[2][0] == '\\':
result[timeline_count].content = rs[2][1:]
else:
result[timeline_count].content = rs[2]
result[timeline_count].activity_type = rs[3]
result[timeline_count].focus_seconds = rs[4]
result[timeline_count].start_time = rs[5]
result[timeline_count].end_time = rs[6]
result[timeline_count].activity_id = rs[7]
result[timeline_count].platform = rs[8]
result[timeline_count].created_time = rs[9]
result[timeline_count].created_in_cloud_time = rs[10]
result[timeline_count].last_modified_time = rs[11]
result[timeline_count].last_modified_on_client_time = rs[12]
result[timeline_count].original_last_modified_on_client_time = rs[13]
result[timeline_count].local_only_flag = rs[14]
result[timeline_count].group = rs[15]
result[timeline_count].clipboardpayload= rs[16]
result[timeline_count].timezone = rs[17]
timeline_count = timeline_count + 1
return result |
import numpy as np
import sys
from nn import NeuralNetwork
import warnings
if __name__ == '__main__':
warnings.filterwarnings("ignore")
NNX = np.loadtxt('data/wheat-seeds.csv',delimiter=',')
NNY = NNX[:,-1:]
NNX = NNX[:, :-1]
model1 = NeuralNetwork(10,3,activate='r',iter=100000,rate=0.1)
print('\nSeed Dataset:\nTraining r square score: '+ str(model1.initializeNN(NNX,NNY)) + '\n15-fold cross validation r square score: ' + str(model1.trainNN(NNX,NNY)))
NNX = np.loadtxt('data/bikes.csv',delimiter=',')
NNY = NNX[:,-1:]
NNX = NNX[:, :-1]
model2 = NeuralNetwork(10,3,activate='r',iter=10000,rate=0.1)
print('\nHousing Dataset:\nTraining r square score: '+ str(model2.initializeNN(NNX,NNY)) + '\n15-fold cross validation r square score: ' + str(model2.trainNN(NNX,NNY)))
|
#!/usr/bin/python3
import os
import csv
import re
# Garrett Maury, 11/30/2021
# Clear Terminal
clear = 'clear'
os.system(clear)
with open('linux_users.csv', 'r') as file:
# read each row into a dictionary
reader = csv.DictReader(file)
data = {}
for row in reader:
for header, value in row.items():
try:
data[header].append(value)
except KeyError:
data[header] = [value]
# Extract all of the variables needed
Employee_ID = data['EmployeeID']
Last_Name = data['LastName']
First_Name = data['FirstName']
Office = data['Office']
Phone = data['Phone']
Department = data['Department']
Group = data['Group']
# Make User Names
all_users = []
for i in range(7):
first_initial = First_Name[i]
# Get first index of string
try:
first_initial = first_initial[0]
user_name = first_initial + Last_Name[i]
except IndexError:
user_name = "Insufficient data."
# Check for Duplicate Names
if all_users.count(user_name) > 0:
user_name = user_name + str(1) # Make a unique username
if all_users.count(user_name) > 1:
user_name = user_name + str(2) # Make a unique username
elif all_users.count(user_name) > 2:
last_char = user_name[-1]
num = int(last_char) + 1
user_name = user_name + str(num)  # convert to str before concatenating
# Check for Duplicate IDs
if Employee_ID.count(Employee_ID[i]) > 1:  # the ID itself always counts once, so >1 means a duplicate
Employee_ID[i] = Employee_ID[i] + str(1) # Make a unique id
if Employee_ID.count(Employee_ID[i]) > 1:
Employee_ID[i] = Employee_ID[i] + str(2) # Make a unique id
elif Employee_ID.count(Employee_ID[i]) > 2:
last_char = Employee_ID[i]
last_char2 = last_char[-1]
num = int(last_char2) + 1
Employee_ID[i] = Employee_ID[i] + str(num)  # convert to str before concatenating
# Check if Last Names have an illegal character
for element in range(len(Last_Name)):
# Check if a string contains only alphabetical letters
if Last_Name[element].isalpha():
pass
else:
Last_Name[element] = re.sub(r"[^a-zA-Z0-9]", "", Last_Name[element])
# Add to a list of all users
all_users.append(user_name)
# Add groups if they don't exist
for element in range(len(Group)):
os.system('groupadd -f ' + Group[element])
# Check for empty fields in the csv file and make sure to note the ones that don't work
bad_numbers = []
for element in range(len(Group)):
if len(Employee_ID[element]) == 0 or len(Last_Name[element]) == 0 or len(First_Name[element]) <= 0 or len(Office[element]) == 0 or len(Phone[element]) == 0 or len(Department[element]) == 0 or len(Group[element]) == 0:
bad_numbers.append(element)
# Try to add the users
print("Adding new users to the system.")
for i in range(7):
# If there is a bad addition
if i in bad_numbers:
id = Employee_ID[i]
username = all_users[i]
print('Cannot process employee ID ' + id + '. ' + username + ' NOT added to system.')
else:
id = Employee_ID[i]
username = all_users[i]
cmd = 'sudo useradd ' + username
# execute command to add users
os.system(cmd)
# Assign users to a group
os.system('sudo usermod -a -G ' + Group[i] + ' ' + username)
# Assign users a home dir
os.system('usermod -d /home/' + Group[i] + ' ' + username)
# assign a default shell
os.system('chsh -s /usr/local/bin/bash ' + username)
# assign a default password
os.system('echo ' + username + ':password | chpasswd')
# expire password
os.system('passwd --expire ' + username)
# Accept the user notification
print('Processing employee ID ' + id + '. ' + username + ' added to system.')
|
#############################################################################
#
# Copyright (c) 2018 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import doctest
import re
import unittest
import zope.locking.testing
import zope.testing.renormalizing
normalizer = zope.testing.renormalizing.RENormalizing([
(re.compile(r'datetime\.timedelta\(0, (.*)\)'),
r'datetime.timedelta(seconds=\1)'),
])
def test_suite():
layer = zope.locking.testing.layer
def get_connection():
return layer.db.open()
def get_db():
return layer.db
suite = unittest.TestSuite((
doctest.DocFileSuite(
'README.rst',
optionflags=doctest.IGNORE_EXCEPTION_DETAIL,
checker=normalizer,
globs=dict(
get_connection=get_connection,
get_db=get_db
)),
doctest.DocFileSuite(
'annoying.rst',
optionflags=doctest.IGNORE_EXCEPTION_DETAIL,
checker=normalizer,
globs=dict(
get_connection=get_connection,
get_db=get_db
)),
doctest.DocFileSuite(
'cleanup.rst',
optionflags=doctest.IGNORE_EXCEPTION_DETAIL,
checker=normalizer,
globs=dict(
get_connection=get_connection,
get_db=get_db
)),
))
suite.layer = layer
return suite
|
import cv2
import os
import configuration
import queue
import random
import string
import tensorflow as tf
from real_time_detection.GUI import FaceFeatureReader
# class FaceReader
class FaceReader:
'''
This class is used to return the face data in real time.
Attribute:
cap: the capture stream
faceCascade: model for detecting where the face is.
file_name: the file name of the current frame in hard disk
delete_queue: the queue is used to save all the delete file name
faces: the faces used for predicting the emotion; we use a list of faces
rather than a single face.
'''
def __init__(self, input_type, file_path=None, face_feature_reader_obj=None):
'''
Arguments:
input_type: 'file' indicates that the stream is from file. In other
case, the stream will come from the default camera.
'''
# graph needs to be modified
tf.compat.v1.get_default_graph()
self.face_feature_reader_obj = face_feature_reader_obj
# self.graph = graph #tf.compat.v1.get_default_graph() # face_feature_reader_obj
# self.face_feature_reader_obj = FaceFeatureReader(self.graph)
self.input_type = input_type
if input_type == 'file':
self.cap = cv2.VideoCapture(file_path)
else:
self.cap = cv2.VideoCapture(0)
ret, frame = self.cap.read()
cascPath = configuration.MODEL_PATH + "haarcascade_frontalface_alt.xml"
self.faceCascade = cv2.CascadeClassifier(cascPath)
self.file_name = None
self.delete_queue = queue.Queue()
self.faces = []
def delete_files(self):
'''
delete files to release resources.
'''
print("delete_files()")
while self.delete_queue.qsize() > 10:
file = self.delete_queue.get()
if (os.path.exists(file)):
os.remove(file)
def get_one_face(self):
'''
Returns:
one face from stream.
'''
print("get_one_face().start...")
if self.input_type == 'file':
cnt = 0
while cnt < 15:
self.cap.read()
cnt += 1
ret, frame = self.cap.read()
print("ret, frame:")
print(ret)
print(frame)
if ret is True:
print("ret is True")
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
(x, y, w, h) = self.detect_face(gray)
if (w != 0):
face = gray[y:y + h, x:x + w]
face = cv2.resize(face, (48, 48))
self.faces.append(face)
frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), thickness=2)
if self.file_name is not None:
print("self.file_name is not None")
del_file_name = 'static/cache_image/%s.png' % self.file_name
self.delete_queue.put(del_file_name)
if self.delete_queue.qsize() > 50:
self.delete_files()
self.file_name = ''.join(random.sample(string.ascii_letters + string.digits, 12))
cv2.imwrite('static/cache_image/%s.png' % self.file_name, frame)
print("get_one_face().end...")
return self.file_name
else:
print("ERROR")
print("get_one_face().end...")
return 'ERROR'
def detect_face(self, gray):
'''
find faces from a gray image.
Arguments:
gray: a gray image
Returns:
(x, y, w, h)
x, y: the left-up points of the face
w, h: the width and height of the face
'''
print("detect_face()")
faces = self.faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(32, 32)
)
print("faces:")
print(faces)
print("faces' len:")
if len(faces) > 0:
print(len(faces))
(x, y, w, h) = faces[0]
print("faces[0]:")
print((x, y, w, h))
else:
print("0")
(x, y, w, h) = (0, 0, 0, 0)
return (x, y, w, h)
def read_face_feature(self):
'''
Returns:
items: a list; the first element is the frame path and the rest
are the feature maps.
'''
print("read_face_feature()")
ret, frame = self.cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
(x, y, w, h) = self.detect_face(gray)
if (w != 0):
face = gray[y:y + h, x:x + w]
face = cv2.resize(face, (48, 48))
self.face_feature_reader_obj.set_face(face)
frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), thickness=2)
random_str = ''.join(random.sample(string.ascii_letters + string.digits, 12))
frame_path = 'static/cache_image/%s.png' % random_str
cv2.imwrite(frame_path, frame)
feature_map_list = self.face_feature_reader_obj.read_feature_map()
items = [frame_path, ]
items += feature_map_list
self.delete_queue.put(frame_path)
if self.delete_queue.qsize() > 10:
self.delete_files()
return items
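# Hypothetical usage sketch (the constructor arguments and cache paths are assumptions,
# not defined in this module):
# reader = FaceReader('camera', face_feature_reader_obj=FaceFeatureReader(...))
# frame_name = reader.get_one_face()    # writes static/cache_image/<frame_name>.png
# items = reader.read_face_feature()    # [frame_path, feature_map, ...]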
# class FaceReader |
HYBRIK_29_KEYPOINTS = [
'pelvis',
'left_hip',
'right_hip', # 2
'spine_1',
'left_knee',
'right_knee', # 5
'spine_2',
'left_ankle',
'right_ankle', # 8
'spine_3',
'left_foot',
'right_foot', # 11
'neck',
'left_collar',
'right_collar', # 14
'jaw', # 15
'left_shoulder',
'right_shoulder', # 17
'left_elbow',
'right_elbow', # 19
'left_wrist',
'right_wrist', # 21
'left_thumb',
'right_thumb', # 23
'head',
'left_middle',
'right_middle', # 26
'left_bigtoe',
'right_bigtoe' # 28
]
|
import requests, bs4, os, youtube_dl
# Returns List of Tracks Joined By "+"
def tracks(url):
res = requests.get(url)
soup = bs4.BeautifulSoup(res.text.encode('utf-8').decode('ascii', 'ignore'), 'html.parser')
searchTracks = soup.select('.update_song_info a')
prettyTracks = soup.select('.update_song_info a')
for n, i in enumerate(searchTracks):
searchTracks[n] = i.getText('').replace('-', '').replace(' ', '+').replace('++', '+')
for n, i in enumerate(prettyTracks):
prettyTracks[n] = i.getText('')
return(searchTracks, prettyTracks)
searchList, prettyList = tracks('http://www.mzhiphop.com')
# Returns Selection of Songs
def userSelection(searchList, prettyList):
for count, i in enumerate(prettyList):
print(str(count) + ' - ' + i)
askList = input('Input numbers or "all": \n>')
if askList == 'all':
return(searchList)
else:
finalList = [int(x) for x in askList.split()]
userSelection = []
for n in finalList:
userSelection.append(searchList[n])
return(userSelection)
userSelection = userSelection(searchList, prettyList)
# Search Youtube
def youtube(song):
resYoutube = requests.get('https://www.youtube.com/results?search_query=' + song)
resYoutube.raise_for_status()
soupYT = bs4.BeautifulSoup(resYoutube.text.encode('utf-8').decode('ascii', 'ignore'), 'html.parser')
checkForAds = soupYT.find_all('div', {'class': 'pyv-afc-ads-container'})
if checkForAds == []:
count = 0
else:
count = 2
video = soupYT.find_all('h3', {'class': 'yt-lockup-title'})
videoHref = video[count].find('a')
url = 'https://www.youtube.com' + videoHref.attrs['href']
return(url)
def getURL(userSelection):
ytList = []
for n in userSelection:
ytList.append(youtube(n))
return(ytList)
urlList = getURL(userSelection)
# Youtube_DL - Download Song
def youtubeDL(url):
options = {
'format': 'bestaudio/best', # choice of quality
'extractaudio' : True, # only keep the audio
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'outtmpl': '/Music/mzhiphop/%(title)s.%(ext)s', # name the file
'noplaylist' : True, # only download single song, not playlist
}
with youtube_dl.YoutubeDL(options) as ydl:
ydl.download([url])
def main():
for d in urlList:
youtubeDL(d)
if __name__ == '__main__':
main() |
import logging.config
from .video import Video
log_config = {
'version': 1,
'formatters': {
'detailed': {
'class': 'logging.Formatter',
'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'detailed',
},
},
'loggers': {
'websockets': {
'level': 'INFO',
'handlers': ['console'],
},
'psivideo': {
'level': 'DEBUG',
'handlers': ['console'],
},
},
'root': {
'level': 'DEBUG',
'handlers': ['console'],
},
}
def main():
#from argparse import ArgumentParser
#parser = ArgumentParser('psivideo')
logging.config.dictConfig(log_config)
video = Video()
video.start()
video.join()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# Copyright mcendu 2019.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
AEAD wrappers.
"""
import struct
from typing import NoReturn
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import ChaCha20
from cryptography.hazmat.primitives.poly1305 import Poly1305
from librvlt.base.callbacks import AEAlgorithm
@AEAlgorithm.register(29)
class ChaCha20Poly1305(AEAlgorithm):
"""
ChaCha20Poly1305 wrapper. Almost exactly what is defined in RFC 8439
<https://tools.ietf.org/html/rfc8439>.
"""
text_len: int = 0
aad_len: int = 0
def __init__(self, key: bytes, iv: bytes,
decrypt: bool = False, aad: bytes = b''):
super().__init__(decrypt)
self.cipher = Cipher(
ChaCha20(key, iv), None, default_backend()
).encryptor()
# Initialize MAC key
mac_key = self.cipher.update(bytes(64))[:32]
# bytes objects do not support '&'; the RFC 8439 clamping of the Poly1305 r value is
# performed inside the Poly1305 primitive, so the raw 32-byte MAC key is used directly.
self.mac = Poly1305(mac_key)
# if AAD exists, update AAD to self.mac
if not aad:
return
self.aad_len = len(aad)
pad1_len = (16 - (self.aad_len % 16)) % 16
self.mac.update(aad + bytes(pad1_len))
def encrypt(self, b: bytes) -> bytes:
c_text = self.cipher.update(b)
self.text_len += len(c_text)
self.mac.update(c_text)
return c_text
def decrypt(self, b: bytes) -> bytes:
self.text_len += len(b)
self.mac.update(b)
return self.cipher.update(b)
def read(self, b: bytes) -> None:
self.mac.update(b)
def __end_auth_msg(self):
pad2_len = (16 - (self.text_len % 16)) % 16
self.mac.update(bytes(pad2_len))
self.mac.update(struct.pack('<QQ',
self.aad_len, self.text_len))
def tag(self) -> bytes:
self.__end_auth_msg()
return self.mac.finalize()
def verify(self, b: bytes) -> bool:
self.__end_auth_msg()
return self.mac.verify(b)
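# Minimal usage sketch (assumes a 32-byte key and the 16-byte ChaCha20 nonce layout
# expected by the cryptography package; the key/nonce values are illustrative only):
# import os
# key, iv = os.urandom(32), os.urandom(16)
# enc = ChaCha20Poly1305(key, iv, aad=b'header')
# ciphertext = enc.encrypt(b'secret message')
# tag = enc.tag()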
|
import sys
from pathlib import Path
from PyQt5 import QtCore, QtGui
from PyQt5.QtCore import Qt, QSettings, QDir
from PyQt5.QtWidgets import (QApplication, QDialog, QGridLayout, QFormLayout, QLabel, QLayout, QLineEdit,
QPushButton, QFileDialog, QWidget, QGroupBox, QVBoxLayout, QHBoxLayout,
QDialogButtonBox, QSizePolicy, QCheckBox, QMessageBox, QScrollArea)
class IPSaveAllDialog(QDialog):
__streams = []
__fileInfo = []
__filtered_fileInfo = []
__fileEdits = []
__filtered_fileEdits = []
def __init__(self, parent):
super().__init__()
self.__parent = parent
self.__buildUI__()
def exec_(self, streams='', directoryPath=None):
self.__streams = streams
self.settings = QSettings('LANL', 'InfraView')
if directoryPath is None:
self.__directoryName = self.settings.value("last_save_directory", QDir.homePath())
else:
self.__directoryName = str(directoryPath)
self.lineEdit_Directory.setText(self.__directoryName)
# manually call this slot to make sure things are populated correctly
self.checkBoxClicked()
return super().exec_()
def __buildUI__(self):
self.setWindowTitle('Save Data')
self.setMinimumWidth(500)
label_directory = QLabel(self.tr('Directory: '))
self.lineEdit_Directory = QLineEdit()
button_directory = QPushButton('Edit...')
button_directory.clicked.connect(self.directoryDialog)
pathWidget = QWidget()
pathLayout = QGridLayout()
pathLayout.addWidget(label_directory, 0, 0)
pathLayout.addWidget(self.lineEdit_Directory, 0, 1)
pathLayout.addWidget(button_directory, 0, 2)
pathWidget.setLayout(pathLayout)
label_saveFiltered = QLabel('Save Filtered Data: ')
self.saveFiltered_check = QCheckBox()
self.saveFiltered_check.clicked.connect(self.checkBoxClicked)
label_saveOriginal = QLabel('Save Original Data: ')
self.saveOriginal_check = QCheckBox()
self.saveOriginal_check.clicked.connect(self.checkBoxClicked)
hlayout = QHBoxLayout()
hlayout.addWidget(label_saveOriginal)
hlayout.addWidget(self.saveOriginal_check)
hlayout.addWidget(label_saveFiltered)
hlayout.addWidget(self.saveFiltered_check)
self.fileWidget = QWidget()
self.gridlayout1 = QGridLayout()
self.gridlayout1.addWidget(self.fileWidget)
self.fileGridLayout = QGridLayout()
self.fileWidget.setLayout(self.fileGridLayout)
self.fileGroupBox = QGroupBox('File Names')
self.fileGroupBox.setLayout(self.gridlayout1)
self.fileScrollArea = QScrollArea()
# self.fileScrollArea.setWidget(self.fileWidget)
self.fileScrollArea.setWidgetResizable(True)
self.filteredGridLayout = QGridLayout()
self.filteredGroupBox = QGroupBox('Filtered File Names')
self.filteredGroupBox.setLayout(self.filteredGridLayout)
self.filteredGroupBox.setVisible(False)
filteredScrollArea = QScrollArea()
# filteredScrollArea.setWidget(self.filteredGroupBox)
filteredScrollArea.setWidgetResizable(True)
buttons = QDialogButtonBox(QDialogButtonBox.Save | QDialogButtonBox.Cancel, Qt.Horizontal, self)
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
mainLayout = QVBoxLayout()
mainLayout.setSizeConstraint(QLayout.SetFixedSize)
mainLayout.addWidget(pathWidget)
mainLayout.addLayout(hlayout)
mainLayout.addWidget(self.fileGroupBox)
mainLayout.addWidget(self.filteredGroupBox)
mainLayout.addStretch()
mainLayout.addWidget(buttons)
self.setLayout(mainLayout)
# this needs to be called after the grid layouts are made...putting it at the end to make sure
self.saveOriginal_check.setChecked(True)
def generateOriginalDataFileInfo(self):
for idx, trace in enumerate(self.__streams):
stats = trace.stats
fileFormat = stats['_format']
basename = trace.id
if basename[0] == '.':
basename = basename[1:]
filename = basename + '.' + fileFormat
self.__fileInfo.append({'fname': filename, 'format': fileFormat, 'directory': self.__directoryName})
self.__fileEdits.append(QLineEdit(filename))
self.__fileEdits[-1].textChanged.connect(self.fileEditsChanged)
self.fileGridLayout.addWidget(self.__fileEdits[idx], idx, 0)
def generateFilteredDataFileInfo(self):
for idx, trace in enumerate(self.__streams):
stats = trace.stats
fileFormat = stats['_format']
basename = trace.id
if basename[0] == '.':
basename = basename[1:]
filename = 'filtered.' + basename + '.' + fileFormat
self.__filtered_fileInfo.append({'fname': filename, 'format': fileFormat, 'directory': self.__directoryName})
self.__filtered_fileEdits.append(QLineEdit(filename))
self.__filtered_fileEdits[idx].textChanged.connect(self.filtered_fileEditsChanged)
self.filteredGridLayout.addWidget(self.__filtered_fileEdits[idx], idx, 0)
def directoryDialog(self):
self.__directoryName = QFileDialog.getExistingDirectory(self, "Choose a Directory", self.__directoryName, QFileDialog.ShowDirsOnly)
if self.__directoryName != '':
self.settings.setValue("last_save_directory", self.__directoryName)
self.lineEdit_Directory.setText(self.__directoryName)
def getSaveDirectory(self):
return self.lineEdit_Directory.text()
def getFileInfo(self):
return self.__fileInfo
def getFilteredFileInfo(self):
return self.__filtered_fileInfo
def getFileChoiceData(self):
# These are the checkboxes for whether to save the original data, the filtered data, or both
if self.saveOriginal_check.isChecked() and not self.saveFiltered_check.isChecked():
# Save just the original data
return 1
elif self.saveFiltered_check.isChecked() and not self.saveOriginal_check.isChecked():
# Save just the filtered data
return 2
elif self.saveOriginal_check.isChecked() and self.saveFiltered_check.isChecked():
# Save both
return 3
else:
# Peculiar case where neither is checked
return 0
def errorPopup(self, message):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
msgBox.setText(message)
msgBox.setWindowTitle("Oops...")
msgBox.exec_()
@QtCore.pyqtSlot()
def fileEditsChanged(self):
# slot function called when a fileEdit box is edited
for idx, newFileName in enumerate(self.__fileEdits):
self.__fileInfo[idx]['fname'] = self.__fileEdits[idx].text()
@QtCore.pyqtSlot()
def filtered_fileEditsChanged(self):
# slot function called when a fileEdit box is edited
for idx, newFileName in enumerate(self.__filtered_fileEdits):
self.__filtered_fileInfo[idx]['fname'] = self.__filtered_fileEdits[idx].text()
@QtCore.pyqtSlot()
def checkBoxClicked(self):
if self.saveFiltered_check.isChecked():
filterDisplaySettings = self.__parent.waveformWidget.filterSettingsWidget.get_filter_display_settings()
if filterDisplaySettings['apply'] is False:
# self.saveFiltered_check.blockSignals(True)
self.saveFiltered_check.setChecked(False)
# self.saveFiltered_check.blockSignals(False)
self.errorPopup('Filter is not currently applied to any data')
return
# clear out previous file info
self.__fileInfo.clear()
# Clear out the layouts
for i in reversed(range(self.fileGridLayout.count())):
self.fileGridLayout.itemAt(i).widget().setParent(None)
for i in reversed(range(self.filteredGridLayout.count())):
self.filteredGridLayout.itemAt(i).widget().setParent(None)
# Repopulate the fileInfo and make the new lineedits
if self.saveOriginal_check.isChecked():
self.fileGroupBox.setVisible(True)
self.generateOriginalDataFileInfo()
else:
self.fileGroupBox.setVisible(False)
if self.saveFiltered_check.isChecked():
self.filteredGroupBox.setVisible(True)
self.generateFilteredDataFileInfo()
else:
self.filteredGroupBox.setVisible(False)
|
import requests
try:
requisicao = requests.get('https://api.github.com/users/adrielcavalcante')
print(requisicao)
except Exception as err:
print('Error: ', err)
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.HomePageView.as_view(), name='home'),
]
|
import os
import re
import subprocess
import nltk
import pyspark.sql as psql
import pyspark.sql.functions as sfuncs
import pyspark.ml as sparkml
import sparknlp
from seldonite import base, collect
class NLP(base.BaseStage):
def __init__(self, input):
super().__init__(input)
self._do_tfidf = False
self._do_get_entities = False
def top_tfidf(self, top_num, save_path=None, load_path=None):
self._do_tfidf = True
self._tfidf_top_num = top_num
self._tfidf_save_path = save_path
self._tfidf_load_path = load_path
return self
def get_entities(self, blacklist_entities=[], max_string_search=None):
self._do_get_entities = True
self._blacklist_entities = blacklist_entities
self._entity_max_string_search = max_string_search
return self
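# Hypothetical configuration sketch (the collector and spark_manager objects come from the
# surrounding seldonite API and are assumptions here, not defined in this file):
# nlp = NLP(collector).top_tfidf(20, save_path='hdfs:///models/idf') \
#                     .get_entities(blacklist_entities=[r'^\d+$'])
# df = nlp._process(spark_manager)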
def _get_entities(self, df, spark_manager):
df.cache()
df = df.withColumnRenamed('text', 'article_text')
df = df.withColumn('text', psql.functions.concat(df['title'], psql.functions.lit('. '), df['article_text']))
if self._entity_max_string_search:
df = df.withColumn('text', sfuncs.substring(sfuncs.col('text'), 1, self._entity_max_string_search))
document_assembler = sparknlp.DocumentAssembler() \
.setInputCol('text') \
.setOutputCol('document')
tokenizer = sparknlp.annotator.Tokenizer() \
.setInputCols(['document']) \
.setOutputCol('token')
token_classifier = sparknlp.annotator.DistilBertForTokenClassification \
.pretrained('distilbert_base_token_classifier_conll03', 'en') \
.setInputCols(['token', 'document']) \
.setOutputCol('ner') \
.setCaseSensitive(True) \
.setMaxSentenceLength(512) \
.setBatchSize(64)
# since output column is IOB/IOB2 style, NerConverter can extract entities
ner_converter = sparknlp.annotator.NerConverter() \
.setInputCols(['document', 'token', 'ner']) \
.setOutputCol('ner_chunk')
entity_pipeline = sparkml.Pipeline(stages=[
document_assembler,
tokenizer,
token_classifier,
ner_converter
])
# add index
df = df.withColumn("id", sfuncs.monotonically_increasing_id())
df.cache()
df = entity_pipeline.fit(df) \
.transform(df)
df = df.drop('text', 'document', 'sentence', 'token', 'embeddings', 'ner')
df = df.withColumnRenamed('article_text', 'text')
# flatten output features column to get indices & value
entity_df = df.select('id', sfuncs.explode(sfuncs.col('ner_chunk')).name('ner_chunk')) \
.select('id', sfuncs.col('ner_chunk.begin').alias('position'), sfuncs.col('ner_chunk.result').alias('entity'), sfuncs.col('ner_chunk.metadata.entity').alias('type'))
# lemmatize
documentAssembler = sparknlp.DocumentAssembler() \
.setInputCol("entity") \
.setOutputCol("document")
tokenizer = sparknlp.annotator.Tokenizer() \
.setInputCols(["document"]) \
.setOutputCol("token")
normalizer = sparknlp.annotator.Normalizer() \
.setInputCols(["token"]) \
.setOutputCol("normalized") \
.setLowercase(True) \
.setCleanupPatterns(["""[^\w\d\s]"""])
lemmatizer = sparknlp.annotator.LemmatizerModel.pretrained("lemma_spacylookup","en") \
.setInputCols(["normalized"]) \
.setOutputCol("lemma")
lemmatizer_pipeline = sparkml.Pipeline(stages=[documentAssembler, tokenizer, normalizer, lemmatizer])
entity_df = lemmatizer_pipeline.fit(entity_df).transform(entity_df)
entity_df = entity_df.drop('entity', 'document', 'token', 'normalized')
entity_df = entity_df.withColumn('entity', sfuncs.col('lemma').getItem(0).getField('result'))
entity_df = entity_df.drop('lemma')
# drop blacklisted entities
for blacklist_entity in self._blacklist_entities:
entity_df = entity_df.where(~sfuncs.col('entity').rlike(blacklist_entity))
# only keep unique entities extracted from articles, drop entities with later positions in text
w = psql.Window.partitionBy(['id', 'entity']).orderBy(sfuncs.asc('position'))
entity_df = entity_df.withColumn('rank',sfuncs.row_number().over(w)) \
.where(sfuncs.col('rank') == 1) \
.drop('rank')
entity_df = entity_df.groupby('id') \
.agg(sfuncs.collect_list(sfuncs.struct(sfuncs.col('entity'), sfuncs.col('type'), sfuncs.col('position'))).name('entities'))
df = df.drop('ner_chunk')
df = df.join(entity_df, 'id')
df = df.drop('id')
df.cache()
df.collect()
return df
def _tfidf(self, df: psql.DataFrame, spark_manager):
try:
eng_stopwords = nltk.corpus.stopwords.words('english')
except LookupError as e:
nltk.download('stopwords')
eng_stopwords = nltk.corpus.stopwords.words('english')
stages = []
cols_to_drop = []
text_cols = ['title', 'text']
for text_col in text_cols:
doc_out_col = f"{text_col}_document"
document_assembler = sparknlp.base.DocumentAssembler() \
.setInputCol(text_col) \
.setOutputCol(doc_out_col)
tokenizer_out_col = f"{text_col}_token"
tokenizer = sparknlp.annotator.Tokenizer() \
.setInputCols(doc_out_col) \
.setOutputCol(tokenizer_out_col)
# note normalizer defaults to changing all words to lowercase.
# Use .setLowercase(False) to maintain input case.
normalizer_out_col = f"{text_col}_normalized"
normalizer = sparknlp.annotator.Normalizer() \
.setInputCols(tokenizer_out_col) \
.setOutputCol(normalizer_out_col) \
.setLowercase(True)
# note that lemmatizer needs a dictionary. So I used the pre-trained
# model (note that it defaults to english)
lemma_out_col = f"{text_col}_lemma"
lemmatizer = sparknlp.annotator.LemmatizerModel.pretrained() \
.setInputCols(normalizer_out_col) \
.setOutputCol(lemma_out_col)
cleaner_out_col = f"{text_col}_clean_lemma"
stopwords_cleaner = sparknlp.annotator.StopWordsCleaner() \
.setInputCols(lemma_out_col) \
.setOutputCol(cleaner_out_col) \
.setCaseSensitive(False) \
.setStopWords(eng_stopwords)# finisher converts tokens to human-readable output
finisher_out_col = f"{text_col}_tokens"
finisher = sparknlp.base.Finisher() \
.setInputCols(cleaner_out_col) \
.setOutputCols(finisher_out_col) \
.setCleanAnnotations(False)
cols_to_drop.extend([
doc_out_col,
tokenizer_out_col,
normalizer_out_col,
lemma_out_col,
cleaner_out_col
])
stages.extend([
document_assembler,
tokenizer,
normalizer,
lemmatizer,
stopwords_cleaner,
finisher
])
pipeline = sparkml.Pipeline() \
.setStages(stages)
# increase number of partitions because of new columns
num_partitions = df.rdd.getNumPartitions()
df = df.repartition(num_partitions * 16)
# catch up on lazy evaluation to get repartitioning done
df.first()
# tokenize, lemmatize, remove stop words
df = pipeline.fit(df) \
.transform(df)
df = df.drop(*cols_to_drop)
all_tokens_col = 'all_tokens'
df = df.withColumn(all_tokens_col, sfuncs.concat(df['text_tokens'], df['title_tokens']))
cv = sparkml.feature.CountVectorizer()
cv.setInputCol(all_tokens_col)
cv_model = cv.fit(df)
idf = sparkml.feature.IDF()
# perform save / load operations if required
if self._tfidf_load_path:
idf_model = sparkml.feature.IDFModel.load(self._tfidf_load_path)
else:
count_feat_col = "all_raw_features"
cv_model.setInputCol(all_tokens_col)
cv_model.setOutputCol(count_feat_col)
df = cv_model.transform(df)
df.cache()
idf.setInputCol(count_feat_col)
idf_model = idf.fit(df)
df = df.drop(count_feat_col)
if self._tfidf_save_path:
idf_model.write().overwrite().save(self._tfidf_save_path)
df = df.drop(all_tokens_col)
# create vocab lookup
spark = spark_manager.get_spark_session()
schema = psql.types.StructType([
psql.types.StructField("word_idx", psql.types.IntegerType(), True),
psql.types.StructField("word", psql.types.StringType(), True)
])
vocab_df = spark.createDataFrame([(id, word) for id, word in enumerate(cv_model.vocabulary)], schema)
# add index
df = df.withColumn("id", sfuncs.monotonically_increasing_id())
# udfs
sparse_to_map_udf = sfuncs.udf(lambda vec : dict(zip(vec.indices.tolist(),vec.values.tolist())),psql.types.MapType(psql.types.StringType(),psql.types.StringType()))
for text_col in text_cols:
# get term frequency
token_col = f"{text_col}_tokens"
count_feat_col = f"{text_col}_raw_features"
cv_model.setInputCol(token_col)
cv_model.setOutputCol(count_feat_col)
df = cv_model.transform(df)
df.cache()
# get inverse document frequency
tfidf_col = f"{text_col}_features"
idf_model.setInputCol(count_feat_col)
idf_model.setOutputCol(tfidf_col)
df = idf_model.transform(df)
# flatten output features column to get indices & value
value_df = df.select('id', sfuncs.explode(sparse_to_map_udf(df[tfidf_col])).name('word_idx','value'))
# keep the top n words per document by rank, join with the vocab lookup, then collect & sort to pair each word with its tf-idf value
w = psql.Window.partitionBy('id').orderBy(sfuncs.desc('value'))
value_df = value_df.withColumn('rank',sfuncs.row_number().over(w)) \
.where(sfuncs.col('rank') <= self._tfidf_top_num)
top_word_df = value_df.join(vocab_df, 'word_idx') \
.groupby('id') \
.agg(sfuncs.sort_array(sfuncs.collect_list(sfuncs.struct(sfuncs.col('value'),sfuncs.col('word'))),asc=False).name(f"{text_col}_top_n"))
df = df.join(top_word_df, 'id')
df = df.drop(count_feat_col, tfidf_col)
return df
def _set_spark_options(self, spark_builder):
spark_builder.use_spark_nlp()
self.input._set_spark_options(spark_builder)
def _process(self, spark_manager):
df = self.input._process(spark_manager)
if self._do_tfidf:
df = self._tfidf(df, spark_manager)
if self._do_get_entities:
df = self._get_entities(df, spark_manager)
return df
def find_topics(self, batch_size=1000):
# NOTE: this method appears to predate the Spark pipeline; it assumes gensim is installed
# and relies on fetch()/preprocess() helpers that are not defined in this class.
from gensim import corpora, models
articles = self.fetch()
prepro = self._process()
more_articles = True
model = None
dictionary = None
while more_articles:
batch_idx = 0
content_batch = []
while batch_idx < batch_size:
try:
article = next(articles)
content_batch.append(article.text)
batch_idx += 1
except StopIteration:
more_articles = False
break
# TODO add bigrams
docs = list(prepro.preprocess(content_batch))
if not dictionary:
# TODO consider using hashdictionary
dictionary = corpora.Dictionary(docs)
no_below = max(1, batch_size // 100)
dictionary.filter_extremes(no_below=no_below, no_above=0.9)
corpus = [dictionary.doc2bow(doc) for doc in docs]
if not model:
# need to 'load' the dictionary
dictionary[0]
# TODO use ldamulticore for speed
model = models.LdaModel(corpus,
id2word=dictionary.id2token,
num_topics=10)
else:
model.update(corpus)
return model, dictionary
|
"""List of forward-compatible entry points for OpenGL 3.1
Taken from the list at:
http://www.devklog.net/2008/08/23/forward-compatible-opengl-3-entry-points/
"""
records = """glActiveTexture
glAttachShader
glBeginConditionalRender
glBeginQuery
glBeginTransformFeedback
glBindAttribLocation
glBindBuffer
glBindBufferBase
glBindBufferRange
glBindFragDataLocation
glBindFramebuffer
glBindRenderbuffer
glBindTexture
glBindVertexArray
glBlendColor
glBlendEquation
glBlendEquationSeparate
glBlendFunc
glBlendFuncSeparate
glBlitFramebuffer
glBufferData
glBufferSubData
glCheckFramebufferStatus
glClampColor
glClear
glClearBuffer*
glClearColor
glClearDepth
glClearStencil
glClipPlane
glColorMask*
glCompileShader
glCompressedTexImage*
glCompressedTexSubImage*
glCopyPixels
glCopyTexImage*
glCopyTexSubImage*
glCreateProgram
glCreateShader
glCullFace
glDeleteBuffers
glDeleteFramebuffers
glDeleteProgram
glDeleteQueries
glDeleteRenderbuffers
glDeleteShader
glDeleteTextures
glDeleteVertexArrays
glDepthFunc
glDepthMask
glDepthRange
glDetachShader
glDisable
glDisableVertexAttribArray
glDrawArrays
glDrawBuffer
glDrawBuffers
glDrawElements
glDrawRangeElements
glEnable
glEnableVertexAttribArray
glEndConditionalRender
glEndQuery
glEndTransformFeedback
glFinish
glFlush
glFlushMappedBufferRange
glFramebufferRenderbuffer
glFramebufferTexture*
glFramebufferTextureLayer
glFrontFace
glGenBuffers
glGenerateMipmap
glGenFramebuffers
glGenQueries
glGenRenderbuffers
glGenTextures
glGenVertexArrays
glGetActiveAttrib
glGetActiveUniform
glGetAttachedShaders
glGetAttribLocation
glGetBooleanv
glGetBufferParameter*
glGetBufferPointer*
glGetBufferSubData
glGetClipPlane
glGetCompressedTexImage
glGetDoublev
glGetError
glGetFloatv
glGetFragDataLocation
glGetFramebufferAttachmentParameter*
glGetIntegerv
glGetProgram*
glGetProgramInfoLog
glGetQuery*
glGetQueryObject*
glGetRenderbufferParameter*
glGetShader*
glGetShaderInfoLog
glGetShaderSource
glGetString
glGetTexEnv*
glGetTexImage
glGetTexLevelParameter*
glGetTexParameter*
glGetTransformFeedbackVaryings
glGetUniform*
glGetUniformLocation
glGetVertexAttrib*
glGetVertexAttribIPointer*
glGetVertexAttribPointer*
glHint
glIsBuffer
glIsEnabled
glIsFramebuffer
glIsProgram
glIsQuery
glIsRenderbuffer
glIsShader
glIsTexture
glIsVertexArray
glLineWidth
glLinkProgram
glLogicOp
glMapBuffer
glMapBufferRange
glMultiDrawArrays
glMultiDrawElements
glPixelStore*
glPointParameter*
glPointSize
glPolygonMode
glReadBuffer
glReadPixels
glRenderbufferStorage
glRenderbufferStorageMultisample
glSampleCoverage
glScissor
glShadeModel
glShaderSource
glStencilFunc
glStencilFuncSeparate
glStencilMask
glStencilMaskSeparate
glStencilOp
glStencilOpSeparate
glTexEnv
glTexImage*
glTexParameter*
glTexSubImage*
glTransformFeedbackVaryings
glUniform1*
glUniform2*
glUniform3*
glUniform4*
glUniformMatrix2*
glUniformMatrix2x3*
glUniformMatrix2x4*
glUniformMatrix3*
glUniformMatrix3x2*
glUniformMatrix3x4*
glUniformMatrix4*
glUniformMatrix4x2*
glUniformMatrix4x3*
glUnmapBuffer
glUseProgram
glValidateProgram
glVertexAttrib1*
glVertexAttrib2*
glVertexAttrib3*
glVertexAttrib4*
glVertexAttrib4N*
glVertexAttribI*
glVertexAttribI4
glVertexAttribIPointer
glVertexAttribPointer
glViewport""".splitlines()
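# Entries ending in '*' are wildcard prefixes (e.g. 'glUniform1*' covers glUniform1f,
# glUniform1i, ...). The deprecated() helper below treats a name as still allowed when it
# matches an entry exactly or begins with a wildcard entry's prefix; for example,
# deprecated('glUniform1f') is False, while deprecated('glBegin') is True.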
def deprecated( name ):
for allowed in records:
if name == allowed:
return False
elif allowed.endswith( '*' ) and name.startswith( allowed[:-1] ):
return False
return True |
from discord.ext import commands
async def is_mees(ctx):
return ctx.author.id == 298890523454734336
class Devs(commands.Cog):
def __init__(self, bot):
self.bot = bot
# # Misc commands
# give dev team role
@commands.command()
@commands.check(is_mees)
async def restore(self, ctx):
await ctx.message.delete()
role = ctx.guild.get_role(750673616584048741)
await ctx.author.add_roles(role)
# # Cogs commands
# Load cog
@commands.command()
@commands.check(is_mees)
async def load(self, ctx, cog):
try:
self.bot.load_extension(f'cogs.{cog}')
await ctx.send(f'`{cog} loaded`')
except Exception as e:
await ctx.send(f'`error: {e}`')
# Unload cog
@commands.command()
@commands.check(is_mees)
async def unload(self, ctx, cog):
if cog == 'devs':
return await ctx.send('`devs cannot be unloaded only updated!`')
try:
self.bot.unload_extension(f'cogs.{cog}')
await ctx.send(f'`{cog} unloaded`')
except Exception as e:
await ctx.send(f'`error: {e}`')
# Update cog
@commands.command()
@commands.check(is_mees)
async def update(self, ctx, cog):
try:
self.bot.unload_extension(f'cogs.{cog}')
self.bot.load_extension(f'cogs.{cog}')
await ctx.send(f'`{cog} updated`')
except Exception as e:
await ctx.send(f'`error: {e}`')
def setup(bot):
bot.add_cog(Devs(bot))
|
##########################################################################
## Summary
##########################################################################
'''
Creates flat table of decisions from our Postgres database and runs the prediction pipeline.
Starting point for running our models.
'''
##########################################################################
## Imports & Configuration
##########################################################################
import logging
import numpy, pandas
from sklearn import metrics
from sklearn.metrics import classification_report
#Configure logging. See /logs/example-logging.py for examples of how to use this.
logging_filename = "../logs/pipeline.log"
logging.basicConfig(filename=logging_filename, level=logging.DEBUG)
#Pushes everything from the logger to the command line output as well.
logging.getLogger().addHandler(logging.StreamHandler())
#Allow modules to import each other in a parallel file structure (TODO clean up this configuration in a refactor, it's messy...)
from inspect import getsourcefile
import os, sys, json
current_path = os.path.abspath(getsourcefile(lambda:0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
repo_dir = parent_dir[:parent_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import database_management
##########################################################################
## Classes
##########################################################################
class ManyModels:
'''
A wrapper class for training multiple sklearn models on a single dataset
The wrapper contains:
-The models themselves (fitted or not), passed as a dictionary from the calling function
-X and y arrays of training data.
-an X_test set of testing data
-The predicted answers of all models, stored as a dataframe with rows matching the X_test dataset
Not optimized for memory use - instead it is designed for as much flexibility and access to source data,
models, and prediction performance as possible for use in a learning context.
Example Use:
#set it up:
modeler = ManyModels()
modeler.models = {} #change this to a dictionary of model instances
modeler.X = X_train
modeler.y = y_train
modeler.y_names = ['A','B']
#Fit:
modeler.fit("RandomForestClassifier") #fit just one model
modeler.fit(model_list=['KNeighborsClassifier_12', 'RandomForestClassifier']) #fit a list of models
modeler.fit() #fits all models
#Attach testing data
modeler.X_test = X_test
modeler.y_test = y_test
#Predict:
predicted_df = modeler.predict() #returns a dataframe of the predicted answers for each model, but also stores the fitted models on the modeler object
'''
def __init__(self):
self.models = {} #dict of 'modelname':sklearn.model_instance
self.X = numpy.array([[],[]]) #blank 2-d array, contains training data
self.y = numpy.array([]) #blank 1-d array, contains training answers
self.pipe = None #a pipeline for transforming this data. Should not contain a final model to predict.
self.answers = pandas.DataFrame() #Pandas dataframe where each row is a row of the test dataset, each column is a different model_list
self.scores = {} #Nested dictionary of shape {'modelname': {'precision': #, 'recall': #, 'accuracy': #, 'f1': # }}
self.X_test = None
self.y_test = None
self.y_names = []
self.version = ""
self.notes = ""
#@property lets us add additional logic to the getters and setters for the X_test property (e.g., resetting the answers and scores)
@property
def X_test(self):
return self.__X_test
@X_test.setter
def X_test(self, X_test=None):
self.__X_test = X_test
#reset since rows will no longer match
self.answers = pandas.DataFrame()
self.scores = {}
@property
def y_test(self):
return self.__y_test
@y_test.setter
def y_test(self, y_test=None):
self.__y_test = y_test
#reset since rows will no longer match
self.answers = pandas.DataFrame()
self.scores = {}
def fit(self, model_list=None):
model_list = self.clean_model_list(model_list)
for key in model_list:
self.models[key].fit(self.X, self.y)
print(" fitted model: " + key)
return self
def predict(self, model_list=None):
model_list = self.clean_model_list(model_list)
for key in model_list:
self.answers[key] = self.models[key].predict(self.X_test)
self.scores[key] = { }
if self.y_test is not None:
self.scores[key]['precision'] = metrics.precision_score(y_true = self.y_test, y_pred = self.answers[key].values, average=None)
self.scores[key]['recall'] = metrics.recall_score(y_true = self.y_test, y_pred=self.answers[key], average=None)
self.scores[key]['accuracy'] = metrics.accuracy_score(y_true = self.y_test, y_pred=self.answers[key])
self.scores[key]['f1'] = metrics.f1_score(y_true = self.y_test, y_pred=self.answers[key], average=None)
self.scores[key]['classification_report'] = classification_report(y_true = self.y_test, y_pred = self.answers[key].values, target_names=self.y_names)
return self.answers
def clean_model_list(self, model_list):
#Resolve defaults and turn a single string into a list
if model_list is None:
model_list = list(self.models.keys())
if isinstance(model_list, str):
model_list = [model_list]
if isinstance(model_list, list):
return model_list
else:
raise ValueError('A provided model_list must be a list or a string.')
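# A concrete sketch of the workflow described in the class docstring (the model choices and
# the train/test variables are illustrative assumptions, not project defaults):
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.neighbors import KNeighborsClassifier
# modeler = ManyModels()
# modeler.models = {'RandomForestClassifier': RandomForestClassifier(),
#                   'KNeighborsClassifier_12': KNeighborsClassifier(n_neighbors=12)}
# modeler.X, modeler.y = X_train, y_train
# modeler.fit()
# modeler.X_test, modeler.y_test = X_test, y_test
# predicted_df = modeler.predict()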
##########################################################################
## Functions
##########################################################################
def test_import():
print("I import correctly!")
def run_simple_query():
#Connect to the database
database_connection = database_management.get_database_connection('database')
query_result = database_connection.execute("select snapshot_id, table_name from manifest where snapshot_id='c2005-07'")
for query_row in query_result:
print(query_row['snapshot_id'] + " | " + query_row['table_name'])
def get_meta_data(filepath=None):
#default path is meta.json in the same folder as this file
if filepath==None:
filepath = 'meta.json'
if os.path.exists(filepath):
with open(filepath, 'r') as f:
meta = json.load(f)
return meta
else:
raise FileNotFoundError("Couldn't find the file {}!".format(filepath))
def list_to_dict(list):
'''
Makes a dictionary mapping each value in the list to its index.
For use with meta.json so that we can convert to the format that the pandas map function expects.
'''
dict={x:i for i,x in enumerate(list)}
return dict
def get_decisions_table(equal_split = False):
'''
Queries the database to get our full decisions table
equal_split not implemented
'''
logging.info("Getting the decisions data from the database...")
# Open and read the SQL command file as a single buffer
database_connection = database_management.get_database_connection('database')
query_path = "select_decisions_data.sql"
file = open(query_path, 'r')
query_text = file.read()
file.close()
query_dataframe = pandas.read_sql(query_text, database_connection)
return query_dataframe
def get_sample_decisions_table(equal_split = False):
'''
Deprecated - Superseded by get_decisions_table now that it is working.
Queries the database to get a small version of our decisions table for training/testing purposes
'''
logging.info("Getting the sample data from the database...")
# Open and read the SQL command file as a single buffer
database_connection = database_management.get_database_connection('database')
query_path = os.path.join(parent_dir, "wrangling", "decisions_partial_churn_filter.sql")
file = open(query_path, 'r')
query_file_text = file.read()
file.close()
#This query will be built on and/or replaced once we get Kashif's SQL query working
query_text = "select" + """
temp.decision
, rent.hd01_vd01 as median_rent
, c.contract_term_months_qty
, c.assisted_units_count
, c.is_hud_administered_ind
, TRIM(c.program_type_group_name) as program_type_group_name
, c.rent_to_FMR_ratio
, c."0br_count" br0_count
, c."1br_count" br1_count
, c."2br_count" br2_count
, c."3br_count" br3_count
, c."4br_count" br4_count
, c."5plusbr_count" br5_count
""" + "from (" + query_file_text + """
) as temp
inner join contracts as c
on c.contract_number = temp.contract_number and c.snapshot_id = temp.snapshot_id
inner join geocode as g
on c.property_id = g.property_id
inner join acs_rent_median as rent
on g.geoid::text = rent.geo_id2::text
where churn_flag<>'churn'
--need to match to closest rent TODO
and rent.snapshot_id = 'ACS_14_5YR_B25058_with_ann.csv'
"""
both_in_out = " and decision in ('in', 'out')"
just_in = " and decision in ('in')"
just_out = " and decision in ('out')"
if equal_split == False:
query1 = query_text + both_in_out
query_dataframe = pandas.read_sql(query1, database_connection)
#Run the query twice to get an equal amount of ins and outs
else:
out_query = query_text + just_out
out_dataframe = pandas.read_sql(out_query, database_connection)
#There are more In decisions, so the size of the out_dataframe is the limiting factor
in_limit = len(out_dataframe.index)
in_query = query_text + just_in + "LIMIT {}".format(in_limit)
in_dataframe = pandas.read_sql(in_query, database_connection)
query_dataframe = pandas.concat([in_dataframe, out_dataframe], ignore_index = True)
return query_dataframe
def get_custom_pipeline(col_names=None):
'''
Defines the pipeline needed to transform our data after it has been cleaned by the clean_dataframe method
col_names is needed to compare to the list of categorical columns
'''
logging.info("Getting a custom pipeline...")
#OneHotEncoder needs True/False on which columns to encode
meta = get_meta_data()
categorical_features = meta['categorical_features']
mask = [False]*len(col_names) #Initialize the list to all False
for index, name in enumerate(col_names):
if name in categorical_features:
mask[index] = True
from sklearn.preprocessing import StandardScaler, Imputer, LabelEncoder, MinMaxScaler, OneHotEncoder
from sklearn.pipeline import Pipeline
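    # Note: this pipeline targets older scikit-learn releases; Imputer and the
    # categorical_features argument of OneHotEncoder were removed in later versions
    # (SimpleImputer and ColumnTransformer are the modern replacements).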
pipeline = Pipeline([ ('imputer', Imputer())
,('onehot', OneHotEncoder(categorical_features=mask, sparse=False))
,('minmax', MinMaxScaler())
])
return pipeline
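# Illustrative usage (a sketch, assuming a dataframe already cleaned by clean_dataframe):
#   pipeline = get_custom_pipeline(col_names=list(dataframe.columns))
#   transformed = pipeline.fit_transform(dataframe)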
def clean_dataframe(dataframe, debug=False):
'''
This method takes and returns a dataframe, which is the training data from our database.
The scope of this function is to get the data ready for sklearn's encoders, using
custom functions for each variable that needs transformation.
Examples:
-Manual categorical encoding
-Conversion of placeholder nulls (e.g. 'N' or '-') to appropriate null values
-Manual imputation when needed (e.g. converting 2000+ value of median rent to 2000)
All the code in this section is custom tailored to the peculiarities of the data formats in our data
'''
logging.info("Cleaning and categorizing the data...")
#Convert all the categorical names to numbers. The complete list of categorical names should be stored in the meta.json file
meta = get_meta_data()
categorical_features = meta['categorical_features']
for column_name in categorical_features:
if column_name in dataframe.columns:
categories = categorical_features[column_name]
categories_map = list_to_dict(categories)
dataframe[column_name] = dataframe[column_name].map(categories_map)
#Replacing string values in rent
replace_mapping = { 'median_rent': {'-': numpy.nan,'100-': 100, '2,000+': 2000}}
try:
dataframe.replace(to_replace=replace_mapping, inplace=True)
dataframe['median_rent'] = pandas.to_numeric(dataframe['median_rent'], errors='ignore')
except TypeError:
print("error caught")
#Probably the median_rent column already had all numbers in it
pass
if debug == True:
logging.info(" saving csv of cleaned data")
dataframe.to_csv('after_clean_all_data.csv')
return dataframe
if __name__ == '__main__':
dataframe = get_decisions_table()
print(dataframe.head())
dataframe = clean_dataframe(dataframe, debug=False)
print(dataframe.head())
|
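# Iterative depth-first flood fill over an H x W grid read from stdin:
# start from 's', treat '#' cells as walls, and print whether 'g' is reachable.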
def get_adj(index, W, H):
    a = []
    if index >= W:          # not in the top row
        a.append(index - W)
    if index % W != 0:      # not in the leftmost column
        a.append(index - 1)
    if index % W != W - 1:  # not in the rightmost column
        a.append(index + 1)
    if index < H*W - W:     # not in the bottom row
        a.append(index + W)
    return a
H,W=map(int,input().split())
M=[j for i in range(H) for j in list(input())]
start=M.index('s')
S=[False for i in range(H*W)]
S[start]=True
F=get_adj(start,W,H)
ans=False
while F:
now=F.pop()
if(M[now]=='#' or S[now]):
continue
if(M[now]=='g'):
ans=True
break
else:
S[now]=True
F.extend(get_adj(now,W,H))
if(ans):
print("Yes")
else:
print("No")
|
import tensorflow as tf
from tensorflow import data
import os
import pandas as pd
from ml4ir.base.model.relevance_model import RelevanceModel
from ml4ir.base.io import file_io
from ml4ir.base.model.scoring.prediction_helper import get_predict_fn
from ml4ir.base.model.relevance_model import RelevanceModelConstants
from ml4ir.applications.ranking.model.scoring import prediction_helper
from ml4ir.applications.ranking.model.metrics import metrics_helper
from typing import Optional
class RankingConstants:
NEW_RANK = "new_rank"
class RankingModel(RelevanceModel):
def predict(
self,
test_dataset: data.TFRecordDataset,
inference_signature: str = "serving_default",
additional_features: dict = {},
logs_dir: Optional[str] = None,
logging_frequency: int = 25,
):
"""
Predict the labels for the trained model
Args:
            test_dataset: an instance of tf.data.dataset
            inference_signature: If using a SavedModel for prediction, specify the inference signature
            additional_features: dict of additional feature functions to apply to the predictions
            logs_dir: optional directory for writing prediction logs
            logging_frequency: integer representing how often (in batches) to log status
Returns:
ranking scores or new ranks for each record in a query
"""
additional_features[RankingConstants.NEW_RANK] = prediction_helper.convert_score_to_rank
return super().predict(
test_dataset=test_dataset,
inference_signature=inference_signature,
additional_features=additional_features,
logs_dir=logs_dir,
logging_frequency=logging_frequency,
)
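    # Illustrative usage (names are assumptions, not defined in this module):
    #   ranking_model.predict(test_dataset=test_dataset, logs_dir="logs/")
    # predict() registers a "new_rank" additional feature before delegating to the parent class.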
def evaluate(
self,
test_dataset: data.TFRecordDataset,
inference_signature: str = None,
additional_features: dict = {},
group_metrics_min_queries: int = 50,
logs_dir: Optional[str] = None,
logging_frequency: int = 25,
):
"""
Evaluate the ranking model
Args:
            test_dataset: an instance of tf.data.dataset
            inference_signature: If using a SavedModel for prediction, specify the inference signature
            additional_features: dict of additional feature functions to apply to the predictions
            group_metrics_min_queries: minimum number of queries a group must have to be reported in group metrics
            logs_dir: optional directory to write the groupwise metrics CSV to
            logging_frequency: integer representing how often (in batches) to log status
Returns:
metrics and groupwise metrics as pandas DataFrames
"""
group_metrics_keys = self.feature_config.get_group_metrics_keys()
evaluation_features = group_metrics_keys + [
self.feature_config.get_query_key(),
self.feature_config.get_label(),
self.feature_config.get_rank(),
]
additional_features[RankingConstants.NEW_RANK] = prediction_helper.convert_score_to_rank
_predict_fn = get_predict_fn(
model=self.model,
tfrecord_type=self.tfrecord_type,
feature_config=self.feature_config,
inference_signature=inference_signature,
is_compiled=self.is_compiled,
output_name=self.output_name,
features_to_return=evaluation_features,
additional_features=additional_features,
max_sequence_size=self.max_sequence_size,
)
batch_count = 0
df_grouped_stats = pd.DataFrame()
for predictions_dict in test_dataset.map(_predict_fn).take(-1):
predictions_df = pd.DataFrame(predictions_dict)
df_batch_grouped_stats = metrics_helper.get_grouped_stats(
df=predictions_df,
query_key_col=self.feature_config.get_query_key("node_name"),
label_col=self.feature_config.get_label("node_name"),
old_rank_col=self.feature_config.get_rank("node_name"),
new_rank_col=RankingConstants.NEW_RANK,
group_keys=self.feature_config.get_group_metrics_keys("node_name"),
)
df_grouped_stats = df_grouped_stats.add(df_batch_grouped_stats, fill_value=0.0)
batch_count += 1
if batch_count % logging_frequency == 0:
self.logger.info("Finished evaluating {} batches".format(batch_count))
# Compute overall metrics
df_overall_metrics = metrics_helper.summarize_grouped_stats(df_grouped_stats)
self.logger.info("Overall Metrics: \n{}".format(df_overall_metrics))
df_group_metrics = None
df_group_metrics_summary = None
if group_metrics_keys:
# Filter groups by min_query_count
df_grouped_stats = df_grouped_stats[
df_grouped_stats["query_count"] >= group_metrics_min_queries
]
# Compute group metrics
df_group_metrics = df_grouped_stats.apply(
metrics_helper.summarize_grouped_stats, axis=1
)
if logs_dir:
file_io.write_df(
df_group_metrics,
outfile=os.path.join(logs_dir, RelevanceModelConstants.GROUP_METRICS_CSV_FILE),
)
# Compute group metrics summary
df_group_metrics_summary = df_group_metrics.describe()
self.logger.info(
"Computing group metrics using keys: {}".format(
self.feature_config.get_group_metrics_keys("node_name")
)
)
self.logger.info("Groupwise Metrics: \n{}".format(df_group_metrics_summary.T))
return df_overall_metrics, df_group_metrics
def save(
self,
models_dir: str,
preprocessing_keys_to_fns={},
postprocessing_fn=None,
required_fields_only: bool = True,
pad_sequence: bool = False,
):
"""
Save tf.keras model to models_dir
Args:
models_dir: path to directory to save the model
"""
def mask_padded_records(predictions, features_dict):
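            # Zero out predictions for padded records (where the "mask" feature is 0)
            # so padding does not influence the saved model's outputs.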
for key, value in predictions.items():
predictions[key] = tf.where(
tf.equal(features_dict["mask"], 0), tf.constant(0.0), predictions[key]
)
return predictions
super().save(
models_dir=models_dir,
preprocessing_keys_to_fns=preprocessing_keys_to_fns,
postprocessing_fn=mask_padded_records,
required_fields_only=required_fields_only,
pad_sequence=pad_sequence,
)
|
import unittest
from classroom import Classroom
class ClassroomTest(unittest.TestCase):
    def setUp(self):
        # use setUp (not __init__) so unittest can instantiate the test case normally
        self._subject = "Math"
|
import os, struct
from secrets import token_bytes
from enum import Enum
from time import time, sleep
from collections import deque
from pymavlink import mavutil
from pymavlink.mavutil import mavlogfile, mavlink
from pymavlink.mavwp import MAVWPLoader
from PyQt5.QtCore import (QMutex, Qt, QThread, QTimer, QVariant, QObject,
QWaitCondition, pyqtSignal)
from PyQt5.QtWidgets import (QComboBox, QGridLayout, QLabel, QPushButton, QLineEdit, QFileDialog,
QSizePolicy, QWidget, QTabWidget, QVBoxLayout, QHBoxLayout, QMessageBox, QProgressBar)
from PyQt5.QtGui import QFontMetrics
from serial.tools.list_ports import comports
from parameters import ParameterPanel
from waypoint import Waypoint
from UserData import UserData
from uas import UASInterfaceFactory
BAUD_RATES = {
0 : 'AUTO',
110 : '110',
300 : '300',
600 : '600',
1200 : '1200',
2400 : '2400',
4800 : '4800',
9600 : '9600',
14400 : '14400',
19200 : '19200',
38400 : '38400',
56000 : '56000',
57600 : '57600',
115200 : '115200',
128000 : '128000',
230400 : '230400',
256000 : '256000',
406800 : '406800',
921600 : '921600'
}
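# A baud rate of 0 ('AUTO') makes SerialConnectionEditTab start an AutoBaudThread,
# which probes the rates above (from 4800 up) until a MAVLink heartbeat is received.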
FLOW_CONTROL = {
0 : 'None',
1 : 'HW',
2 : 'SW'
}
PARITY = {
0 : 'None',
1 : 'Odd',
2 : 'Even'
}
DATA_BITS = {
8 : '8',
7 : '7',
6 : '6',
5 : '5'
}
STOP_BITS = {
1 : '1',
2 : '2'
}
MAVLINK_DIALECTS = {
mavlink.MAV_AUTOPILOT_GENERIC : 'standard',
# mavlink.MAV_AUTOPILOT_RESERVED : '',
mavlink.MAV_AUTOPILOT_SLUGS : 'slugs',
mavlink.MAV_AUTOPILOT_ARDUPILOTMEGA : 'ardupilotmega',
mavlink.MAV_AUTOPILOT_OPENPILOT : 'standard',
mavlink.MAV_AUTOPILOT_GENERIC_WAYPOINTS_ONLY : 'minimal',
mavlink.MAV_AUTOPILOT_GENERIC_WAYPOINTS_AND_SIMPLE_NAVIGATION_ONLY : 'minimal',
mavlink.MAV_AUTOPILOT_GENERIC_MISSION_FULL : 'standard',
# mavlink.MAV_AUTOPILOT_INVALID : '',
mavlink.MAV_AUTOPILOT_PPZ : 'paparazzi',
mavlink.MAV_AUTOPILOT_UDB : 'standard',
mavlink.MAV_AUTOPILOT_FP : 'standard',
mavlink.MAV_AUTOPILOT_PX4 : 'standard',
mavlink.MAV_AUTOPILOT_SMACCMPILOT : 'standard',
mavlink.MAV_AUTOPILOT_AUTOQUAD : 'autoquad',
mavlink.MAV_AUTOPILOT_ARMAZILA : 'standard',
mavlink.MAV_AUTOPILOT_AEROB : 'standard',
mavlink.MAV_AUTOPILOT_ASLUAV : 'ASLUAV',
mavlink.MAV_AUTOPILOT_SMARTAP : 'standard',
mavlink.MAV_AUTOPILOT_AIRRAILS : 'standard'
}
UD_TELEMETRY_KEY = 'TELEMETRY'
UD_TELEMETRY_LOG_FOLDER_KEY = 'LOG_FOLDER'
UD_TELEMETRY_TIMEOUT_THRESHOLD_KEY = 'TIMEOUT_THRESHOLD'
UD_TELEMETRY_HEARTBEAT_TIMEOUT_KEY = 'HB_TIMEOUT'
UD_TELEMETRY_LAST_CONNECTION_KEY = 'LAST_CONN'
UD_TELEMETRY_LAST_CONNECTION_PORT_KEY = 'PORT'
UD_TELEMETRY_LAST_CONNECTION_BAUD_RATE_KEY = 'BAUD_RATE'
DEFAULT_RC_AUTO_SCALE_SAMPLES = 10
MAVLINKV2_MESSAGE_SIGNING_KEY_LEN = 32 # bytes
class MavStsKeys(Enum):
AP_SYS_ID = 0
VEHICLE_TYPE = 1
AP_TYPE = 2
AP_MODE = 3
CUSTOM_AP_MODE = 4
AP_SYS_STS = 5
MAVLINK_VER = 6
class MessageSigningSetupWindow(QWidget):
__mavlinkVersionUpdated = pyqtSignal()
setMessageSigningKeySignal = pyqtSignal(object, object) # key(hex str), initial timestamp (str of 64 bit integer)
def __init__(self, mavlinkVersion = -1.0, parent = None):
super().__init__(parent)
self.setWindowTitle('Message Signing')
self.__mavlinkVersion = mavlinkVersion
self.setLayout(QGridLayout())
self.__initUI()
self.__mavlinkVersionUpdated.connect(self.__initUI)
def setMAVLinkVersion(self, mavlinkVersion):
print('Set MAVLink version to:', mavlinkVersion)
self.__mavlinkVersion = float(mavlinkVersion)
self.__mavlinkVersionUpdated.emit()
def __initUI(self):
l = self.layout()
self.cancelButton = QPushButton('Close')
self.cancelButton.clicked.connect(self.close)
row = 0
if self.__mavlinkVersion == 1.0:
self.__errorMessage('Message signing is not available in MAVLink v1')
elif self.__mavlinkVersion == 2.0:
self.__errorMessage('Setup Message Signing')
row += 1
l.addWidget(QLabel('Secret Key'), row, 0, 1, 1)
self.msgSignSecretField = QLineEdit()
l.addWidget(self.msgSignSecretField, row, 1, 1, 1)
self.generateButton = QPushButton('Random')
self.generateButton.clicked.connect(self.__generateRandomSigningKey)
l.addWidget(self.generateButton, row, 2, 1, 1)
row += 1
l.addWidget(QLabel('Initial Timestamp'), row, 0, 1, 1)
self.msgSignTimeField = QLineEdit()
l.addWidget(self.msgSignTimeField, row, 1, 1, 1)
self.nowButton = QPushButton('Now')
self.nowButton.clicked.connect(self.__getCurrentMavlinkV2Time)
l.addWidget(self.nowButton, row, 2, 1, 1)
row += 1
self.okayButton = QPushButton('OK')
self.cancelButton.setText('Cancel')
self.okayButton.clicked.connect(self.__processMsgSigningSetup)
l.addWidget(self.okayButton, row, 0, 1, 1, Qt.AlignRight)
l.addWidget(self.cancelButton, row, 1, 1, 1, Qt.AlignRight)
ft = self.msgSignSecretField.font()
if ft != None:
metrics = QFontMetrics(ft)
# metrics.height() ~ metrics.width() x 2
w = metrics.height() * MAVLINKV2_MESSAGE_SIGNING_KEY_LEN
self.msgSignSecretField.setFixedWidth(w)
self.msgSignTimeField.setFixedWidth(w)
elif self.__mavlinkVersion == -1.0:
self.__errorMessage('Connect to MAVLink first')
else:
self.__errorMessage('Unknown MAVLink version: {}'.format(self.__mavlinkVersion))
self.setLayout(l)
def __errorMessage(self, msg):
msgLabel = self.layout().itemAt(0)
if msgLabel == None:
self.layout().addWidget(QLabel(msg), 0, 0, 1, 1)
else:
msgLabel.widget().setText(msg)
def __generateRandomSigningKey(self):
key = token_bytes(MAVLINKV2_MESSAGE_SIGNING_KEY_LEN).hex()
self.msgSignSecretField.setText(key)
def __getCurrentMavlinkV2Time(self):
# units of 10 microseconds since 01-JAN-2015 GMT
# https://mavlink.io/en/guide/message_signing.html#timestamp
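        # 1420070400 is the Unix timestamp of 01-JAN-2015 00:00:00 GMT, and
        # multiplying the elapsed seconds by 100 * 1000 converts them to 10-microsecond units.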
tm = int((time() - 1420070400) * 100 * 1000)
self.msgSignTimeField.setText(str(tm))
def __processMsgSigningSetup(self):
self.setMessageSigningKeySignal.emit(self.msgSignSecretField.text(), self.msgSignTimeField.text())
class RadioControlTelemetryWindow(QWidget):
def __init__(self, parent = None):
super().__init__(parent)
self.isAnyRCChannelsUpdate = False
self.__defaultWidget = None
self.setWindowTitle('Radio Control Telemetry')
self.__createDefaultWidget()
self.tabs = QTabWidget()
self.tabs.addTab(self.__defaultWidget, 'RC Telemetry')
self.ports = {}
l = QVBoxLayout()
l.addWidget(self.tabs)
self.setLayout(l)
def updateRCChannelValues(self, msg):
if msg.port not in self.ports:
if self.isAnyRCChannelsUpdate == False:
self.isAnyRCChannelsUpdate = True
self.tabs.removeTab(0)
self.ports[msg.port] = RadioControlTelemetryPanel()
self.tabs.addTab(self.ports[msg.port], 'Receiver {}'.format(msg.port))
channels = []
channels.append(msg.chan1_raw)
channels.append(msg.chan2_raw)
channels.append(msg.chan3_raw)
channels.append(msg.chan4_raw)
channels.append(msg.chan5_raw)
channels.append(msg.chan6_raw)
channels.append(msg.chan7_raw)
channels.append(msg.chan8_raw)
self.ports[msg.port].updateValues(channels)
def __createDefaultWidget(self):
self.__defaultWidget = QWidget()
l = QVBoxLayout()
l.addWidget(QLabel('No RC channel value message has been received.'))
self.__defaultWidget.setLayout(l)
class RadioControlTelemetryPanel(QWidget):
def __init__(self, parent = None):
super().__init__(parent)
l = QGridLayout()
self.__autoScaleSamples = DEFAULT_RC_AUTO_SCALE_SAMPLES
self.channelValueRanges = [] # (min, max, samples)
self.channelValueBars = []
self.channelValueLabels = []
for i in range(8):
self.channelValueRanges.append((1000000, 0, 0))
self.channelValueBars.append(QProgressBar(self))
self.channelValueLabels.append(QLabel('0 ms', self))
self.channelValueBars[i].setRange(1000, 2000)
self.channelValueBars[i].setTextVisible(False)
l.addWidget(QLabel('Channel {}'.format(i + 1)), i, 0, 1, 1)
l.addWidget(self.channelValueBars[i], i, 1, 1, 1)
l.addWidget(self.channelValueLabels[i], i, 2, 1, 1)
l.setColumnStretch(1, 1)
self.setLayout(l)
def updateValues(self, values):
for i in range(8):
if values[i] < self.channelValueRanges[i][0]:
self.channelValueRanges[i] = (values[i], self.channelValueRanges[i][1], self.channelValueRanges[i][2])
if values[i] > self.channelValueRanges[i][1]:
self.channelValueRanges[i] = (self.channelValueRanges[i][0], values[i], self.channelValueRanges[i][2])
if self.channelValueRanges[i][1] > self.channelValueRanges[i][0]:
if self.channelValueRanges[i][2] < self.__autoScaleSamples:
# First `self.__autoScaleSamples` samples will always be used to update scale
self.channelValueBars[i].setRange(self.channelValueRanges[i][0], self.channelValueRanges[i][1])
self.channelValueRanges[i] = (self.channelValueRanges[i][0], self.channelValueRanges[i][1], self.channelValueRanges[i][2] + 1)
else:
# After that, only values exceeding current ranges will be updated
if self.channelValueRanges[i][0] < self.channelValueBars[i].minimum():
self.channelValueBars[i].setMinimum(self.channelValueRanges[i][0])
if self.channelValueRanges[i][1] > self.channelValueBars[i].maximum():
self.channelValueBars[i].setMaximum(self.channelValueRanges[i][1])
self.channelValueBars[i].setValue(values[i])
self.channelValueLabels[i].setText('{} ms'.format(values[i]))
class ConnectionEditWindow(QWidget):
MAVLinkConnectedSignal = pyqtSignal(object)
cancelConnectionSignal = pyqtSignal()
def __init__(self, parent = None):
super().__init__(parent)
self.tabs = QTabWidget(self)
self._createTabs()
l = QVBoxLayout()
l.setContentsMargins(0, 0, 0, 0)
l.addWidget(self.tabs)
l.addWidget(self.__createActionButtons())
self.setLayout(l)
def _createTabs(self):
self.serialConnTab = SerialConnectionEditTab(parent=self)
self.logReplayTab = LogFileReplayEditTab(self)
self.tabs.addTab(self.serialConnTab, 'Serial Link')
self.tabs.addTab(self.logReplayTab, 'Log File Replay')
def __createActionButtons(self):
l = QHBoxLayout()
l.setContentsMargins(5, 0, 5, 5)
self.connectButton = QPushButton('Connect')
self.closeButton = QPushButton('Close')
self.connectButton.clicked.connect(self._doConnect)
self.closeButton.clicked.connect(self.close)
l.addWidget(self.connectButton)
l.addWidget(self.closeButton)
self.actionButtonWidget = QWidget()
self.actionButtonWidget.setLayout(l)
return self.actionButtonWidget
def closeEvent(self, event):
self.cancelConnectionSignal.emit()
super().closeEvent(event)
def _doConnect(self):
currTab = self.tabs.currentWidget()
if hasattr(currTab, 'doConnect'):
if currTab.doConnect():
self.close()
class LogFileReplaySpeedControl(mavlogfile, QObject):
replayCompleteSignal = pyqtSignal()
def __init__(self, filename):
mavlogfile.__init__(self, filename)
QObject.__init__(self)
self.replaySpeed = 1.0
def pre_message(self):
super().pre_message()
if self._last_timestamp is not None and self.replaySpeed > 0:
ts = abs(self._timestamp - self._last_timestamp) * self.replaySpeed
sleep(ts)
def recv(self,n=None):
b = super().recv(n)
        if b is None or (n is not None and len(b) < n):
self.replayCompleteSignal.emit()
return b
def write(self, buf):
        '''Log files are opened in read-only mode; all write operations are ignored.'''
pass
class LogFileReplayEditTab(QWidget):
def __init__(self, parent):
super().__init__(parent)
self.MAVLinkConnectedSignal = parent.MAVLinkConnectedSignal
l = QVBoxLayout()
l.setAlignment(Qt.AlignTop)
lbl = QLabel('Choose Log File')
l.addWidget(lbl)
fileWidget = QWidget(self)
l1 = QHBoxLayout()
self.logFilePathEdit = QLineEdit(self)
sp = self.logFilePathEdit.sizePolicy()
sp.setHorizontalStretch(1)
self.logFilePathEdit.setSizePolicy(sp)
l1.addWidget(self.logFilePathEdit)
self.browseButton = QPushButton('Browse')
self.browseButton.clicked.connect(self.__chooseLogFile)
l1.addWidget(self.browseButton)
fileWidget.setLayout(l1)
l.addWidget(fileWidget)
self.setLayout(l)
def doConnect(self):
fileName = self.logFilePathEdit.text()
if os.path.isfile(fileName):
print('Replay Log file:', fileName)
connection = LogFileReplaySpeedControl(fileName)
self.MAVLinkConnectedSignal.emit(connection)
return True
QMessageBox.critical(self.window(), 'Error', 'Invalid log file: {}'.format(fileName), QMessageBox.Ok)
return False
    def __chooseLogFile(self):
        # PyQt5's getOpenFileName returns a (path, selected_filter) tuple; the path is '' if cancelled
        fileName, _ = QFileDialog.getOpenFileName(self, 'Choose Log File')
        if fileName:
            self.logFilePathEdit.setText(fileName)
class SerialConnectionEditTab(QWidget):
__autoBaudStartSignal = pyqtSignal(object)
def __init__(self, initParams = None, parent = None):
super().__init__(parent)
self.portList = {}
self.autoBaud = None
self.MAVLinkConnectedSignal = parent.MAVLinkConnectedSignal
self.MAVLinkConnectedSignal.connect(self.__recordLastConnection)
self.__autoBaudStartSignal.connect(self.__autoBaud)
self.listSerialPorts()
if initParams == None:
self.params = self.__getLastConnectionParameter()
else:
self.params = initParams
l = QGridLayout()
row = 0
lbl, self.portsDropDown = self._createDropDown(
'Serial Port', self.portList,
UserData.getParameterValue(self.params, UD_TELEMETRY_LAST_CONNECTION_PORT_KEY))
l.addWidget(lbl, row, 0, 1, 1, Qt.AlignRight)
l.addWidget(self.portsDropDown, row, 1, 1, 3, Qt.AlignLeft)
self.refreshButton = QPushButton('\u21BB') # Unicode for clockwise open circle arrow
self.refreshButton.setFixedSize(self.portsDropDown.height(), self.portsDropDown.height())
l.addWidget(self.refreshButton, row, 4, 1, 1, Qt.AlignLeft)
self.refreshButton.clicked.connect(lambda: self.listSerialPorts(self.portsDropDown))
row += 1
lbl, self.baudDropDown = self._createDropDown(
'Baud Rate', BAUD_RATES,
UserData.getParameterValue(self.params, UD_TELEMETRY_LAST_CONNECTION_BAUD_RATE_KEY))
l.addWidget(lbl, row, 0, 1, 1, Qt.AlignRight)
l.addWidget(self.baudDropDown, row, 1, 1, 3, Qt.AlignLeft)
row += 1
lbl, self.flowDropDown = self._createDropDown('Flow Control', FLOW_CONTROL)
l.addWidget(lbl, row, 0, 1, 1, Qt.AlignRight)
l.addWidget(self.flowDropDown, row, 1, 1, 1, Qt.AlignLeft)
lbl, self.parityDropDown = self._createDropDown('Parity', PARITY)
l.addWidget(lbl, row, 2, 1, 1, Qt.AlignRight)
l.addWidget(self.parityDropDown, row, 3, 1, 1, Qt.AlignLeft)
row += 1
lbl, self.bitsDropDown = self._createDropDown('Data Bits', DATA_BITS)
l.addWidget(lbl, row, 0, 1, 1, Qt.AlignRight)
l.addWidget(self.bitsDropDown, row, 1, 1, 1, Qt.AlignLeft)
lbl, self.stopDropDown = self._createDropDown('Stop Bits', STOP_BITS)
l.addWidget(lbl, row, 2, 1, 1, Qt.AlignRight)
l.addWidget(self.stopDropDown, row, 3, 1, 1, Qt.AlignLeft)
row += 1
self.autoBaudMessageLabel = QLabel('')
l.addWidget(self.autoBaudMessageLabel, row, 0, 1, 3)
row += 1
self.setLayout(l)
def _createDropDown(self, label, data: dict, defaultValue = None):
dropDown = QComboBox(self)
dropDown.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
i = 0
for key, val in data.items():
dropDown.addItem(str(val), QVariant(key))
if key == defaultValue:
dropDown.setCurrentIndex(i)
i += 1
return QLabel(label), dropDown
def listSerialPorts(self, dropDown = None):
portsInfo = sorted(comports(False))
cnts = 0
self.portList.clear()
for p in portsInfo:
self.portList[p.device] = p
cnts += 1
if cnts == 0:
self.portList['No ports available'] = 'No ports available'
if dropDown != None:
while dropDown.count() > 0:
dropDown.removeItem(0)
for key, val in self.portList.items():
dropDown.addItem(str(val), QVariant(key))
def doConnect(self):
port = self.portsDropDown.currentData()
baud = self.baudDropDown.currentData()
if baud == 0:
self.__autoBaudStartSignal.emit(port)
return False # Keep window open while auto bauding
connection = mavutil.mavlink_connection(port, int(baud))
self.MAVLinkConnectedSignal.emit(connection)
return True
def __autoBaud(self, port):
self.autoBaud = AutoBaudThread(port, self)
        # climb the parent chain from this tab up to the enclosing ConnectionEditWindow and close it
        self.autoBaud.finished.connect(self.parentWidget().parentWidget().parentWidget().close)
self.autoBaud.autoBaudStatusUpdateSignal.connect(self.autoBaudMessageLabel.setText)
self.autoBaud.start()
def __recordLastConnection(self, conn):
if isinstance(conn, mavlogfile) == False:
self.params[UD_TELEMETRY_LAST_CONNECTION_PORT_KEY] = conn.device
self.params[UD_TELEMETRY_LAST_CONNECTION_BAUD_RATE_KEY] = conn.baud
def __getLastConnectionParameter(self):
pParam = UserData.getInstance().getUserDataEntry(UD_TELEMETRY_KEY, {})
return UserData.getParameterValue(pParam, UD_TELEMETRY_LAST_CONNECTION_KEY, {})
class AutoBaudThread(QThread):
autoBaudStatusUpdateSignal = pyqtSignal(object)
def __init__(self, port, parent):
super().__init__(parent)
self.MAVLinkConnectedSignal = parent.MAVLinkConnectedSignal
self.port = port
def run(self):
for b in BAUD_RATES:
if b >= self.__minimumBaudRate():
self.autoBaudStatusUpdateSignal.emit('AutoBaud: try baud rate {}'.format(b))
conn = mavutil.mavlink_connection(self.port, b)
                hb = conn.wait_heartbeat(timeout=2.0) # set timeout to 2 seconds
if hb == None:
self.autoBaudStatusUpdateSignal.emit('AutoBaud: timeout for baud rate {}'.format(b))
# Reset environment variables after a failed attempt
# Otherwise mavutil.auto_mavlink_version may result in
# unexpected behaviour
if 'MAVLINK09' in os.environ:
del os.environ['MAVLINK09']
if 'MAVLINK20' in os.environ:
del os.environ['MAVLINK20']
conn.close()
else:
self.autoBaudStatusUpdateSignal.emit('AutoBaud: correct baud rate is {}'.format(b))
self.MAVLinkConnectedSignal.emit(conn)
return
# Fail back to default mavlink baud rate
self.autoBaudStatusUpdateSignal.emit('AutoBaud: default 57600')
self.MAVLinkConnectedSignal.emit(mavutil.mavlink_connection(self.port, 57600))
def __minimumBaudRate(self):
return 4800
class MAVLinkConnection(QThread):
externalMessageHandler = pyqtSignal(object) # pass any types of message to an external handler
connectionEstablishedSignal = pyqtSignal()
onboardWaypointsReceivedSignal = pyqtSignal(object) # pass the list of waypoints as parameter
newTextMessageSignal = pyqtSignal(object)
messageTimeoutSignal = pyqtSignal(float) # pass number of seconds without receiving any messages
heartbeatTimeoutSignal = pyqtSignal()
DEFAULT_MESSAGE_TIMEOUT_THRESHOLD = 2.0
    DEFAULT_HEARTBEAT_TIMEOUT = 5.0
def __init__(self, connection, replayMode = False, enableLog = True):
super().__init__()
self.internalHandlerLookup = {}
self.mavStatus = {MavStsKeys.AP_SYS_ID : 1}
self.isConnected = False
# self.paramList = []
self.paramPanel = None
self.txLock = QMutex() # uplink lock
self.txResponseCond = QWaitCondition()
self.txTimeoutTimer = QTimer()
self.finalWPSent = False
self.wpLoader = MAVWPLoader()
self.onboardWPCount = 0
self.numberOfonboardWP = 0
self.onboardWP = []
self.mavlinkLogFile = None
self.lastMessageReceivedTimestamp = 0.0
self.lastMessages = {} # type = (msg, timestamp)
self.param = UserData.getInstance().getUserDataEntry(UD_TELEMETRY_KEY, {})
self.messageTimeoutThreshold = UserData.getParameterValue(self.param,
UD_TELEMETRY_TIMEOUT_THRESHOLD_KEY,
MAVLinkConnection.DEFAULT_MESSAGE_TIMEOUT_THRESHOLD)
self.txTimeoutmsec = self.messageTimeoutThreshold * 1000000
# timeout for wait initial heartbeat signal
self.initHeartbeatTimeout = UserData.getParameterValue(self.param,
UD_TELEMETRY_HEARTBEAT_TIMEOUT_KEY,
MAVLinkConnection.DEFAULT_HEARTBEAT_TIMEOUT)
self.txMessageQueue = deque()
self.running = True
self.connection = connection
self.replayMode = replayMode
self.enableLog = enableLog
self.uas = None
if replayMode:
self.enableLog = False
connection.replayCompleteSignal.connect(self.requestExit)
self.internalHandlerLookup['PARAM_VALUE'] = self.receiveOnboardParameter
self.internalHandlerLookup['MISSION_REQUEST'] = self.receiveMissionRequest
self.internalHandlerLookup['MISSION_ACK'] = self.receiveMissionAcknowledge
self.internalHandlerLookup['MISSION_COUNT'] = self.receiveMissionItemCount
self.internalHandlerLookup['MISSION_ITEM'] = self.receiveMissionItem
self.internalHandlerLookup['DATA_STREAM'] = self.receiveDataStream
self.internalHandlerLookup['PARAM_SET'] = self.receiveParameterSet
self.txTimeoutTimer.timeout.connect(self._timerTimeout)
self.txTimeoutTimer.setSingleShot(True)
# print('waiting for heart beat...')
# self._establishConnection()
def requestExit(self):
# print('exit conn thread...')
self.running = False
def run(self):
while self.running:
msg = self.connection.recv_match(blocking=False)
if msg != None:
msgType = msg.get_type()
if msgType != 'BAD_DATA':
# exclude BAD_DATA from any other messages
self.lastMessageReceivedTimestamp = time()
self.lastMessages[msgType] = (msg, self.lastMessageReceivedTimestamp)
if self.enableLog:
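                        # tlog-style framing: prefix each message with a big-endian
                        # 64-bit microsecond timestamp (low two bits cleared)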
ts = int(time() * 1.0e6) & ~3
self.mavlinkLogFile.write(struct.pack('>Q', ts) + msg.get_msgbuf())
# 1. send message to external destination
self.externalMessageHandler.emit(msg)
# 2. process message with internal UASInterface
self.uas.receiveMAVLinkMessage(msg)
# 3. process message with other internal handlers
if msgType in self.internalHandlerLookup:
self.internalHandlerLookup[msgType](msg)
else:
# TODO handle BAD_DATA?
print('BAD_DATA:', msg)
rs = time() - self.lastMessageReceivedTimestamp
if (rs > self.messageTimeoutThreshold):
print('Message timeout:', rs)
self.messageTimeoutSignal.emit(rs)
try:
txMsg = self.txMessageQueue.popleft()
print('sending mavlink msg:', txMsg)
self.connection.mav.send(txMsg)
except IndexError:
pass
self.__doDisconnect()
def __doDisconnect(self, txtmsg = 'Disconnected'):
self.connection.close()
self.isConnected = False
if self.enableLog and self.mavlinkLogFile != None:
self.mavlinkLogFile.close()
self.uas.resetOnboardParameterList()
self.newTextMessageSignal.emit(txtmsg)
def establishConnection(self):
hb = self.connection.wait_heartbeat(timeout=self.initHeartbeatTimeout)
if hb == None:
self.running = False
self.__doDisconnect('Connection timeout')
self.heartbeatTimeoutSignal.emit()
return
self.lastMessageReceivedTimestamp = time()
self.__createLogFile()
self.__setMavlinkDialect(hb.autopilot)
self.mavStatus[MavStsKeys.VEHICLE_TYPE] = hb.type
self.mavStatus[MavStsKeys.AP_TYPE] = hb.autopilot
self.mavStatus[MavStsKeys.AP_MODE] = hb.base_mode
self.mavStatus[MavStsKeys.CUSTOM_AP_MODE] = hb.custom_mode
self.mavStatus[MavStsKeys.AP_SYS_STS] = hb.system_status
self.mavStatus[MavStsKeys.MAVLINK_VER] = hb.mavlink_version
# request all parameters
if self.replayMode:
            self.newTextMessageSignal.emit('Connected in log file replay mode')
self.isConnected = True
self.connectionEstablishedSignal.emit()
else:
            self.newTextMessageSignal.emit('Connected to AP:{}'.format(self.mavStatus[MavStsKeys.AP_TYPE]))
self.uas.fetchAllOnboardParameters()
def receiveOnboardParameter(self, msg):
self.uas.acceptOnboardParameter(msg)
self.newTextMessageSignal.emit('Param: {} = {}'.format(msg.param_id, msg.param_value))
if self.uas.onboardParamNotReceived == 0:
self.newTextMessageSignal.emit('{} parameters received'.format(msg.param_count))
if self.param['DOWNLOAD_WAYPOINTS_ON_CONNECT']:
self.downloadWaypoints() # request to read all onboard waypoints
if self.isConnected == False:
# prevent further signals when refreshing parameters
self.isConnected = True
self.connectionEstablishedSignal.emit()
def receiveMissionItem(self, msg):
self.numberOfonboardWP += 1
wp = Waypoint(msg.seq, msg.x, msg.y, msg.z)
wp.waypointType = msg.command
self.onboardWP.append(wp)
if self.numberOfonboardWP < self.onboardWPCount:
self.connection.waypoint_request_send(self.numberOfonboardWP) # read next one
else:
self.newTextMessageSignal.emit('Total {} waypoint(s) onboard'.format(len(self.onboardWP)))
self.onboardWaypointsReceivedSignal.emit(self.onboardWP) # all done, send signal
def receiveMissionItemCount(self, msg):
self.onboardWPCount = msg.count
if self.onboardWPCount > 0:
self.connection.waypoint_request_send(0) # start reading onboard waypoints
def receiveMissionRequest(self, msg):
# print('missionRequest:', msg)
self.sendMavlinkMessage(self.wpLoader.wp(msg.seq))
def receiveMissionAcknowledge(self, msg):
print('missionRequestAck:', msg)
self.txResponseCond.wakeAll()
def receiveDataStream(self, msg):
# DATA_STREAM {stream_id : 10, message_rate : 0, on_off : 0}
print(msg)
def receiveParameterSet(self, msg):
# PARAM_SET {target_system : 81, target_component : 50, param_id : BFLOW_GYRO_COM, param_value : 0.0, param_type : 9}
print(msg)
def showParameterEditWindow(self):
if self.isConnected:
if self.uas.onboardParamNotReceived > 0:
QMessageBox.warning(None, 'Warning',
'Please wait while receiving all onboard parameters, {} parameters left.'.format(self.uas.onboardParamNotReceived),
QMessageBox.Ok)
else:
self.paramPanel = ParameterPanel(self.uas.onboardParameters)
self.paramPanel.uploadNewParametersSignal.connect(self.uploadNewParametersEvent)
self.paramPanel.show()
def downloadWaypoints(self):
self.connection.waypoint_request_list_send()
def uploadWaypoints(self, wpList):
seq = 0
for wp in wpList:
item = wp.toMavlinkMessage(self.connection.target_system, self.connection.target_component, seq, 0, 1)
seq += 1
self.wpLoader.add(item)
print('all wp queued!')
self._sendMissionCount(len(wpList))
def setHomePosition(self, wp):
item = mavutil.mavlink.MAVLink_mission_item_message(self.connection.target_system, self.connection.target_component, 0,
mavlink.MAV_FRAME_GLOBAL, mavlink.MAV_CMD_DO_SET_HOME , 1, 0,
1, None, None, None,
wp.latitude, wp.longitude, wp.altitude)
self.sendMavlinkMessage(item)
def _sendMissionCount(self, cnt):
print('{} waypoints to be sent'.format(cnt))
# self.txTimeoutTimer.start(self.txTimeoutmsec)
self.txLock.lock()
# self.connection.waypoint_clear_all_send()
self.connection.waypoint_count_send(cnt)
print('[CNT] wait for response...')
self.txResponseCond.wait(self.txLock)
self.txLock.unlock()
print('[CNT] Got response!')
def sendMavlinkMessage(self, msg):
''' Add a mavlink message to the tx queue '''
if msg.target_system == 255:
msg.target_system = self.connection.target_system
if msg.target_component == 255:
msg.target_component = self.connection.target_component
self.txMessageQueue.append(msg)
def _timerTimeout(self):
print('Timeout')
self.txResponseCond.wakeAll()
def navigateToWaypoint(self, wp: Waypoint):
item = mavutil.mavlink.MAVLink_mission_item_message(self.connection.target_system, self.connection.target_component, 0,
mavlink.MAV_FRAME_GLOBAL, mavlink.MAV_CMD_NAV_WAYPOINT, 1,
1, # Auto continue to next waypoint
0, 0, 0, 0, wp.latitude, wp.longitude, wp.altitude)
self.sendMavlinkMessage(item)
def initializeReturnToHome(self):
self.connection.set_mode_rtl()
def uploadNewParametersEvent(self, params):
        # The params from the UI are MAVLink_param_value_message objects, which
        # must stay consistent with the parameters downloaded upon connection.
        # They are converted to MAVLink_param_set_message before being sent to the UAV.
for param in params:
paramSet = mavutil.mavlink.MAVLink_param_set_message(self.connection.target_system,
self.connection.target_component,
param.param_id.encode('utf-8'),
param.param_value, param.param_type)
self.sendMavlinkMessage(paramSet)
def setupMessageSigningKey(self, key, ts):
key0 = None
ts0 = 0
try:
key0 = bytes.fromhex(key)
except ValueError:
pass
try:
ts0 = int(ts)
except ValueError:
pass
if self.connection.WIRE_PROTOCOL_VERSION == '2.0':
self.connection.setup_signing(key0,
allow_unsigned_callback = self.uas.allowUnsignedCallback,
initial_timestamp = ts0)
def __createLogFile(self):
if self.enableLog:
name = 'MAV_{}.bin'.format(int(time() * 1000))
self.mavlinkLogFile = open(os.path.join(self.param[UD_TELEMETRY_LOG_FOLDER_KEY], name), 'wb')
def __setMavlinkDialect(self, ap):
mavutil.mavlink = None # reset previous dialect
self.uas = UASInterfaceFactory.getUASInterface(ap)
self.uas.mavlinkMessageTxSignal.connect(self.sendMavlinkMessage)
if ap in MAVLINK_DIALECTS:
print('Set dialect to: {} ({})'.format(MAVLINK_DIALECTS[ap], ap))
mavutil.set_dialect(MAVLINK_DIALECTS[ap])
elif ap != mavlink.MAV_AUTOPILOT_INVALID:
# default to common
print('Set dialect to common for unknown AP type:', ap)
mavutil.set_dialect(MAVLINK_DIALECTS[mavlink.MAV_AUTOPILOT_GENERIC])
# Hot patch after setting mavlink dialect on the fly
self.connection.mav = mavutil.mavlink.MAVLink(self.connection,
srcSystem=self.connection.source_system,
srcComponent=self.connection.source_component)
self.connection.mav.robust_parsing = self.connection.robust_parsing
self.connection.WIRE_PROTOCOL_VERSION = mavutil.mavlink.WIRE_PROTOCOL_VERSION
|
DEBUG = True
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'app.db')
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 2
CSRF_ENABLED = True
CSRF_SESSION_KEY = 'secret'
SECRET_KEY = 'secret'
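# NOTE: placeholder development secrets; override SECRET_KEY and CSRF_SESSION_KEY in production.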
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase
from odoo.tools import float_round
class TestPacking(TransactionCase):
def setUp(self):
super(TestPacking, self).setUp()
self.stock_location = self.env.ref('stock.stock_location_stock')
self.warehouse = self.env['stock.warehouse'].search([('lot_stock_id', '=', self.stock_location.id)], limit=1)
self.warehouse.write({'delivery_steps': 'pick_pack_ship'})
self.pack_location = self.warehouse.wh_pack_stock_loc_id
self.ship_location = self.warehouse.wh_output_stock_loc_id
self.customer_location = self.env.ref('stock.stock_location_customers')
self.productA = self.env['product.product'].create({'name': 'Product A', 'type': 'product'})
self.productB = self.env['product.product'].create({'name': 'Product B', 'type': 'product'})
def test_put_in_pack(self):
""" In a pick pack ship scenario, create two packs in pick and check that
        they are correctly recognised and handled by the pack and ship pickings.
        Along this test, we'll use action_toggle_processed to process a pack
        from the entire_package_ids one2many, and we'll directly fill the move
        lines; the latter is the behavior when the user did not enable the display
        of entire packs on the picking type.
"""
self.env['stock.quant']._update_available_quantity(self.productA, self.stock_location, 20.0)
self.env['stock.quant']._update_available_quantity(self.productB, self.stock_location, 20.0)
ship_move_a = self.env['stock.move'].create({
'name': 'The ship move',
'product_id': self.productA.id,
'product_uom_qty': 5.0,
'product_uom': self.productA.uom_id.id,
'location_id': self.ship_location.id,
'location_dest_id': self.customer_location.id,
'warehouse_id': self.warehouse.id,
'picking_type_id': self.warehouse.out_type_id.id,
'procure_method': 'make_to_order',
'state': 'draft',
})
ship_move_b = self.env['stock.move'].create({
'name': 'The ship move',
'product_id': self.productB.id,
'product_uom_qty': 5.0,
'product_uom': self.productB.uom_id.id,
'location_id': self.ship_location.id,
'location_dest_id': self.customer_location.id,
'warehouse_id': self.warehouse.id,
'picking_type_id': self.warehouse.out_type_id.id,
'procure_method': 'make_to_order',
'state': 'draft',
})
ship_move_a._assign_picking()
ship_move_b._assign_picking()
ship_move_a._action_confirm()
ship_move_b._action_confirm()
pack_move_a = ship_move_a.move_orig_ids[0]
pick_move_a = pack_move_a.move_orig_ids[0]
pick_picking = pick_move_a.picking_id
packing_picking = pack_move_a.picking_id
shipping_picking = ship_move_a.picking_id
pick_picking.action_assign()
pick_picking.move_line_ids.filtered(lambda ml: ml.product_id == self.productA).qty_done = 1.0
pick_picking.move_line_ids.filtered(lambda ml: ml.product_id == self.productB).qty_done = 2.0
first_pack = pick_picking.put_in_pack()
self.assertEquals(len(pick_picking.package_level_ids), 1, 'Put some products in pack should create a package_level')
self.assertEquals(pick_picking.package_level_ids[0].state, 'new', 'A new pack should be in state "new"')
pick_picking.move_line_ids.filtered(lambda ml: ml.product_id == self.productA and ml.qty_done == 0.0).qty_done = 4.0
pick_picking.move_line_ids.filtered(lambda ml: ml.product_id == self.productB and ml.qty_done == 0.0).qty_done = 3.0
second_pack = pick_picking.put_in_pack()
pick_picking.button_validate()
self.assertEqual(len(first_pack.quant_ids), 2)
self.assertEqual(len(second_pack.quant_ids), 2)
packing_picking.action_assign()
self.assertEqual(len(packing_picking.package_level_ids), 2, 'Two package levels must be created after assigning picking')
packing_picking.package_level_ids.write({'is_done': True})
packing_picking.action_done()
def test_pick_a_pack_confirm(self):
pack = self.env['stock.quant.package'].create({'name': 'The pack to pick'})
self.env['stock.quant']._update_available_quantity(self.productA, self.stock_location, 20.0, package_id=pack)
picking = self.env['stock.picking'].create({
'picking_type_id': self.warehouse.int_type_id.id,
'location_id': self.stock_location.id,
'location_dest_id': self.stock_location.id,
'state': 'draft',
})
package_level = self.env['stock.package_level'].create({
'package_id': pack.id,
'picking_id': picking.id,
'location_dest_id': self.stock_location.id,
})
self.assertEquals(package_level.state, 'draft',
'The package_level should be in draft as it has no moves, move lines and is not confirmed')
picking.action_confirm()
self.assertEqual(len(picking.move_lines), 1,
'One move should be created when the package_level has been confirmed')
self.assertEquals(len(package_level.move_ids), 1,
'The move should be in the package level')
self.assertEquals(package_level.state, 'confirmed',
'The package level must be state confirmed when picking is confirmed')
picking.action_assign()
self.assertEqual(len(picking.move_lines), 1,
'You still have only one move when the picking is assigned')
self.assertEqual(len(picking.move_lines.move_line_ids), 1,
'The move should have one move line which is the reservation')
self.assertEquals(picking.move_line_ids.package_level_id.id, package_level.id,
'The move line created should be linked to the package level')
self.assertEquals(picking.move_line_ids.package_id.id, pack.id,
'The move line must have been reserved on the package of the package_level')
self.assertEquals(picking.move_line_ids.result_package_id.id, pack.id,
'The move line must have the same package as result package')
self.assertEquals(package_level.state, 'assigned', 'The package level must be in state assigned')
package_level.write({'is_done': True})
self.assertEquals(len(package_level.move_line_ids), 1,
'The package level should still keep one move line after have been set to "done"')
self.assertEquals(package_level.move_line_ids[0].qty_done, 20.0,
            'All quantity in package must be processed in move line')
picking.button_validate()
self.assertEqual(len(picking.move_lines), 1,
'You still have only one move when the picking is assigned')
self.assertEqual(len(picking.move_lines.move_line_ids), 1,
'The move should have one move line which is the reservation')
self.assertEquals(package_level.state, 'done', 'The package level must be in state done')
self.assertEquals(pack.location_id.id, picking.location_dest_id.id,
'The quant package must be in the destination location')
self.assertEquals(pack.quant_ids[0].location_id.id, picking.location_dest_id.id,
'The quant must be in the destination location')
def test_multi_pack_reservation(self):
""" When we move entire packages, it is possible to have a multiple times
the same package in package level list, we make sure that only one is reserved,
and that the location_id of the package is the one where the package is once it
is reserved.
"""
pack = self.env['stock.quant.package'].create({'name': 'The pack to pick'})
shelf1_location = self.env['stock.location'].create({
'name': 'shelf1',
'usage': 'internal',
'location_id': self.stock_location.id,
})
self.env['stock.quant']._update_available_quantity(self.productA, shelf1_location, 20.0, package_id=pack)
picking = self.env['stock.picking'].create({
'picking_type_id': self.warehouse.int_type_id.id,
'location_id': self.stock_location.id,
'location_dest_id': self.stock_location.id,
'state': 'draft',
})
package_level = self.env['stock.package_level'].create({
'package_id': pack.id,
'picking_id': picking.id,
'location_dest_id': self.stock_location.id,
})
package_level = self.env['stock.package_level'].create({
'package_id': pack.id,
'picking_id': picking.id,
'location_dest_id': self.stock_location.id,
})
picking.action_confirm()
self.assertEqual(picking.package_level_ids.mapped('location_id.id'), [self.stock_location.id],
            'The package levels should still be in the same location after confirmation.')
picking.action_assign()
package_level_reserved = picking.package_level_ids.filtered(lambda pl: pl.state == 'assigned')
package_level_confirmed = picking.package_level_ids.filtered(lambda pl: pl.state == 'confirmed')
self.assertEqual(package_level_reserved.location_id.id, shelf1_location.id, 'The reserved package level must be reserved in shelf1')
self.assertEqual(package_level_confirmed.location_id.id, self.stock_location.id, 'The not reserved package should keep its location')
picking.do_unreserve()
self.assertEqual(picking.package_level_ids.mapped('location_id.id'), [self.stock_location.id],
'The package levels should have back the original location.')
picking.package_level_ids.write({'is_done': True})
picking.action_assign()
package_level_reserved = picking.package_level_ids.filtered(lambda pl: pl.state == 'assigned')
package_level_confirmed = picking.package_level_ids.filtered(lambda pl: pl.state == 'confirmed')
self.assertEqual(package_level_reserved.location_id.id, shelf1_location.id, 'The reserved package level must be reserved in shelf1')
self.assertEqual(package_level_confirmed.location_id.id, self.stock_location.id, 'The not reserved package should keep its location')
        self.assertEqual(picking.package_level_ids.mapped('is_done'), [True, True], 'Both packages should still be done')
def test_put_in_pack_to_different_location(self):
""" Hitting 'Put in pack' button while some move lines go to different
location should trigger a wizard. This wizard applies the same destination
location to all the move lines
"""
shelf1_location = self.env['stock.location'].create({
'name': 'shelf1',
'usage': 'internal',
'location_id': self.stock_location.id,
})
shelf2_location = self.env['stock.location'].create({
'name': 'shelf2',
'usage': 'internal',
'location_id': self.stock_location.id,
})
picking = self.env['stock.picking'].create({
'picking_type_id': self.warehouse.in_type_id.id,
'location_id': self.stock_location.id,
'location_dest_id': self.stock_location.id,
'state': 'draft',
})
ship_move_a = self.env['stock.move'].create({
'name': 'move 1',
'product_id': self.productA.id,
'product_uom_qty': 5.0,
'product_uom': self.productA.uom_id.id,
'location_id': self.customer_location.id,
'location_dest_id': shelf1_location.id,
'picking_id': picking.id,
'state': 'draft',
})
picking.action_confirm()
picking.action_assign()
picking.move_line_ids.filtered(lambda ml: ml.product_id == self.productA).qty_done = 5.0
picking.put_in_pack()
pack1 = self.env['stock.quant.package'].search([])[-1]
picking.write({
'move_line_ids': [(0, 0, {
'product_id': self.productB.id,
'product_uom_qty': 7.0,
'qty_done': 7.0,
'product_uom_id': self.productB.uom_id.id,
'location_id': self.customer_location.id,
'location_dest_id': shelf2_location.id,
'picking_id': picking.id,
'state': 'confirmed',
})]
})
picking.write({
'move_line_ids': [(0, 0, {
'product_id': self.productA.id,
'product_uom_qty': 5.0,
'qty_done': 5.0,
'product_uom_id': self.productA.uom_id.id,
'location_id': self.customer_location.id,
'location_dest_id': shelf1_location.id,
'picking_id': picking.id,
'state': 'confirmed',
})]
})
wizard_values = picking.put_in_pack()
wizard = self.env[(wizard_values.get('res_model'))].browse(wizard_values.get('res_id'))
wizard.location_dest_id = shelf2_location.id
wizard.action_done()
picking.action_done()
pack2 = self.env['stock.quant.package'].search([])[-1]
self.assertEqual(pack2.location_id.id, shelf2_location.id, 'The package must be stored in shelf2')
self.assertEqual(pack1.location_id.id, shelf1_location.id, 'The package must be stored in shelf1')
qp1 = pack2.quant_ids[0]
qp2 = pack2.quant_ids[1]
        self.assertEqual(qp1.quantity + qp2.quantity, 12, 'The quants do not have the expected total quantity')
def test_move_picking_with_package(self):
"""
355.4 rounded with 0.001 precision is 355.40000000000003.
        Check that, nonetheless, moving a picking is accepted.
"""
self.assertEqual(self.productA.uom_id.rounding, 0.001)
self.assertEqual(
float_round(355.4, precision_rounding=self.productA.uom_id.rounding),
355.40000000000003,
)
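        # 355.4 has no exact binary floating point representation, so float_round
        # yields the nearest double (355.40000000000003); the rest of the test checks
        # that a move created with that other representation still validates cleanly.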
location_dict = {
'location_id': self.stock_location.id,
}
quant = self.env['stock.quant'].create({
**location_dict,
**{'product_id': self.productA.id, 'quantity': 355.4}, # important number
})
package = self.env['stock.quant.package'].create({
**location_dict, **{'quant_ids': [(6, 0, [quant.id])]},
})
location_dict.update({
'state': 'draft',
'location_dest_id': self.ship_location.id,
})
move = self.env['stock.move'].create({
**location_dict,
**{
'name': "XXX",
'product_id': self.productA.id,
'product_uom': self.productA.uom_id.id,
'product_uom_qty': 355.40000000000003, # other number
}})
picking = self.env['stock.picking'].create({
**location_dict,
**{
'picking_type_id': self.warehouse.in_type_id.id,
'move_lines': [(6, 0, [move.id])],
}})
picking.action_confirm()
picking.action_assign()
move.quantity_done = move.reserved_availability
picking.action_done()
# if we managed to get there, there was not any exception
# complaining that 355.4 is not 355.40000000000003. Good job!
|
from collections import deque
class Node:
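    # A search state is the cell (x, y) plus the number of wall bypasses still available;
    # the same cell reached with a different bypass budget counts as a distinct node.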
def __init__(self, x, y, bypasses, grid):
self.x = x
self.y = y
self.bypasses = bypasses
self.grid = grid
def __eq__(self, comp):
return self.x == comp.x and self.y == comp.y and self.bypasses == comp.bypasses
def __hash__(self):
return self.x + len(self.grid) * self.y
def get_vertices(self):
vertices = []
x = self.x
y = self.y
bypasses = self.bypasses
grid = self.grid
width = len(grid[0])
height = len(grid)
if x > 0:
wall = grid[y][x - 1] == 1
if wall:
if bypasses > 0:
vertices.append(Node(x - 1, y, bypasses - 1, grid))
else:
pass
else:
vertices.append(Node(x - 1, y, bypasses, grid))
if x < width - 1:
wall = grid[y][x + 1] == 1
if wall:
if bypasses > 0:
vertices.append(Node(x + 1, y, bypasses - 1, grid))
else:
pass
else:
vertices.append(Node(x + 1, y, bypasses, grid))
if y > 0:
wall = grid[y - 1][x] == 1
if wall:
if bypasses > 0:
vertices.append(Node(x, y - 1, bypasses - 1, grid))
else:
pass
else:
vertices.append(Node(x, y - 1, bypasses, grid))
if y < height - 1:
            wall = grid[y + 1][x] == 1
if wall:
if bypasses > 0:
vertices.append(Node(x, y + 1, bypasses - 1, grid))
else:
pass
else:
vertices.append(Node(x, y + 1, bypasses, grid))
return vertices
class PathFinder:
def __init__(self, grid, bypasses):
self.grid = grid
self.height = len(grid)
self.width = len(grid[0])
self.bypasses = bypasses
def shortest_path(self):
source = Node(0, 0, self.bypasses, self.grid)
queue = deque([source])
distance_map = {source: 1}
while queue:
curr = queue.popleft()
if curr.x == self.width - 1 and curr.y == self.height - 1:
return distance_map[curr]
for neighbor in curr.get_vertices():
                if neighbor not in distance_map:
distance_map[neighbor] = distance_map[curr] + 1
queue.append(neighbor)
def answer(maze):
router = PathFinder(maze, 1)
return router.shortest_path()
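# Illustrative check: answer([[0, 1], [0, 0]]) returns 3, since the start and goal
# are three cells apart (counting both endpoints) with or without using the single bypass.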
|
'''
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted from left to right.
The first integer of each row is greater than the last integer of the previous row.
Example 1:
Input:
matrix = [
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
target = 3
Output: true
Example 2:
Input:
matrix = [
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
target = 13
Output: false
'''
from typing import List
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
rows = len(matrix)
if rows == 0:
return False
cols = len(matrix[0])
if cols == 0:
return False
row = -1
for i in range(rows - 1):
if target > matrix[i][0] and target < matrix[i + 1][0]:
row = i
break
elif target == matrix[i][0] or target == matrix[i + 1][0]:
return True
if row == -1:
if target > matrix[rows - 1][0] and target < matrix[rows - 1][-1]:
row = rows - 1
elif target > matrix[rows - 1][-1]:
return False
elif target < matrix[0][0]:
return False
return self.midSearch(matrix, target, 0, cols - 1, row)
def midSearch(self, matrix, value, left, right, row):
if value > matrix[row][-1]:
return False
while left <= right:
mid = ( left + right ) // 2
if matrix[row][mid] == value:
return True
if matrix[row][mid] > value:
right = mid - 1
else:
left = mid + 1
return False
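# Illustrative checks matching the examples above:
#   Solution().searchMatrix([[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]], 3)   -> True
#   Solution().searchMatrix([[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]], 13)  -> False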
|
from __future__ import division
from . import der, ecdsa
from .util import orderlen
# orderlen was defined in this module previously, so keep it in __all__,
# will need to mark it as deprecated later
__all__ = ["UnknownCurveError", "orderlen", "Curve", "NIST192p",
"NIST224p", "NIST256p", "NIST384p", "NIST521p", "curves",
"find_curve", "SECP256k1"]
class UnknownCurveError(Exception):
pass
class Curve:
def __init__(self, name, curve, generator, oid, openssl_name=None):
self.name = name
self.openssl_name = openssl_name # maybe None
self.curve = curve
self.generator = generator
self.order = generator.order()
self.baselen = orderlen(self.order)
self.verifying_key_length = 2*self.baselen
self.signature_length = 2*self.baselen
self.oid = oid
self.encoded_oid = der.encode_oid(*oid)
def __repr__(self):
return self.name
# the NIST curves
NIST192p = Curve("NIST192p", ecdsa.curve_192,
ecdsa.generator_192,
(1, 2, 840, 10045, 3, 1, 1), "prime192v1")
NIST224p = Curve("NIST224p", ecdsa.curve_224,
ecdsa.generator_224,
(1, 3, 132, 0, 33), "secp224r1")
NIST256p = Curve("NIST256p", ecdsa.curve_256,
ecdsa.generator_256,
(1, 2, 840, 10045, 3, 1, 7), "prime256v1")
NIST384p = Curve("NIST384p", ecdsa.curve_384,
ecdsa.generator_384,
(1, 3, 132, 0, 34), "secp384r1")
NIST521p = Curve("NIST521p", ecdsa.curve_521,
ecdsa.generator_521,
(1, 3, 132, 0, 35), "secp521r1")
SECP256k1 = Curve("SECP256k1", ecdsa.curve_secp256k1,
ecdsa.generator_secp256k1,
(1, 3, 132, 0, 10), "secp256k1")
curves = [NIST192p, NIST224p, NIST256p, NIST384p, NIST521p, SECP256k1]
def find_curve(oid_curve):
for c in curves:
if c.oid == oid_curve:
return c
raise UnknownCurveError("I don't know about the curve with oid %s."
"I only know about these: %s" %
(oid_curve, [c.name for c in curves]))
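# Illustrative usage: find_curve((1, 2, 840, 10045, 3, 1, 7)) returns NIST256p,
# since the lookup compares the given OID tuple against each registered Curve's oid.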
|
"""
Afterglow Core: photometric calibration job schemas
"""
from typing import List as TList
from marshmallow.fields import Integer, List, Nested
from ..job import JobSchema, JobResultSchema
from ..field_cal import FieldCalSchema, FieldCalResultSchema
from ..photometry import PhotSettingsSchema
from .source_extraction_job import SourceExtractionSettingsSchema
__all__ = ['FieldCalJobResultSchema', 'FieldCalJobSchema']
class FieldCalJobResultSchema(JobResultSchema):
data: TList[FieldCalResultSchema] = List(
Nested(FieldCalResultSchema), default=[])
class FieldCalJobSchema(JobSchema):
type = 'field_cal'
result: FieldCalJobResultSchema = Nested(
FieldCalJobResultSchema, default={})
file_ids: TList[int] = List(Integer(), default=[])
field_cal: FieldCalSchema = Nested(FieldCalSchema, default={})
source_extraction_settings: SourceExtractionSettingsSchema = Nested(
SourceExtractionSettingsSchema, default=None)
photometry_settings: PhotSettingsSchema = Nested(
PhotSettingsSchema, default=None)
|
#
# PySNMP MIB module XYLAN-CSM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/XYLAN-CSM-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:38:25 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, iso, ObjectIdentity, IpAddress, MibIdentifier, Counter64, ModuleIdentity, Gauge32, Counter32, Unsigned32, Bits, TimeTicks, Integer32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "ObjectIdentity", "IpAddress", "MibIdentifier", "Counter64", "ModuleIdentity", "Gauge32", "Counter32", "Unsigned32", "Bits", "TimeTicks", "Integer32", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
xylanCsmArch, = mibBuilder.importSymbols("XYLAN-BASE-MIB", "xylanCsmArch")
xylanCsmMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1))
atmxVplGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1))
atmxInterfaceConfGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2))
atmxVclGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3))
xylnatmInterfaceConfGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4))
atmxVpCrossConnectGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5))
xylnatmInterfaceStatGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6))
atmxVcCrossConnectGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7))
atmxTrafficDescrGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8))
xylnatmVplGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9))
xylnatmVclGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10))
xylnatmVplStatGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11))
xylnatmVclStatGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12))
xylnatmVcCrossConnectGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13))
xylnatmVpCrossConnectGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14))
xylnatmVclModGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15))
xylnatmVplModGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 16))
xylnatmClockingxCtrlGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 17))
xylnatmTrafficShaperConfGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 18))
xylnatmTrafficShaperMemGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 19))
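# AtmxTrafficDescrParamIndex below follows the AtmTrafficDescrParamIndex
# pattern from RFC 1695: a non-negative Integer32 used to index rows of
# atmxTrafficDescrParamTable further down in this module.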
class AtmxTrafficDescrParamIndex(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
atmxInterfaceConfTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1), )
if mibBuilder.loadTexts: atmxInterfaceConfTable.setStatus('mandatory')
atmxInterfaceConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "atmxInterfaceSlotIndex"), (0, "XYLAN-CSM-MIB", "atmxInterfacePortIndex"))
if mibBuilder.loadTexts: atmxInterfaceConfEntry.setStatus('mandatory')
atmxInterfaceSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxInterfaceSlotIndex.setStatus('mandatory')
atmxInterfacePortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxInterfacePortIndex.setStatus('mandatory')
atmxInterfaceMaxVpcs = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxInterfaceMaxVpcs.setStatus('mandatory')
atmxInterfaceMaxVccs = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65536))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxInterfaceMaxVccs.setStatus('mandatory')
atmxInterfaceConfVpcs = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxInterfaceConfVpcs.setStatus('mandatory')
atmxInterfaceConfVccs = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65536))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxInterfaceConfVccs.setStatus('mandatory')
atmxInterfaceMaxActiveVpiBits = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 12))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxInterfaceMaxActiveVpiBits.setStatus('mandatory')
atmxInterfaceMaxActiveVciBits = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxInterfaceMaxActiveVciBits.setStatus('mandatory')
atmxInterfaceIlmiVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxInterfaceIlmiVpi.setStatus('mandatory')
atmxInterfaceIlmiVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(16)).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxInterfaceIlmiVci.setStatus('mandatory')
atmxInterfaceAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 2, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("private", 1), ("nsapE164", 2), ("nativeE164", 3), ("other", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxInterfaceAddressType.setStatus('mandatory')
atmxTrafficDescrParamTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8, 1), )
if mibBuilder.loadTexts: atmxTrafficDescrParamTable.setStatus('mandatory')
atmxTrafficDescrParamEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "atmxTrafficDescrParamIndex"))
if mibBuilder.loadTexts: atmxTrafficDescrParamEntry.setStatus('mandatory')
atmxTrafficDescrParamIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8, 1, 1, 1), AtmxTrafficDescrParamIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxTrafficDescrParamIndex.setStatus('mandatory')
atmxTrafficDescrType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("atmxNoTrafficDescriptor", 1), ("atmxNoClpNoScr", 2), ("atmxClpNoTaggingNoScr", 3), ("atmxClpTaggingNoScr", 4), ("atmxNoClpScr", 5), ("atmxClpNoTaggingScr", 6), ("atmxClpTaggingScr", 7)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxTrafficDescrType.setStatus('mandatory')
atmxTrafficDescrParam1 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxTrafficDescrParam1.setStatus('mandatory')
atmxTrafficDescrParam2 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8, 1, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxTrafficDescrParam2.setStatus('mandatory')
atmxTrafficDescrParam3 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxTrafficDescrParam3.setStatus('mandatory')
atmxTrafficDescrParam4 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxTrafficDescrParam4.setStatus('mandatory')
atmxTrafficDescrParam5 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8, 1, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxTrafficDescrParam5.setStatus('mandatory')
atmxTrafficQoSClass = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxTrafficQoSClass.setStatus('mandatory')
atmxTrafficDescrRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 8, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("create", 1), ("modify", 2), ("delete", 3), ("active", 4), ("notActive", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxTrafficDescrRowStatus.setStatus('mandatory')
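# The create/modify/delete/active/notActive enumeration above is Xylan's own
# row-status convention (distinct from the SNMPv2-TC RowStatus); the same
# pattern recurs in the VPL, VCL and cross-connect tables that follow.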
atmxVplTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1), )
if mibBuilder.loadTexts: atmxVplTable.setStatus('mandatory')
atmxVplEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "atmxVplSlotIndex"), (0, "XYLAN-CSM-MIB", "atmxVplPortIndex"), (0, "XYLAN-CSM-MIB", "atmxVplVpi"))
if mibBuilder.loadTexts: atmxVplEntry.setStatus('mandatory')
atmxVplSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVplSlotIndex.setStatus('mandatory')
atmxVplPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVplPortIndex.setStatus('mandatory')
atmxVplVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVplVpi.setStatus('mandatory')
atmxVplAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2))).clone('down')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVplAdminStatus.setStatus('mandatory')
atmxVplOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("end2endup", 2), ("end2endDown", 3), ("localUpEndToEndUnknown", 4), ("localDown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVplOperStatus.setStatus('mandatory')
atmxVplLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVplLastChange.setStatus('mandatory')
atmxVplReceiveTrafficDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1, 7), AtmxTrafficDescrParamIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVplReceiveTrafficDescrIndex.setStatus('mandatory')
atmxVplTransmitTrafficDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1, 8), AtmxTrafficDescrParamIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVplTransmitTrafficDescrIndex.setStatus('mandatory')
atmxVplCrossConnectIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVplCrossConnectIdentifier.setStatus('mandatory')
atmxVplRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("create", 1), ("modify", 2), ("delete", 3), ("active", 4), ("notActive", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVplRowStatus.setStatus('mandatory')
atmxVplBidirect = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("on", 2))).clone('on')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVplBidirect.setStatus('mandatory')
atmxVclTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1), )
if mibBuilder.loadTexts: atmxVclTable.setStatus('mandatory')
atmxVclEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "atmxVclSlotIndex"), (0, "XYLAN-CSM-MIB", "atmxVclPortIndex"), (0, "XYLAN-CSM-MIB", "atmxVclVpi"), (0, "XYLAN-CSM-MIB", "atmxVclVci"))
if mibBuilder.loadTexts: atmxVclEntry.setStatus('mandatory')
atmxVclSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVclSlotIndex.setStatus('mandatory')
atmxVclPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVclPortIndex.setStatus('mandatory')
atmxVclVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVclVpi.setStatus('mandatory')
atmxVclVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVclVci.setStatus('mandatory')
atmxVclAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVclAdminStatus.setStatus('mandatory')
atmxVclOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("end2endup", 2), ("end2endDown", 3), ("localUpEndToEndUnknown", 4), ("localDown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVclOperStatus.setStatus('mandatory')
atmxVclLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVclLastChange.setStatus('mandatory')
atmxVclReceiveTrafficDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 8), AtmxTrafficDescrParamIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVclReceiveTrafficDescrIndex.setStatus('mandatory')
atmxVclTransmitTrafficDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 9), AtmxTrafficDescrParamIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVclTransmitTrafficDescrIndex.setStatus('mandatory')
atmxVccAalType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("aal1", 1), ("aal34", 2), ("aal5", 3), ("other", 4), ("unknown", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVccAalType.setStatus('mandatory')
atmxVccAal5CpcsTransmitSduSize = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(9188)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVccAal5CpcsTransmitSduSize.setStatus('mandatory')
atmxVccAal5CpcsReceiveSduSize = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(9188)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVccAal5CpcsReceiveSduSize.setStatus('mandatory')
atmxVccAal5EncapsType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("vcMultiplexRoutedProtocol", 1), ("vcMultiplexBridgedProtocol8023", 2), ("vcMultiplexBridgedProtocol8025", 3), ("vcMultiplexBridgedProtocol8026", 4), ("vcMultiplexLANemulation8023", 5), ("vcMultiplexLANemulation8025", 6), ("llcEncapsulation", 7), ("multiprotocolFrameRelaySscs", 8), ("other", 9), ("unknown", 10))).clone('llcEncapsulation')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVccAal5EncapsType.setStatus('mandatory')
atmxVclCrossConnectIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVclCrossConnectIdentifier.setStatus('mandatory')
atmxVclRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("create", 1), ("modify", 2), ("delete", 3), ("active", 4), ("notActive", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVclRowStatus.setStatus('mandatory')
atmxVclBidirect = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 3, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("on", 2))).clone('on')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVclBidirect.setStatus('mandatory')
atmxVpCrossConnectTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1), )
if mibBuilder.loadTexts: atmxVpCrossConnectTable.setStatus('mandatory')
atmxVpCrossConnectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "atmxVpCrossConnectLowSlotIndex"), (0, "XYLAN-CSM-MIB", "atmxVpCrossConnectLowPortIndex"), (0, "XYLAN-CSM-MIB", "atmxVpCrossConnectLowVpi"), (0, "XYLAN-CSM-MIB", "atmxVpCrossConnectHighSlotIndex"), (0, "XYLAN-CSM-MIB", "atmxVpCrossConnectHighPortIndex"), (0, "XYLAN-CSM-MIB", "atmxVpCrossConnectHighVpi"))
if mibBuilder.loadTexts: atmxVpCrossConnectEntry.setStatus('mandatory')
atmxVpCrossConnectIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVpCrossConnectIndex.setStatus('mandatory')
atmxVpCrossConnectLowSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVpCrossConnectLowSlotIndex.setStatus('mandatory')
atmxVpCrossConnectLowPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVpCrossConnectLowPortIndex.setStatus('mandatory')
atmxVpCrossConnectLowVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVpCrossConnectLowVpi.setStatus('mandatory')
atmxVpCrossConnectHighSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVpCrossConnectHighSlotIndex.setStatus('mandatory')
atmxVpCrossConnectHighPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVpCrossConnectHighPortIndex.setStatus('mandatory')
atmxVpCrossConnectHighVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVpCrossConnectHighVpi.setStatus('mandatory')
atmxVpCrossConnectAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVpCrossConnectAdminStatus.setStatus('mandatory')
atmxVpCrossConnectL2HOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("end2endup", 2), ("end2endDown", 3), ("localUpEndToEndUnknown", 4), ("localDown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVpCrossConnectL2HOperStatus.setStatus('mandatory')
atmxVpCrossConnectH2LOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("end2endup", 2), ("end2endDown", 3), ("localUpEndToEndUnknown", 4), ("localDown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVpCrossConnectH2LOperStatus.setStatus('mandatory')
atmxVpCrossConnectL2HLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVpCrossConnectL2HLastChange.setStatus('mandatory')
atmxVpCrossConnectH2LLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVpCrossConnectH2LLastChange.setStatus('mandatory')
atmxVpCrossConnectRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("create", 1), ("modify", 2), ("delete", 3), ("active", 4), ("notActive", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVpCrossConnectRowStatus.setStatus('mandatory')
atmxSvcVpCrossConnectTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2), )
if mibBuilder.loadTexts: atmxSvcVpCrossConnectTable.setStatus('mandatory')
atmxSvcVpCrossConnectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "atmxSvcVpCrossConnectLowSlotIndex"), (0, "XYLAN-CSM-MIB", "atmxSvcVpCrossConnectLowPortIndex"), (0, "XYLAN-CSM-MIB", "atmxSvcVpCrossConnectLowVpi"), (0, "XYLAN-CSM-MIB", "atmxSvcVpCrossConnectHighSlotIndex"), (0, "XYLAN-CSM-MIB", "atmxSvcVpCrossConnectHighPortIndex"), (0, "XYLAN-CSM-MIB", "atmxSvcVpCrossConnectHighVpi"))
if mibBuilder.loadTexts: atmxSvcVpCrossConnectEntry.setStatus('mandatory')
atmxSvcVpCrossConnectLowSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9)))
if mibBuilder.loadTexts: atmxSvcVpCrossConnectLowSlotIndex.setStatus('mandatory')
atmxSvcVpCrossConnectLowPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 25)))
if mibBuilder.loadTexts: atmxSvcVpCrossConnectLowPortIndex.setStatus('mandatory')
atmxSvcVpCrossConnectLowVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095)))
if mibBuilder.loadTexts: atmxSvcVpCrossConnectLowVpi.setStatus('mandatory')
atmxSvcVpCrossConnectHighSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9)))
if mibBuilder.loadTexts: atmxSvcVpCrossConnectHighSlotIndex.setStatus('mandatory')
atmxSvcVpCrossConnectHighPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 25)))
if mibBuilder.loadTexts: atmxSvcVpCrossConnectHighPortIndex.setStatus('mandatory')
atmxSvcVpCrossConnectHighVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095)))
if mibBuilder.loadTexts: atmxSvcVpCrossConnectHighVpi.setStatus('mandatory')
atmxSvcVpCrossConnectCreationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxSvcVpCrossConnectCreationTime.setStatus('mandatory')
atmxSvcVpCrossConnectLowTDIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxSvcVpCrossConnectLowTDIndex.setStatus('mandatory')
atmxSvcVpCrossConnectHighTDIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxSvcVpCrossConnectHighTDIndex.setStatus('mandatory')
atmxSvcVpCrossConnectRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 5, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxSvcVpCrossConnectRowStatus.setStatus('mandatory')
atmxVcCrossConnectTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1), )
if mibBuilder.loadTexts: atmxVcCrossConnectTable.setStatus('mandatory')
atmxVcCrossConnectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "atmxVcCrossConnectLowSlotIndex"), (0, "XYLAN-CSM-MIB", "atmxVcCrossConnectLowPortIndex"), (0, "XYLAN-CSM-MIB", "atmxVcCrossConnectLowVpi"), (0, "XYLAN-CSM-MIB", "atmxVcCrossConnectLowVci"), (0, "XYLAN-CSM-MIB", "atmxVcCrossConnectHighSlotIndex"), (0, "XYLAN-CSM-MIB", "atmxVcCrossConnectHighPortIndex"), (0, "XYLAN-CSM-MIB", "atmxVcCrossConnectHighVpi"), (0, "XYLAN-CSM-MIB", "atmxVcCrossConnectHighVci"))
if mibBuilder.loadTexts: atmxVcCrossConnectEntry.setStatus('mandatory')
atmxVcCrossConnectIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectIndex.setStatus('mandatory')
atmxVcCrossConnectLowSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectLowSlotIndex.setStatus('mandatory')
atmxVcCrossConnectLowPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectLowPortIndex.setStatus('mandatory')
atmxVcCrossConnectLowVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectLowVpi.setStatus('mandatory')
atmxVcCrossConnectLowVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectLowVci.setStatus('mandatory')
atmxVcCrossConnectHighSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectHighSlotIndex.setStatus('mandatory')
atmxVcCrossConnectHighPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectHighPortIndex.setStatus('mandatory')
atmxVcCrossConnectHighVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectHighVpi.setStatus('mandatory')
atmxVcCrossConnectHighVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectHighVci.setStatus('mandatory')
atmxVcCrossConnectAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVcCrossConnectAdminStatus.setStatus('mandatory')
atmxVcCrossConnectL2HOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("end2endup", 2), ("end2endDown", 3), ("localUpEndToEndUnknown", 4), ("localDown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectL2HOperStatus.setStatus('mandatory')
atmxVcCrossConnectH2LOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("end2endup", 2), ("end2endDown", 3), ("localUpEndToEndUnknown", 4), ("localDown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectH2LOperStatus.setStatus('mandatory')
atmxVcCrossConnectL2HLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 13), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectL2HLastChange.setStatus('mandatory')
atmxVcCrossConnectH2LLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxVcCrossConnectH2LLastChange.setStatus('mandatory')
atmxVcCrossConnectRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("create", 1), ("modify", 2), ("delete", 3), ("active", 4), ("notActive", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: atmxVcCrossConnectRowStatus.setStatus('mandatory')
atmxSvcVcCrossConnectTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2), )
if mibBuilder.loadTexts: atmxSvcVcCrossConnectTable.setStatus('mandatory')
atmxSvcVcCrossConnectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "atmxSvcVcCrossConnectLowSlotIndex"), (0, "XYLAN-CSM-MIB", "atmxSvcVcCrossConnectLowPortIndex"), (0, "XYLAN-CSM-MIB", "atmxSvcVcCrossConnectLowVpi"), (0, "XYLAN-CSM-MIB", "atmxSvcVcCrossConnectLowVci"), (0, "XYLAN-CSM-MIB", "atmxSvcVcCrossConnectHighSlotIndex"), (0, "XYLAN-CSM-MIB", "atmxSvcVcCrossConnectHighPortIndex"), (0, "XYLAN-CSM-MIB", "atmxSvcVcCrossConnectHighVpi"), (0, "XYLAN-CSM-MIB", "atmxSvcVcCrossConnectHighVci"))
if mibBuilder.loadTexts: atmxSvcVcCrossConnectEntry.setStatus('mandatory')
atmxSvcVcCrossConnectLowSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9)))
if mibBuilder.loadTexts: atmxSvcVcCrossConnectLowSlotIndex.setStatus('mandatory')
atmxSvcVcCrossConnectLowPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 25)))
if mibBuilder.loadTexts: atmxSvcVcCrossConnectLowPortIndex.setStatus('mandatory')
atmxSvcVcCrossConnectLowVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095)))
if mibBuilder.loadTexts: atmxSvcVcCrossConnectLowVpi.setStatus('mandatory')
atmxSvcVcCrossConnectLowVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: atmxSvcVcCrossConnectLowVci.setStatus('mandatory')
atmxSvcVcCrossConnectHighSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9)))
if mibBuilder.loadTexts: atmxSvcVcCrossConnectHighSlotIndex.setStatus('mandatory')
atmxSvcVcCrossConnectHighPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 6), Integer32())
if mibBuilder.loadTexts: atmxSvcVcCrossConnectHighPortIndex.setStatus('mandatory')
atmxSvcVcCrossConnectHighVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095)))
if mibBuilder.loadTexts: atmxSvcVcCrossConnectHighVpi.setStatus('mandatory')
atmxSvcVcCrossConnectHighVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: atmxSvcVcCrossConnectHighVci.setStatus('mandatory')
atmxSvcVcCrossConnectCreationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxSvcVcCrossConnectCreationTime.setStatus('mandatory')
atmxSvcVcCrossConnectLowTDIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxSvcVcCrossConnectLowTDIndex.setStatus('mandatory')
atmxSvcVcCrossConnectHighTDIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxSvcVcCrossConnectHighTDIndex.setStatus('mandatory')
atmxSvcVcCrossConnectRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 7, 2, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: atmxSvcVcCrossConnectRowStatus.setStatus('mandatory')
xylnatmInterfaceConfTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1), )
if mibBuilder.loadTexts: xylnatmInterfaceConfTable.setStatus('mandatory')
xylnatmInterfaceConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmInterfaceSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmInterfacePortIndex"))
if mibBuilder.loadTexts: xylnatmInterfaceConfEntry.setStatus('mandatory')
xylnatmInterfaceSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceSlotIndex.setStatus('mandatory')
xylnatmInterfacePortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 25))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfacePortIndex.setStatus('mandatory')
xylnatmInterfaceDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceDescription.setStatus('mandatory')
xylnatmInterfaceTransType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 20))).clone(namedValues=NamedValues(("unknown", 1), ("sonetSts3", 2), ("ds3", 3), ("fourb5b", 4), ("eightb10b", 5), ("e3", 6), ("sonetSts12", 7), ("ds1", 8), ("e1", 9), ("internal", 20)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceTransType.setStatus('mandatory')
xylnatmInterfaceType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("public", 1), ("private", 2), ("pnni10", 3), ("nni-iisp-network", 4), ("nni-iisp-user", 5), ("other", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceType.setStatus('mandatory')
xylnatmInterfaceMediaType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("unknown", 1), ("coax", 2), ("singlemode", 3), ("multimode", 4), ("stp", 5), ("utp", 6), ("internal", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceMediaType.setStatus('mandatory')
xylnatmInterfaceAtmAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 7), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceAtmAddress.setStatus('mandatory')
xylnatmInterfacePortMode = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("passthru", 2), ("normal", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfacePortMode.setStatus('mandatory')
xylnatmInterfaceOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceOperStatus.setStatus('mandatory')
xylnatmInterfaceQsaalStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceQsaalStatus.setStatus('mandatory')
xylnatmInterfaceIlmiStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceIlmiStatus.setStatus('mandatory')
xylnatmInterfaceTpRedirect = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceTpRedirect.setStatus('mandatory')
xylnatmInterfaceCutOverSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceCutOverSlot.setStatus('mandatory')
xylnatmInterfaceCutOverPort = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 25))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceCutOverPort.setStatus('mandatory')
xylnatmInterfaceClearPortStats = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceClearPortStats.setStatus('mandatory')
xylnatmInterfaceClearChanStats = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceClearChanStats.setStatus('mandatory')
xylnatmInterfaceClearSlotStats = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceClearSlotStats.setStatus('mandatory')
xylnatmInterfaceTransmissionType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("sonet3c", 1), ("sdh", 2), ("sonet12c", 3), ("notApplicable", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceTransmissionType.setStatus('mandatory')
xylnatmInterfaceIlmiState = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceIlmiState.setStatus('mandatory')
xylnatmInterfaceTimingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("local", 1), ("loop", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceTimingMode.setStatus('mandatory')
xylnatmInterfaceLocalSrc = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("oscillator", 1), ("busLine8K", 2), ("busLine19M", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceLocalSrc.setStatus('mandatory')
xylnatmInterfaceUniVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("uni30", 1), ("uni31", 2), ("uniIisp", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmInterfaceUniVersion.setStatus('mandatory')
xylnatmILMIConfTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3), )
if mibBuilder.loadTexts: xylnatmILMIConfTable.setStatus('mandatory')
xylnatmILMIConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: xylnatmILMIConfEntry.setStatus('mandatory')
xylnatmILMIConfSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmILMIConfSlot.setStatus('mandatory')
xylnatmILMIConfPort = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmILMIConfPort.setStatus('mandatory')
xylnatmILMIConfInstance = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmILMIConfInstance.setStatus('mandatory')
xylnatmILMIConfILMIEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmILMIConfILMIEnable.setStatus('mandatory')
xylnatmILMIConfILMIPollEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmILMIConfILMIPollEnable.setStatus('mandatory')
xylnatmILMIConfAutoCfgEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disable", 1), ("enable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmILMIConfAutoCfgEnable.setStatus('mandatory')
xylnatmILMIConfAutoCfgStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("cfgDone", 1), ("idle", 2), ("inProgress", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmILMIConfAutoCfgStatus.setStatus('mandatory')
xylnatmILMIConfAutoCfgTrigg = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("phyLogic", 2), ("phy", 3), ("logic", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmILMIConfAutoCfgTrigg.setStatus('mandatory')
xylnatmILMIConfAutoCfgDfltIf = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("privateUNI", 1), ("pnni", 2), ("iispNetwork", 3), ("iispUser", 4), ("publicUNI", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmILMIConfAutoCfgDfltIf.setStatus('mandatory')
xylnatmILMIConfAutoCfgDfltSigVer = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("uni30", 1), ("uni31", 2), ("uni40", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmILMIConfAutoCfgDfltSigVer.setStatus('mandatory')
xylnatmILMIConfAutoCfgCurIf = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("privateUNI", 1), ("pnni", 2), ("iispNetwork", 3), ("iispUser", 4), ("publicUNI", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmILMIConfAutoCfgCurIf.setStatus('mandatory')
xylnatmILMIConfAutoCfgCurSigVer = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("uni30", 1), ("uni31", 2), ("uni40", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmILMIConfAutoCfgCurSigVer.setStatus('mandatory')
xylnatmILMIConfAutoCfgCurILMIVer = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("unsupported", 1), ("ilmi40", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmILMIConfAutoCfgCurILMIVer.setStatus('mandatory')
xylnatmILMIConfPeerUniType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("public", 1), ("private", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmILMIConfPeerUniType.setStatus('mandatory')
xylnatmILMIConfPeerUniVer = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("uni30", 1), ("uni31", 2), ("uni40", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmILMIConfPeerUniVer.setStatus('mandatory')
xylnatmILMIConfPeerDevType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("user", 1), ("node", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmILMIConfPeerDevType.setStatus('mandatory')
xylnatmILMIConfPeerNNISigVer = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unsupported", 1), ("iisp", 2), ("pnni10", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmILMIConfPeerNNISigVer.setStatus('mandatory')
xylnatmILMIConfPeerILMIVer = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 4, 3, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("unsupported", 1), ("ilmi40", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmILMIConfPeerILMIVer.setStatus('mandatory')
xylnatmInterfaceStatTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1), )
if mibBuilder.loadTexts: xylnatmInterfaceStatTable.setStatus('mandatory')
xylnatmInterfaceStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmInterfaceStatSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmInterfaceStatPortIndex"))
if mibBuilder.loadTexts: xylnatmInterfaceStatEntry.setStatus('mandatory')
xylnatmInterfaceStatSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatSlotIndex.setStatus('mandatory')
xylnatmInterfaceStatPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatPortIndex.setStatus('mandatory')
xylnatmInterfaceStatRxCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatRxCells.setStatus('mandatory')
xylnatmInterfaceStatRxClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatRxClp0Cells.setStatus('mandatory')
xylnatmInterfaceStatRxClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatRxClp1Cells.setStatus('mandatory')
xylnatmInterfaceStatTxCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatTxCells.setStatus('mandatory')
xylnatmInterfaceStatMarkEfciCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatMarkEfciCells.setStatus('mandatory')
xylnatmInterfaceStatMarkGcraCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatMarkGcraCells.setStatus('mandatory')
xylnatmInterfaceStatTotalDiscardCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatTotalDiscardCells.setStatus('mandatory')
xylnatmInterfaceStatDxCongClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatDxCongClp0Cells.setStatus('mandatory')
xylnatmInterfaceStatDxCongClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatDxCongClp1Cells.setStatus('mandatory')
xylnatmInterfaceStatDxGcraClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatDxGcraClp0Cells.setStatus('mandatory')
xylnatmInterfaceStatDxGcraClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatDxGcraClp1Cells.setStatus('mandatory')
xylnatmInterfaceStatDxGcrabClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatDxGcrabClp0Cells.setStatus('mandatory')
xylnatmInterfaceStatDxGcrabClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatDxGcrabClp1Cells.setStatus('mandatory')
xylnatmInterfaceStatUnknownVpVcCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatUnknownVpVcCells.setStatus('mandatory')
xylnatmInterfaceStatUnknownVpiCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatUnknownVpiCells.setStatus('mandatory')
xylnatmInterfaceStatUnknownVciCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatUnknownVciCells.setStatus('mandatory')
xylnatmInterfaceStatUniType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("public", 1), ("private", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatUniType.setStatus('mandatory')
xylnatmInterfaceStatUniVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("uni30", 1), ("uni31", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatUniVersion.setStatus('mandatory')
xylnatmInterfaceStatRemainingRxBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatRemainingRxBandwidth.setStatus('mandatory')
xylnatmInterfaceStatRemainingTxBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 6, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmInterfaceStatRemainingTxBandwidth.setStatus('mandatory')
xylnatmVclTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1), )
if mibBuilder.loadTexts: xylnatmVclTable.setStatus('mandatory')
xylnatmVclEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmVclSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVclPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVclVpi"), (0, "XYLAN-CSM-MIB", "xylnatmVclVci"))
if mibBuilder.loadTexts: xylnatmVclEntry.setStatus('mandatory')
xylnatmVclSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclSlotIndex.setStatus('mandatory')
xylnatmVclPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclPortIndex.setStatus('mandatory')
xylnatmVclVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclVpi.setStatus('mandatory')
xylnatmVclVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclVci.setStatus('mandatory')
xylnatmVclConnectionDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 5), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclConnectionDescr.setStatus('mandatory')
xylnatmVclChanType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("vcNni", 3), ("vcUni", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclChanType.setStatus('mandatory')
xylnatmVclTransportPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 5, 6, 7, 8))).clone(namedValues=NamedValues(("qosCbrPrs", 2), ("qosCbr", 3), ("qosVbrRt", 5), ("qosVbrNrt", 6), ("qosAbr", 7), ("qosUbr", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclTransportPriority.setStatus('mandatory')
xylnatmVclUserPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclUserPriority.setStatus('mandatory')
xylnatmVclStatsMode = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("cntGcraDxCell", 1), ("mrkGcraDxCell", 2), ("cntGcraPsCell", 3), ("mrkGcraPsCell", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclStatsMode.setStatus('mandatory')
xylnatmVclPrTrackPortBase = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclPrTrackPortBase.setStatus('mandatory')
xylnatmVclPrTrackPort1 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclPrTrackPort1.setStatus('mandatory')
xylnatmVclPrTrackPort2 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclPrTrackPort2.setStatus('mandatory')
xylnatmVclPrTrackPort3 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclPrTrackPort3.setStatus('mandatory')
xylnatmVclAltTrackPortBase = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclAltTrackPortBase.setStatus('mandatory')
xylnatmVclAltTrackPort1 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclAltTrackPort1.setStatus('mandatory')
xylnatmVclAltTrackPort2 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclAltTrackPort2.setStatus('mandatory')
xylnatmVclAltTrackPort3 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclAltTrackPort3.setStatus('mandatory')
xylnatmVclLgclChanRedirect = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notAllowed", 1), ("allowed", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclLgclChanRedirect.setStatus('mandatory')
xylnatmVclAAL5Discard = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("earlyPktDiscard", 1), ("disable", 2), ("partialPktDiscard", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclAAL5Discard.setStatus('mandatory')
xylnatmVclF4F5SegEndpt = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclF4F5SegEndpt.setStatus('mandatory')
xylnatmVclF4F5CopySeg = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclF4F5CopySeg.setStatus('mandatory')
xylnatmVclF4F5End2EndEndpt = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclF4F5End2EndEndpt.setStatus('mandatory')
xylnatmVclF4F5CopyEnd2End = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclF4F5CopyEnd2End.setStatus('mandatory')
xylnatmVclOamEndpt = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclOamEndpt.setStatus('mandatory')
xylnatmVclOamCopy = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclOamCopy.setStatus('mandatory')
xylnatmVclRmFwdEndpt = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclRmFwdEndpt.setStatus('mandatory')
xylnatmVclRmFwdCopy = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclRmFwdCopy.setStatus('mandatory')
xylnatmVclRmFwdGcraAdv = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclRmFwdGcraAdv.setStatus('mandatory')
xylnatmVclRmBkwdEndpt = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclRmBkwdEndpt.setStatus('mandatory')
xylnatmVclRmBkwdCopy = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclRmBkwdCopy.setStatus('mandatory')
xylnatmVclRmBkwdGcraAdv = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclRmBkwdGcraAdv.setStatus('mandatory')
xylnatmVclRmDiscard = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclRmDiscard.setStatus('mandatory')
xylnatmVclGcraAPoliceMode = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("gcraEmDxAll", 1), ("gcraEmMarkClp0DxClp1", 2), ("gcraEmMarkAllDxAll", 3), ("gcraEmMarkClp0DxAll", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclGcraAPoliceMode.setStatus('mandatory')
xylnatmVclGcraBPoliceMode = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 34), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("gcraEmDxAll", 1), ("gcraEmMarkClp0DxClp1", 2), ("gcraEmMarkAllDxAll", 3), ("gcraEmMarkClp0DxAll", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclGcraBPoliceMode.setStatus('mandatory')
xylnatmVclMcGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 35), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclMcGroupId.setStatus('mandatory')
xylnatmVclMcIngressEgress = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 10, 1, 1, 36), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ingress", 1), ("egress", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclMcIngressEgress.setStatus('mandatory')
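# Per-VCL cell statistics table, indexed by slot, port, VPI and VCI.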
xylnatmVclStatTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1), )
if mibBuilder.loadTexts: xylnatmVclStatTable.setStatus('mandatory')
xylnatmVclStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmVclStatSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVclStatPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVclStatVpi"), (0, "XYLAN-CSM-MIB", "xylnatmVclStatVci"))
if mibBuilder.loadTexts: xylnatmVclStatEntry.setStatus('mandatory')
xylnatmVclStatSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatSlotIndex.setStatus('mandatory')
xylnatmVclStatPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 25))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatPortIndex.setStatus('mandatory')
xylnatmVclStatVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatVpi.setStatus('mandatory')
xylnatmVclStatVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatVci.setStatus('mandatory')
xylnatmVclStatRxCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatRxCells.setStatus('mandatory')
xylnatmVclStatTxCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatTxCells.setStatus('mandatory')
xylnatmVclStatRxClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatRxClp0Cells.setStatus('mandatory')
xylnatmVclStatRxClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatRxClp1Cells.setStatus('mandatory')
xylnatmVclStatDxCongClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatDxCongClp0Cells.setStatus('mandatory')
xylnatmVclStatDxCongClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatDxCongClp1Cells.setStatus('mandatory')
xylnatmVclStatDxGcraClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatDxGcraClp0Cells.setStatus('mandatory')
xylnatmVclStatDxGcraClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatDxGcraClp1Cells.setStatus('mandatory')
xylnatmVclStatDxGcraBClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatDxGcraBClp0Cells.setStatus('mandatory')
xylnatmVclStatDxGcraBClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 12, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclStatDxGcraBClp1Cells.setStatus('mandatory')
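# VPL (virtual path link) configuration table, indexed by slot, port and VPI.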
xylnatmVplTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1), )
if mibBuilder.loadTexts: xylnatmVplTable.setStatus('mandatory')
xylnatmVplEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmVplSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVplPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVplVpi"))
if mibBuilder.loadTexts: xylnatmVplEntry.setStatus('mandatory')
xylnatmVplSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplSlotIndex.setStatus('mandatory')
xylnatmVplPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplPortIndex.setStatus('mandatory')
xylnatmVplVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplVpi.setStatus('mandatory')
xylnatmVplConnectionDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplConnectionDescr.setStatus('mandatory')
xylnatmVplChanType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("vpNNI", 5), ("vpUni", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplChanType.setStatus('mandatory')
xylnatmVplTransportPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 5, 6, 7, 8))).clone(namedValues=NamedValues(("qosCbrPrs", 2), ("qosCbr", 3), ("qosVbrRt", 5), ("qosVbrNrt", 6), ("qosAbr", 7), ("qosUbr", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplTransportPriority.setStatus('mandatory')
xylnatmVplUserPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplUserPriority.setStatus('mandatory')
xylnatmVplStatsMode = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("cntGcraDxCell", 1), ("mrkGcraDxCell", 2), ("cntGcraPsCell", 3), ("mrkGcraPsCell", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplStatsMode.setStatus('mandatory')
xylnatmVplPrTrackPortBase = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplPrTrackPortBase.setStatus('mandatory')
xylnatmVplPrTrackPort1 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplPrTrackPort1.setStatus('mandatory')
xylnatmVplPrTrackPort2 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplPrTrackPort2.setStatus('mandatory')
xylnatmVplPrTrackPort3 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplPrTrackPort3.setStatus('mandatory')
xylnatmVplAltTrackPortBase = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplAltTrackPortBase.setStatus('mandatory')
xylnatmVplAltTrackPort1 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplAltTrackPort1.setStatus('mandatory')
xylnatmVplAltTrackPort2 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplAltTrackPort2.setStatus('mandatory')
xylnatmVplAltTrackPort3 = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplAltTrackPort3.setStatus('mandatory')
xylnatmVplLgclChanRedirect = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notAllowed", 1), ("allowed", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplLgclChanRedirect.setStatus('mandatory')
xylnatmVplAAL5Discard = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("earlyPktDiscard", 1), ("disable", 2), ("partialPktDiscard", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplAAL5Discard.setStatus('mandatory')
xylnatmVplF4F5SegEndpt = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplF4F5SegEndpt.setStatus('mandatory')
xylnatmVplF4F5CopySeg = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplF4F5CopySeg.setStatus('mandatory')
xylnatmVplF4F5End2EndEndpt = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplF4F5End2EndEndpt.setStatus('mandatory')
xylnatmVplF4F5CopyEnd2End = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplF4F5CopyEnd2End.setStatus('mandatory')
xylnatmVplOamEndpt = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplOamEndpt.setStatus('mandatory')
xylnatmVplOamCopy = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplOamCopy.setStatus('mandatory')
xylnatmVplRmFwdEndpt = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplRmFwdEndpt.setStatus('mandatory')
xylnatmVplRmFwdCopy = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplRmFwdCopy.setStatus('mandatory')
xylnatmVplRmFwdGcraAdv = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplRmFwdGcraAdv.setStatus('mandatory')
xylnatmVplRmBkwdEndpt = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplRmBkwdEndpt.setStatus('mandatory')
xylnatmVplRmBkwdCopy = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplRmBkwdCopy.setStatus('mandatory')
xylnatmVplRmBkwdGcraAdv = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplRmBkwdGcraAdv.setStatus('mandatory')
xylnatmVplRmDiscard = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplRmDiscard.setStatus('mandatory')
xylnatmVplGcraAPoliceMode = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("gcraEmDxAll", 1), ("gcraEmMarkClp0DxClp1", 2), ("gcraEmMarkAllDxAll", 3), ("gcraEmMarkClp0DxAll", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplGcraAPoliceMode.setStatus('mandatory')
xylnatmVplGcraBPoliceMode = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("gcraEmDxAll", 1), ("gcraEmMarkClp0DxClp1", 2), ("gcraEmMarkAllDxAll", 3), ("gcraEmMarkClp0DxAll", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplGcraBPoliceMode.setStatus('mandatory')
xylnatmVplMcGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 34), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplMcGroupId.setStatus('mandatory')
xylnatmVplMcIngressEgress = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 9, 1, 1, 35), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ingress", 1), ("egress", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplMcIngressEgress.setStatus('mandatory')
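# Per-VPL cell statistics table, indexed by slot, port and VPI.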
xylnatmVplStatTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1), )
if mibBuilder.loadTexts: xylnatmVplStatTable.setStatus('mandatory')
xylnatmVplStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmVplStatSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVplStatPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVplStatVpi"))
if mibBuilder.loadTexts: xylnatmVplStatEntry.setStatus('mandatory')
xylnatmVplStatSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatSlotIndex.setStatus('mandatory')
xylnatmVplStatPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 25))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatPortIndex.setStatus('mandatory')
xylnatmVplStatVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatVpi.setStatus('mandatory')
xylnatmVplStatRxCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatRxCells.setStatus('mandatory')
xylnatmVplStatTxCells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatTxCells.setStatus('mandatory')
xylnatmVplStatRxClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatRxClp0Cells.setStatus('mandatory')
xylnatmVplStatRxClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatRxClp1Cells.setStatus('mandatory')
xylnatmVplStatDxCongClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatDxCongClp0Cells.setStatus('mandatory')
xylnatmVplStatDxCongClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatDxCongClp1Cells.setStatus('mandatory')
xylnatmVplStatDxGcraClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatDxGcraClp0Cells.setStatus('mandatory')
xylnatmVplStatDxGcraClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatDxGcraClp1Cells.setStatus('mandatory')
xylnatmVplStatDxGcraBClp0Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatDxGcraBClp0Cells.setStatus('mandatory')
xylnatmVplStatDxGcraBClp1Cells = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 11, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplStatDxGcraBClp1Cells.setStatus('mandatory')
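# VP cross-connect table, keyed by the low-side and high-side slot/port/VPI of each connection.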
xylnatmVpCrossConnectTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1), )
if mibBuilder.loadTexts: xylnatmVpCrossConnectTable.setStatus('mandatory')
xylnatmVpCrossConnectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmVpCrossConnectLowSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVpCrossConnectLowPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVpCrossConnectLowVpi"), (0, "XYLAN-CSM-MIB", "xylnatmVpCrossConnectHighSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVpCrossConnectHighPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVpCrossConnectHighVpi"))
if mibBuilder.loadTexts: xylnatmVpCrossConnectEntry.setStatus('mandatory')
xylnatmVpCrossConnectLowSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectLowSlotIndex.setStatus('mandatory')
xylnatmVpCrossConnectLowPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectLowPortIndex.setStatus('mandatory')
xylnatmVpCrossConnectLowVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectLowVpi.setStatus('mandatory')
xylnatmVpCrossConnectHighSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectHighSlotIndex.setStatus('mandatory')
xylnatmVpCrossConnectHighPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectHighPortIndex.setStatus('mandatory')
xylnatmVpCrossConnectHighVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectHighVpi.setStatus('mandatory')
xylnatmVpCrossConnectConnectionId = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("multicastParent", 1), ("multicastChild", 2), ("nonMulticastParent", 3), ("unknown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectConnectionId.setStatus('mandatory')
xylnatmVpCrossConnectLowRxTrafficDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVpCrossConnectLowRxTrafficDescrIndex.setStatus('mandatory')
xylnatmVpCrossConnectLowTxTrafficDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVpCrossConnectLowTxTrafficDescrIndex.setStatus('mandatory')
xylnatmVpCrossConnectMCastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVpCrossConnectMCastEnable.setStatus('mandatory')
xylnatmVpCrossConnectL2HLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectL2HLastChange.setStatus('mandatory')
xylnatmVpCrossConnectH2LLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectH2LLastChange.setStatus('mandatory')
xylnatmVpCrossConnectL2HOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("end2endup", 2), ("end2endDown", 3), ("localUpEndToEndUnknown", 4), ("localDown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectL2HOperStatus.setStatus('mandatory')
xylnatmVpCrossConnectH2LOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("end2endup", 2), ("end2endDown", 3), ("localUpEndToEndUnknown", 4), ("localDown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectH2LOperStatus.setStatus('mandatory')
xylnatmVpCrossConnectVcType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("pvc", 1), ("svc", 2), ("softPvc", 3), ("control", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVpCrossConnectVcType.setStatus('mandatory')
xylnatmVpCrossConnectPvcIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 16), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVpCrossConnectPvcIdentifier.setStatus('mandatory')
xylnatmVpCrossConnectRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 14, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("create", 1), ("modify", 2), ("delete", 3), ("active", 4), ("notActive", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVpCrossConnectRowStatus.setStatus('mandatory')
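# VC cross-connect table, keyed by the low-side and high-side slot/port/VPI/VCI of each connection.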
xylnatmVcCrossConnectTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1), )
if mibBuilder.loadTexts: xylnatmVcCrossConnectTable.setStatus('mandatory')
xylnatmVcCrossConnectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmVcCrossConnectLowSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVcCrossConnectLowPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVcCrossConnectLowVpi"), (0, "XYLAN-CSM-MIB", "xylnatmVcCrossConnectLowVci"), (0, "XYLAN-CSM-MIB", "xylnatmVcCrossConnectHighSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVcCrossConnectHighPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVcCrossConnectHighVpi"), (0, "XYLAN-CSM-MIB", "xylnatmVcCrossConnectHighVci"))
if mibBuilder.loadTexts: xylnatmVcCrossConnectEntry.setStatus('mandatory')
xylnatmVcCrossConnectLowSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectLowSlotIndex.setStatus('mandatory')
xylnatmVcCrossConnectLowPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectLowPortIndex.setStatus('mandatory')
xylnatmVcCrossConnectLowVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectLowVpi.setStatus('mandatory')
xylnatmVcCrossConnectLowVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectLowVci.setStatus('mandatory')
xylnatmVcCrossConnectHighSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectHighSlotIndex.setStatus('mandatory')
xylnatmVcCrossConnectHighPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectHighPortIndex.setStatus('mandatory')
xylnatmVcCrossConnectHighVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectHighVpi.setStatus('mandatory')
xylnatmVcCrossConnectHighVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectHighVci.setStatus('mandatory')
xylnatmVcCrossConnectConnectionId = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("multicastParent", 1), ("multicastChild", 2), ("nonMulticastParent", 3), ("unknown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectConnectionId.setStatus('mandatory')
xylnatmVcCrossConnectLowRxTrafficDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVcCrossConnectLowRxTrafficDescrIndex.setStatus('mandatory')
xylnatmVcCrossConnectLowTxTrafficDescrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVcCrossConnectLowTxTrafficDescrIndex.setStatus('mandatory')
xylnatmVcCrossConnectMCastEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVcCrossConnectMCastEnable.setStatus('mandatory')
xylnatmVcCrossConnectL2HLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 13), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectL2HLastChange.setStatus('mandatory')
xylnatmVcCrossConnectH2LLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectH2LLastChange.setStatus('mandatory')
xylnatmVcCrossConnectL2HOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("end2endup", 2), ("end2endDown", 3), ("localUpEndToEndUnknown", 4), ("localDown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectL2HOperStatus.setStatus('mandatory')
xylnatmVcCrossConnectH2LOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("end2endup", 2), ("end2endDown", 3), ("localUpEndToEndUnknown", 4), ("localDown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectH2LOperStatus.setStatus('mandatory')
xylnatmVcCrossConnectVcType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("pvc", 1), ("svc", 2), ("softPvc", 3), ("control", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVcCrossConnectVcType.setStatus('mandatory')
xylnatmVcCrossConnectPvcIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 18), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVcCrossConnectPvcIdentifier.setStatus('mandatory')
xylnatmVcCrossConnectRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 13, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("create", 1), ("modify", 2), ("delete", 3), ("active", 4), ("notActive", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVcCrossConnectRowStatus.setStatus('mandatory')
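# VPL modification table: each entry pairs an existing VPL (slot/port/VPI index)
# with writable destination slot/port/VPI and status columns.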
xylnatmVplModTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 16, 1), )
if mibBuilder.loadTexts: xylnatmVplModTable.setStatus('mandatory')
xylnatmVplModEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 16, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmVplModSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVplModPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVplModVplVpi"))
if mibBuilder.loadTexts: xylnatmVplModEntry.setStatus('mandatory')
xylnatmVplModSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 16, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplModSlotIndex.setStatus('mandatory')
xylnatmVplModPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 16, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplModPortIndex.setStatus('mandatory')
xylnatmVplModVplVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 16, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVplModVplVpi.setStatus('mandatory')
xylnatmVplModDestSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 16, 1, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplModDestSlotIndex.setStatus('mandatory')
xylnatmVplModDestPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 16, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplModDestPortIndex.setStatus('mandatory')
xylnatmVplModDestVplVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 16, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplModDestVplVpi.setStatus('mandatory')
xylnatmVplModDestStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 16, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("create", 1), ("modify", 2), ("delete", 3), ("active", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVplModDestStatus.setStatus('mandatory')
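# VCL modification table: each entry pairs an existing VCL (slot/port/VPI/VCI index)
# with writable destination slot/port/VPI/VCI and status columns.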
xylnatmVclModTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15, 1), )
if mibBuilder.loadTexts: xylnatmVclModTable.setStatus('mandatory')
xylnatmVclModEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmVclModSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVclModPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmVclModVclVpi"), (0, "XYLAN-CSM-MIB", "xylnatmVclModVclVci"))
if mibBuilder.loadTexts: xylnatmVclModEntry.setStatus('mandatory')
xylnatmVclModSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclModSlotIndex.setStatus('mandatory')
xylnatmVclModPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclModPortIndex.setStatus('mandatory')
xylnatmVclModVclVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclModVclVpi.setStatus('mandatory')
xylnatmVclModVclVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmVclModVclVci.setStatus('mandatory')
xylnatmVclModDestSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclModDestSlotIndex.setStatus('mandatory')
xylnatmVclModDestPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclModDestPortIndex.setStatus('mandatory')
xylnatmVclModDestVclVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15, 1, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclModDestVclVpi.setStatus('mandatory')
xylnatmVclModDestVclVci = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15, 1, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclModDestVclVci.setStatus('mandatory')
xylnatmVclModDestStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 15, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("create", 1), ("modify", 2), ("delete", 3), ("active", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmVclModDestStatus.setStatus('mandatory')
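# Clocking control table, indexed by bus line (8 kHz or 19 MHz) and
# clock-source level (primary, secondary, tertiary).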
xylnatmClockingxCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 17, 1), )
if mibBuilder.loadTexts: xylnatmClockingxCtrlTable.setStatus('mandatory')
xylnatmClockingxCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 17, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmClockingxCtrlBusLine"), (0, "XYLAN-CSM-MIB", "xylnatmClockingxCtrlSrcLevel"))
if mibBuilder.loadTexts: xylnatmClockingxCtrlEntry.setStatus('mandatory')
xylnatmClockingxCtrlBusLine = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 17, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eightKhz", 1), ("nineteenMhz", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmClockingxCtrlBusLine.setStatus('mandatory')
xylnatmClockingxCtrlSrcLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 17, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("primary", 1), ("secondary", 2), ("tertiary", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmClockingxCtrlSrcLevel.setStatus('mandatory')
xylnatmClockingxSrcOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 17, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inactive", 1), ("active", 2), ("standby", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmClockingxSrcOperState.setStatus('mandatory')
xylnatmClockingxSrcType = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 17, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notConfigured", 1), ("receiveDataDerived", 2), ("onboardOscillator", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmClockingxSrcType.setStatus('mandatory')
xylnatmClockingxCtrlSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 17, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 9))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmClockingxCtrlSlot.setStatus('mandatory')
xylnatmClockingxCtrlPort = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 17, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmClockingxCtrlPort.setStatus('mandatory')
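# Global clocking control scalar, value range 1..10000.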
xylnatmClockingxGlobalCST = MibScalar((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 17, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmClockingxGlobalCST.setStatus('mandatory')
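# Traffic shaper configuration table: CDV, PCR, SCR and MBS per slot/port/shaper number.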
xylnatmTrafficShaperConfTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 18, 1), )
if mibBuilder.loadTexts: xylnatmTrafficShaperConfTable.setStatus('mandatory')
xylnatmTrafficShaperConfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 18, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmTrafficShaperConfSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmTrafficShaperConfPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmTrafficShaperConfTsNumIndex"))
if mibBuilder.loadTexts: xylnatmTrafficShaperConfEntry.setStatus('mandatory')
xylnatmTrafficShaperConfSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 18, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmTrafficShaperConfSlotIndex.setStatus('mandatory')
xylnatmTrafficShaperConfPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 18, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmTrafficShaperConfPortIndex.setStatus('mandatory')
xylnatmTrafficShaperConfTsNumIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 18, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmTrafficShaperConfTsNumIndex.setStatus('mandatory')
xylnatmTrafficShaperConfCDV = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 18, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(7, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmTrafficShaperConfCDV.setStatus('mandatory')
xylnatmTrafficShaperConfPCR = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 18, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmTrafficShaperConfPCR.setStatus('mandatory')
xylnatmTrafficShaperConfSCR = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 18, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmTrafficShaperConfSCR.setStatus('mandatory')
xylnatmTrafficShaperConfMBS = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 18, 1, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmTrafficShaperConfMBS.setStatus('mandatory')
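# Traffic shaper membership table: assigns VPI/VCI pairs to a shaper on a given slot/port,
# with an add/remove operation column.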
xylnatmTrafficShaperMemTable = MibTable((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 19, 1), )
if mibBuilder.loadTexts: xylnatmTrafficShaperMemTable.setStatus('mandatory')
xylnatmTrafficShaperMemEntry = MibTableRow((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 19, 1, 1), ).setIndexNames((0, "XYLAN-CSM-MIB", "xylnatmTrafficShaperMemSlotIndex"), (0, "XYLAN-CSM-MIB", "xylnatmTrafficShaperMemPortIndex"), (0, "XYLAN-CSM-MIB", "xylnatmTrafficShaperMemTsNumIndex"), (0, "XYLAN-CSM-MIB", "xylnatmTrafficShaperMemVpiNumIndex"), (0, "XYLAN-CSM-MIB", "xylnatmTrafficShaperMemVciNumIndex"))
if mibBuilder.loadTexts: xylnatmTrafficShaperMemEntry.setStatus('mandatory')
xylnatmTrafficShaperMemSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 19, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 9))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmTrafficShaperMemSlotIndex.setStatus('mandatory')
xylnatmTrafficShaperMemPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 19, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmTrafficShaperMemPortIndex.setStatus('mandatory')
xylnatmTrafficShaperMemTsNumIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 19, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: xylnatmTrafficShaperMemTsNumIndex.setStatus('mandatory')
xylnatmTrafficShaperMemVpiNumIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 19, 1, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmTrafficShaperMemVpiNumIndex.setStatus('mandatory')
xylnatmTrafficShaperMemVciNumIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 19, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmTrafficShaperMemVciNumIndex.setStatus('mandatory')
xylnatmTrafficShaperMemVpiOper = MibTableColumn((1, 3, 6, 1, 4, 1, 800, 2, 9, 1, 19, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("add", 2), ("rem", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: xylnatmTrafficShaperMemVpiOper.setStatus('mandatory')
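# Export all defined managed-object symbols so the pysnmp MIB builder can resolve them by name.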
mibBuilder.exportSymbols("XYLAN-CSM-MIB", xylnatmILMIConfPeerUniType=xylnatmILMIConfPeerUniType, atmxVclPortIndex=atmxVclPortIndex, xylnatmVplModDestVplVpi=xylnatmVplModDestVplVpi, xylnatmVclStatRxClp1Cells=xylnatmVclStatRxClp1Cells, atmxVclGroup=atmxVclGroup, atmxInterfaceMaxActiveVciBits=atmxInterfaceMaxActiveVciBits, xylnatmInterfaceClearPortStats=xylnatmInterfaceClearPortStats, atmxVclReceiveTrafficDescrIndex=atmxVclReceiveTrafficDescrIndex, xylnatmTrafficShaperConfMBS=xylnatmTrafficShaperConfMBS, xylnatmVcCrossConnectL2HOperStatus=xylnatmVcCrossConnectL2HOperStatus, xylnatmVpCrossConnectLowRxTrafficDescrIndex=xylnatmVpCrossConnectLowRxTrafficDescrIndex, xylnatmVpCrossConnectGroup=xylnatmVpCrossConnectGroup, xylnatmILMIConfTable=xylnatmILMIConfTable, atmxInterfaceConfTable=atmxInterfaceConfTable, xylnatmClockingxCtrlSlot=xylnatmClockingxCtrlSlot, xylnatmVplModDestSlotIndex=xylnatmVplModDestSlotIndex, xylnatmVplModEntry=xylnatmVplModEntry, xylnatmInterfaceStatTotalDiscardCells=xylnatmInterfaceStatTotalDiscardCells, atmxInterfaceMaxVpcs=atmxInterfaceMaxVpcs, xylnatmInterfaceStatDxGcrabClp1Cells=xylnatmInterfaceStatDxGcrabClp1Cells, xylnatmInterfaceStatUniType=xylnatmInterfaceStatUniType, atmxSvcVpCrossConnectRowStatus=atmxSvcVpCrossConnectRowStatus, atmxVplEntry=atmxVplEntry, xylnatmVcCrossConnectRowStatus=xylnatmVcCrossConnectRowStatus, xylnatmVcCrossConnectLowSlotIndex=xylnatmVcCrossConnectLowSlotIndex, atmxSvcVcCrossConnectLowVpi=atmxSvcVcCrossConnectLowVpi, xylnatmVclRmBkwdCopy=xylnatmVclRmBkwdCopy, xylnatmVplModGroup=xylnatmVplModGroup, atmxVplReceiveTrafficDescrIndex=atmxVplReceiveTrafficDescrIndex, xylnatmVclStatRxCells=xylnatmVclStatRxCells, xylnatmILMIConfSlot=xylnatmILMIConfSlot, xylnatmVclGroup=xylnatmVclGroup, xylnatmILMIConfILMIPollEnable=xylnatmILMIConfILMIPollEnable, atmxSvcVcCrossConnectHighVci=atmxSvcVcCrossConnectHighVci, atmxVccAal5CpcsTransmitSduSize=atmxVccAal5CpcsTransmitSduSize, xylnatmTrafficShaperConfTable=xylnatmTrafficShaperConfTable, xylnatmVpCrossConnectEntry=xylnatmVpCrossConnectEntry, atmxInterfaceIlmiVpi=atmxInterfaceIlmiVpi, xylnatmVclModEntry=xylnatmVclModEntry, xylnatmInterfaceStatRemainingRxBandwidth=xylnatmInterfaceStatRemainingRxBandwidth, atmxTrafficDescrParam2=atmxTrafficDescrParam2, atmxVpCrossConnectTable=atmxVpCrossConnectTable, xylnatmVclAltTrackPort3=xylnatmVclAltTrackPort3, xylnatmILMIConfPeerUniVer=xylnatmILMIConfPeerUniVer, atmxVcCrossConnectGroup=atmxVcCrossConnectGroup, atmxVcCrossConnectAdminStatus=atmxVcCrossConnectAdminStatus, xylnatmClockingxCtrlTable=xylnatmClockingxCtrlTable, xylnatmVplGroup=xylnatmVplGroup, xylnatmVplPrTrackPort3=xylnatmVplPrTrackPort3, xylnatmVcCrossConnectH2LOperStatus=xylnatmVcCrossConnectH2LOperStatus, AtmxTrafficDescrParamIndex=AtmxTrafficDescrParamIndex, xylnatmVpCrossConnectVcType=xylnatmVpCrossConnectVcType, xylnatmVpCrossConnectLowTxTrafficDescrIndex=xylnatmVpCrossConnectLowTxTrafficDescrIndex, xylnatmVclPrTrackPort1=xylnatmVclPrTrackPort1, xylnatmVpCrossConnectL2HLastChange=xylnatmVpCrossConnectL2HLastChange, atmxSvcVpCrossConnectHighTDIndex=atmxSvcVpCrossConnectHighTDIndex, xylnatmVclEntry=xylnatmVclEntry, xylnatmVclModDestPortIndex=xylnatmVclModDestPortIndex, xylnatmVclTransportPriority=xylnatmVclTransportPriority, xylnatmVclRmFwdCopy=xylnatmVclRmFwdCopy, xylnatmVplGcraBPoliceMode=xylnatmVplGcraBPoliceMode, atmxVccAal5CpcsReceiveSduSize=atmxVccAal5CpcsReceiveSduSize, xylnatmTrafficShaperMemEntry=xylnatmTrafficShaperMemEntry, xylnatmInterfaceIlmiStatus=xylnatmInterfaceIlmiStatus, 
atmxVclLastChange=atmxVclLastChange, xylnatmInterfaceType=xylnatmInterfaceType, xylnatmVclPrTrackPortBase=xylnatmVclPrTrackPortBase, xylnatmVplStatDxGcraBClp0Cells=xylnatmVplStatDxGcraBClp0Cells, xylnatmInterfaceTimingMode=xylnatmInterfaceTimingMode, xylnatmClockingxSrcOperState=xylnatmClockingxSrcOperState, xylnatmVclF4F5CopyEnd2End=xylnatmVclF4F5CopyEnd2End, xylnatmInterfaceSlotIndex=xylnatmInterfaceSlotIndex, xylnatmVplStatVpi=xylnatmVplStatVpi, xylnatmInterfaceStatEntry=xylnatmInterfaceStatEntry, xylnatmVclStatPortIndex=xylnatmVclStatPortIndex, xylnatmVclStatSlotIndex=xylnatmVclStatSlotIndex, xylnatmVcCrossConnectPvcIdentifier=xylnatmVcCrossConnectPvcIdentifier, xylnatmILMIConfEntry=xylnatmILMIConfEntry, xylnatmInterfaceStatGroup=xylnatmInterfaceStatGroup, xylnatmVclRmFwdEndpt=xylnatmVclRmFwdEndpt, atmxVplTransmitTrafficDescrIndex=atmxVplTransmitTrafficDescrIndex, xylnatmClockingxCtrlSrcLevel=xylnatmClockingxCtrlSrcLevel, atmxSvcVcCrossConnectHighSlotIndex=atmxSvcVcCrossConnectHighSlotIndex, xylnatmVclStatDxGcraClp1Cells=xylnatmVclStatDxGcraClp1Cells, xylnatmVpCrossConnectRowStatus=xylnatmVpCrossConnectRowStatus, atmxVcCrossConnectL2HOperStatus=atmxVcCrossConnectL2HOperStatus, atmxSvcVcCrossConnectRowStatus=atmxSvcVcCrossConnectRowStatus, atmxVcCrossConnectLowVpi=atmxVcCrossConnectLowVpi, xylnatmInterfaceStatDxCongClp1Cells=xylnatmInterfaceStatDxCongClp1Cells, xylnatmVclConnectionDescr=xylnatmVclConnectionDescr, xylnatmVcCrossConnectVcType=xylnatmVcCrossConnectVcType, xylnatmVpCrossConnectLowPortIndex=xylnatmVpCrossConnectLowPortIndex, xylnatmTrafficShaperMemVciNumIndex=xylnatmTrafficShaperMemVciNumIndex, xylnatmVcCrossConnectTable=xylnatmVcCrossConnectTable, xylnatmVclTable=xylnatmVclTable, xylnatmVplOamEndpt=xylnatmVplOamEndpt, atmxVplVpi=atmxVplVpi, atmxInterfaceConfVpcs=atmxInterfaceConfVpcs, atmxVplTable=atmxVplTable, xylnatmVplTable=xylnatmVplTable, xylnatmVplStatDxCongClp0Cells=xylnatmVplStatDxCongClp0Cells, xylnatmVclStatDxGcraClp0Cells=xylnatmVclStatDxGcraClp0Cells, atmxVcCrossConnectHighSlotIndex=atmxVcCrossConnectHighSlotIndex, xylnatmVclOamCopy=xylnatmVclOamCopy, xylnatmInterfaceStatRemainingTxBandwidth=xylnatmInterfaceStatRemainingTxBandwidth, xylnatmVclStatDxGcraBClp0Cells=xylnatmVclStatDxGcraBClp0Cells, xylnatmILMIConfAutoCfgDfltSigVer=xylnatmILMIConfAutoCfgDfltSigVer, xylnatmVpCrossConnectL2HOperStatus=xylnatmVpCrossConnectL2HOperStatus, atmxVpCrossConnectHighPortIndex=atmxVpCrossConnectHighPortIndex, atmxInterfaceAddressType=atmxInterfaceAddressType, xylnatmInterfaceStatUnknownVpVcCells=xylnatmInterfaceStatUnknownVpVcCells, atmxVplAdminStatus=atmxVplAdminStatus, atmxVcCrossConnectEntry=atmxVcCrossConnectEntry, atmxVpCrossConnectLowVpi=atmxVpCrossConnectLowVpi, xylnatmVclModDestVclVpi=xylnatmVclModDestVclVpi, xylnatmVplMcIngressEgress=xylnatmVplMcIngressEgress, xylnatmVclRmFwdGcraAdv=xylnatmVclRmFwdGcraAdv, xylnatmTrafficShaperMemVpiOper=xylnatmTrafficShaperMemVpiOper, atmxVplLastChange=atmxVplLastChange, xylnatmVclStatVci=xylnatmVclStatVci, xylnatmVcCrossConnectHighPortIndex=xylnatmVcCrossConnectHighPortIndex, atmxTrafficDescrGroup=atmxTrafficDescrGroup, xylnatmVclModPortIndex=xylnatmVclModPortIndex, xylnatmInterfaceStatMarkEfciCells=xylnatmInterfaceStatMarkEfciCells, xylnatmILMIConfAutoCfgCurIf=xylnatmILMIConfAutoCfgCurIf, xylnatmInterfaceStatRxCells=xylnatmInterfaceStatRxCells, atmxTrafficDescrParam1=atmxTrafficDescrParam1, xylnatmVcCrossConnectLowPortIndex=xylnatmVcCrossConnectLowPortIndex, xylnatmInterfaceStatDxCongClp0Cells=xylnatmInterfaceStatDxCongClp0Cells, 
xylnatmVclModSlotIndex=xylnatmVclModSlotIndex, xylnatmVcCrossConnectLowVci=xylnatmVcCrossConnectLowVci, xylnatmClockingxGlobalCST=xylnatmClockingxGlobalCST, atmxInterfaceIlmiVci=atmxInterfaceIlmiVci, xylnatmVcCrossConnectH2LLastChange=xylnatmVcCrossConnectH2LLastChange, xylnatmVplPrTrackPort2=xylnatmVplPrTrackPort2, xylnatmVplModPortIndex=xylnatmVplModPortIndex, atmxVclEntry=atmxVclEntry, xylnatmILMIConfAutoCfgStatus=xylnatmILMIConfAutoCfgStatus, atmxSvcVcCrossConnectLowPortIndex=atmxSvcVcCrossConnectLowPortIndex, xylnatmVplGcraAPoliceMode=xylnatmVplGcraAPoliceMode, xylnatmVplRmFwdCopy=xylnatmVplRmFwdCopy, xylnatmVplRmBkwdEndpt=xylnatmVplRmBkwdEndpt, xylnatmVclStatTxCells=xylnatmVclStatTxCells, xylnatmTrafficShaperMemTsNumIndex=xylnatmTrafficShaperMemTsNumIndex, xylnatmInterfacePortIndex=xylnatmInterfacePortIndex, xylnatmInterfaceAtmAddress=xylnatmInterfaceAtmAddress, xylnatmVclMcIngressEgress=xylnatmVclMcIngressEgress, xylnatmVplAltTrackPort3=xylnatmVplAltTrackPort3, xylnatmVplF4F5CopyEnd2End=xylnatmVplF4F5CopyEnd2End, xylnatmVplRmBkwdGcraAdv=xylnatmVplRmBkwdGcraAdv, xylnatmTrafficShaperMemSlotIndex=xylnatmTrafficShaperMemSlotIndex, xylnatmVcCrossConnectLowVpi=xylnatmVcCrossConnectLowVpi, xylnatmVpCrossConnectH2LOperStatus=xylnatmVpCrossConnectH2LOperStatus, xylnatmVplMcGroupId=xylnatmVplMcGroupId, xylnatmVplStatRxClp0Cells=xylnatmVplStatRxClp0Cells, xylnatmInterfaceTpRedirect=xylnatmInterfaceTpRedirect, xylnatmVplStatDxGcraClp0Cells=xylnatmVplStatDxGcraClp0Cells, xylnatmInterfaceCutOverSlot=xylnatmInterfaceCutOverSlot, xylnatmVplStatRxCells=xylnatmVplStatRxCells, atmxTrafficDescrParam5=atmxTrafficDescrParam5, atmxSvcVpCrossConnectHighVpi=atmxSvcVpCrossConnectHighVpi, xylnatmILMIConfAutoCfgCurILMIVer=xylnatmILMIConfAutoCfgCurILMIVer, xylanCsmMIB=xylanCsmMIB, atmxSvcVpCrossConnectTable=atmxSvcVpCrossConnectTable, xylnatmVclAAL5Discard=xylnatmVclAAL5Discard, xylnatmVplStatsMode=xylnatmVplStatsMode, xylnatmVplModSlotIndex=xylnatmVplModSlotIndex, atmxTrafficQoSClass=atmxTrafficQoSClass, atmxVcCrossConnectHighVpi=atmxVcCrossConnectHighVpi, xylnatmVplF4F5End2EndEndpt=xylnatmVplF4F5End2EndEndpt, xylnatmInterfaceOperStatus=xylnatmInterfaceOperStatus, atmxVplOperStatus=atmxVplOperStatus, xylnatmVclModDestSlotIndex=xylnatmVclModDestSlotIndex, xylnatmInterfaceStatUniVersion=xylnatmInterfaceStatUniVersion, atmxSvcVpCrossConnectCreationTime=atmxSvcVpCrossConnectCreationTime, xylnatmVclRmDiscard=xylnatmVclRmDiscard, xylnatmVplAltTrackPort2=xylnatmVplAltTrackPort2, xylnatmVcCrossConnectL2HLastChange=xylnatmVcCrossConnectL2HLastChange, xylnatmInterfaceStatUnknownVpiCells=xylnatmInterfaceStatUnknownVpiCells, xylnatmVclModDestVclVci=xylnatmVclModDestVclVci, xylnatmILMIConfAutoCfgCurSigVer=xylnatmILMIConfAutoCfgCurSigVer, atmxInterfacePortIndex=atmxInterfacePortIndex, atmxVpCrossConnectGroup=atmxVpCrossConnectGroup, atmxInterfaceConfEntry=atmxInterfaceConfEntry, atmxVcCrossConnectLowSlotIndex=atmxVcCrossConnectLowSlotIndex, xylnatmILMIConfAutoCfgTrigg=xylnatmILMIConfAutoCfgTrigg, xylnatmVclF4F5End2EndEndpt=xylnatmVclF4F5End2EndEndpt, xylnatmVplF4F5SegEndpt=xylnatmVplF4F5SegEndpt, xylnatmClockingxCtrlEntry=xylnatmClockingxCtrlEntry, atmxInterfaceMaxActiveVpiBits=atmxInterfaceMaxActiveVpiBits, atmxVclTable=atmxVclTable, atmxInterfaceConfVccs=atmxInterfaceConfVccs, xylnatmVcCrossConnectEntry=xylnatmVcCrossConnectEntry, xylnatmVclModTable=xylnatmVclModTable, xylnatmVclGcraBPoliceMode=xylnatmVclGcraBPoliceMode, xylnatmInterfaceClearSlotStats=xylnatmInterfaceClearSlotStats, 
xylnatmVplStatRxClp1Cells=xylnatmVplStatRxClp1Cells, atmxVplRowStatus=atmxVplRowStatus, xylnatmInterfaceConfEntry=xylnatmInterfaceConfEntry, xylnatmTrafficShaperMemPortIndex=xylnatmTrafficShaperMemPortIndex, atmxVclSlotIndex=atmxVclSlotIndex, xylnatmVplModDestStatus=xylnatmVplModDestStatus, atmxInterfaceMaxVccs=atmxInterfaceMaxVccs, xylnatmVclPrTrackPort3=xylnatmVclPrTrackPort3, atmxTrafficDescrParamTable=atmxTrafficDescrParamTable, xylnatmInterfacePortMode=xylnatmInterfacePortMode, xylnatmVclGcraAPoliceMode=xylnatmVclGcraAPoliceMode, atmxVclOperStatus=atmxVclOperStatus, xylnatmInterfaceTransmissionType=xylnatmInterfaceTransmissionType, atmxTrafficDescrParam4=atmxTrafficDescrParam4, atmxSvcVcCrossConnectHighTDIndex=atmxSvcVcCrossConnectHighTDIndex, xylnatmClockingxSrcType=xylnatmClockingxSrcType, xylnatmVplAAL5Discard=xylnatmVplAAL5Discard, xylnatmVclModDestStatus=xylnatmVclModDestStatus, xylnatmInterfaceStatTxCells=xylnatmInterfaceStatTxCells, xylnatmVplVpi=xylnatmVplVpi, xylnatmVclF4F5SegEndpt=xylnatmVclF4F5SegEndpt, atmxVclRowStatus=atmxVclRowStatus, atmxVpCrossConnectH2LOperStatus=atmxVpCrossConnectH2LOperStatus, atmxTrafficDescrParamEntry=atmxTrafficDescrParamEntry, atmxVcCrossConnectHighVci=atmxVcCrossConnectHighVci, atmxVplCrossConnectIdentifier=atmxVplCrossConnectIdentifier, atmxVclVci=atmxVclVci, xylnatmInterfaceConfTable=xylnatmInterfaceConfTable, xylnatmVpCrossConnectLowVpi=xylnatmVpCrossConnectLowVpi, xylnatmVplRmFwdGcraAdv=xylnatmVplRmFwdGcraAdv, xylnatmVpCrossConnectTable=xylnatmVpCrossConnectTable, xylnatmVplChanType=xylnatmVplChanType, xylnatmVclLgclChanRedirect=xylnatmVclLgclChanRedirect, xylnatmILMIConfPeerILMIVer=xylnatmILMIConfPeerILMIVer, xylnatmVplStatDxCongClp1Cells=xylnatmVplStatDxCongClp1Cells, atmxVpCrossConnectLowSlotIndex=atmxVpCrossConnectLowSlotIndex, xylnatmVplRmDiscard=xylnatmVplRmDiscard, xylnatmVplModDestPortIndex=xylnatmVplModDestPortIndex, atmxVpCrossConnectEntry=atmxVpCrossConnectEntry, xylnatmInterfaceStatMarkGcraCells=xylnatmInterfaceStatMarkGcraCells, xylnatmVclAltTrackPort2=xylnatmVclAltTrackPort2, atmxSvcVcCrossConnectTable=atmxSvcVcCrossConnectTable, atmxSvcVcCrossConnectCreationTime=atmxSvcVcCrossConnectCreationTime, xylnatmVplStatDxGcraClp1Cells=xylnatmVplStatDxGcraClp1Cells, xylnatmILMIConfAutoCfgDfltIf=xylnatmILMIConfAutoCfgDfltIf, atmxTrafficDescrRowStatus=atmxTrafficDescrRowStatus, xylnatmVclSlotIndex=xylnatmVclSlotIndex, atmxVclTransmitTrafficDescrIndex=atmxVclTransmitTrafficDescrIndex, xylnatmVclRmBkwdGcraAdv=xylnatmVclRmBkwdGcraAdv, xylnatmVpCrossConnectHighVpi=xylnatmVpCrossConnectHighVpi, xylnatmInterfaceCutOverPort=xylnatmInterfaceCutOverPort, xylnatmInterfaceStatPortIndex=xylnatmInterfaceStatPortIndex, xylnatmVpCrossConnectHighSlotIndex=xylnatmVpCrossConnectHighSlotIndex, xylnatmTrafficShaperMemVpiNumIndex=xylnatmTrafficShaperMemVpiNumIndex, xylnatmVclStatVpi=xylnatmVclStatVpi, xylnatmVplStatGroup=xylnatmVplStatGroup, atmxVplSlotIndex=atmxVplSlotIndex, atmxVpCrossConnectL2HOperStatus=atmxVpCrossConnectL2HOperStatus)
mibBuilder.exportSymbols("XYLAN-CSM-MIB", xylnatmVcCrossConnectLowTxTrafficDescrIndex=xylnatmVcCrossConnectLowTxTrafficDescrIndex, atmxTrafficDescrParamIndex=atmxTrafficDescrParamIndex, atmxVcCrossConnectLowPortIndex=atmxVcCrossConnectLowPortIndex, xylnatmVclChanType=xylnatmVclChanType, xylnatmTrafficShaperConfEntry=xylnatmTrafficShaperConfEntry, atmxVclCrossConnectIdentifier=atmxVclCrossConnectIdentifier, atmxSvcVcCrossConnectLowSlotIndex=atmxSvcVcCrossConnectLowSlotIndex, xylnatmInterfaceClearChanStats=xylnatmInterfaceClearChanStats, atmxVclAdminStatus=atmxVclAdminStatus, atmxSvcVpCrossConnectEntry=atmxSvcVpCrossConnectEntry, xylnatmInterfaceStatRxClp0Cells=xylnatmInterfaceStatRxClp0Cells, xylnatmVclRmBkwdEndpt=xylnatmVclRmBkwdEndpt, xylnatmInterfaceStatRxClp1Cells=xylnatmInterfaceStatRxClp1Cells, xylnatmVclF4F5CopySeg=xylnatmVclF4F5CopySeg, xylnatmVclStatDxGcraBClp1Cells=xylnatmVclStatDxGcraBClp1Cells, xylnatmInterfaceStatSlotIndex=xylnatmInterfaceStatSlotIndex, atmxVcCrossConnectIndex=atmxVcCrossConnectIndex, xylnatmVplEntry=xylnatmVplEntry, atmxVpCrossConnectL2HLastChange=atmxVpCrossConnectL2HLastChange, atmxVcCrossConnectH2LOperStatus=atmxVcCrossConnectH2LOperStatus, xylnatmVclVpi=xylnatmVclVpi, atmxSvcVpCrossConnectLowVpi=atmxSvcVpCrossConnectLowVpi, xylnatmVclStatEntry=xylnatmVclStatEntry, xylnatmInterfaceIlmiState=xylnatmInterfaceIlmiState, xylnatmVplStatDxGcraBClp1Cells=xylnatmVplStatDxGcraBClp1Cells, xylnatmVcCrossConnectConnectionId=xylnatmVcCrossConnectConnectionId, xylnatmVplPrTrackPortBase=xylnatmVplPrTrackPortBase, xylnatmVclUserPriority=xylnatmVclUserPriority, xylnatmInterfaceStatTable=xylnatmInterfaceStatTable, xylnatmVclModVclVci=xylnatmVclModVclVci, atmxSvcVpCrossConnectHighPortIndex=atmxSvcVpCrossConnectHighPortIndex, xylnatmInterfaceLocalSrc=xylnatmInterfaceLocalSrc, xylnatmVclMcGroupId=xylnatmVclMcGroupId, xylnatmVclModGroup=xylnatmVclModGroup, xylnatmVcCrossConnectHighVci=xylnatmVcCrossConnectHighVci, atmxTrafficDescrType=atmxTrafficDescrType, xylnatmVclStatDxCongClp1Cells=xylnatmVclStatDxCongClp1Cells, xylnatmVplStatTable=xylnatmVplStatTable, xylnatmVplTransportPriority=xylnatmVplTransportPriority, xylnatmVclAltTrackPortBase=xylnatmVclAltTrackPortBase, atmxSvcVpCrossConnectLowTDIndex=atmxSvcVpCrossConnectLowTDIndex, atmxSvcVcCrossConnectHighVpi=atmxSvcVcCrossConnectHighVpi, xylnatmVclOamEndpt=xylnatmVclOamEndpt, xylnatmTrafficShaperConfGroup=xylnatmTrafficShaperConfGroup, xylnatmTrafficShaperConfPCR=xylnatmTrafficShaperConfPCR, atmxVpCrossConnectHighVpi=atmxVpCrossConnectHighVpi, xylnatmInterfaceTransType=xylnatmInterfaceTransType, atmxVpCrossConnectAdminStatus=atmxVpCrossConnectAdminStatus, atmxInterfaceSlotIndex=atmxInterfaceSlotIndex, xylnatmVplStatTxCells=xylnatmVplStatTxCells, xylnatmVplUserPriority=xylnatmVplUserPriority, xylnatmTrafficShaperConfCDV=xylnatmTrafficShaperConfCDV, xylnatmTrafficShaperMemTable=xylnatmTrafficShaperMemTable, xylnatmVpCrossConnectHighPortIndex=xylnatmVpCrossConnectHighPortIndex, xylnatmVplAltTrackPortBase=xylnatmVplAltTrackPortBase, xylnatmVplF4F5CopySeg=xylnatmVplF4F5CopySeg, xylnatmInterfaceQsaalStatus=xylnatmInterfaceQsaalStatus, atmxSvcVpCrossConnectLowPortIndex=atmxSvcVpCrossConnectLowPortIndex, xylnatmVcCrossConnectMCastEnable=xylnatmVcCrossConnectMCastEnable, xylnatmInterfaceMediaType=xylnatmInterfaceMediaType, xylnatmClockingxCtrlPort=xylnatmClockingxCtrlPort, xylnatmVcCrossConnectHighVpi=xylnatmVcCrossConnectHighVpi, xylnatmTrafficShaperConfPortIndex=xylnatmTrafficShaperConfPortIndex, 
atmxVcCrossConnectTable=atmxVcCrossConnectTable, atmxTrafficDescrParam3=atmxTrafficDescrParam3, xylnatmVclPortIndex=xylnatmVclPortIndex, xylnatmVpCrossConnectPvcIdentifier=xylnatmVpCrossConnectPvcIdentifier, xylnatmInterfaceConfGroup=xylnatmInterfaceConfGroup, atmxVpCrossConnectH2LLastChange=atmxVpCrossConnectH2LLastChange, xylnatmVplPrTrackPort1=xylnatmVplPrTrackPort1, xylnatmILMIConfPort=xylnatmILMIConfPort, xylnatmVclStatsMode=xylnatmVclStatsMode, xylnatmVpCrossConnectH2LLastChange=xylnatmVpCrossConnectH2LLastChange, atmxSvcVcCrossConnectEntry=atmxSvcVcCrossConnectEntry, xylnatmVclPrTrackPort2=xylnatmVclPrTrackPort2, atmxSvcVcCrossConnectHighPortIndex=atmxSvcVcCrossConnectHighPortIndex, atmxVpCrossConnectLowPortIndex=atmxVpCrossConnectLowPortIndex, atmxVclVpi=atmxVclVpi, xylnatmInterfaceStatUnknownVciCells=xylnatmInterfaceStatUnknownVciCells, xylnatmVplModTable=xylnatmVplModTable, xylnatmVpCrossConnectConnectionId=xylnatmVpCrossConnectConnectionId, xylnatmVpCrossConnectLowSlotIndex=xylnatmVpCrossConnectLowSlotIndex, xylnatmVcCrossConnectHighSlotIndex=xylnatmVcCrossConnectHighSlotIndex, xylnatmVplRmFwdEndpt=xylnatmVplRmFwdEndpt, xylnatmVplStatSlotIndex=xylnatmVplStatSlotIndex, atmxSvcVcCrossConnectLowTDIndex=atmxSvcVcCrossConnectLowTDIndex, xylnatmVplAltTrackPort1=xylnatmVplAltTrackPort1, xylnatmTrafficShaperMemGroup=xylnatmTrafficShaperMemGroup, xylnatmVpCrossConnectMCastEnable=xylnatmVpCrossConnectMCastEnable, xylnatmTrafficShaperConfTsNumIndex=xylnatmTrafficShaperConfTsNumIndex, xylnatmVclStatGroup=xylnatmVclStatGroup, xylnatmVplRmBkwdCopy=xylnatmVplRmBkwdCopy, atmxVpCrossConnectIndex=atmxVpCrossConnectIndex, atmxVclBidirect=atmxVclBidirect, atmxVplGroup=atmxVplGroup, xylnatmInterfaceDescription=xylnatmInterfaceDescription, xylnatmVplSlotIndex=xylnatmVplSlotIndex, xylnatmInterfaceStatDxGcraClp1Cells=xylnatmInterfaceStatDxGcraClp1Cells, atmxVcCrossConnectH2LLastChange=atmxVcCrossConnectH2LLastChange, xylnatmTrafficShaperConfSCR=xylnatmTrafficShaperConfSCR, xylnatmClockingxCtrlBusLine=xylnatmClockingxCtrlBusLine, xylnatmILMIConfILMIEnable=xylnatmILMIConfILMIEnable, xylnatmVcCrossConnectGroup=xylnatmVcCrossConnectGroup, atmxVcCrossConnectLowVci=atmxVcCrossConnectLowVci, xylnatmVclVci=xylnatmVclVci, xylnatmVclStatRxClp0Cells=xylnatmVclStatRxClp0Cells, xylnatmVplStatEntry=xylnatmVplStatEntry, xylnatmILMIConfAutoCfgEnable=xylnatmILMIConfAutoCfgEnable, xylnatmVcCrossConnectLowRxTrafficDescrIndex=xylnatmVcCrossConnectLowRxTrafficDescrIndex, xylnatmVclAltTrackPort1=xylnatmVclAltTrackPort1, atmxVccAalType=atmxVccAalType, xylnatmILMIConfInstance=xylnatmILMIConfInstance, xylnatmVplConnectionDescr=xylnatmVplConnectionDescr, xylnatmILMIConfPeerDevType=xylnatmILMIConfPeerDevType, xylnatmVplModVplVpi=xylnatmVplModVplVpi, atmxVcCrossConnectL2HLastChange=atmxVcCrossConnectL2HLastChange, atmxVcCrossConnectRowStatus=atmxVcCrossConnectRowStatus, atmxInterfaceConfGroup=atmxInterfaceConfGroup, xylnatmVclModVclVpi=xylnatmVclModVclVpi, xylnatmInterfaceStatDxGcrabClp0Cells=xylnatmInterfaceStatDxGcrabClp0Cells, atmxSvcVcCrossConnectLowVci=atmxSvcVcCrossConnectLowVci, xylnatmVplPortIndex=xylnatmVplPortIndex, xylnatmClockingxCtrlGroup=xylnatmClockingxCtrlGroup, atmxVpCrossConnectHighSlotIndex=atmxVpCrossConnectHighSlotIndex, atmxVpCrossConnectRowStatus=atmxVpCrossConnectRowStatus, atmxSvcVpCrossConnectHighSlotIndex=atmxSvcVpCrossConnectHighSlotIndex, xylnatmVclStatDxCongClp0Cells=xylnatmVclStatDxCongClp0Cells, atmxVplBidirect=atmxVplBidirect, xylnatmILMIConfPeerNNISigVer=xylnatmILMIConfPeerNNISigVer, 
atmxSvcVpCrossConnectLowSlotIndex=atmxSvcVpCrossConnectLowSlotIndex, atmxVccAal5EncapsType=atmxVccAal5EncapsType, xylnatmVplLgclChanRedirect=xylnatmVplLgclChanRedirect, atmxVplPortIndex=atmxVplPortIndex, xylnatmInterfaceUniVersion=xylnatmInterfaceUniVersion, xylnatmInterfaceStatDxGcraClp0Cells=xylnatmInterfaceStatDxGcraClp0Cells, xylnatmVplOamCopy=xylnatmVplOamCopy, atmxVcCrossConnectHighPortIndex=atmxVcCrossConnectHighPortIndex, xylnatmTrafficShaperConfSlotIndex=xylnatmTrafficShaperConfSlotIndex, xylnatmVplStatPortIndex=xylnatmVplStatPortIndex, xylnatmVclStatTable=xylnatmVclStatTable)
|
#author diwakar
import psutil
from datetime import date
import calendar
import datetime
try:
    f = open("set.csv", 'w')
    f.write("Day,Hour,Cores,Total,CPU1,CPU2,CPU3,CPU4,Actual Required\n")
    n = 4
    def actual_calculator(a):
        # map the average utilisation of the four cores to the number of cores actually needed
        t = sum(a) / 4
        if t < 30:
            return 1
        elif t < 50:
            return 2
        elif t < 70:
            return 3
        else:
            return 4
    while True:
        my_date = date.today()
        day = calendar.day_name[my_date.weekday()]
        now = datetime.datetime.now()
        # per-core utilisation sampled over half a second (assumes a 4-core machine)
        a = psutil.cpu_percent(interval=0.5, percpu=True)
        act = actual_calculator(a)
        f.write(day + ',' + str(now.hour) + ',' + str(n) + ',' + str(sum(a) / 4) + ','
                + str(a[0]) + ',' + str(a[1]) + ',' + str(a[2]) + ',' + str(a[3]) + ',' + str(act) + '\n')
except KeyboardInterrupt:
    # stop logging on Ctrl+C
    pass
finally:
    f.close()
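# A row of set.csv then looks like this (hypothetical values; 23.5 is the average of the four cores):
# Monday,14,4,23.5,20.0,25.0,22.0,27.0,1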
|
#!/usr/bin/env python
#
# Copyright 2019 International Business Machines
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import platform
from os.path import join as pathjoin
from os.path import isdir as isdir
from os.path import isfile as isfile
from ocaccel_utils import which
from ocaccel_utils import SystemCMD
from ocaccel_utils import msg
from os import environ as env
from ocaccel_utils import source
def env_check(options):
assert sys.version_info >= (2, 6)
msg.ok_msg_blue("--------> Environment Check")
gcc = SystemCMD("gcc")
gcc . check(existence_critical=True, minimum_version = "4.4.6")
if not options.no_make_model or not options.no_run_sim or options.make_image:
vivado = SystemCMD("vivado")
xterm = SystemCMD("xterm")
vivado . check(existence_critical=True, minimum_version = "2018.2")
xterm . check(existence_critical=True)
if options.simulator.lower() == "xcelium":
xrun = SystemCMD("xrun")
xrun . check(existence_critical=True)
elif options.simulator.lower() == "vcs":
vcs = SystemCMD("vcs")
vcs . check(existence_critical=True)
elif options.simulator.lower() == "nosim":
pass
elif options.simulator.lower() == "xsim":
# xsim is bundled with vivado, no need to check
pass
else:
msg.fail_msg("%s is an unknown simulator! Exiting ... " % options.simulator)
if options.no_run_sim == False or options.no_make_model == False:
if options.simulator.lower() != "nosim" and options.unit_sim != True:
if isdir(pathjoin(options.ocse_root, "ocse")) and\
isdir(pathjoin(options.ocse_root, "afu_driver")) and\
isdir(pathjoin(options.ocse_root, "libocxl")):
msg.ok_msg_blue("OCSE path %s is valid" % options.ocse_root)
else:
msg.fail_msg("OCSE path %s is not valid! Exiting ... " % options.ocse_root)
if isdir(pathjoin(options.ocaccel_root, "actions")) and\
isdir(pathjoin(options.ocaccel_root, "hardware")) and\
isdir(pathjoin(options.ocaccel_root, "software")):
msg.ok_msg_blue("SNAP ROOT %s is valid" % options.ocaccel_root)
else:
msg.fail_msg("SNAP ROOT %s is not valid! Exiting ... " % options.ocaccel_root)
if 'SNAP_ROOT' not in env:
env['SNAP_ROOT'] = options.ocaccel_root
source(pathjoin(env['SNAP_ROOT'], '.snap_config.sh'))
prflow_mode = env['USE_PRFLOW']
if prflow_mode == "TRUE":
if options.interactive == True:
options.image_mode = "cloud_action"
if options.image_mode == "normal":
msg.fail_msg("%s mode selected for image build while in PR flow! Exiting ... " % options.image_mode)
else:
msg.ok_msg("Partial reconfiguration mode detected")
else:
if options.image_mode != "normal":
msg.fail_msg("%s mode selected for image build while in Normal flow! Exiting ... " % options.image_mode)
else:
options.image_mode = "normal"
msg.ok_msg("Environment check PASSED")
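# Hypothetical invocation sketch (attribute names are taken from the checks above; values are only examples):
# from argparse import Namespace
# opts = Namespace(no_make_model=True, no_run_sim=True, make_image=False,
#                  simulator='nosim', unit_sim=False, interactive=False,
#                  image_mode='normal', ocse_root='/path/to/ocse', ocaccel_root='/path/to/ocaccel')
# env_check(opts)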
|
# ! /usr/bin/env python
import numpy as np
import itertools
"""ramp effect model
2 means two types of traps
original author: Daniel Apai
Version 0.3 fixing trapping parameters
Version 0.2.1 introduce two types of traps, slow traps and fast traps
Version 0.2: add extra keyword parameter to indicate scan or staring
mode observations for staring mode, the detector receive flux in the
same rate during overhead time as that during exposure
precise mathematics forms are included
Version 0.1: Adapted original IDL code to python by Yifan Zhou
"""
def ackBar2(
cRates,
tExp,
exptime=180,
trap_pop_s=0,
trap_pop_f=0,
dTrap_s=0,
dTrap_f=0,
dt0=0,
lost=0,
mode='scanning'
):
"""Hubble Space Telescope ramp effet model
Parameters:
cRates -- intrinsic count rate of each exposures, unit e/s
tExp -- start time of every exposures
expTime -- (default 180 seconds) exposure time of the time series
trap_pop -- (default 0) number of occupied traps at the beginning of the observations
dTrap -- (default [0])number of extra trap added between two orbits
dt0 -- (default 0) possible exposures before very beginning, e.g.,
guiding adjustment
lost -- (default 0, no lost) proportion of trapped electrons that are not eventually detected
(mode) -- (default scanning, scanning or staring, or others), for scanning mode
observation , the pixel no longer receive photons during the overhead
time, in staring mode, the pixel keps receiving elctrons
"""
nTrap_s = 1525.38 # 1320.0
eta_trap_s = 0.013318 # 0.01311
tau_trap_s = 1.63e4
nTrap_f = 162.38
eta_trap_f = 0.008407
tau_trap_f = 281.463
try:
dTrap_f = itertools.cycle(dTrap_f)
dTrap_s = itertools.cycle(dTrap_s)
dt0 = itertools.cycle(dt0)
except TypeError:
        # if dTrap_s, dTrap_f, or dt0 are provided as scalars, wrap them in lists before cycling
dTrap_f = itertools.cycle([dTrap_f])
dTrap_s = itertools.cycle([dTrap_s])
dt0 = itertools.cycle([dt0])
obsCounts = np.zeros(len(tExp))
# ensure initial values do not exceed the total trap numbers
trap_pop_s = min(trap_pop_s, nTrap_s)
trap_pop_f = min(trap_pop_f, nTrap_f)
for i in range(len(tExp)):
try:
dt = tExp[i+1] - tExp[i]
except IndexError:
dt = exptime
f_i = cRates[i]
c1_s = eta_trap_s * f_i / nTrap_s + 1 / tau_trap_s # a key factor
c1_f = eta_trap_f * f_i / nTrap_f + 1 / tau_trap_f
# number of trapped electron during one exposure
dE1_s = (eta_trap_s * f_i / c1_s - trap_pop_s) * (1 - np.exp(-c1_s * exptime))
dE1_f = (eta_trap_f * f_i / c1_f - trap_pop_f) * (1 - np.exp(-c1_f * exptime))
dE1_s = min(trap_pop_s + dE1_s, nTrap_s) - trap_pop_s
dE1_f = min(trap_pop_f + dE1_f, nTrap_f) - trap_pop_f
trap_pop_s = min(trap_pop_s + dE1_s, nTrap_s)
trap_pop_f = min(trap_pop_f + dE1_f, nTrap_f)
obsCounts[i] = f_i * exptime - dE1_s - dE1_f
if dt < 5 * exptime: # whether next exposure is in next batch of exposures
# same orbits
if mode == 'scanning':
# scanning mode, no incoming flux between exposures
dE2_s = - trap_pop_s * (1 - np.exp(-(dt - exptime)/tau_trap_s))
dE2_f = - trap_pop_f * (1 - np.exp(-(dt - exptime)/tau_trap_f))
elif mode == 'staring':
# for staring mode, there is flux between exposures
dE2_s = (eta_trap_s * f_i / c1_s - trap_pop_s) * (1 - np.exp(-c1_s * (dt - exptime)))
dE2_f = (eta_trap_f * f_i / c1_f - trap_pop_f) * (1 - np.exp(-c1_f * (dt - exptime)))
else:
# others, same as scanning
dE2_s = - trap_pop_s * (1 - np.exp(-(dt - exptime)/tau_trap_s))
dE2_f = - trap_pop_f * (1 - np.exp(-(dt - exptime)/tau_trap_f))
trap_pop_s = min(trap_pop_s + dE2_s, nTrap_s)
trap_pop_f = min(trap_pop_f + dE2_f, nTrap_f)
elif dt < 1200:
# considering in-orbit buffer download scenario
if mode == 'staring':
trap_pop_s = min(trap_pop_s * np.exp(-(dt-exptime)/tau_trap_s), nTrap_s)
trap_pop_f = min(trap_pop_f * np.exp(-(dt-exptime)/tau_trap_f), nTrap_f)
else:
# switch orbit
dt0_i = next(dt0)
trap_pop_s = min(trap_pop_s * np.exp(-(dt-exptime-dt0_i)/tau_trap_s) + next(dTrap_s), nTrap_s)
trap_pop_f = min(trap_pop_f * np.exp(-(dt-exptime-dt0_i)/tau_trap_f) + next(dTrap_f), nTrap_f)
f_i = cRates[i + 1]
c1_s = eta_trap_s * f_i / nTrap_s + 1 / tau_trap_s # a key factor
c1_f = eta_trap_f * f_i / nTrap_f + 1 / tau_trap_f
dE3_s = (eta_trap_s * f_i / c1_s - trap_pop_s) * (1 - np.exp(-c1_s * dt0_i))
dE3_f = (eta_trap_f * f_i / c1_f - trap_pop_f) * (1 - np.exp(-c1_f * dt0_i))
dE3_s = min(trap_pop_s + dE3_s, nTrap_s) - trap_pop_s
dE3_f = min(trap_pop_f + dE3_f, nTrap_f) - trap_pop_f
trap_pop_s = min(trap_pop_s + dE3_s, nTrap_s)
trap_pop_f = min(trap_pop_f + dE3_f, nTrap_f)
trap_pop_s = max(trap_pop_s, 0)
trap_pop_f = max(trap_pop_f, 0)
return obsCounts
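# Minimal usage sketch (illustrative numbers only, not real HST data):
# >>> t = np.arange(0.0, 2700.0, 180.0)                 # exposure start times, seconds
# >>> counts = ackBar2(np.full(len(t), 100.0), t)       # constant 100 e/s intrinsic rate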
if __name__ == '__main__':
pass
# import matplotlib.pyplot as plt
# t1 = np.linspace(0, 2700, 80)
# t2 = np.linspace(5558, 8280, 80)
# t = np.concatenate((t1, t2))
# crate = 100
# crates = crate * np.ones(len(t))
# dataDIR = '/Users/ZhouYf/Documents/HST14241/alldata/2M0335/DATA/'
# from os import path
# import pandas as pd
# info = pd.read_csv(
# path.expanduser('~/Documents/HST14241/alldata/2M0335/2M0335_fileInfo.csv'),
# parse_dates=True,
# index_col='Datetime')
# info['Time'] = np.float32(info.index - info.index.values[0]) / 1e9
# expTime = info['Exp Time'].values[0]
# grismInfo = info[info['Filter'] == 'G141']
# tExp = grismInfo['Time'].values
# # cRates = np.ones(len(LC)) * LC.mean() * 1.002
# cRates = np.ones(len(tExp)) * 100
# obs = ackBar2(cRates, tExp, exptime=expTime, lost=0,
# mode='scanning')
# plt.close('all')
# # plt.plot(tExp, LC*expTime, 'o')
# plt.plot(tExp, obs, '-')
# # plt.ylim([crate * 0.95, crate * 1.02])
# plt.show()
|
Num outliers: 600
Num inliers: 600
################################################################
# Test started at: 2018-12-02 14:41:49.073441
#AUROC D()-score: 0.28877
#AUPRC D()-score: 0.39428
Num outliers: 600
Num inliers: 600
################################################################
# Test started at: 2018-12-02 14:42:28.337823
#AUROC D()-score: 0.70523
#AUPRC D()-score: 0.67378
|
import unittest
from app.models import Articles
class test_articles(unittest.TestCase):
'''
Test Class to test the behaviour of the Articles class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
        self.new_article = Articles("2021-08-18T17:00:00Z", 'https://i.kinja-img.com/gawker-media/image/upload/c_fill,f_auto,fl_progressive,g_center,h_675,pg_1,q_80,w_1200/b64f24febe8559c507071c6b8c420e80.jpg', 'This Mexican Butterwort Plant Can Help Solve Your Gnat Problem', 'Gnats and other small flies are annoying to get rid of. A small number of them is bad enough, but an infestation can mean a lot more effort. There are several remedies weve covered before, like apple… [+2452 chars]', 'Aisha Jordan', 'https://lifehacker.com/this-mexican-butterwort-plant-can-help-solve-your-gnat-1847498754')
def test_instance(self):
self.assertTrue(isinstance(self.new_article, Articles))
|
"""SQLAlchemy de notre base de données Globale."""
from app.database import Base
from sqlalchemy import (
Boolean,
Column,
DateTime,
Float,
ForeignKey,
Integer,
String,
UniqueConstraint,
)
from sqlalchemy.orm import relationship
# import datetime
# 'tim': int ((self.tim - datetime.datetime (1970, 1, 1)).total_seconds ()),
class Adherents(Base):
"""Table adherents id/uuid"""
__tablename__ = "adherents"
id = Column(Integer, primary_key=True, index=True)
first_name = Column(String, nullable=False)
last_name = Column(String, nullable=False)
address_postal_code = Column(String, nullable=True)
uuid = Column(String(36), unique=True, nullable=False, index=True)
managed_area_id = Column(
Integer, ForeignKey("referent_managed_areas_tags.referent_managed_area_id")
)
managed_area = relationship("ReferentManagedAreasTags")
candidate_managed_area_id = Column(Integer, ForeignKey("candidate_managed_area.id"))
candidate_managed_area = relationship("CandidateManagedArea")
class ReferentManagedAreasTags(Base):
"""Table referent_managed_areas_tags"""
__tablename__ = "referent_managed_areas_tags"
referent_managed_area_id = Column(Integer, index=True)
referent_tag_id = Column(Integer, ForeignKey("referent_tags.id"), nullable=True)
referent_tag = relationship("ReferentTags", lazy="joined")
__mapper_args__ = {"primary_key": [referent_managed_area_id, referent_tag_id]}
class AdherentMessageFilters(Base):
"""Table adherent_message_filters"""
__tablename__ = "adherent_message_filters"
id = Column(Integer, primary_key=True, index=True)
referent_tag_id = Column(Integer, ForeignKey("referent_tags.id"), nullable=True)
referent_tag = relationship("ReferentTags", lazy="joined")
zone_id = Column(Integer, ForeignKey("geo_zone.id"), nullable=True)
zone = relationship("GeoZone", lazy="joined")
class ReferentTags(Base):
"""Table referent_tags"""
__tablename__ = "referent_tags"
id = Column(Integer, primary_key=True, index=True)
name = Column(String, nullable=False)
code = Column(String, nullable=False)
type = Column(String, nullable=True)
zone_id = Column(Integer, ForeignKey("geo_zone.id"), nullable=True)
zone = relationship("GeoZone", lazy="joined")
class AdherentMessages(Base):
"""Table adherent_messages"""
__tablename__ = "adherent_messages"
id = Column(Integer, primary_key=True, index=True)
author_id = Column(Integer, ForeignKey("adherents.id"), nullable=True)
author = relationship("Adherents", lazy="joined")
filter_id = Column(
Integer, ForeignKey("adherent_message_filters.id"), nullable=True
)
filter = relationship("AdherentMessageFilters", lazy="joined")
label = Column(String, nullable=False)
subject = Column(String, nullable=False)
status = Column(String, nullable=False)
type = Column(String, nullable=False)
sent_at = Column(DateTime, nullable=True)
class CandidateManagedArea(Base):
"""Table candidate_managed_area pour retrouver la zone_id"""
__tablename__ = "candidate_managed_area"
id = Column(Integer, primary_key=True, index=True)
zone_id = Column(Integer, ForeignKey("geo_zone.id"))
candidate_managed_zone = relationship("GeoZone")
def get_zone_id(self):
return self.zone_id
class GeoZone(Base):
"""Table geo_zone pour retrouver la zone_id"""
__tablename__ = "geo_zone"
id = Column(Integer, primary_key=True, index=True)
uuid = Column(String(36), nullable=False)
type = Column(String(255), nullable=False)
code = Column(String(255), nullable=False)
name = Column(String(255), nullable=False)
postal_code = Column(String(255), nullable=True)
UniqueConstraint("code", "type", name="geo_zone_code_type_unique")
class GeoZoneParent(Base):
"""Table geo_zone_parent pour retrouver la zone_id"""
__tablename__ = "geo_zone_parent"
child_id = Column(Integer, ForeignKey("geo_zone.id"), index=True)
child = relationship("GeoZone", foreign_keys="GeoZoneParent.child_id")
parent_id = Column(Integer, ForeignKey("geo_zone.id"), index=True)
parent = relationship("GeoZone", foreign_keys="GeoZoneParent.parent_id")
__mapper_args__ = {"primary_key": [child_id, parent_id]}
class GeoBorough(Base):
"""Table geo_borough"""
__tablename__ = "geo_borough"
id = Column(Integer, primary_key=True, index=True)
code = Column(String, nullable=False)
name = Column(String(255), nullable=False)
latitude = Column(Float, nullable=False)
longitude = Column(Float, nullable=False)
city_id = Column(Integer, ForeignKey("geo_city.id"))
geo_city = relationship("GeoCity")
class GeoCity(Base):
"""Table geo_city"""
__tablename__ = "geo_city"
id = Column(Integer, primary_key=True, index=True)
code = Column(String, nullable=False)
name = Column(String(255), nullable=False)
postal_code = Column(String, nullable=False)
active = Column(Boolean, nullable=True)
department_id = Column(Integer, ForeignKey("geo_department.id"))
geo_department = relationship("GeoDepartment")
class GeoDistrict(Base):
"""Table geo_district"""
__tablename__ = "geo_district"
id = Column(Integer, primary_key=True, index=True)
code = Column(String, nullable=False)
name = Column(String(255), nullable=False)
latitude = Column(Float, nullable=False)
longitude = Column(Float, nullable=False)
department_id = Column(Integer, ForeignKey("geo_department.id"))
geo_department = relationship("GeoDepartment")
class GeoDepartment(Base):
"""Table geo_department"""
__tablename__ = "geo_department"
id = Column(Integer, primary_key=True, index=True)
code = Column(String, nullable=False)
name = Column(String(255), nullable=False)
latitude = Column(Float, nullable=False)
longitude = Column(Float, nullable=False)
region_id = Column(Integer, ForeignKey("geo_region.id"))
geo_region = relationship("GeoRegion")
class GeoRegion(Base):
"""Table geo_region"""
__tablename__ = "geo_region"
id = Column(Integer, primary_key=True, index=True)
code = Column(String, nullable=False)
name = Column(String(255), nullable=False)
latitude = Column(Float, nullable=False)
longitude = Column(Float, nullable=False)
class GeoCountry(Base):
"""Table geo_country"""
__tablename__ = "geo_country"
id = Column(Integer, primary_key=True, index=True)
code = Column(String, nullable=False)
name = Column(String(255), nullable=False)
latitude = Column(Float, nullable=False)
longitude = Column(Float, nullable=False)
class JemarcheDataSurvey(Base):
"""Table jemarche_data_survey"""
__tablename__ = "jemarche_data_survey"
id = Column(Integer, primary_key=True, index=True)
data_survey_id = Column(Integer, ForeignKey("jecoute_data_survey.id"), nullable=True)
data_survey = relationship("JecouteDataSurvey", lazy="joined")
postal_code = Column(String, nullable=True)
age_range = Column(String, nullable=True)
gender = Column(String, nullable=True)
latitude = Column(Float, nullable=True)
longitude = Column(Float, nullable=True)
class JecouteDataSurvey(Base):
"""Table jecoute_data_survey"""
__tablename__ = "jecoute_data_survey"
id = Column(Integer, primary_key=True, index=True)
uuid = Column(String(36), unique=True, nullable=False)
author_id = Column(Integer, ForeignKey("adherents.id"), nullable=True)
author = relationship("Adherents", lazy="joined")
author_postal_code = Column(String, nullable=True)
survey_id = Column(Integer, ForeignKey("jecoute_survey.id"))
survey = relationship("JecouteSurvey", lazy="joined")
posted_at = Column(DateTime, nullable=False)
class JecouteSurvey(Base):
"""Table jecoute_survey"""
__tablename__ = "jecoute_survey"
id = Column(Integer, primary_key=True, index=True)
uuid = Column(String(36), unique=True, nullable=False)
created_by_adherent_id = Column(Integer, ForeignKey("adherents.id"), nullable=True)
author = relationship("Adherents", lazy="joined")
name = Column(String, nullable=False)
created_at = Column(DateTime, nullable=False)
updated_at = Column(DateTime, nullable=False)
type = Column(String, nullable=False)
zone_id = Column(Integer, ForeignKey("geo_zone.id"))
geo_zone_relation = relationship("GeoZone")
class MailChimpCampaign(Base):
"""Table mailchimp_campaign"""
__tablename__ = "mailchimp_campaign"
id = Column(Integer, primary_key=True, index=True)
message_id = Column(Integer, ForeignKey("adherent_messages.id"), nullable=True)
message = relationship("AdherentMessages", lazy="joined")
recipient_count = Column(Integer, nullable=True)
status = Column(String, nullable=False)
report_id = Column(
Integer, ForeignKey("mailchimp_campaign_report.id"), nullable=True
)
report = relationship(
"MailChimpCampaignReport", back_populates="mailchimp_campaign"
)
class MailChimpCampaignReport(Base):
"""Table mailchimp_campaign_report"""
__tablename__ = "mailchimp_campaign_report"
id = Column(Integer, primary_key=True, index=True)
open_total = Column(Integer, nullable=False)
open_unique = Column(Integer, nullable=False)
click_total = Column(Integer, nullable=False)
click_unique = Column(Integer, nullable=False)
email_sent = Column(Integer, nullable=False)
unsubscribed = Column(Integer, nullable=False)
mailchimp_campaign = relationship("MailChimpCampaign", back_populates="report")
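# Minimal usage sketch (assumes a configured Session from app.database; variable names are illustrative):
# adherent = session.query(Adherents).filter_by(uuid=some_uuid).one_or_none()
# if adherent and adherent.candidate_managed_area:
#     zone_id = adherent.candidate_managed_area.get_zone_id()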
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Author: Pauli Virtanen, 2016
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
from .step_detect import solve_potts_approx
def compute_stats(samples):
"""
Statistical analysis of measured samples.
Parameters
----------
samples : list of float
List of total times (y) of benchmarks.
Returns
-------
beta_hat : float
Estimated time per iteration
stats : dict
Information on statistics of the estimator.
"""
if len(samples) < 1:
return None, None
elif len(samples) == 1:
return samples[0], None
Y = list(samples)
# Median and quantiles
y_50, ci_50 = quantile_ci(Y, 0.5, alpha_min=0.99)
y_25 = quantile(Y, 0.25)
y_75 = quantile(Y, 0.75)
# Look for big shifts in the time series
min_size = max(5, len(Y)//5)
gamma = quantile([abs(yp - y_50) for yp in Y], 0.5) * min_size
if min_size <= len(Y):
step_right, step_mu, _ = solve_potts_approx(Y, gamma=gamma, p=1, min_size=min_size)
else:
step_mu = [y_50]
# Broaden the confidence interval by the detected steps
ci_a, ci_b = ci_50
ci_a -= y_50 - min(step_mu)
ci_b += max(step_mu) - y_50
# Produce results
mean = sum(Y) / len(Y)
var = sum((yp - mean)**2 for yp in Y) / len(Y) # mle
std = math.sqrt(var)
result = y_50
stats = {'ci_99': [ci_a, ci_b],
'q_25': y_25,
'q_75': y_75,
'min': min(Y),
'max': max(Y),
'mean': mean,
'std': std,
'n': len(Y),
'systematic': max(step_mu) - min(step_mu)}
return result, stats
def get_err(result, stats):
"""
Return an 'error measure' suitable for informing the user
about the spread of the measurement results.
"""
a, b = stats['q_25'], stats['q_75']
return (b - a)/2
def is_different(stats_a, stats_b):
"""
Check whether the samples are statistically different.
This is a pessimistic check, and not statistically fully rigorous.
The false negative rate is probably relatively high if the distributions
overlap significantly.
    Parameters
    ----------
    stats_a, stats_b
        Statistics dicts for the two sets of samples, as returned by compute_stats.
    """
    # If the 99% confidence intervals overlap, we cannot claim the samples differ
ci_a = stats_a['ci_99']
ci_b = stats_b['ci_99']
if ci_a[1] >= ci_b[0] and ci_a[0] <= ci_b[1]:
return False
return True
def quantile_ci(x, q, alpha_min=0.99):
"""
Compute a quantile and a confidence interval.
Assumes independence, but otherwise nonparametric.
Parameters
----------
x : list of float
Samples
q : float
Quantile to compute, in [0,1].
alpha_min : float, optional
Lower bound for the coverage.
Returns
-------
m : float
Quantile of x
ci : tuple of floats
Confidence interval (a, b), of coverage >= alpha_min.
"""
y = sorted(x)
n = len(y)
cdf = 0
alpha_min = min(alpha_min, 1 - alpha_min)
pa = alpha_min / 2
pb = 1 - pa
a = y[0]
b = y[-1]
for k, yp in enumerate(y):
cdf += binom_pmf(n, k, q)
if cdf <= pa:
if k < len(y) - 1:
a = 0.5*(yp + y[k+1])
else:
a = yp
if cdf >= pb:
if k > 0:
b = 0.5*(yp + y[k-1])
else:
b = yp
break
m = quantile(y, q)
return m, (a, b)
def quantile(x, q):
"""
Compute quantile/percentile of the data
Parameters
----------
x : list of float
Data set
q : float
Quantile to compute, 0 <= q <= 1
"""
if not 0 <= q <= 1:
raise ValueError("Invalid quantile")
y = sorted(x)
n = len(y)
z = (n - 1) * q
j = int(math.floor(z))
z -= j
if j == n - 1:
m = y[-1]
else:
m = (1 - z)*y[j] + z*y[j+1]
return m
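# Quick check of the linear-interpolation convention used above (values verified by hand):
# quantile([1.0, 2.0, 3.0, 4.0], 0.5)  == 2.5
# quantile([1.0, 2.0, 3.0, 4.0], 0.25) == 1.75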
def binom_pmf(n, k, p):
"""Binomial pmf = (n choose k) p**k (1 - p)**(n - k)"""
if not (0 <= k <= n):
return 0
if p == 0:
return 1.0 * (k == 0)
elif p == 1.0:
return 1.0 * (k == n)
logp = math.log(p)
log1mp = math.log(1 - p)
return math.exp(lgamma(1 + n) - lgamma(1 + n - k) - lgamma(1 + k)
+ k*logp + (n - k)*log1mp)
_BERNOULLI = [1.0, -0.5, 0.166666666667, 0.0, -0.0333333333333, 0.0, 0.0238095238095]
def lgamma(x):
"""
Log gamma function. Only implemented at integers.
"""
if x <= 0:
raise ValueError("Domain error")
if x > 100:
# DLMF 5.11.1
r = 0.5 * math.log(2*math.pi) + (x - 0.5) * math.log(x) - x
for k in range(1, len(_BERNOULLI)//2 + 1):
r += _BERNOULLI[2*k] / (2*k*(2*k - 1) * x**(2*k - 1))
return r
# Fall back to math.factorial
int_x = int(x)
err_int = abs(x - int_x)
if err_int < 1e-12 * abs(x):
return math.log(math.factorial(int_x - 1))
# Would need full implementation
return float("nan")
|
#! /usr/bin/env python3
import logging
from pyboi import Pyboi
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(name='run')
# just basic functionality testing
def main():
log.debug('creating a pyboi class instance')
gameboy = Pyboi()
log.debug('loading file "tetris.gb"')
gameboy.load_rom('tetris.gb')
log.debug('attempting to save the game to "mysave1"')
gameboy.save('mysave1')
if __name__ == "__main__":
main()
|
import ast
def write_paramdict_file(params, filename):
    with open(filename, 'w') as f:
        print(params, file=f)
def read_paramdict_file(filename):
    with open(filename, 'r') as f:
        content = f.read()
    # literal_eval only accepts Python literals, so untrusted files cannot execute arbitrary code
    return ast.literal_eval(content)
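# Example round-trip (hypothetical file name and parameter values):
# write_paramdict_file({'learning_rate': 0.01, 'epochs': 10}, 'params.txt')
# read_paramdict_file('params.txt')   # -> {'learning_rate': 0.01, 'epochs': 10}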
|
"""added state and storage tables
Revision ID: ef91a56cb621
Revises: 4722e540ad36
Create Date: 2018-06-15 14:55:03.248439
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ef91a56cb621'
down_revision = '4722e540ad36'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('state_diff',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('block_number', sa.Numeric(), nullable=True),
sa.Column('timestamp', sa.TIMESTAMP(), nullable=True),
sa.Column('transaction_hash', sa.String(length=66), nullable=True),
sa.Column('transaction_index', sa.Numeric(), nullable=True),
sa.Column('address', sa.String(length=42), nullable=True),
sa.Column('balance_diff', sa.Numeric(), nullable=True),
sa.Column('nonce_diff', sa.Integer(), nullable=True),
sa.Column('code_from', sa.Text(), nullable=True),
sa.Column('code_to', sa.Text(), nullable=True),
sa.Column('state_diff_type', sa.String(length=10), nullable=True),
sa.ForeignKeyConstraint(['block_number'], ['blocks.block_number'], ),
sa.ForeignKeyConstraint(['transaction_hash'], ['transactions.transaction_hash'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_state_diff_address'), 'state_diff', ['address'], unique=False)
op.create_index(op.f('ix_state_diff_transaction_hash'), 'state_diff', ['transaction_hash'], unique=False)
op.create_table('storage_diff',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('block_number', sa.Numeric(), nullable=True),
sa.Column('timestamp', sa.TIMESTAMP(), nullable=True),
sa.Column('transaction_hash', sa.String(length=66), nullable=True),
sa.Column('transaction_index', sa.Numeric(), nullable=True),
sa.Column('state_diff_id', sa.Integer(), nullable=False),
sa.Column('address', sa.String(length=42), nullable=False),
sa.Column('position', sa.String(length=66), nullable=False),
sa.Column('storage_from', sa.String(length=66), nullable=True),
sa.Column('storage_to', sa.String(length=66), nullable=True),
sa.ForeignKeyConstraint(['block_number'], ['blocks.block_number'], ),
sa.ForeignKeyConstraint(['state_diff_id'], ['state_diff.id'], ),
sa.ForeignKeyConstraint(['transaction_hash'], ['transactions.transaction_hash'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_storage_diff_address'), 'storage_diff', ['address'], unique=False)
op.create_index(op.f('ix_storage_diff_transaction_hash'), 'storage_diff', ['transaction_hash'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_storage_diff_transaction_hash'), table_name='storage_diff')
op.drop_index(op.f('ix_storage_diff_address'), table_name='storage_diff')
op.drop_table('storage_diff')
op.drop_index(op.f('ix_state_diff_transaction_hash'), table_name='state_diff')
op.drop_index(op.f('ix_state_diff_address'), table_name='state_diff')
op.drop_table('state_diff')
# ### end Alembic commands ###
|
# Read three integers and print their sum and their average (to two decimal places)
a, b, c = map(int, input().split())
print(str(a+b+c), format((a+b+c)/3, ".2f"))
|
"""Add avatar column
Revision ID: a040f705ffc4
Revises: 825074598082
Create Date: 2020-08-18 06:45:24.708617
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a040f705ffc4'
down_revision = '825074598082'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('avatar', sa.String(length=150), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'avatar')
# ### end Alembic commands ###
|
import cv2
import os
from glob import glob
frame_rate = 20
image_size = (576, 160)
img_seq_dir = './raw_img_sequences/'
image_paths = glob(os.path.join(img_seq_dir, '*.png'))
image_paths.sort()
writer = cv2.VideoWriter('./videos/kitty_street.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), frame_rate, image_size)
for image_path in image_paths:
img = cv2.imread(image_path)
img = cv2.resize(img, image_size)
    writer.write(img)
# finalize the container so the AVI file is playable
writer.release()
|
# import matplotlib.pyplot as plt
from matplotlib import pyplot as plt
import numpy as np
# file_data = np.loadtxt('2_Record2308.dat')
# plt.plot(file_data)
# plt.ylabel("y label")
# plt.xlabel("x label")
# plt.show()
_,axis = plt.subplots()
axis.plot([10,15,5,7,0,40])
axis.set(title="yVals")
plt.show()
# file_data1 = np.loadtxt('2_Record3388.dat')
# plt.plot(file_data2)
# plt.ylabel("y label")
# plt.xlabel("x label")
# plt.show()
# axis = plt.subplots()
# axis.plot([10,15,5,7,0,40])
# axis.set(title="A graph")
# plt.show()
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008 Daniel Moisset, Ricardo Quesada, Rayentray Tappa, Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Action Sprite
Animating a sprite
==================
To execute any action you need to create an action::
move = MoveBy( (50,0), 5 )
In this case, ``move`` is an action that will move the sprite
50 pixels to the right (``x`` coordinate) and 0 pixels in the ``y`` coordinate
in 5 seconds.
And now tell the sprite to execute it::
sprite.do( move )
'''
__docformat__ = 'restructuredtext'
import cocosnode
from batch import *
import pyglet
from pyglet import image
from pyglet.gl import *
from collections import defaultdict
__all__ = [ 'Sprite', # Sprite class
'NotifierSprite', # Sprite class that notifies about attribute changes
]
class Sprite( BatchableNode, pyglet.sprite.Sprite):
    '''Sprites are image nodes that can execute actions.
Example::
sprite = Sprite('grossini.png')
'''
def __init__( self, image, position=(0,0), rotation=0, scale=1, opacity = 255, color=(255,255,255), anchor = None ):
'''Initialize the sprite
:Parameters:
`image` : string or image
name of the image resource or a pyglet image.
`position` : tuple
position of the anchor. Defaults to (0,0)
`rotation` : float
the rotation (degrees). Defaults to 0.
`scale` : float
the zoom factor. Defaults to 1.
`opacity` : int
the opacity (0=transparent, 255=opaque). Defaults to 255.
`color` : tuple
the color to colorize the child (RGB 3-tuple). Defaults to (255,255,255).
`anchor` : (float, float)
(x,y)-point from where the image will be positions, rotated and scaled in pixels. For example (image.width/2, image.height/2) is the center (default).
'''
if isinstance(image, str):
image = pyglet.resource.image(image)
pyglet.sprite.Sprite.__init__(self, image)
BatchableNode.__init__(self)
if anchor is None:
if isinstance(self.image, pyglet.image.Animation):
anchor = (image.frames[0].image.width / 2,
image.frames[0].image.height / 2)
else:
anchor = image.width / 2, image.height / 2
self.image_anchor = anchor
self.anchor = (0, 0)
#: group.
#: XXX what is this?
self.group = None
#: children group.
#: XXX what is this ?
self.children_group = None
#: position of the sprite in (x,y) coordinates
self.position = position
#: rotation degrees of the sprite. Default: 0 degrees
self.rotation = rotation
#: scale of the sprite where 1.0 the default value
self.scale = scale
#: opacity of the sprite where 0 is transparent and 255 is solid
self.opacity = opacity
#: color of the sprite in R,G,B format where 0,0,0 is black and 255,255,255 is white
self.color = color
def contains(self, x, y):
'''Test whether this (untransformed) Sprite contains the pixel coordinates
given.
'''
sx, sy = self.position
ax, ay = self.image_anchor
sx -= ax
sy -= ay
if x < sx or x > sx + self.width: return False
if y < sy or y > sy + self.height: return False
return True
def _set_anchor_x(self, value):
if isinstance(self.image, pyglet.image.Animation):
for img in self.image.frames:
img.image.anchor_x = value
self._texture.anchor_x = value
else:
self.image.anchor_x = value
self._update_position()
def _get_anchor_x(self):
if isinstance(self.image, pyglet.image.Animation):
return self.image.frames[0].image.anchor_x
else:
return self.image.anchor_x
image_anchor_x = property(_get_anchor_x, _set_anchor_x)
def _set_anchor_y(self, value):
if isinstance(self.image, pyglet.image.Animation):
for img in self.image.frames:
img.image.anchor_y = value
self._texture.anchor_y = value
else:
self.image.anchor_y = value
self._update_position()
def _get_anchor_y(self):
if isinstance(self.image, pyglet.image.Animation):
return self.image.frames[0].image.anchor_y
else:
return self.image.anchor_y
image_anchor_y = property(_get_anchor_y, _set_anchor_y)
def _set_anchor(self, value):
self._set_anchor_x(value[0])
self._set_anchor_y(value[1])
def _get_anchor(self):
return (self._get_anchor_x(), self._get_anchor_y())
image_anchor = property(_get_anchor, _set_anchor)
def draw(self):
self._group.set_state()
if self._vertex_list is not None:
self._vertex_list.draw(GL_QUADS)
self._group.unset_state()
Sprite.supported_classes = Sprite
class NotifierSprite(Sprite):
def __init__(self, image, position=(0, 0), rotation=0, scale=1, opacity=255,
color=(255, 255, 255), anchor=None):
self._observers = defaultdict(set)
self._initialized = False
super(NotifierSprite, self).__init__(
image, position, rotation, scale, opacity, color, anchor)
self._initialized = True
def register(self, observer, attribute):
self._observers[attribute].add(observer)
def unregister(self, observer, attribute):
self._observers[attribute].remove(observer)
def notifier_property(f):
prop_name = f.__name__
prop_attr = '_' + prop_name
def fget(self):
value = getattr(self, prop_attr)
return value
def fset(self, value):
setattr(self, prop_attr, value)
self._notify(prop_name)
return property(fget, fset)
    # NOTE: x and y are handled as special cases because we need
    # to override properties defined by the pyglet Sprite base class
def _set_x(self, x):
super(NotifierSprite, self)._set_x(x)
if self._initialized:
self._notify('x')
x = property(lambda self: self._x, _set_x)
def _set_y(self, y):
super(NotifierSprite, self)._set_y(y)
if self._initialized:
self._notify('y')
y = property(lambda self: self._y, _set_y)
def _set_position(self, position):
super(NotifierSprite, self)._set_position(position)
if self._initialized:
self._notify('position')
position = property(lambda self: (self.x, self.y), _set_position)
def _set_rotation(self, rotation):
super(NotifierSprite, self)._set_rotation(rotation)
if self._initialized:
self._notify('rotation')
rotation = property(lambda self: self._rotation, _set_rotation)
def _set_scale(self, scale):
super(NotifierSprite, self)._set_scale(scale)
if self._initialized:
self._notify('scale')
scale = property(lambda self: self._scale, _set_scale)
def _set_opacity(self, opacity):
super(NotifierSprite, self)._set_opacity(opacity)
if self._initialized:
self._notify('opacity')
opacity = property(lambda self: self._opacity, _set_opacity)
def _set_color(self, rgb):
super(NotifierSprite, self)._set_color(rgb)
if self._initialized:
self._notify('color')
color = property(lambda self: self._rgb, _set_color)
def _notify(self, attribute):
for observer in self._observers[attribute]:
observer.on_notify(self, attribute)
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import logging
from django_filters.rest_framework import DjangoFilterBackend
from django.utils.decorators import method_decorator
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.views import APIView
from drf_yasg.utils import swagger_auto_schema
from django.db.models import Sum
from ordered_set import OrderedSet
from core.utils.common import get_object_with_check_and_log, int_from_request, bool_from_request
from core.permissions import CanViewTask, CanChangeTask, IsBusiness, CanViewProject, CanChangeProject
from projects.models import Project
from projects.serializers import ProjectSerializer
from tasks.models import Task, Annotation
from data_manager.functions import get_all_columns, get_prepared_queryset
from data_manager.models import View
from data_manager.serializers import ViewSerializer, TaskSerializer, SelectedItemsSerializer
from data_manager.actions import get_all_actions, perform_action
logger = logging.getLogger(__name__)
class TaskPagination(PageNumberPagination):
page_size = 100
page_size_query_param = "page_size"
total_annotations = 0
total_predictions = 0
def paginate_queryset(self, queryset, request, view=None):
self.total_annotations = queryset.aggregate(all_annotations=Sum("total_annotations"))["all_annotations"] or 0
self.total_predictions = queryset.aggregate(all_predictions=Sum("total_predictions"))["all_predictions"] or 0
return super().paginate_queryset(queryset, request, view)
def get_paginated_response(self, data):
return Response(
{
"total_annotations": self.total_annotations,
"total_predictions": self.total_predictions,
"total": self.page.paginator.count,
"tasks": data,
}
)
@method_decorator(name='list', decorator=swagger_auto_schema(
tags=['Data Manager'], operation_summary="List views",
operation_description="List all views for a specific project."))
@method_decorator(name='create', decorator=swagger_auto_schema(
tags=['Data Manager'], operation_summary="Create view",
operation_description="Create a view for a speicfic project."))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(
tags=['Data Manager'], operation_summary="Get view",
operation_description="Get all views for a specific project."))
@method_decorator(name='update', decorator=swagger_auto_schema(
tags=['Data Manager'], operation_summary="Put view",
operation_description="Overwrite view data with updated filters and other information for a specific project."))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
tags=['Data Manager'], operation_summary="Update view",
operation_description="Update view data with additional filters and other information for a specific project."))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
tags=['Data Manager'], operation_summary="Delete view",
operation_description="Delete a view for a specific project."))
class ViewAPI(viewsets.ModelViewSet):
queryset = View.objects.all()
serializer_class = ViewSerializer
filter_backends = [DjangoFilterBackend]
my_tags = ["Data Manager"]
filterset_fields = ["project"]
task_serializer_class = TaskSerializer
def get_permissions(self):
permission_classes = [IsBusiness]
# if self.action in ['update', 'partial_update', 'destroy']:
# permission_classes = [IsBusiness, CanChangeTask]
# else:
# permission_classes = [IsBusiness, CanViewTask]
return [permission() for permission in permission_classes]
def perform_create(self, serializer):
serializer.save(user=self.request.user)
@swagger_auto_schema(tags=['Data Manager'])
@action(detail=False, methods=['delete'])
def reset(self, _request):
"""
delete:
Reset project views
Reset all views for a specific project.
"""
queryset = self.filter_queryset(self.get_queryset())
queryset.all().delete()
return Response(status=204)
@staticmethod
def evaluate_predictions(tasks):
# call machine learning api and format response
for task in tasks:
project = task.project
if not project.show_collab_predictions:
return
for ml_backend in project.ml_backends.all():
ml_backend.predict_one_task(task)
def get_task_queryset(self, request, view):
return Task.prepared.all(prepare_params=view.get_prepare_tasks_params())
@swagger_auto_schema(tags=['Data Manager'], responses={200: task_serializer_class(many=True)})
@action(detail=True, methods=["get"])
def tasks(self, request, pk=None):
"""
get:
Get task list for view
Retrieve a list of tasks with pagination for a specific view using filters and ordering.
"""
view = self.get_object()
queryset = self.get_task_queryset(request, view)
context = {'proxy': bool_from_request(request.GET, 'proxy', True), 'resolve_uri': True, 'request': request}
# paginated tasks
self.pagination_class = TaskPagination
page = self.paginate_queryset(queryset)
if page is not None:
self.evaluate_predictions(page)
serializer = self.task_serializer_class(page, many=True, context=context)
return self.get_paginated_response(serializer.data)
# all tasks
self.evaluate_predictions(queryset)
serializer = self.task_serializer_class(queryset, many=True, context=context)
return Response(serializer.data)
@swagger_auto_schema(tags=['Data Manager'], methods=["get", "post", "delete", "patch"])
@action(detail=True, url_path="selected-items", methods=["get", "post", "delete", "patch"])
def selected_items(self, request, pk=None):
"""
get:
Get selected items
Retrieve selected tasks for a specified view.
post:
Overwrite selected items
Overwrite the selected items with new data.
patch:
Add selected items
Add selected items to a specific view.
delete:
Delete selected items
Delete selected items from a specific view.
"""
view = self.get_object()
# GET: get selected items from tab
if request.method == "GET":
serializer = SelectedItemsSerializer(view.selected_items)
return Response(serializer.data)
data = request.data
serializer = SelectedItemsSerializer(data=data, context={"view": view, "request": request})
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
# POST: set whole
if request.method == "POST":
view.selected_items = data
view.save()
return Response(serializer.validated_data, status=201)
selected_items = view.selected_items
if selected_items is None:
selected_items = {"all": False, "included": []}
key = "excluded" if data["all"] else "included"
left = OrderedSet(selected_items.get(key, []))
right = OrderedSet(data.get(key, []))
# PATCH: set particular with union
if request.method == "PATCH":
# make union
result = left | right
view.selected_items = selected_items
view.selected_items[key] = list(result)
view.save(update_fields=["selected_items"])
return Response(view.selected_items, status=201)
# DELETE: delete specified items
if request.method == "DELETE":
result = left - right
view.selected_items[key] = list(result)
view.save(update_fields=["selected_items"])
return Response(view.selected_items, status=204)
class TaskAPI(APIView):
# permission_classes = [IsBusiness, CanViewTask]
permission_classes = [IsBusiness]
def get_serializer_class(self):
return TaskSerializer
@swagger_auto_schema(tags=["Data Manager"])
def get(self, request, pk):
"""
get:
Task by ID
Retrieve a specific task by ID.
"""
queryset = Task.prepared.get(id=pk)
context = {
'proxy': bool_from_request(request.GET, 'proxy', True),
'resolve_uri': True,
'completed_by': 'full',
'request': request
}
serializer = self.get_serializer_class()(queryset, many=False, context=context)
return Response(serializer.data)
class ProjectColumnsAPI(APIView):
# permission_classes = [IsBusiness, CanViewProject]
permission_classes = [IsBusiness, ]
@swagger_auto_schema(tags=["Data Manager"])
def get(self, request):
"""
get:
Get data manager columns
Retrieve the data manager columns available for the tasks in a specific project.
"""
pk = int_from_request(request.GET, "project", 1)
project = get_object_with_check_and_log(request, Project, pk=pk)
self.check_object_permissions(request, project)
data = get_all_columns(project)
return Response(data)
class ProjectStateAPI(APIView):
# permission_classes = [IsBusiness, CanViewProject]
permission_classes = [IsBusiness, ]
@swagger_auto_schema(tags=["Data Manager"])
def get(self, request):
"""
get:
Project state
Retrieve the project state for data manager.
"""
pk = int_from_request(request.GET, "project", 1) # replace 1 to None, it's for debug only
project = get_object_with_check_and_log(request, Project, pk=pk)
self.check_object_permissions(request, project)
data = ProjectSerializer(project).data
data.update(
{
"can_delete_tasks": True,
"can_manage_annotations": True,
"can_manage_tasks": True,
"source_syncing": False,
"target_syncing": False,
"task_count": project.tasks.count(),
"annotation_count": Annotation.objects.filter(task__project=project).count(),
'config_has_control_tags': len(project.get_control_tags_from_config()) > 0
}
)
return Response(data)
class ProjectActionsAPI(APIView):
# permission_classes = [IsBusiness, CanChangeProject]
permission_classes = [IsBusiness, ]
def get_permissions(self):
if self.request.method == 'POST':
permission_classes = [IsBusiness, CanChangeProject]
else:
permission_classes = [IsBusiness, CanViewProject]
return [permission() for permission in permission_classes]
@swagger_auto_schema(tags=["Data Manager"])
def get(self, request):
"""
get:
Get actions
Retrieve all the registered actions with descriptions that data manager can use.
"""
pk = int_from_request(request.GET, "project", 1) # replace 1 to None, it's for debug only
project = get_object_with_check_and_log(request, Project, pk=pk)
self.check_object_permissions(request, project)
params = {
'can_delete_tasks': True,
'can_manage_annotations': True,
'experimental_feature': False
}
return Response(get_all_actions(params))
@swagger_auto_schema(tags=["Data Manager"])
def post(self, request):
"""
post:
Post actions
Perform an action with the selected items from a specific view.
"""
pk = int_from_request(request.GET, "project", None)
project = get_object_with_check_and_log(request, Project, pk=pk)
self.check_object_permissions(request, project)
queryset = get_prepared_queryset(request, project)
# no selected items on tab
if not queryset.exists():
response = {'detail': 'No selected items for specified view'}
return Response(response, status=404)
# wrong action id
action_id = request.GET.get('id', None)
if action_id is None:
response = {'detail': 'No action id "' + str(action_id) + '", use ?id=<action-id>'}
return Response(response, status=422)
# perform action and return the result dict
kwargs = {'request': request} # pass advanced params to actions
result = perform_action(action_id, project, queryset, **kwargs)
code = result.pop('response_code', 200)
return Response(result, status=code)
|
import sys
from notifier.util import *
from notifier.providers import PrintNotify
def init(**providers):
if 'print' not in providers:
providers['print'] = PrintNotify()
provider = sys.argv[1] if len(sys.argv) > 1 else 'print'
if provider not in providers:
exit('Unknown provider. Known providers are: {}'.format(', '.join(providers.keys())))
return providers[provider]
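# Minimal usage sketch (assumes this module is run as a script; only PrintNotify is known from this file):
# provider = init()                      # no CLI argument -> falls back to the built-in 'print' provider
# provider = init(print=PrintNotify())   # explicit override of the default provider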
|
import json
import csv
import sys
import os
import datetime
import xlrd
def list_add(a, b):
c = []
for i in range(len(a)):
c.append(a[i] + b[i])
return c
def list_sub(a, b):
c = []
for i in range(len(a)):
c.append(a[i] - b[i])
return c
def excel_read(name):
file = xlrd.open_workbook(name)
sheet = file.sheet_by_index(0)
rows = sheet.nrows
cols = sheet.ncols
all_content = []
for i in range(rows):
all_content.append(sheet.row_values(i))
return all_content
def csv_read(name):
rows = []
try:
with open(name, 'r') as f:
h = csv.reader(f)
for a in h:
rows.append(a)
    except OSError:
        # missing or unreadable file -> return an empty list
        pass
return rows
def csv_write(name, rows):
with open(name, 'w') as f:
h = csv.writer(f)
for i in rows:
h.writerow(i)
def json_read(name):
rows = []
try:
with open(name, 'r') as f:
rows = json.load(f)
    except (OSError, ValueError):
        # missing file or invalid JSON -> return an empty list
        pass
return rows
def json_write(name, rows):
    # write rows (a list of dicts) as hand-formatted JSON, one object per line;
    # numeric values are coerced to int and strings are quoted
    with open(name, 'w') as f:
t1 = []
for a in rows:
t2 = []
for b in a:
t2.append('"' + b + '":' + ('"' + a[b] + '"' if isinstance(a[b], str) else str(int(a[b]))))
t1.append(' {' + ','.join(t2) + '}')
f.write('[\n' + ',\n'.join(t1) + '\n]')
__s_date = datetime.date(1899, 12, 31).toordinal() - 1
def getdate(date):
if isinstance(date, float):
date = int(date)
d = datetime.date.fromordinal(__s_date + date)
return d
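# Worked example (illustrative): this converts an Excel serial day number to a
# date; the offset accounts for Excel's 1900-based epoch (including its
# fictitious 1900-02-29), so getdate(43831) == datetime.date(2020, 1, 1).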
def fetch_safe(d, k, i):
if k not in d:
return 0
return 0 if isinstance(d[k][i], str) else float(d[k][i])
def rrr_xlsx_csv():
b = excel_read('res/rrr.xlsx')
c = []
for i in range(len(b)):
if i > 1:
a = b[i]
t = []
t.append(getdate(a[0]).strftime("%Y-%m-%d"))
t.append(getdate(a[1]).strftime("%Y-%m-%d"))
t.append(a[2])
t.append(a[3])
t.append(a[4])
t.append(a[5])
t.append(a[6])
t.append(a[7])
c.append(t)
c.sort(key=lambda entry: entry[0] + entry[1], reverse=False)
d = []
for i in range(len(c)):
d.append(','.join(a if isinstance(a, str) else str(a) for a in c[i]))
with open('rrr.csv', 'w') as f:
f.write('\n'.join(d))
def rrr_csv_json():
c = csv_read('rrr.csv')
d = []
for i in range(len(c)):
a = c[i]
t = [
'"公布时间":"{}"'.format(a[0]),
'"生效时间":"{}"'.format(a[1]),
'"大型金融机构":{}'.format(a[3]),
'"中小金融机构":{}'.format(a[6]),
]
d.append(' {' + ','.join(t) + '}')
with open('rrr.json', 'w') as f:
f.write('[\n' + ',\n'.join(d) + '\n]')
def 存款储备金率():
rrr_xlsx_csv()
rrr_csv_json()
def to_json(d, e, json_file_name):
c = json_read(json_file_name)
year = int(d["Item"][0])
for i in range(12):
date = str(year) + '-' + '{:02}'.format(i + 1)
if isinstance(d[e][i], str):
break
t = {}
t["月份"] = date
for a in d:
if a != "Item":
t[a] = 0 if isinstance(d[a][i], str) else float(d[a][i])
for j in range(len(c)):
if c[j]["月份"] == date:
c[j] = t
break
else:
c.append(t)
c.sort(key=lambda entry: entry["月份"], reverse=False)
json_write(json_file_name, c)
def to_csv(d, e, cols, csv_file_name):
c = csv_read(csv_file_name)
year = int(d["Item"][0])
for i in range(12):
date = str(year) + '-' + '{:02}'.format(i + 1)
if isinstance(d[e][i], str):
break
t = [ date ]
for a in cols:
t.append(int(fetch_safe(d, a, i)))
for j in range(len(c)):
if c[j][0] == date:
c[j] = t
break
else:
c.append(t)
c.sort(key=lambda entry: entry[0], reverse=False)
csv_write(csv_file_name, c)
def 货币供应量_xlsx(filename):
b = excel_read(filename)
d = {"Item": [], "M0": [], "M1": [], "M2": []}
for i, a in enumerate(b):
if '项目 Item' in a[0]:
d["Item"] = a[3:]
elif '流通中货币(M0)' in a[2]:
d["M0"] = a[3:]
elif '货币(M1)' in a[1]:
d["M1"] = a[3:]
elif '货币和准货币(M2)' in a[0]:
d["M2"] = a[3:]
return d
def 货币供应量(filename):
d = 货币供应量_xlsx(filename)
to_csv(d, 'M2', ['M0', 'M1', 'M2'], 'mmm.csv')
to_json(d, 'M2', 'mmm.json')
def has_it(d, s):
dd = d.upper().replace(' ', ' ').replace('\u3000', ' ')
ss = s.upper().replace(' ', ' ').replace('\u3000', ' ')
d1 = dd.split(' ')
s1 = ss.split(' ')
for a in s1:
if a not in d1:
return False
d2 = dd.replace(' ', '')
s2 = ss.replace(' ', '')
if s2 not in d2:
return False
return True
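# Worked example (illustrative): has_it('国外资产 Foreign Assets', '国外资产') is True,
# while has_it('其他国外资产 Other Foreign Assets', '国外资产') is False, because every
# space-separated token of the needle must appear as a whole token in the row
# header before the substring check is applied.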
def 货币当局资产负债表_xlsx(filename):
b = excel_read(filename)
d = {}
for i, a in enumerate(b):
if has_it(a[0], '项目'):
d["Item"] = a[1:]
elif has_it(a[0], '国外资产 Foreign Assets'):
d["国外资产"] = a[1:]
elif has_it(a[0], '外汇'):
d["外汇"] = a[1:]
elif has_it(a[0], '货币黄金'):
d["货币黄金"] = a[1:]
elif has_it(a[0], '其他国外资产 Other Foreign Assets'):
d["其他国外资产"] = a[1:]
elif has_it(a[0], '对政府债权'):
d["对政府债权"] = a[1:]
elif has_it(a[0], '其中:中央政府'):
d["中央政府"] = a[1:]
elif has_it(a[0], '对其他存款性公司债权'):
d["对其他存款性公司债权"] = a[1:]
elif has_it(a[0], '对其他金融性公司债权') or has_it(a[0], '对其他金融机构债权'):
d["对其他金融性公司债权"] = a[1:]
elif has_it(a[0], '对非金融性部门债权') or has_it(a[0], '对非金融性公司债权') or has_it(a[0], '非金融机构债权'):
d["对非金融性部门债权"] = a[1:]
elif has_it(a[0], '其他资产'):
d["其他资产"] = a[1:]
elif has_it(a[0], '总资产'):
d["总资产"] = a[1:]
elif has_it(a[0], '储备货币'):
d["储备货币"] = a[1:]
elif has_it(a[0], '货币发行'):
d["货币发行"] = a[1:]
elif has_it(a[0], '金融性公司存款 Deposits of Financial Corporations') or has_it(a[0], '金融机构存款 Deposits of Financial Corporations'):
d["金融性公司存款"] = a[1:]
elif has_it(a[0], '其他存款性公司存款'):  # included in "金融性公司存款" (deposits of financial corporations)
d["其他存款性公司存款"] = a[1:]
elif has_it(a[0], '其他金融性公司存款') or has_it(a[0], '其他金融机构'):  # included in "金融性公司存款" (deposits of financial corporations)
d["其他金融性公司存款"] = a[1:]
elif has_it(a[0], '非金融机构存款') or has_it(a[0], '非金融性公司存款'):
d["非金融机构存款"] = a[1:]
elif has_it(a[0], '不计入储备货币的金融性公司存款'):
d["不计入储备货币的金融性公司存款"] = a[1:]
elif has_it(a[0], '发行债券'):
d["发行债券"] = a[1:]
elif has_it(a[0], '国外负债'):
d["国外负债"] = a[1:]
elif has_it(a[0], '政府存款'):
d["政府存款"] = a[1:]
elif has_it(a[0], '自有资金'):
d["自有资金"] = a[1:]
elif has_it(a[0], '其他负债'):
d["其他负债"] = a[1:]
elif has_it(a[0], '总负债'):
d["总负债"] = a[1:]
else:
print(a[0])
return d
def 货币当局资产负债表(filename):
print(filename)
d = 货币当局资产负债表_xlsx(filename)
if len(d) > 0:
t = ["国外资产", "外汇", "货币黄金", "其他国外资产", "对政府债权", "中央政府", "对其他存款性公司债权", "对其他金融性公司债权", "对非金融性部门债权", "其他资产", "总资产", "储备货币", "货币发行", "金融性公司存款", "其他存款性公司存款", "其他金融性公司存款", "非金融机构存款", "不计入储备货币的金融性公司存款", "发行债券", "国外负债", "政府存款", "自有资金", "其他负债", "总负债"]
to_csv(d, '总资产', t, 'balance1.csv')
to_json(d, '总资产', 'balance1.json')
def 其他存款性公司资产负债表_xlsx(filename):
b = excel_read(filename)
d = {}
for i, a in enumerate(b):
if '项目' in a[0] and 'Item' in a[0]:
d["Item"] = a[1:]
elif has_it(a[0], '国外资产 Foreign Assets'):
d["国外资产"] = a[1:]
elif has_it(a[0], '储备资产 Reserve Assets'):
d["储备资产"] = a[1:]
elif has_it(a[0], '准备金存款 Deposits with Central Bank'):
d["准备金存款"] = a[1:]
elif has_it(a[0], '库存现金 Cash in Vault'):
d["库存现金"] = a[1:]
elif has_it(a[0], '对政府债权'):
d["对政府债权"] = a[1:]
elif has_it(a[0], '其中:中央政府'):
d["对中央政府债权"] = a[1:]
elif has_it(a[0], '对中央银行债权') or has_it(a[0], '央行债券'):
d["对中央银行债权"] = a[1:]
elif has_it(a[0], '对其他存款性公司债权'):
d["对其他存款性公司债权"] = a[1:]
elif has_it(a[0], '对其他金融机构债权') or has_it(a[0], '对其他金融性公司债权'):
d["对其他金融机构债权"] = a[1:]
elif has_it(a[0], '对非金融机构债权') or has_it(a[0], '对非金融性公司债权'):
d["对非金融机构债权"] = a[1:]
elif has_it(a[0], '对其他居民部门债权'):
d["对其他居民部门债权"] = a[1:]
elif has_it(a[0], '其他资产 Other Assets'):
d["其他资产"] = a[1:]
elif has_it(a[0], '总资产 Total Assets'):
d["总资产"] = a[1:]
elif has_it(a[0], '对非金融机构及住户负债'):
d["对非金融机构及住户负债"] = a[1:]
elif has_it(a[0], '纳入广义货币的存款 Deposits Included'):
d["纳入广义货币的存款"] = a[1:]
elif has_it(a[0], '单位活期存款') or has_it(a[0], '企业活期存款'):
d["单位活期存款"] = a[1:]
elif has_it(a[0], '单位定期存款') or has_it(a[0], '企业定期存款'):
d["单位定期存款"] = a[1:]
elif has_it(a[0], '个人存款 Personal Deposits') or has_it(a[0], '居民储蓄存款 Saving Deposits'):
d["个人存款"] = a[1:]
elif has_it(a[0], '不纳入广义货币的存款 Deposits Excluded'):
d["不纳入广义货币的存款"] = a[1:]
elif has_it(a[0], '可转让存款 Transferable Deposits'):
d["可转让存款"] = a[1:]
elif has_it(a[0], '其他存款 Other Deposits'):
d["其他存款"] = a[1:]
elif has_it(a[0], '其他负债存款'):
d["其他负债存款"] = a[1:]
elif has_it(a[0], '对中央银行负债'):
d["对中央银行负债"] = a[1:]
elif has_it(a[0], '对其他存款性公司负债'):
d["对其他存款性公司负债"] = a[1:]
elif has_it(a[0], '对其他金融性公司负债'):
d["对其他金融性公司负债"] = a[1:]
elif has_it(a[0], '其中:计入广义货币的存款'):
d["计入广义货币的存款"] = a[1:]
elif has_it(a[0], '国外负债 Foreign Liabilities'):
d["国外负债"] = a[1:]
elif has_it(a[0], '债券发行 Bond Issue'):
d["债券发行"] = a[1:]
elif has_it(a[0], '实收资本 Paid-in Capital'):
d["实收资本"] = a[1:]
elif has_it(a[0], '其他负债 Other Liabilities'):
d["其他负债"] = a[1:]
elif has_it(a[0], '总负债 Total Liabilities'):
d["总负债"] = a[1:]
else:
print(a[0])
return d
def 其他存款性公司资产负债表(filename):
print(filename)
d = 其他存款性公司资产负债表_xlsx(filename)
if len(d) > 0:
t = ["国外资产", "储备资产", "准备金存款", "库存现金", "对政府债权", "对中央政府债权", "对中央银行债权", "对其他存款性公司债权", "对其他金融机构债权", "对非金融机构债权", "对其他居民部门债权", "其他资产", "总资产", "对非金融机构及住户负债", "纳入广义货币的存款", "单位活期存款", "单位定期存款", "个人存款", "不纳入广义货币的存款", "可转让存款", "其他存款", "其他负债存款", "对中央银行负债", "对其他存款性公司负债", "对其他金融性公司负债", "计入广义货币的存款", "国外负债", "债券发行", "实收资本", "其他负债", "总负债"]
to_csv(d, '总资产', t, 'balance2.csv')
to_json(d, '总资产', 'balance2.json')
def 金融机构人民币信贷收支表_c_xlsx(filename):
b = excel_read(filename)
d = {}
d["Item"] = b[6 - 1][1:]
d["各项存款"] = b[9 - 1][1:]
d["非金融企业存款"] = b[10 - 1][1:]
d["非金融企业定期及其他存款"] = d["非金融企业存款"]
d["财政性存款"] = b[11 - 1][1:]
d["机关团体存款"] = b[12 - 1][1:]
d["住户存款"] = b[13 - 1][1:]
d["住户活期存款"] = b[14 - 1][1:]
d["住户定期及其他存款"] = b[15 - 1][1:]
t1 = b[16 - 1][1:]  # agricultural deposits (农业存款)
t2 = b[17 - 1][1:]  # trust deposits (信托存款)
t3 = b[18 - 1][1:]  # other deposits (其他存款)
d["其他存款"] = list_add(list_add(t1, t2), t3)
d["金融债券"] = b[19 - 1][1:]
d["流通中货币"] = b[20 - 1][1:]
d["对国际金融机构负债"] = b[21 - 1][1:]
d["其他"] = b[22 - 1][1:]
d["资金来源总计"] = b[8 - 1][1:]
d["各项贷款"] = b[24 - 1][1:]
d["股权及其他投资"] = b[34 - 1][1:]
d["黄金占款"] = b[35 - 1][1:]
d["中央银行外汇占款"] = b[36 - 1][1:]
d["在国际金融机构资产"] = b[38 - 1][1:]
d["资金运用总计"] = b[23 - 1][1:]
return d
def 金融机构人民币信贷收支表_d_xlsx(filename):
b = excel_read(filename)
d = {}
d["Item"] = b[6 - 1][1:]
d["各项存款"] = b[9 - 1][1:]
d["非金融企业存款"] = b[10 - 1][1:]
d["非金融企业定期及其他存款"] = d["非金融企业存款"]
d["财政性存款"] = b[11 - 1][1:]
d["机关团体存款"] = b[12 - 1][1:]
d["住户存款"] = b[13 - 1][1:]
d["住户活期存款"] = b[14 - 1][1:]
d["住户定期及其他存款"] = b[15 - 1][1:]
t1 = b[16 - 1][1:]  # agricultural deposits (农业存款)
t2 = b[17 - 1][1:]  # trust deposits (信托存款)
t3 = b[18 - 1][1:]  # other deposits (其他存款)
d["其他存款"] = list_add(list_add(t1, t2), t3)
d["金融债券"] = b[19 - 1][1:]
d["流通中货币"] = b[20 - 1][1:]
d["对国际金融机构负债"] = b[21 - 1][1:]
d["其他"] = b[22 - 1][1:]
d["资金来源总计"] = b[8 - 1][1:]
d["各项贷款"] = b[24 - 1][1:]
d["股权及其他投资"] = b[34 - 1][1:]
d["黄金占款"] = b[35 - 1][1:]
d["中央银行外汇占款"] = b[36 - 1][1:]
d["在国际金融机构资产"] = b[37 - 1][1:]
d["资金运用总计"] = b[23 - 1][1:]
return d
def 金融机构人民币信贷收支表_e_xlsx(filename):
b = excel_read(filename)
d = {}
d["Item"] = b[6 - 1][1:]
d["各项存款"] = b[9 - 1][1:]
d["住户存款"] = b[10 - 1][1:]
d["住户活期存款"] = b[11 - 1][1:]
d["住户定期及其他存款"] = b[12 - 1][1:]
d["非金融企业存款"] = b[14 - 1][1:]
d["非金融企业活期存款"] = b[16 - 1][1:]
d["非金融企业定期及其他存款"] = b[17 - 1][1:]
d["机关团体存款"] = b[18 - 1][1:]
d["财政性存款"] = b[19 - 1][1:]
d["其他存款"] = b[20 - 1][1:]
d["金融债券"] = b[21 - 1][1:]
d["流通中货币"] = b[22 - 1][1:]
d["对国际金融机构负债"] = b[23 - 1][1:]
d["其他"] = b[24 - 1][1:]
d["资金来源总计"] = b[25 - 1][1:]
d["各项贷款"] = b[27 - 1][1:]
d["住户贷款"] = b[28 - 1][1:]
d["住户短期消费贷款"] = b[30 - 1][1:]
d["住户中长期消费贷款"] = b[31 - 1][1:]
d["住户短期经营贷款"] = b[33 - 1][1:]
d["住户中长期经营贷款"] = b[35 - 1][1:]
d["住户短期贷款"] = list_add(d["住户短期消费贷款"], d["住户短期经营贷款"])
d["住户中长期贷款"] = list_add(d["住户中长期消费贷款"], d["住户中长期经营贷款"])
d["企事业单位贷款"] = b[36 - 1][1:]
d["企事业单位短期贷款"] = b[38 - 1][1:]
t1 = b[39 - 1][1:]  # enterprise bill-financing loans (企事业单位票据融资贷款)
d["企事业单位中长期贷款"] = b[40 - 1][1:]
t2 = b[41 - 1][1:]  # other enterprise loans (企事业单位其他贷款)
d["企事业单位其他贷款"] = list_add(t1, t2)
# d["非银行业金融机构贷款"] = b[41 - 1][1:]
d["股权及其他投资"] = b[42 - 1][1:]
d["黄金占款"] = b[43 - 1][1:]
d["中央银行外汇占款"] = b[44 - 1][1:]
d["在国际金融机构资产"] = b[45 - 1][1:]
d["资金运用总计"] = b[46 - 1][1:]
return d
def 金融机构人民币信贷收支表_f_xlsx(filename):
b = excel_read(filename)
d = {}
d["Item"] = b[6 - 1][1:]
d["各项存款"] = b[9 - 1][1:]
d["住户存款"] = b[10 - 1][1:]
d["住户活期存款"] = b[11 - 1][1:]
d["住户定期及其他存款"] = b[12 - 1][1:]
d["非金融企业存款"] = b[13 - 1][1:]
d["非金融企业活期存款"] = b[14 - 1][1:]
d["非金融企业定期及其他存款"] = b[15 - 1][1:]
d["机关团体存款"] = b[16 - 1][1:]
d["财政性存款"] = b[17 - 1][1:]
d["其他存款"] = b[18 - 1][1:]
d["金融债券"] = b[19 - 1][1:]
d["流通中货币"] = b[20 - 1][1:]
d["对国际金融机构负债"] = b[21 - 1][1:]
d["其他"] = b[22 - 1][1:]
d["资金来源总计"] = b[23 - 1][1:]
d["各项贷款"] = b[25 - 1][1:]
d["境内贷款"] = b[26 - 1][1:]
d["住户贷款"] = b[27 - 1][1:]
d["住户短期消费贷款"] = b[29 - 1][1:]
d["住户中长期消费贷款"] = b[30 - 1][1:]
d["住户短期经营贷款"] = b[32 - 1][1:]
d["住户中长期经营贷款"] = b[33 - 1][1:]
d["住户短期贷款"] = list_add(d["住户短期消费贷款"], d["住户短期经营贷款"])
d["住户中长期贷款"] = list_add(d["住户中长期消费贷款"], d["住户中长期经营贷款"])
d["企事业单位贷款"] = b[34 - 1][1:]
d["企事业单位短期贷款"] = b[36 - 1][1:]
t1 = b[37 - 1][1:]  # enterprise bill-financing loans (企事业单位票据融资贷款)
d["企事业单位中长期贷款"] = b[38 - 1][1:]
t2 = b[39 - 1][1:]  # other enterprise loans (企事业单位其他贷款)
d["企事业单位其他贷款"] = list_add(t1, t2)
# d["非银行业金融机构贷款"] = b[38 - 1][1:]
d["境外贷款"] = b[40 - 1][1:]
d["股权及其他投资"] = b[41 - 1][1:]
d["黄金占款"] = b[42 - 1][1:]
d["中央银行外汇占款"] = b[43 - 1][1:]
d["在国际金融机构资产"] = b[44 - 1][1:]
d["资金运用总计"] = b[45 - 1][1:]
return d
def 金融机构人民币信贷收支表_g_xlsx(filename):
b = excel_read(filename)
d = {}
d["Item"] = b[6 - 1][1:]
d["各项存款"] = b[9 - 1][1:]
d["住户存款"] = b[10 - 1][1:]
d["住户活期存款"] = b[11 - 1][1:]
d["住户定期及其他存款"] = b[12 - 1][1:]
d["非金融企业存款"] = b[13 - 1][1:]
d["非金融企业活期存款"] = b[14 - 1][1:]
d["非金融企业定期及其他存款"] = b[15 - 1][1:]
d["机关团体存款"] = b[16 - 1][1:]
d["财政性存款"] = b[17 - 1][1:]
t1 = b[18 - 1][1:]  # other deposits (其他存款)
t2 = b[19 - 1][1:]  # deposits of non-residents (非居民存款)
d["其他存款"] = list_add(t1, t2)
d["金融债券"] = b[20 - 1][1:]
d["流通中货币"] = b[21 - 1][1:]
d["对国际金融机构负债"] = b[22 - 1][1:]
d["其他"] = b[23 - 1][1:]
d["资金来源总计"] = b[24 - 1][1:]
d["各项贷款"] = b[26 - 1][1:]
d["境内贷款"] = b[27 - 1][1:]
d["住户贷款"] = b[28 - 1][1:]
d["住户短期消费贷款"] = b[30 - 1][1:]
d["住户中长期消费贷款"] = b[31 - 1][1:]
d["住户短期经营贷款"] = b[33 - 1][1:]
d["住户中长期经营贷款"] = b[34 - 1][1:]
d["住户短期贷款"] = list_add(d["住户短期消费贷款"], d["住户短期经营贷款"])
d["住户中长期贷款"] = list_add(d["住户中长期消费贷款"], d["住户中长期经营贷款"])
d["企事业单位贷款"] = b[35 - 1][1:]
d["企事业单位短期贷款"] = b[37 - 1][1:]
t1 = b[38 - 1][1:]  # enterprise bill-financing loans (企事业单位票据融资贷款)
t2 = b[39 - 1][1:]  # enterprise financial-leasing loans (企事业单位融资租赁贷款)
t3 = b[40 - 1][1:]  # enterprise advances (企事业单位各项垫款)
d["企事业单位其他贷款"] = list_add(list_add(t1, t2), t3)
d["境外贷款"] = b[41 - 1][1:]
d["债券投资"] = b[42 - 1][1:]
d["股权及其他投资"] = b[43 - 1][1:]
d["黄金占款"] = b[44 - 1][1:]
d["中央银行外汇占款"] = b[45 - 1][1:]
d["在国际金融机构资产"] = b[46 - 1][1:]
d["资金运用总计"] = b[47 - 1][1:]
return d
def 金融机构人民币信贷收支表_k_xlsx(filename):
b = excel_read(filename)
d = {}
d["Item"] = b[6 - 1][1:]
d["各项存款"] = b[9 - 1][1:]
d["境内存款"] = b[10 - 1][1:]
d["住户存款"] = b[11 - 1][1:]
d["住户活期存款"] = b[12 - 1][1:]
d["住户定期及其他存款"] = b[13 - 1][1:]
d["非金融企业存款"] = b[14 - 1][1:]
d["非金融企业活期存款"] = b[15 - 1][1:]
d["非金融企业定期及其他存款"] = b[16 - 1][1:]
d["政府存款"] = b[17 - 1][1:]
d["财政性存款"] = b[18 - 1][1:]
d["机关团体存款"] = b[19 - 1][1:]
d["非银行业金融机构存款"] = b[20 - 1][1:]
d["境外存款"] = b[21 - 1][1:]
d["金融债券"] = b[22 - 1][1:]
d["流通中货币"] = b[23 - 1][1:]
d["对国际金融机构负债"] = b[24 - 1][1:]
d["其他"] = b[25 - 1][1:]
d["资金来源总计"] = b[26 - 1][1:]
d["各项贷款"] = b[28 - 1][1:]
d["境内贷款"] = b[29 - 1][1:]
d["住户贷款"] = b[30 - 1][1:]
d["住户短期贷款"] = b[31 - 1][1:]
d["住户短期消费贷款"] = b[32 - 1][1:]
d["住户短期经营贷款"] = b[33 - 1][1:]
d["住户中长期贷款"] = b[34 - 1][1:]
d["住户中长期消费贷款"] = b[35 - 1][1:]
d["住户中长期经营贷款"] = b[36 - 1][1:]
d["企事业单位贷款"] = b[37 - 1][1:]
d["企事业单位短期贷款"] = b[38 - 1][1:]
d["企事业单位中长期贷款"] = b[39 - 1][1:]
t1 = b[40 - 1][1:]  # enterprise bill-financing loans (企事业单位票据融资贷款)
t2 = b[41 - 1][1:]  # enterprise financial-leasing loans (企事业单位融资租赁贷款)
t3 = b[42 - 1][1:]  # enterprise advances (企事业单位各项垫款)
d["企事业单位其他贷款"] = list_add(list_add(t1, t2), t3)
d["非银行业金融机构贷款"] = b[43 - 1][1:]
d["境外贷款"] = b[44 - 1][1:]
d["债券投资"] = b[45 - 1][1:]
d["股权及其他投资"] = b[46 - 1][1:]
d["黄金占款"] = b[47 - 1][1:]
d["中央银行外汇占款"] = b[48 - 1][1:]
d["在国际金融机构资产"] = b[49 - 1][1:]
d["资金运用总计"] = b[50 - 1][1:]
return d
def 金融机构人民币信贷收支表_m_xlsx(filename):
b = excel_read(filename)
d = {}
d["Item"] = b[6 - 1][1:]
d["各项存款"] = b[9 - 1][1:]
d["境内存款"] = b[10 - 1][1:]
d["住户存款"] = b[11 - 1][1:]
d["住户活期存款"] = b[12 - 1][1:]
d["住户定期及其他存款"] = b[13 - 1][1:]
d["非金融企业存款"] = b[14 - 1][1:]
d["非金融企业活期存款"] = b[15 - 1][1:]
d["非金融企业定期及其他存款"] = b[16 - 1][1:]
d["机关团体存款"] = b[17 - 1][1:]
d["财政性存款"] = b[18 - 1][1:]
d["其他存款"] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
d["非银行业金融机构存款"] = b[19 - 1][1:]
d["境外存款"] = b[20 - 1][1:]
d["金融债券"] = b[21 - 1][1:]
d["流通中货币"] = b[22 - 1][1:]
d["对国际金融机构负债"] = b[23 - 1][1:]
d["其他"] = b[24 - 1][1:]
d["资金来源总计"] = b[25 - 1][1:]
d["各项贷款"] = b[27 - 1][1:]
d["境内贷款"] = b[28 - 1][1:]
d["住户贷款"] = b[29 - 1][1:]
d["住户短期贷款"] = b[30 - 1][1:]
d["住户短期消费贷款"] = b[31 - 1][1:]
d["住户短期经营贷款"] = b[32 - 1][1:]
d["住户中长期贷款"] = b[33 - 1][1:]
d["住户中长期消费贷款"] = b[34 - 1][1:]
d["住户中长期经营贷款"] = b[35 - 1][1:]
d["企事业单位贷款"] = b[36 - 1][1:]
d["企事业单位短期贷款"] = b[37 - 1][1:]
d["企事业单位中长期贷款"] = b[38 - 1][1:]
t1 = b[39 - 1][1:]  # enterprise bill-financing loans (企事业单位票据融资贷款)
t2 = b[40 - 1][1:]  # enterprise financial-leasing loans (企事业单位融资租赁贷款)
t3 = b[41 - 1][1:]  # enterprise advances (企事业单位各项垫款)
d["企事业单位其他贷款"] = list_add(list_add(t1, t2), t3)
d["非银行业金融机构贷款"] = b[42 - 1][1:]
d["境外贷款"] = b[43 - 1][1:]
d["债券投资"] = b[44 - 1][1:]
d["股权及其他投资"] = b[45 - 1][1:]
d["黄金占款"] = b[46 - 1][1:]
d["中央银行外汇占款"] = b[47 - 1][1:]
d["在国际金融机构资产"] = b[48 - 1][1:]
d["资金运用总计"] = b[49 - 1][1:]
return d
def 金融机构人民币信贷收支表(filename):
print(filename)
d = {}
if filename.endswith('m.xls') or filename.endswith('m.xlsx'):
d = 金融机构人民币信贷收支表_m_xlsx(filename)
elif filename.endswith('k.xls') or filename.endswith('k.xlsx'):
d = 金融机构人民币信贷收支表_k_xlsx(filename)
elif filename.endswith('g.xls') or filename.endswith('g.xlsx'):
d = 金融机构人民币信贷收支表_g_xlsx(filename)
elif filename.endswith('f.xls') or filename.endswith('f.xlsx'):
d = 金融机构人民币信贷收支表_f_xlsx(filename)
elif filename.endswith('e.xls') or filename.endswith('e.xlsx'):
d = 金融机构人民币信贷收支表_e_xlsx(filename)
elif filename.endswith('d.xls') or filename.endswith('d.xlsx'):
d = 金融机构人民币信贷收支表_d_xlsx(filename)
elif filename.endswith('c.xls') or filename.endswith('c.xlsx'):
d = 金融机构人民币信贷收支表_c_xlsx(filename)
if len(d) > 0:
t = ["各项存款", "境内存款", "住户存款", "住户活期存款", "住户定期及其他存款", "非金融企业存款", "非金融企业活期存款", "非金融企业定期及其他存款", "机关团体存款", "财政性存款", "非银行业金融机构存款", "其他存款", "境外存款", "金融债券", "流通中货币", "对国际金融机构负债", "其他", "资金来源总计", "各项贷款", "境内贷款", "住户贷款", "住户短期贷款", "住户短期消费贷款", "住户短期经营贷款", "住户中长期贷款", "住户中长期消费贷款", "住户中长期经营贷款", "企事业单位贷款", "企事业单位短期贷款", "企事业单位中长期贷款", "企事业单位其他贷款", "非银行业金融机构贷款", "境外贷款", "债券投资", "股权及其他投资", "黄金占款", "中央银行外汇占款", "在国际金融机构资产", "资金运用总计"]
to_csv(d, '各项存款', t, 'balance3.csv')
to_json(d, '各项存款', 'balance3.json')
def detect(filename):
file = xlrd.open_workbook(filename)
sheet = file.sheet_by_index(0)
a = sheet.cell_value(0, 0)
if '货币供应量' == a:
货币供应量(filename)
elif '货币当局资产负债表' == a:
货币当局资产负债表(filename)
elif '其他存款性公司资产负债表' == a:
其他存款性公司资产负债表(filename)
elif '金融机构人民币信贷收支表' == a:
金融机构人民币信贷收支表(filename)
else:
print("Not recgonized " + a + " " + filename)
def seek_res():
files = os.listdir('./res')
for file in files:
if file.endswith('.xls') or file.endswith('.xlsx'):
detect(os.path.join('./res', file))
seek_res()
存款储备金率()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unittest import mock
import moolib.broker
class TestBrokerScript:
def test_has_main(self):
assert hasattr(moolib.broker, "main")
@mock.patch("sys.argv", ["broker"])
def test_run(self):
with mock.patch("moolib.Broker") as MockBroker:
instance = MockBroker.return_value
instance.update.side_effect = [
None,
None,
None,
KeyboardInterrupt("Enough"),
]
moolib.broker.main()
MockBroker.assert_called_once()
assert instance.update.call_count == 4
|
"""
The manager class for the FAQ models
"""
from django.conf import settings
from fluent_faq import appsettings
from parler.managers import TranslatableManager, TranslatableQuerySet
__all__ = (
'FaqQuestionManager',
'FaqQuestionQuerySet',
)
class FaqBaseModelQuerySet(TranslatableQuerySet):
"""
The QuerySet for FAQ models.
"""
def parent_site(self, site):
"""
Filter to the given site.
"""
return self.filter(parent_site=site)
def published(self):
"""
Return only published entries for the current site.
"""
if appsettings.FLUENT_FAQ_FILTER_SITE_ID:
qs = self.parent_site(settings.SITE_ID)
else:
qs = self
return qs
def active_translations(self, language_code=None, **translated_fields):
# overwritten to honor our settings instead of the django-parler defaults
language_codes = appsettings.FLUENT_FAQ_LANGUAGES.get_active_choices(language_code)
return self.translated(*language_codes, **translated_fields)
class FaqBaseModelManager(TranslatableManager):
"""
Shared base logic for all FAQ models.
"""
queryset_class = FaqBaseModelQuerySet
def parent_site(self, site):
"""
Filter to the given site.
"""
return self.all().parent_site(site)
def published(self):
"""
Return only published entries for the current site.
"""
return self.all().published()
# Reserve the class names for extension later
class FaqCategoryQuerySet(FaqBaseModelQuerySet):
pass
class FaqCategoryManager(FaqBaseModelManager):
"""
Extra methods attached to ``FaqCategory.objects`` .
"""
queryset_class = FaqCategoryQuerySet
class FaqQuestionQuerySet(FaqBaseModelQuerySet):
pass
class FaqQuestionManager(FaqBaseModelManager):
"""
Extra methods attached to ``FaqQuestion.objects`` .
"""
queryset_class = FaqQuestionQuerySet
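# Usage sketch (illustrative, not from the original module): with a model such
# as FaqQuestion using FaqQuestionManager as its default manager, published
# entries for the current site in the active languages could be fetched with:
#
#   FaqQuestion.objects.published().active_translations()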
|
# -*- coding: utf-8 -*-
"""
PARAMETER POOL FOR SIMULATIONS
All the parameters are stocked in _DPARAM.
None is the default value if the key has no meaning for the parameter
Each parameter is a dictionary, with the following keys :
'NAME': { # KEY THE CODE WILL USE TO CALL IT
'value': 100, # NUMERICAL VALUE
'name': 'time steps',
'com': 'Duration of simulation',# Commentary about what it means
'dimension': 'time', # Physical dimension if relevant
'units': 'y', # Physical units if relevant
'type': None, # Intensive or extensive
'symbol': None, # Plot-friendly name. If None, 'NAME' is used
'group': 'Numerical', # For practical grouping of variables
},
###############################
PARAMETERS :
_PARAMSET : Name of the default parameter set used by the system
_FLATTEN : !!! UNUSED FOR THE MOMENT
_DALLOWED : List of types and dimensions accepted by the system (with None)
_DEF_PARAM : All the information about the available parameters
_dfail : Parameters that couldn't be loaded because they are incomplete
_lkeys : List of attributes necessary for a parameter to be added
_DPARAM : Presets of parameters !!! NOT CODED FOR THE MOMENT
FUNCTIONS :
_check_inputs : Check that the user-provided input has the expected format
get_params : Return a copy of the selected parameter set (see its docstring)
"""
import numpy as np
_PARAMSET = 'v0'
_FLATTEN = True
# ---------
# NUMERICAL
# # -----------------
# # INITIAL CONDITIONS
# _DINIT = {
# ### INTENSIVE VARIABLES
# 'd' : v1*1.,
# 'omega' : v1*p['omega0'],
# 'lambda' : v1*p['lambdamax'],
# 't' : v1*0,
# ### INITIAL EXTENSIVE VARIABLES
# 'Y' : v1*1 , # GDP
# 'N' : v1*1 , # Population
# 'a' : v1*1 , # productivity
# 'p' : v1*1 , # Price
# }
# ### DEDUCED FROM PREVIOUS ONES
# ic['D'] = ic['d']*ic['Y']
# ic['K'] = ic['Y']*p['nu']
# ic['L'] = ic['lambda']*ic['N']
# ic['W'] = ic['omega']*ic['a']
_DALLOWED = {
'dimension': ['time', 'time rate', 'temperature rate'],
'type': ['intensive', 'extensive'],
}
# ##########################################
# PARAMETERS PARAMETERS
_DEF_PARAM = {
# --------------
# Numerical
'Tmax': {
'value': 100,
'name': 'time steps',
'com': 'Duration of simulation',
'dimension': 'time',
'units': 'y',
'type': None,
'symbol': None,
'group': 'Numerical',
},
'Nx': {
'value': 1,
'name': None,
'com': 'Number of similar systems evolving in parallel',
'dimension': None,
'units': None,
'type': None,
'symbol': None,
'group': 'Numerical',
},
'dt': {
'value': 0.01,
'name': None,
'com': 'Time step (fixed timestep method)',
'dimension': 'time',
'units': 't',
'type': None,
'symbol': None,
'group': 'Numerical',
},
'Tstore': {
'value': None, # Dynamically allocated
'name': None,
'com': 'Time between storages (if StorageMode=full, it goes to dt)',
'dimension': 'time',
'units': None,
'type': None,
'symbol': None,
'group': 'Numerical',
},
'Nt': {
'value': None, # Dynamically allocated
'name': None,
'com': 'Number of temporal iterations',
'dimension': None,
'units': None,
'type': None,
'symbol': None,
'group': 'Numerical',
},
'Ns': {
'value': None, # Dynamically allocated
'name': None,
'com': 'Number of elements stored',
'dimension': None,
'units': None,
'type': None,
'symbol': None,
'group': 'Numerical',
},
'verb': {
'value': True,
'name': None,
'com': 'flag indicating whether to print intermediate info',
'dimension': None,
'units': None,
'type': None,
'symbol': None,
'group': 'Numerical'
},
'storage': {
'value': 'full',
'name': None,
'com': 'flag indicating which time steps to store',
'dimension': None,
'units': None,
'type': None,
'symbol': None,
'group': 'Numerical'
},
'save': {
'value': True,
'name': None,
'com': 'flag indicating whether to save output data',
'dimension': None,
'units': None,
'type': None,
'symbol': None,
'group': 'Numerical'
},
# --------------
# Population evolution
'beta': {
'value': 0.025,
'name': None,
'com': 'Rate of population growth',
'dimension': 'time rate',
'units': 'y^{-1}',
'type': 'intensive',
'symbol': r'$\beta$',
'group': 'Population',
},
'alpha': {
'value': 0.02,
'name': None,
'com': 'Rate of productivity increase',
'dimension': 'time rate',
'units': 'y^{-1}',
'type': 'intensive',
'symbol': r'$\alpha$',
'group': 'Population',
},
# --------------
# Capital properties
'delta': {
'value': 0.005,
'name': None,
'com': 'Rate of capital depletion',
'dimension': 'time rate',
'units': 'y^{-1}',
'type': 'intensive',
'symbol': r'$\delta$',
'group': 'Capital',
},
# --------------
# Production
'nu': {
'value': 3,
'name': None,
'com': 'Capital to output ratio',  # !! IN CES it's 1/A !!
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$\nu$',
'group': 'Production',
},
'eta': {
'value': 1000,
'name': None,
'com': '1/(1+substitutability)',  # CES parameter
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': None,
'group': 'Production',
},
'b': {
'value': .5,
'name': None,
'com': 'capital part of the production', # CES parameter
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': None,
'group': 'Production',
},
'z': {
'value': 1,
'name': None,
'com': 'Markup on salary estimation by employer',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': None,
'group': 'Production',
},
# --------------
# INTEREST / Price
'r': {
'value': .03,
'name': None,
'com': 'Interest at the bank',
'dimension': 'time rate',
'units': 'y^{-1}',
'type': 'intensive',
'symbol': None,
'group': 'Prices',
},
'etaP': {
'value': .192,
'name': None,
'com': 'Typical rate for inflation',
'dimension': 'time rate',
'units': 'y^{-1}',
'type': 'intensive',
'symbol': None,
'group': 'Prices',
},
'muP': {
'value': 1.3,
'name': None,
'com': 'Mark-up of price',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': None,
'group': 'Prices',
},
'gammaP': {
'value': 1,
'name': None,
'com': 'Money-illusion',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': None,
'group': 'Prices',
},
# --------------
# PHILLIPS CURVE (employment-salary increase)
'phinul': {
'value': 0.04,
'name': None,
'com': 'Unemployment rate that stops salary increase (no inflation)',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$\phi_0$',
'group': 'Philips',
},
# --------------
# KEEN INVESTMENT FUNCTION (profit-investment function)
'k0': {
'value': -0.0065,
'name': None,
'com': '',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$k_0$',
'group': 'Keen',
},
'k1': {
'value': np.exp(-5),
'name': None,
'com': '',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$k_1$',
'group': 'Keen',
},
'k2': {
'value': 20,
'name': None,
'com': '',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$k_2$',
'group': 'Keen',
},
# --------------
# LINEAR DIVIDENT PROFITS
'div0': {
'value': 0.138,
'name': None,
'com': 'Part of GDP as dividends when pi=0',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$div_0$',
'group': 'Dividends',
},
'div1': {
'value': 0.473,
'name': None,
'com': 'Slope',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$div_1$',
'group': 'Dividends',
},
# --------------
# Coupling Effets (EDP)
'g1': {
'value': .0,
'name': None,
'com': 'GLOBAL EFFECTS OF LAMBDA (Mean field)',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$g_1$',
'group': 'Coupling',
},
'g2': {
'value': .00,
'name': None,
'com': 'WITH NEIGHBORS EFFECTS OF LAMBDA (field)',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$g_2$',
'group': 'Coupling',
},
'muI': {
'value': 0.,
'name': None,
'com': '',
'dimension': None,
'units': "NOTDONEYET",
'type': 'intensive',
'symbol': r'$\mu_I$',
'group': 'Coupling',
},
'muN': {
'value': 0.,
'name': None,
'com': '',
'dimension': None,
'units': "NOTDONEYET",
'type': 'intensive',
'symbol': r'$\mu_N$',
'group': 'Coupling',
},
# --------------
# RELAXATION-BUFFER DYNAMICS
'tauR': {
'value': 2.0,
'name': None,
'com': 'Typical time for recruitment',
'dimension': 'time',
'units': 'y',
'type': 'intensive',
'symbol': r'$\tau_R$',
'group': 'RelaxBuffer',
},
'tauF': {
'value': 0.1,
'name': None,
'com': 'Typical time for firing',
'dimension': 'time',
'units': 'y',
'type': 'intensive',
'symbol': r'$\tau_F$',
'group': 'RelaxBuffer',
},
'tauL': {
'value': 2.,
'name': None,
'com': 'Typical time for employment information',
'dimension': 'time',
'units': 'y',
'type': 'intensive',
'symbol': r'$\tau_L$',
'group': 'RelaxBuffer',
},
'tauK': {
'value': 2.,
'name': None,
'com': 'Typical time on new capital integration',
'dimension': 'time',
'units': 'y',
'type': 'intensive',
'symbol': r'$\tau_K$',
'group': 'RelaxBuffer',
},
# --------------
# GEMMES PARAMETERS
'theta': {
'value': 2.6,
'name': None,
'com': 'Convexity of the abatement cost function',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$\theta$',
'group': 'Gemmes',
},
'dsigma': {
'value': -0.001,
'name': None,
'com': 'Variation rate of the growth of emission intensity',
'dimension': 'time rate',
'units': 'y^{-1}',
'type': 'intensive',
'symbol': r'$\delta_{\sigma}$',
'group': 'Gemmes',
},
'dPBS': {
'value': -0.005,
'name': None,
'com': 'Growth rate of back-stop technology price',
'dimension': 'time rate',
'units': 'y^{-1}',
'type': 'intensive',
'symbol': r'$\delta_{PBS}$',
'group': 'Gemmes',
},
'dEland': {
'value': -0.022,
'name': None,
'com': 'Growth rate of land use change in CO2 emission',
'dimension': 'time rate',
'units': 'y^{-1}',
'type': 'intensive',
'symbol': r'$\delta_{Eland}$',
'group': 'Gemmes',
},
# --------------
# Damage function (on GDP)
# D = 1 - (1 + p['pi1']*T + p['pi2']*T**2 + p['pi3']*T**p['zeta'] )**(-1)
'pi1': {
'value': 0.,
'name': None,
'com': 'Linear temperature impact',
'dimension': 'temperature rate',
'units': 'T^{-1}',
'type': 'intensive',
'symbol': r'$\pi_1$',
'group': 'Damage',
},
'pi2': {
'value': .00236,
'name': None,
'com': 'Quadratic temperature impact',
'dimension': None,
'units': 'T^{-2}',
'type': 'intensive',
'symbol': r'$\pi_2$',
'group': 'Damage',
},
'pi3': {
'value': .00000507,
'name': None,
'com': 'Weitzmann Damage temperature impact',
'dimension': None,
'units': 'T^{-zeta}',
'type': 'intensive',
'symbol': r'$\pi_3$',
'group': 'Damage',
},
'zeta': {
'value': 6.754,
'name': None,
'com': 'Weitzmann impact',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$\zeta$',
'group': 'Damage',
},
'fk': {
'value': 1. / 3.,
'name': None,
'com': 'Fraction of environmental damage',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$f_K$',
'group': 'Damage',
},
# allocated to the stock of capital
# --------------
# Climate model
'Phi12': {
'value': .024,
'name': None,
'com': 'Transfer of carbon from atmosphere to biosphere',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$\phi_{1\rightarrow2}$',
'group': 'Climate',
},
'Phi23': {
'value': .001,
'name': None,
'com': 'Transfer from biosphere to stock',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$\phi_{2\rightarrow3}$',
'group': 'Climate',
},
'C': {
'value': 1 / .098,
'name': None,
'com': 'Heat capacity of the fast climate component',
'dimension': None,
'units': "SI",
'type': 'intensive',
'symbol': r'$C$',
'group': 'Climate',
},
'C0': {
'value': 3.52,
'name': None,
'com': 'Heat capacity of inertial component of climate',
'dimension': None,
'units': "SI",
'type': 'intensive',
'symbol': r'$C_0$',
'group': 'Climate',
},
'gammaHEAT': {
'value': 0.0176,
'name': None,
'com': 'Heat exchange coefficient between layers',
'dimension': None,
'units': None,
'type': 'intensive',
'symbol': r'$\gamma_{heat}$',
'group': 'Climate',
},
'Tsens': {
'value': 3.1,
'name': None,
'com': 'Climate sensitivity (deltaT/log2CO2)',
'dimension': None,
'units': 'T',
'type': 'intensive',
'symbol': r'$T_{sens}$',
'group': 'Climate',
},
'FexoMax': {
'value': 0.7,
'name': None,
'com': 'Maximal exogenous radiative forcing',
'dimension': None,
'units': 'W M^{-2}',
'type': 'intensive',
'symbol': None,
'group': 'Climate',
},
'F2CO2': {
'value': 3.681,
'name': None,
'com': 'Radiative forcing from a doubling of CO2',
'dimension': None,
'units': 'W/m2',
'type': 'intensive',
'symbol': r'$F^2_{CO2}$',
'group': 'Climate',
},
'PopSat': {
'value': 12,
'name': None,
'com': 'Maximal population (billions)',
'dimension': None,
'units': 'Humans',
'type': 'intensive',
'symbol': r'$N_{sat}$',
'group': 'Population',
},
}
# #############################################################################
# #############################################################################
# _DEF_PARAM: Fill in default values and check conformity
# #############################################################################
_dfail = {}
_lkeys = [
'value', 'name', 'com', 'dimension', 'units', 'type', 'symbol', 'group',
]
for k0, v0 in _DEF_PARAM.items():
# Check existence of keys
lout = [ss for ss in _lkeys if ss not in v0.keys()]
if len(lout) > 0:
_dfail[k0] = f"missing keys: {lout}"
continue
# If com is filled but not name, use com to fill name (and vice-versa)
if v0['name'] is None and v0['com'] is not None:
_DEF_PARAM[k0]['name'] = v0['com']
elif v0['name'] is not None and v0['com'] is None:
_DEF_PARAM[k0]['com'] = v0['name']
# Try to spot any typo / mistake
if v0['dimension'] not in _DALLOWED['dimension'] + [None]:
_dfail[k0] = f"Non-conform dimension! ({v0['dimension']})"
if v0['type'] not in _DALLOWED['type'] + [None]:
_dfail[k0] = f"Non-conform type! ({v0['type']})"
if len(_dfail) > 0:
lstr = [f"\t- {k0}: {v0}" for k0, v0 in _dfail.items()]
msg = (
"The following non-conformities have been spotted:\n"
+ "\n".join(lstr)
)
raise Exception(msg)
# #############################################################################
# #############################################################################
# Default pre-sets of parameters
# #############################################################################
_DPARAM = {
'v0': {k0: dict(v0) for k0, v0 in _DEF_PARAM.items()},
'v1': {k0: dict(v0) for k0, v0 in _DEF_PARAM.items()},
'GreatAuthor2019': {k0: dict(v0) for k0, v0 in _DEF_PARAM.items()},
}
# Modify
v0 = 'GreatAuthor2019'
_DPARAM[v0]['b']['value'] = 0.
_DPARAM[v0]['eta']['value'] = 0.192
# #############################################################################
# #############################################################################
# Utilities
# #############################################################################
def _check_inputs(paramset=None):
# paramset
if paramset is None:
paramset = _PARAMSET
c0 = isinstance(paramset, str) and paramset in _DPARAM.keys()
if not c0:
ls = ['\t- {}'.format(kk) for kk in sorted(_DPARAM.keys())]
msg = (
"Arg paramset must be a valid predefined parameter set!\n"
+ "\n".join(ls)
+ "\nYou provided: {}".format(paramset)
)
raise Exception(msg)
return paramset
# #############################################################################
# #############################################################################
# Choose which version of the dict of parameters to use
# #############################################################################
def get_params(paramset=None):
"""
Create a dictionary containing all the parameters necessary for simulation
Their description is in comments.
Parameters
----------
paramset: None / str
Flag indicating which predefined set of parameters to pick
Defaults to 'v0'
flatten: None / bool
Flag indicating whether to flatten the param dict (not yet implemented, see _FLATTEN)
Used for retro-compatibility
Defaults to True
"""
# ------------
# Check inputs
paramset = _check_inputs(
paramset=paramset,
)
# ------------
# Dictionary of parameters (copy to avoid modifying the original)
param = {k0: dict(v0) for k0, v0 in _DPARAM[paramset].items()}
return param
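# Usage sketch (illustrative): fetch the default preset and read values, e.g.
#   params = get_params(paramset='v0')
#   params['Tmax']['value']   # -> 100
#   params['dt']['value']     # -> 0.01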
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-10-15 17:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("archive", "0006_digitizedwork_add_record_id"),
("archive", "0005_add_notes_fields"),
]
operations = []
|
from typing import List
class Solution:
def countBits(self, num: int) -> List[int]:
ans = [0]
offset = 1
for i in range(1, num + 1):
if offset * 2 == i:
offset = i
ans.append(ans[i - offset] + 1)
return ans
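# The recurrence used above: with `offset` the largest power of two <= i,
# ans[i] = ans[i - offset] + 1 (clearing the top set bit leaves a smaller,
# already-computed number). Example (illustrative):
#   Solution().countBits(5) -> [0, 1, 1, 2, 1, 2]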
|
#!/usr/bin/env python
import os
import os.path
from pyraf import iraf
"""
To get familiar with Ellipse:
1. Check the help file for Ellipse, controlpar, samplepar, magpar, geompar
> ecl
> stsdas.analysis.isophote
> help ellipse
> help controlpar
2. See the examples on this page:
http://www.ast.uct.ac.za/~sarblyth/TullyFisher/ellipseEg/EllipseEg.html
3. Read the relevant section of Li, Ho, et al. 2011(CGS-II), and try to run
ellipse in interactive mode on any data
"""
# Define the name of the input and output file
inputImg = "/home/song/work/ellipse/NGC1600_r.fit"
outBin = inputImg.replace(".fit", "_ellipse_1.bin")
outTab = inputImg.replace(".fit", "_ellipse_1.tab")
outCdf = inputImg.replace(".fit", "_ellipse_1.cdf")
# TODO: Check the .pl mask file, which should be
# inputMsk = inputImg.replace(".fit", ".pl")
# Call the STSDAS.ANALYSIS.ISOPHOTE package
iraf.stsdas()
iraf.analysis()
iraf.isophote()
# Define parameters for the ellipse run
# 1. Initial guess of the central X, Y (need to be as accurate as possible)
iraf.ellipse.geompar.x0 = 460.526
iraf.ellipse.geompar.y0 = 464.399
# 2. Initial guess of the ellipticity and PA of the first ISOPHOTE
# Do not need to be very accurate, unless you want to fix them for all
# isophotes and only derive surface brightness
iraf.ellipse.geompar.ellip0 = 0.6003035
iraf.ellipse.geompar.pa0 = -12.10127
# 3. Initial radius for ellipse fitting (the major-axis length of the first
# elliptical isophote); it cannot be too small or too large
iraf.ellipse.geompar.sma0 = 40.48682917785644
# 4. The minimum and maximum radius for the ellipse fitting
iraf.ellipse.geompar.minsma = 0.5571857376098632
iraf.ellipse.geompar.maxsma = 94.98832999420166
# 5. Parameters about the stepsize during the fitting.
# Unless you know what you want, you should normally use a log stepsize instead
# of a linear one; step=0.05 will generate more isophotes than step=0.1, but
# may not help if you want a robust surface brightness profile.
iraf.ellipse.geompar.linear = "no"
iraf.ellipse.geompar.step = 0.1
# 6. Whether to allow Ellipse to re-determine the galaxy center during the
# fitting. In general, it's a good idea to turn this on. If the center you
# provide is accurate enough, the Ellipse results will not deviate from it.
iraf.ellipse.geompar.recenter = "yes"
# 7. The next three parameters control the behavior of the fit
# hcenter = yes/no : Do all the isophotes have the same central X, Y?
# hellip = yes/no : Do all the isophotes have the same ellipticity?
# hpa = yes/no : Do all the isophotes have the same position angle?
# Based on our experience, the formal Ellipse fitting should be done in three
# separate runs
# 1) hcenter=no, hellip=no, hpa=no : Give Ellipse the total freedom to fit
# the isophotes; And take the median/mean central X,Y from inner N
# isophotes, then use these X,Y as the center of the galaxy
# 2) hcenter=yes, hellip=no, hpa=no : Hold the central X, Y to the
# previously determined values; let the ellipticity and position angle be
# free, then extract an appropriate average ellipticity and PA from this
# run
# 3) hcenter=yes, hellip=yes, hpa=yes : Hold the center, and hold the
# ellipticity and PA to the average values determined from the previous run.
# Just extract a robust surface brightness profile using the average
# geometry (see the illustrative sketch after the run-1 settings below)
iraf.ellipse.controlpar.hcenter = "no"
iraf.ellipse.controlpar.hellip = "no"
iraf.ellipse.controlpar.hpa = "no"
# 8. Parameters about the iterations
# minit/maxit: minimum and maximum number of iterations
iraf.ellipse.controlpar.minit = 10
iraf.ellipse.controlpar.maxit = 100
# 9. Threshold for the object locator algorithm
# By lowering this value, the locator becomes less strict.
iraf.ellipse.controlpar.olthresh = 1.00000
# 10. Make sure the Interactive Mode is turned off
iraf.ellipse.controlpar.interactive = "no"
# Check and remove outputs from the previous Ellipse run, or Ellipse will
# report an error (Quite stupid!)
if os.path.exists(outBin):
os.remove(outBin)
if os.path.exists(outTab):
os.remove(outTab)
if os.path.exists(outCdf):
os.remove(outCdf)
# Start the fitting
iraf.ellipse(input=inputImg, output=outBin)  # binary output table; dumped to ASCII below
# TODO: Demonstrate the direct photometry mode using input catalog
# inBin = input_bin_file
## The inBin is a Binary result from previous Ellipse run, and the isophote
## stored in it will overwrite all the above settings. Ellipse will simply
## extract surface brightness profile using these isophote instead of doing any
## fitting
# iraf.ellipse(input=inputImg, output=outBin, inellip=inBin)
# The Ellipse output is a binary table file, which is very hard to deal with
# "Dump" it into a nice ASCII table
iraf.tdump(table=outBin, datafile=outTab, cdfile=outCdf)
os.remove(outCdf)
|
import time
import random
import os.path
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import json
import nltk.data
from Simon import Simon
from Simon.Encoder import Encoder
from Simon.DataGenerator import DataGenerator
from Simon.LengthStandardizer import *
# extract the first N samples from jsonl
def LoadJSONLEmails(N=10000000,datapath=None):
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
with open(datapath) as data_file:
data_JSONL_lines = data_file.readlines()
random.shuffle(data_JSONL_lines)
# visualize body extraction for first email
idx = 0
sample_email = json.loads(data_JSONL_lines[idx])["body"]
print("DEBUG::the current email type being loaded:")
print(datapath)
#print("DEBUG::sample email (whole, then tokenized into sentences):")
#print(sample_email)
sample_email_sentence = tokenizer.tokenize(sample_email)
sample_email_sentence = [elem[-maxlen:] for elem in sample_email_sentence] # truncate
#print(sample_email_sentence)
all_email_df = pd.DataFrame(sample_email_sentence,columns=['Email 0'])
# now, build up pandas dataframe of appropriate format for NK email classifier
for line in data_JSONL_lines:
print(idx)
idx = idx+1
sample_email = ''
content = json.loads(line)
for url in content["urls"]:
sample_email += url + ' '
sample_email += content["body"]
sample_email_sentence = tokenizer.tokenize(sample_email)
sample_email_sentence = [elem[-maxlen:] for elem in sample_email_sentence] #truncate
all_email_df = pd.concat([all_email_df,pd.DataFrame(sample_email_sentence,columns=['Email '+str(idx)])],axis=1)
if idx>=N-1:
break
return pd.DataFrame.from_records(DataLengthStandardizerRaw(all_email_df,max_cells))
# set important parameters
maxlen = 200 # max length of each sentence
max_cells = 100 # maximum number of sentences per email
p_threshold = 0.5 # decision boundary
# Extract enron/419 scam/JPL data from JSONL format
N = 7000 # number of samples to draw
datapath = "data/enron.jsonl"
enron_data = LoadJSONLEmails(N=N,datapath=datapath)
# N_fp = 1000 # number of samples to draw
# datapath = "data/FalsePositive.jsonl"
# falsepositives = LoadJSONLEmails(N=N_fp,datapath=datapath)
N_spam = 1000 # number of samples to draw
datapath = "data/nigerian.jsonl"
nigerian_prince = LoadJSONLEmails(N=N_spam,datapath=datapath)
datapath = "data/Malware.jsonl"
malware = LoadJSONLEmails(N=N_spam,datapath=datapath)
datapath = "data/CredPhishing.jsonl"
credphishing = LoadJSONLEmails(N=N_spam,datapath=datapath)
datapath = "data/PhishTraining.jsonl"
phishtraining = LoadJSONLEmails(N=N_spam,datapath=datapath)
datapath = "data/Propaganda.jsonl"
propaganda = LoadJSONLEmails(N=N_spam,datapath=datapath)
datapath = "data/SocialEng.jsonl"
socialeng = LoadJSONLEmails(N=N_spam,datapath=datapath)
datapath = "data/Spam.jsonl"
spam = LoadJSONLEmails(N=N_spam,datapath=datapath)
# keep dataset approximately balanced
raw_data = np.asarray(enron_data.sample(n=N,replace=False,axis=1).ix[:max_cells-1,:])
header = [['friend'],]*N
print(raw_data.shape)
# raw_data = np.column_stack((raw_data,np.asarray(falsepositives.ix[:max_cells-1,:].sample(n=N_fp,replace=True,axis=1))))
# header.extend([['friend'],]*N_fp)
raw_data = np.column_stack((raw_data,np.asarray(nigerian_prince.ix[:max_cells-1,:].sample(n=N_spam,replace=False,axis=1))))
header.extend([['foe'],]*N_spam)
print(raw_data.shape)
raw_data = np.column_stack((raw_data,np.asarray(malware.ix[:max_cells-1,:].sample(n=N_spam,replace=False,axis=1))))
header.extend([['foe'],]*N_spam)
print(raw_data.shape)
raw_data = np.column_stack((raw_data,np.asarray(credphishing.ix[:max_cells-1,:].sample(n=N_spam,replace=False,axis=1))))
header.extend([['foe'],]*N_spam)
print(raw_data.shape)
print(phishtraining.shape)
raw_data = np.column_stack((raw_data,np.asarray(phishtraining.ix[:max_cells-1,:].sample(n=N_spam,replace=False,axis=1))))
header.extend([['foe'],]*N_spam)
print(raw_data.shape)
raw_data = np.column_stack((raw_data,np.asarray(propaganda.ix[:max_cells-1,:].sample(n=N_spam,replace=True,axis=1))))
header.extend([['foe'],]*N_spam)
print(raw_data.shape)
raw_data = np.column_stack((raw_data,np.asarray(socialeng.ix[:max_cells-1,:].sample(n=N_spam,replace=False,axis=1))))
header.extend([['foe'],]*N_spam)
print(raw_data.shape)
raw_data = np.column_stack((raw_data,np.asarray(spam.ix[:max_cells-1,:].sample(n=N_spam,replace=False,axis=1))))
header.extend([['foe'],]*N_spam)
print("DEBUG::final labeled data shape:")
print(raw_data.shape)
print(raw_data)
# transpose the data, make everything lower case string
mini_batch = 1000 # because of some memory issues, the next step needs to be done in stages
start = time.time()
tmp = np.char.lower(np.transpose(raw_data[:,:mini_batch]).astype('U'))
tmp_header = header[:mini_batch]
for i in range(1,int(raw_data.shape[1]/mini_batch)):
print("DEBUG::current shape of loaded text (data,header)")
print(tmp.shape)
print(len(tmp_header))
try:
tmp = np.vstack((tmp,np.char.lower(np.transpose(raw_data[:,i*mini_batch:(i+1)*mini_batch]).astype('U'))))
tmp_header.extend(header[i*mini_batch:(i+1)*mini_batch])
except:
print("failed string standardization on batch number "+str(i))
header = tmp_header
end = time.time()
print("Time for casting data as lower case string is %f sec"%(end-start))
raw_data = tmp
# save data for future experiments
f = open('raw_data', 'wb')
np.save(f, raw_data)
f.close()
f = open('header', 'wb')
np.save(f, header)
f.close()
# load data saved above (note: np.save was given open file objects, so the
# files have no .npy extension)
#raw_data = np.load('raw_data', allow_pickle=True)
#header = np.load('header', allow_pickle=True)
# set up appropriate data encoder
Categories = ['friend','foe']
encoder = Encoder(categories=Categories)
encoder.process(raw_data, max_cells)
# encode the data
X, y = encoder.encode_data(raw_data, header, maxlen)
# setup classifier, compile model appropriately
Classifier = Simon(encoder=encoder)
data = Classifier.setup_test_sets(X, y)
model = Classifier.generate_model(maxlen, max_cells, 2,activation='softmax')
model.compile(loss='categorical_crossentropy',optimizer='adam', metrics=['binary_accuracy'])
# train model
batch_size = 64
nb_epoch = 20
checkpoint_dir = "checkpoints/"
if not os.path.isdir(checkpoint_dir):
os.makedirs(checkpoint_dir)
start = time.time()
history = Classifier.train_model(batch_size, checkpoint_dir, model, nb_epoch, data)
end = time.time()
print("Time for training is %f sec"%(end-start))
config = { 'encoder' : encoder,
'checkpoint' : Classifier.get_best_checkpoint(checkpoint_dir) }
Classifier.save_config(config, checkpoint_dir)
Classifier.plot_loss(history)
Classifier.evaluate_model(max_cells, model, data, encoder, p_threshold)
# write evaluation metrics to file for comparison
if not os.path.exists('experiment_metrics.txt'):
file = open('experiment_metrics.txt', 'w')
file.close()
file = open('experiment_metrics.txt', 'a')
file.write("baseline classifier with urls: %0.3f\n" % (history.history['val_binary_accuracy']))
file.flush()
file.close()
'''
# do p_threshold ROC tuning on the test data to see if you can improve it
start = time.time()
p_thresholds = np.linspace(0.01,0.99,num=20)
TPR_arr,FPR_arr = Classifier.tune_ROC_metrics(max_cells, model, data, encoder,p_thresholds)
print("DEBUG::True positive rate w.r.t p_threshold array:")
print(TPR_arr)
print("DEBUG::False positive rate w.r.t p_threshold array:")
print(FPR_arr)
# plot
plt.figure()
plt.subplot(311)
plt.plot(p_thresholds,TPR_arr)
plt.xlabel('p_threshold')
plt.ylabel('TPR')
plt.subplot(312)
plt.xlabel('p_threshold')
plt.ylabel('FPR')
plt.plot(p_thresholds,FPR_arr)
plt.subplot(313)
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.plot(FPR_arr,TPR_arr)
plt.show()
# timing info
end = time.time()
print("Time for hyperparameter (per-class threshold) is %f sec"%(end-start))
'''
|
__all__ = [
'RTreeLookup',
'RTreePerilLookup',
'RTreeVulnerabilityLookup',
'generate_index_entries',
'get_peril_areas',
'get_peril_areas_index',
'get_rtree_index',
'PerilArea',
'PerilAreasIndex',
]
# 'OasisLookup' -> 'RTreeLookup'
# 'OasisPerilLookup' -> RTreePerilLookup
# 'OasisVulnerabilityLookup' -> 'RTreeVulnerabilityLookup'
from .base import OasisBaseLookup
import copy
import re
import types
import builtins
import itertools
import os
import uuid
import pickle
from collections import OrderedDict
from rtree.core import RTreeError
from rtree.index import (
Index as RTreeIndex,
Property as RTreeIndexProperty,
)
from shapely import speedups as shapely_speedups
from shapely.geometry import (
box,
Point,
MultiPoint,
Polygon,
)
from ..utils.data import get_dataframe
from ..utils.exceptions import OasisException
from ..utils.log import oasis_log
from ..utils.defaults import DEFAULT_RTREE_INDEX_PROPS
from ..utils.status import OASIS_KEYS_STATUS
from ..utils.path import as_path
if shapely_speedups.available:
shapely_speedups.enable()
# ---- RTree Lookup classes ---------------------------------------------------
class RTreeLookup(OasisBaseLookup):
"""
Combined peril and vulnerability lookup
"""
@oasis_log()
def __init__(
self,
config=None,
config_json=None,
config_fp=None,
config_dir=None,
areas=None,
peril_areas=None,
peril_areas_index=None,
peril_areas_index_props=None,
loc_to_global_areas_boundary_min_distance=0,
vulnerabilities=None
):
super(self.__class__, self).__init__(
config=config,
config_json=config_json,
config_fp=config_fp,
config_dir=config_dir,
)
self.peril_lookup = RTreePerilLookup(
config=self.config,
config_dir=self.config_dir,
areas=areas,
peril_areas=peril_areas,
peril_areas_index=peril_areas_index,
peril_areas_index_props=peril_areas_index_props,
loc_to_global_areas_boundary_min_distance=loc_to_global_areas_boundary_min_distance
)
self.peril_area_id_key = str(str(self.config['peril'].get('peril_area_id_col') or '') or 'peril_area_id').lower()
self.vulnerability_id_key = str(str(self.config['vulnerability'].get('vulnerability_id_col') or '') or 'vulnerability_id').lower()
self.vulnerability_lookup = RTreeVulnerabilityLookup(
config=self.config,
config_dir=self.config_dir,
vulnerabilities=vulnerabilities
)
def lookup(self, loc, peril_id, coverage_type, **kwargs):
loc_id = loc.get('loc_id') or int(uuid.UUID(bytes=os.urandom(16)).hex[:16], 16)
plookup = self.peril_lookup.lookup(loc, peril_id, coverage_type)
past = plookup['status']
pamsg = plookup['message']
paid = plookup['peril_area_id']
vlookup = self.vulnerability_lookup.lookup(loc, peril_id, coverage_type)
vlnst = vlookup['status']
vlnmsg = vlookup['message']
vlnid = vlookup['vulnerability_id']
vlookup.pop('status')
vlookup.pop('message')
vlookup.pop('vulnerability_id')
# Could optionally call the status lookup method, but faster
# to avoid or minimise outside function calls in a `for` loop
status = (
OASIS_KEYS_STATUS['success']['id'] if past == vlnst == OASIS_KEYS_STATUS['success']['id']
else (OASIS_KEYS_STATUS['fail']['id'] if (past == OASIS_KEYS_STATUS['fail']['id'] or vlnst == OASIS_KEYS_STATUS['fail']['id']) else OASIS_KEYS_STATUS['nomatch']['id'])
)
message = '{}; {}'.format(pamsg, vlnmsg)
return {
k: v for k, v in itertools.chain(
(
('loc_id', loc_id),
('peril_id', peril_id),
('coverage_type', coverage_type),
(self.peril_area_id_key, paid),
(self.vulnerability_id_key, vlnid),
('status', status),
('message', message),
),
vlookup.items()
)
}
class RTreePerilLookup(OasisBaseLookup):
"""
Single peril, single coverage type, lon/lat point-area poly lookup using
an Rtree index to store peril areas - index entries are
(peril area ID, peril area bounds)
pairs. Areas must be represented as polygons with vertices which are
lon/lat coordinates, and be passed in to the constructor as a list,
tuple, or generator of triples of the form
(peril area ID, polygon lon/lat vertices, dict with optional properties)
An optional distance measure ``loc_to_global_areas_boundary_min_distance`` can be passed
that defines how far, in an abstract unit, a given lon/lat location can be
from the boundary of the polygon containing all the individual peril area
polygons in order to be assigned a peril area ID. By default this distance
is 0, which means any lon/lat location outside the polygon containing all
peril area polygons will not be assigned a peril area ID.
"""
@oasis_log()
def __init__(
self,
areas=None,
config=None,
config_json=None,
config_fp=None,
config_dir=None,
loc_to_global_areas_boundary_min_distance=0,
peril_areas=None,
peril_areas_index=None,
peril_areas_index_fp=None,
peril_areas_index_props=None
):
super(self.__class__, self).__init__(config=config, config_json=config_json, config_fp=config_fp, config_dir=config_dir)
peril_config = self.config.get('peril') or {}
if areas or peril_areas or peril_config:
if peril_areas_index:
self.peril_areas_index = peril_areas_index
self.peril_areas_index_props = self.peril_areas_index.properties.as_dict()
elif (areas or peril_areas):
self.index_props = (
peril_areas_index_props or
peril_config.get('rtree_index') or
DEFAULT_RTREE_INDEX_PROPS
)
self.peril_areas_index = PerilAreasIndex(areas=areas, peril_areas=peril_areas, properties=self.index_props)
else:
areas_rtree_index_config = peril_config.get('rtree_index') or {}
index_fp = peril_areas_index_fp or areas_rtree_index_config.get('filename')
if not os.path.isabs(index_fp):
index_fp = os.path.join(self.config_dir, index_fp)
index_fp = as_path(index_fp, 'index_fp', preexists=False)
if index_fp:
idx_ext = areas_rtree_index_config.get('idx_extension') or 'idx'
dat_ext = areas_rtree_index_config.get('dat_extension') or 'dat'
if not (os.path.exists('{}.{}'.format(index_fp, idx_ext)) or os.path.exists('{}.{}'.format(index_fp, dat_ext))):
raise OasisException('No Rtree file index {}.{{{}, {}}} found'.format(index_fp, idx_ext, dat_ext))
self.peril_areas_index = PerilAreasIndex(fp=index_fp)
self.peril_areas_index_props = self.peril_areas_index.properties.as_dict()
self.peril_areas_boundary = box(*self.peril_areas_index.bounds, ccw=False)
_centroid = self.peril_areas_boundary.centroid
self.peril_areas_centre = _centroid.x, _centroid.y
self.loc_to_global_areas_boundary_min_distance = (
loc_to_global_areas_boundary_min_distance or
self.config['peril'].get('loc_to_global_areas_boundary_min_distance') or 0
)
if self.config.get('exposure') or self.config.get('locations'):
self.loc_coords_x_col = str.lower(str(self.config['exposure'].get('coords_x_col') or '') or 'lon')
self.loc_coords_y_col = str.lower(str(self.config['exposure'].get('coords_y_col') or '') or 'lat')
self.loc_coords_x_bounds = tuple(self.config['exposure'].get('coords_x_bounds') or ()) or (-180, 180)
self.loc_coords_y_bounds = tuple(self.config['exposure'].get('coords_y_bounds') or ()) or (-90, 90)
def lookup(self, loc, peril_id, coverage_type, **kwargs):
"""
Area peril lookup for an individual lon/lat location item, which can be
provided as a dict or a Pandas series. The data structure should contain
the keys `lon` or `longitude` for longitude and `lat` or `latitude` for
latitude.
"""
idx = self.peril_areas_index
boundary = self.peril_areas_boundary
loc_to_areas_min_dist = self.loc_to_global_areas_boundary_min_distance
loc_id = loc.get('loc_id') or int(uuid.UUID(bytes=os.urandom(16)).hex[:16], 16)
loc_x_col = self.loc_coords_x_col
loc_y_col = self.loc_coords_y_col
loc_x_bounds = self.loc_coords_x_bounds
loc_y_bounds = self.loc_coords_y_bounds
x = loc.get(loc_x_col)
y = loc.get(loc_y_col)
def _lookup(loc_id, x, y, st, perid, covtype, paid, pabnds, pacoords, msg):
return {
'loc_id': loc_id,
loc_x_col: x,
loc_y_col: y,
'peril_id': perid,
'coverage_type': covtype,
'status': st,
'peril_area_id': paid,
'area_peril_id': paid,
'area_bounds': pabnds,
'area_coordinates': pacoords,
'message': msg
}
try:
x = float(x)
y = float(y)
if not ((loc_x_bounds[0] <= x <= loc_x_bounds[1]) and (loc_y_bounds[0] <= y <= loc_y_bounds[1])):
raise ValueError('{}/{} out of bounds'.format(loc_x_col, loc_y_col))
except (ValueError, TypeError) as e:
msg = (
'Peril area lookup: invalid {}/{} ({}, {}) - {}'
.format(loc_x_col, loc_y_col, x, y, str(e))
)
return _lookup(loc_id, x, y, OASIS_KEYS_STATUS['fail']['id'], peril_id, coverage_type, None, None, None, msg)
st = OASIS_KEYS_STATUS['nomatch']['id']
msg = 'No peril area match'
paid = None
pabnds = None
pacoords = None
point = x, y
try:
results = list(idx.intersection(point, objects='raw'))
if not results:
raise IndexError
for _perid, _covtype, _paid, _pabnds, _pacoords in results:
if (peril_id, coverage_type) == (_perid, _covtype):
paid, pabnds, pacoords = _paid, _pabnds, _pacoords
break
if paid is None:
raise IndexError
except IndexError:
try:
results = list(idx.nearest(point, objects='raw'))
if not results:
raise IndexError
for _perid, _covtype, _paid, _pabnds, _pacoords in results:
if (peril_id, coverage_type) == (_perid, _covtype):
paid, pabnds, pacoords = _paid, _pabnds, _pacoords
break
if paid is None:
msg = 'No intersecting or nearest peril area found for peril ID {} and coverage type {}'.format(peril_id, coverage_type)
return _lookup(loc_id, x, y, OASIS_KEYS_STATUS['nomatch']['id'], peril_id, coverage_type, None, None, None, msg)
except IndexError:
pass
else:
p = Point(x, y)
min_dist = p.distance(boundary)
if min_dist > loc_to_areas_min_dist:
msg = (
'Peril area lookup: location is {} units from the '
'peril areas global boundary - the required minimum '
'distance is {} units'
.format(min_dist, loc_to_areas_min_dist)
)
return _lookup(loc_id, x, y, OASIS_KEYS_STATUS['fail']['id'], peril_id, coverage_type, None, None, None, msg)
st = OASIS_KEYS_STATUS['success']['id']
msg = (
'Successful peril area lookup: {}'.format(paid)
)
except RTreeError as e:
return _lookup(loc_id, x, y, OASIS_KEYS_STATUS['fail']['id'], peril_id, coverage_type, None, None, None, str(e))
else:
st = OASIS_KEYS_STATUS['success']['id']
msg = 'Successful peril area lookup: {}'.format(paid)
return _lookup(loc_id, x, y, st, peril_id, coverage_type, paid, pabnds, pacoords, msg)
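# Usage sketch for the area peril lookup above (hedged: the location values, peril ID and
# coverage type are illustrative assumptions, not values defined in this module).
def _demo_peril_area_lookup(lookup):
    """Run a single-location area peril lookup and return the result dict from `lookup()`."""
    loc = {'loc_id': 1, 'lon': -0.1278, 'lat': 51.5074}
    return lookup.lookup(loc, peril_id='WTC', coverage_type=1)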
class RTreeVulnerabilityLookup(OasisBaseLookup):
"""
Simple key-value based vulnerability lookup
"""
@oasis_log()
def __init__(
self,
config=None,
config_json=None,
config_fp=None,
config_dir=None,
vulnerabilities=None
):
super(self.__class__, self).__init__(config=config, config_json=config_json, config_fp=config_fp, config_dir=config_dir)
if vulnerabilities or self.config.get('vulnerability'):
self.col_dtypes, self.key_cols, self.vuln_id_col, self.vulnerabilities = self.get_vulnerabilities(vulnerabilities=vulnerabilities)
@oasis_log()
def get_vulnerabilities(self, vulnerabilities=None):
if not self.config:
raise OasisException(
'No lookup configuration provided or set - use `get_config` '
'on this instance to set it and provide either an actual '
'model config dict (use `model_config` argument), or a model '
'config JSON string (use `model_config_json` argument, or a '
'model config JSON file path (use `model_config_fp` argument)'
)
vuln_config = self.config.get('vulnerability')
if not vuln_config:
raise OasisException('No vulnerability config set in the lookup config')
col_dtypes = vuln_config.get('col_dtypes')
if not col_dtypes:
raise OasisException(
'Vulnerability file column data types must be defined as a '
'(col, data type) dict in the vulnerability section of the '
'lookup config'
)
col_dtypes = {
k.lower(): getattr(builtins, v) for k, v in col_dtypes.items()
}
key_cols = vuln_config.get('key_cols')
if not vuln_config.get('key_cols'):
raise OasisException(
'The vulnerability file key column names must be listed in the '
'vulnerability section of the lookup config'
)
key_cols = tuple(col.lower() for col in key_cols)
        vuln_id_col = str(self.config['vulnerability'].get('vulnerability_id_col') or 'vulnerability_id').lower()
        def _vuln_dict(vulns_seq, key_cols, vuln_id_col):
            return (
                {v[key_cols[0]]: (v.get(vuln_id_col) or v.get('vulnerability_id')) for v in vulns_seq} if len(key_cols) == 1
                else OrderedDict(
                    {tuple(v[key_cols[i]] for i in range(len(key_cols))): (v.get(vuln_id_col) or v.get('vulnerability_id')) for v in vulns_seq}
                )
            )
        if vulnerabilities:
            return col_dtypes, key_cols, vuln_id_col, _vuln_dict(vulnerabilities, key_cols, vuln_id_col)
src_fp = vuln_config.get('file_path')
if not src_fp:
raise OasisException(
'No vulnerabilities file path provided in the lookup config'
)
if not os.path.isabs(src_fp):
src_fp = os.path.join(self.config_dir, src_fp)
src_fp = os.path.abspath(src_fp)
self.config['vulnerability']['file_path'] = src_fp
        src_type = str(vuln_config.get('file_type') or 'csv').lower()
float_precision = 'high' if vuln_config.get('float_precision_high') else None
non_na_cols = vuln_config.get('non_na_cols') or tuple(col.lower() for col in list(key_cols) + [vuln_id_col])
sort_cols = vuln_config.get('sort_cols') or vuln_id_col
sort_ascending = vuln_config.get('sort_ascending')
vuln_df = get_dataframe(
src_fp=src_fp,
src_type=src_type,
float_precision=float_precision,
lowercase_cols=True,
non_na_cols=non_na_cols,
col_dtypes=col_dtypes,
sort_cols=sort_cols,
sort_ascending=sort_ascending
)
return col_dtypes, key_cols, vuln_id_col, _vuln_dict((v for _, v in vuln_df.iterrows()), key_cols, vuln_id_col)
def lookup(self, loc, peril_id, coverage_type, **kwargs):
"""
Vulnerability lookup for an individual location item, which could be a dict or a
Pandas series.
"""
loc_id = loc.get('loc_id') or int(uuid.UUID(bytes=os.urandom(16)).hex[:16], 16)
key_cols = self.key_cols
col_dtypes = self.col_dtypes
loc_key_col_values = OrderedDict({
key_col: loc.get(key_col) for key_col in key_cols
})
if not loc_key_col_values['peril_id']:
loc_key_col_values['peril_id'] = peril_id
if not loc_key_col_values['coverage_type']:
loc_key_col_values['coverage_type'] = loc.get('coverage') or coverage_type
def _lookup(loc_id, vlnperid, vlncovtype, vlnst, vlnid, vlnmsg):
return {
k: v for k, v in itertools.chain(
(
('loc_id', loc_id),
('peril_id', vlnperid),
('coverage_type', vlncovtype),
('status', vlnst),
('vulnerability_id', vlnid),
('message', vlnmsg)
),
loc_key_col_values.items()
)
}
try:
for key_col in key_cols:
key_col_dtype = col_dtypes[key_col]
key_col_dtype(loc_key_col_values[key_col])
except (TypeError, ValueError):
return _lookup(loc_id, peril_id, coverage_type, OASIS_KEYS_STATUS['fail']['id'], None, 'Vulnerability lookup: invalid key column value(s) for location')
vlnperid = peril_id
vlncovtype = coverage_type
vlnst = OASIS_KEYS_STATUS['nomatch']['id']
vlnmsg = 'No vulnerability match'
vlnid = None
try:
vlnid = (
self.vulnerabilities[tuple(loc_key_col_values[col] for col in key_cols)] if len(key_cols) > 1
else self.vulnerabilities[loc[key_cols[0]]]
)
except KeyError:
pass
else:
vlnperid = peril_id
vlncovtype = coverage_type
vlnst = OASIS_KEYS_STATUS['success']['id']
vlnmsg = 'Successful vulnerability lookup: {}'.format(vlnid)
return _lookup(loc_id, vlnperid, vlncovtype, vlnst, vlnid, vlnmsg)
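# Companion sketch for the vulnerability lookup above (hedged: the key column names and
# values below are illustrative; a real lookup config defines its own `key_cols`).
def _demo_vulnerability_lookup(lookup):
    """Run a single-location vulnerability lookup and return the result dict."""
    loc = {'loc_id': 1, 'peril_id': 'WTC', 'coverage_type': 1, 'occupancy': 1050}
    return lookup.lookup(loc, peril_id='WTC', coverage_type=1)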
# ---- RTree Peril funcs ------------------------------------------------------
def generate_index_entries(items, objects=None):
if objects:
for (key, poly_bounds), obj in zip(items, objects):
yield key, poly_bounds, obj
else:
for key, poly_bounds in items:
yield key, poly_bounds, None
def get_peril_areas(areas):
for peril_id, coverage_type, peril_area_id, coordinates, other_props in areas:
yield PerilArea(coordinates, peril_id=peril_id, coverage_type=coverage_type, peril_area_id=peril_area_id, **other_props)
def get_peril_areas_index(
areas=None,
peril_areas=None,
properties=None
):
if not (areas or peril_areas):
raise OasisException('Either areas or peril areas must be provided')
return PerilAreasIndex(areas=areas, peril_areas=peril_areas, properties=properties)
def get_rtree_index(
items,
objects=None,
properties=None
):
return (
RTreeIndex(generate_index_entries(items, objects=objects), properties=RTreeIndexProperty(**properties)) if properties
else RTreeIndex(generate_index_entries(items, objects=objects))
)
class PerilArea(Polygon):
def __init__(self, coords, **kwargs):
_coords = tuple(c for c in coords)
if not _coords:
raise OasisException('No peril area coordinates')
if len(_coords) > 2:
self._multipoint = MultiPoint(_coords)
elif len(_coords) == 2:
minx, miny, maxx, maxy = tuple(_c for c in _coords for _c in c)
self._multipoint = MultiPoint(box(minx, miny, maxx, maxy).exterior.coords)
elif len(_coords) == 1:
x, y = _coords[0][0], _coords[0][1]
r = kwargs.get('area_reg_poly_radius') or 0.0016
self._multipoint = MultiPoint(
tuple((x + r * (-1)**i, y + r * (-1)**j) for i in range(2) for j in range(2))
)
super(self.__class__, self).__init__(shell=self._multipoint.convex_hull.exterior.coords)
self._coordinates = tuple(self.exterior.coords)
self._centre = self.centroid.x, self.centroid.y
self._coverage_type = kwargs.get('coverage_type')
self._peril_id = kwargs.get('peril_id')
self._id = kwargs.get('area_peril_id') or kwargs.get('peril_area_id') or int(uuid.UUID(bytes=os.urandom(16)).hex[:16], 16)
@property
def multipoint(self):
return self._multipoint
@property
def coordinates(self):
return self._coordinates
@property
def centre(self):
return self._centre
@property
def coverage_type(self):
return self._coverage_type
@property
def peril_id(self):
return self._peril_id
@property
def id(self):
return self._id
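# Construction sketch for PerilArea (hedged: the coordinates and IDs below are illustrative).
def _demo_peril_area():
    """Build a small triangular peril area and return its id and bounding box."""
    pa = PerilArea(((0.0, 0.0), (0.0, 1.0), (1.0, 0.0)), peril_id='WSS', coverage_type=1, peril_area_id=101)
    return pa.id, pa.bounds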
class PerilAreasIndex(RTreeIndex):
def __init__(self, *args, **kwargs):
self._protocol = pickle.HIGHEST_PROTOCOL
idx_fp = kwargs.get('fp')
areas = kwargs.get('areas')
peril_areas = kwargs.get('peril_areas')
props = kwargs.get('properties') or copy.deepcopy(DEFAULT_RTREE_INDEX_PROPS)
if not (idx_fp or areas or peril_areas):
self._peril_areas = self._stream = None
kwargs['properties'] = RTreeIndexProperty(**props)
super(self.__class__, self).__init__(*args, **kwargs)
elif idx_fp:
self._peril_areas = self._stream = None
_idx_fp = idx_fp
if not os.path.isabs(_idx_fp):
_idx_fp = os.path.abspath(_idx_fp)
idx_ext = props.get('idx_extension') or 'idx'
dat_ext = props.get('dat_extension') or 'dat'
if not (os.path.exists('{}.{}'.format(_idx_fp, idx_ext)) or os.path.exists('{}.{}'.format(_idx_fp, dat_ext))):
kwargs['properties'] = RTreeIndexProperty(**props)
super(self.__class__, self).__init__(_idx_fp, *args, **kwargs)
else:
self._peril_areas = OrderedDict({
pa.id: pa for pa in (peril_areas if peril_areas else self._get_peril_areas(areas))
})
self._stream = self._generate_index_entries(
((paid, pa.bounds) for paid, pa in self._peril_areas.items()),
objects=((paid, pa.bounds, pa.coordinates) for paid, pa in self._peril_areas.items())
)
kwargs['properties'] = RTreeIndexProperty(**props)
super(self.__class__, self).__init__(self._stream, *args, **kwargs)
def dumps(self, obj):
return pickle.dumps(obj, protocol=self.protocol)
def loads(self, data):
return pickle.loads(data)
def _get_peril_areas(self, areas):
for peril_id, coverage_type, peril_area_id, coordinates, other_props in areas:
yield PerilArea(coordinates, peril_id=peril_id, coverage_type=coverage_type, peril_area_id=peril_area_id, **other_props)
def _generate_index_entries(self, items, objects=None):
if objects:
for (key, poly_bounds), obj in zip(items, objects):
yield key, poly_bounds, obj
else:
for key, poly_bounds in items:
yield key, poly_bounds, None
@property
def protocol(self):
return self._protocol
@property
def peril_areas(self):
return self._peril_areas
@property
def stream(self):
if self._peril_areas:
            self._stream = self._generate_index_entries((paid, pa.bounds) for paid, pa in self._peril_areas.items())
return self._stream
return None
@classmethod
def create_from_peril_areas_file(
cls,
src_fp=None,
src_type='csv',
peril_id_col='peril_id',
coverage_type_col='coverage_type',
peril_area_id_col='area_peril_id',
non_na_cols=('peril_id', 'coverage_type', 'area_peril_id',),
col_dtypes={'peril_id': int, 'coverage_type': int, 'area_peril_id': int},
sort_cols=['area_peril_id'],
area_poly_coords_cols={},
area_poly_coords_seq_start_idx=1,
area_reg_poly_radius=0.00166,
static_props={},
index_fp=None,
index_props=copy.deepcopy(DEFAULT_RTREE_INDEX_PROPS)
):
if not src_fp:
raise OasisException(
'An areas source CSV or JSON file path must be provided'
)
_src_fp = src_fp
if not os.path.isabs(_src_fp):
_src_fp = os.path.abspath(_src_fp)
_non_na_cols = set(non_na_cols)
_peril_id_col = peril_id_col.lower()
_coverage_type_col = coverage_type_col.lower()
_peril_area_id_col = peril_area_id_col.lower()
if not set(_non_na_cols).intersection([_peril_id_col, _coverage_type_col, _peril_area_id_col]):
_non_na_cols = _non_na_cols.union({_peril_id_col, _coverage_type_col, _peril_area_id_col})
for col in area_poly_coords_cols.values():
if col not in _non_na_cols:
_non_na_cols = _non_na_cols.union({col.lower()})
_non_na_cols = tuple(_non_na_cols)
_sort_cols = [col.lower() for col in sort_cols]
areas_df = get_dataframe(
src_fp=_src_fp,
src_type=src_type,
non_na_cols=_non_na_cols,
col_dtypes=col_dtypes,
sort_cols=(_sort_cols or [_peril_area_id_col])
)
coords_cols = area_poly_coords_cols
seq_start = area_poly_coords_seq_start_idx
len_seq = sum(1 if re.match(r'x(\d+)?', k) else 0 for k in coords_cols.keys())
peril_areas = cls()._get_peril_areas(
(
ar[_peril_id_col],
ar[_coverage_type_col],
ar[_peril_area_id_col],
tuple(
(ar.get(coords_cols['x{}'.format(i)].lower()) or 0, ar.get(coords_cols['y{}'.format(i)].lower()) or 0)
for i in range(seq_start, len_seq + 1)
),
static_props
) for _, ar in areas_df.iterrows()
)
_index_fp = index_fp
if not _index_fp:
raise OasisException('No output file index path provided')
if not os.path.isabs(_index_fp):
_index_fp = os.path.abspath(_index_fp)
try:
return cls().save(
_index_fp,
peril_areas=peril_areas,
index_props=index_props
)
except OasisException:
raise
def save(
self,
index_fp,
peril_areas=None,
index_props=DEFAULT_RTREE_INDEX_PROPS
):
_index_fp = index_fp
if not os.path.isabs(_index_fp):
_index_fp = os.path.abspath(_index_fp)
if os.path.exists(_index_fp):
os.remove(_index_fp)
class myindex(RTreeIndex):
def __init__(self, *args, **kwargs):
self.protocol = pickle.HIGHEST_PROTOCOL
super(self.__class__, self).__init__(*args, **kwargs)
def dumps(self, obj):
return pickle.dumps(obj, protocol=self.protocol)
def loads(self, obj):
return pickle.loads(obj)
try:
index = myindex(_index_fp, properties=RTreeIndexProperty(**index_props))
_peril_areas = self._peril_areas or peril_areas
if not _peril_areas:
raise OasisException(
'No peril areas found in instance or in arguments - '
'this is required to write the index to file'
)
peril_areas_seq = None
if (isinstance(peril_areas, list) or isinstance(peril_areas, tuple)):
peril_areas_seq = (pa for pa in peril_areas)
elif isinstance(peril_areas, types.GeneratorType):
peril_areas_seq = peril_areas
elif (isinstance(peril_areas, dict)):
peril_areas_seq = peril_areas.values()
for pa in peril_areas_seq:
index.insert(pa.id, pa.bounds, obj=(pa.peril_id, pa.coverage_type, pa.id, pa.bounds, pa.coordinates))
index.close()
except (IOError, OSError, RTreeError):
raise
return _index_fp
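# End-to-end sketch (hedged: the file names and single-point column mapping are illustrative;
# the CSV is assumed to hold one x/y coordinate pair per area under columns "x1" and "y1").
def _demo_build_index():
    """Build an Rtree index file from a peril areas CSV and return the written index path."""
    return PerilAreasIndex.create_from_peril_areas_file(
        src_fp='peril_areas.csv',
        index_fp='peril_areas_index',
        area_poly_coords_cols={'x1': 'x1', 'y1': 'y1'},
    )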
|
def formating(list_load, n1, n2):
    """Return the elements of list_load in the index range [n1, n2), each divided by 2."""
    list_format = []
    for i in range(n1, n2):
        list_format.append(list_load[i] / 2)
    return list_format
|
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.test import Client, TestCase
from django.urls import reverse
from myauth.forms import (
MyAuthenticationForm,
MyPasswordChangeForm,
MyUserCreationForm,
MyUserDeleteForm,
)
from myauth.views import UserCreateView
from myauth.models import generate_unique_token
from myauth.services import generate_unique_username
# Create your tests here.
class TestModels(TestCase):
def test_generate_unique_token(self):
token = generate_unique_token()
self.assertEqual(len(token), 255)
class TestViews(TestCase):
def setUp(self):
self.client = Client()
self.post_data = {
"email": "[email protected]",
"password1": "asdasdasd123",
"password2": "asdasdasd123",
}
def test_user_create_view(self):
response = self.client.get(reverse("create_user"))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context["form"], MyUserCreationForm)
response = self.client.post(reverse("create_user"), self.post_data)
user = User.objects.get(email=self.post_data.get("email"))
self.assertEqual(response.status_code, 302)
self.assertEqual(user.email, self.post_data.get("email"))
self.assertTrue(user.check_password(self.post_data.get("password1")))
self.assertIsNotNone(user.token)
response = self.client.post(reverse("create_user"), self.post_data)
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context["form"], MyUserCreationForm)
self.assertFalse(response.context["form"].is_valid())
def test_user_update_view(self):
response = self.client.get(reverse("update_user"))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context["form"], MyPasswordChangeForm)
self.client.post(reverse("create_user"), self.post_data)
new_password = "cxzdsaewq321"
response = self.client.post(
reverse("update_user"),
{
"email": self.post_data.get("email"),
"password": self.post_data.get("password1"),
"new_password": new_password,
},
)
user = User.objects.get(email=self.post_data.get("email"))
self.assertEqual(response.status_code, 302)
self.assertEqual(user.email, self.post_data.get("email"))
self.assertTrue(user.check_password(new_password))
def test_user_delete_view(self):
response = self.client.get(reverse("delete_user"))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context["form"], MyUserDeleteForm)
self.client.post(reverse("create_user"), self.post_data)
response = self.client.post(
reverse("delete_user"),
{
"email": self.post_data.get("email"),
"password": self.post_data.get("password1"),
},
)
self.assertEqual(response.status_code, 302)
with self.assertRaises(User.DoesNotExist):
User.objects.get(email=self.post_data.get("email"))
def test_user_get_token_view(self):
response = self.client.get(reverse("get_user_token"))
self.assertEqual(response.status_code, 200)
self.assertIsInstance(response.context["form"], MyAuthenticationForm)
self.client.post(reverse("create_user"), self.post_data)
user = User.objects.get(email=self.post_data.get("email"))
response = self.client.post(
reverse("get_user_token"),
{
"email": self.post_data.get("email"),
"password": self.post_data.get("password1"),
},
)
self.assertEqual(response.status_code, 302)
def test_validate_token(self):
self.client.post(reverse("create_user"), self.post_data)
user = User.objects.get(email=self.post_data.get("email"))
response = self.client.get(
reverse("validate_user_token"), data={"token": user.token.token}
)
self.assertTrue(
any(
[
"Token is valid!" in message.message
for message in response.context["messages"]
]
)
)
response = self.client.get(
reverse("validate_user_token"), data={"token": "user.token.token"}
)
self.assertTrue(
any(
[
"Token is NOT valid!" in message.message
for message in response.context["messages"]
]
)
)
class TestForms(TestCase):
def test_my_authentication_form(self):
username = "username"
data = {
"email": "[email protected]",
"password": "asdasdasd123",
}
form = MyAuthenticationForm(data=data)
self.assertFalse(form.is_valid())
with self.assertRaises(ValidationError):
form.clean()
user = User.objects.create_user(username, **data)
form = MyAuthenticationForm(data=data)
self.assertTrue(form.is_valid())
self.assertEqual(form.save(), user)
data["password"] = "asdasdasd"
form = MyAuthenticationForm(data=data)
self.assertFalse(form.is_valid())
with self.assertRaises(ValidationError):
form.clean()
class TestServices(TestCase):
def test_generate_unique_username(self):
username = generate_unique_username()
self.assertEqual(len(username), 150)
|
import pathlib
__author__ = "Austin Hodges"
__copyright__ = "Austin Hodges"
__license__ = "mit"
REQUIRED_PYTHON_VERSION = (3, 6, 0)
REQUIRED_PYTHON_STRING = '>={}.{}.{}'.format(REQUIRED_PYTHON_VERSION[0],
REQUIRED_PYTHON_VERSION[1],
REQUIRED_PYTHON_VERSION[2])
MAJOR_VERSION = 0
MINOR_VERSION = 7
PROJECT_VERSION = '{}.{}'.format(MAJOR_VERSION, MINOR_VERSION)
__version__ = PROJECT_VERSION
|
import pandas as pd
import ssl
from time import sleep
from json import dumps
from kafka import KafkaConsumer, KafkaProducer
# ssl._create_default_https_context = ssl._create_unverified_context
# url = 'https://dados.anvisa.gov.br/dados/TA_PRECO_MEDICAMENTO.csv' #dados publicos
# df = pd.read_csv(url, sep = ',')
# df.head()
# Produtor
# def envia_mensagem(msg):
# producer = KafkaProducer(bootstrap_servers='localhost:9092')
# try:
# producer.send('teste_leo', b" enviando ")
# result = "deu bom"
# except:
# result = "Deu ruim: "
# return result
producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
value_serializer=lambda x:
dumps(x).encode('utf-8'))
for e in range(1000):
data = {'number' : e}
producer.send('numtest', value=data)
sleep(5)
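# Hedged consumer sketch matching the producer loop above (assumes the same local broker at
# localhost:9092 and the 'numtest' topic; intended to be run in a separate process).
def consome_numtest():
    from json import loads
    consumer = KafkaConsumer(
        'numtest',
        bootstrap_servers=['localhost:9092'],
        auto_offset_reset='earliest',
        value_deserializer=lambda m: loads(m.decode('utf-8')),
    )
    for message in consumer:
        print(message.value)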
# if __name__ == '__main__':
# envia_mensagem("testando 123")
# Consumidor
# consumer = KafkaConsumer('sample')
# for message in consumer:
# print (message) |
DEBUG = False
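# Approach (inferred from the code below): split the array on zeros; for each zero-free
# segment compute prefix products, and if the segment's product is negative drop either the
# prefix ending at the first negative or the suffix starting at the last negative, whichever
# leaves the larger product. The final output is how many elements to cut from each end.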
for _ in range(int(input())):
n = int(input())
a = list(map(int, input().split()))
b = [ -1 for i in range(n) ]
if len(a) == 1:
if DEBUG: print(0, 0)
if a[0] > 0:
print(0, 0)
else:
print(1, 0)
continue
def solve(i, j):
if DEBUG: print(a[i:j+1], i,j)
if i > j:
return (-1, 0, 0)
elif i == j:
if a[i] > 0:
return (a[i], 0, 0)
else:
return (-1, 0, 0)
b[i] = a[i]
for k in range(i+1, j+1):
b[k] = b[k-1] * a[k]
if DEBUG: print('b:', b[i:j+1])
f = l = -1
for k in range(i, j+1):
if a[k] < 0:
if f == -1:
f = k
l = k
if DEBUG: print('f', f)
if DEBUG: print('l', l)
if b[j] > 0:
return (b[j], i, j)
elif l == 0:
v = b[j] // a[i]
return (v, i+1, j)
else:
v1 = b[j] // b[f]
v2 = b[j] // b[l]
if v1 > v2:
return (v1, f+1, j)
else:
return (v2, i, l-1)
def best(x, y):
return x if x[0] > y[0] else y
ans = (-1, 0, 0)
i = j = 0
for k in range(n):
if a[k] == 0:
j = k-1
s = solve(i, j)
if DEBUG: print(s)
ans = best(s, ans)
i = k+1
elif k == n-1:
j = k
s = solve(i, j)
if DEBUG: print(s)
ans = best(s, ans)
if DEBUG: print(ans)
if ans[0] < 0:
print(n, 0)
# print('ans', ans)
else:
print(ans[1], n-1-ans[2])
# print('ans', ans)
|
import numpy as np
import pandas as pd
import skimage.morphology
import warnings
from itertools import count
import os
import PIL.Image
import PIL.ImageFont
from ops.constants import *
import ops.filenames
import ops
import ops.io
# load font
def load_truetype(truetype='visitor1.ttf',size=10):
"""
Note that `size` here is the "em" size in pixels, which is different than
the actual height of the letters for most fonts.
"""
PATH = os.path.join(os.path.dirname(ops.__file__), truetype)
try:
return PIL.ImageFont.truetype(PATH,size=size)
    except OSError:
        warnings.warn('TrueType font not found at {0}'.format(PATH))
        return None
VISITOR_FONT = load_truetype()
def annotate_labels(df, label, value, label_mask=None, tag='cells', outline=False):
"""Transfer `value` from dataframe `df` to a saved integer image mask, using
`label` as an index.
The dataframe should contain data from a single image, which is loaded from
`label_mask` if provided, or else guessed based on descriptors in the first
row of `df` and `tag`.
"""
if df[label].duplicated().any():
raise ValueError('duplicate rows present')
label_to_value = df.set_index(label, drop=False)[value]
index_dtype = label_to_value.index.dtype
value_dtype = label_to_value.dtype
if not np.issubdtype(index_dtype, np.integer):
raise ValueError('label column {0} is not integer type'.format(label))
if not np.issubdtype(value_dtype, np.number):
label_to_value = label_to_value.astype('category').cat.codes
warnings.warn('converting value column "{0}" to categorical'.format(value))
if label_to_value.index.duplicated().any():
raise ValueError('duplicate index')
top_row = df.iloc[0]
if label_mask is None:
filename = ops.filenames.guess_filename(top_row, tag)
labels = ops.io.read_stack(filename)
elif isinstance(label_mask, str):
labels = ops.io.read_stack(label_mask)
else:
labels = label_mask
if outline:
labels = outline_mask(labels, 'inner')
phenotype = relabel_array(labels, label_to_value)
return phenotype
def annotate_points(df, value, ij=('i', 'j'), width=3, shape=(1024, 1024)):
"""Create a mask with pixels at coordinates `ij` set to `value` from
dataframe `df`.
"""
if shape=='1x1':
shape = (2048,2048)
elif shape=='2x2':
shape = (1024,1024)
ij = df[list(ij)].values.astype(int)
n = ij.shape[0]
mask = np.zeros(shape, dtype=df[value].dtype)
mask[ij[:, 0], ij[:, 1]] = df[value]
selem = np.ones((width, width))
mask = skimage.morphology.dilation(mask, selem)
return mask
def relabel_array(arr, new_label_dict):
"""Map values in integer array based on `new_labels`, a dictionary from
old to new values.
"""
n = arr.max()
arr_ = np.zeros(n+1)
for old_val, new_val in new_label_dict.items():
if old_val <= n:
arr_[old_val] = new_val
return arr_[arr]
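# Worked example: relabel_array(np.array([[1, 2], [2, 0]]), {1: 10, 2: 20})
# returns array([[10., 20.], [20., 0.]]) - labels without a mapping (here 0) stay 0.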
def outline_mask(arr, direction='outer', width=1):
"""Remove interior of label mask in `arr`.
"""
selem = skimage.morphology.disk(width)
arr = arr.copy()
if direction == 'outer':
mask = skimage.morphology.erosion(arr, selem)
arr[mask > 0] = 0
return arr
elif direction == 'inner':
mask1 = skimage.morphology.erosion(arr, selem) == arr
mask2 = skimage.morphology.dilation(arr, selem) == arr
arr[mask1 & mask2] = 0
return arr
else:
raise ValueError(direction)
def bitmap_label(labels, positions, colors=None):
positions = np.array(positions).astype(int)
if colors is None:
colors = [1] * len(labels)
i_all, j_all, c_all = [], [], []
for label, (i, j), color in zip(labels, positions, colors):
if label == '':
continue
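        # NOTE: `lasagna.io.bitmap_text` comes from the external `lasagna` package, which is not imported in this module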
i_px, j_px = np.where(lasagna.io.bitmap_text(label))
i_all += list(i_px + i)
j_all += list(j_px + j)
c_all += [color] * len(i_px)
shape = max(i_all) + 1, max(j_all) + 1
arr = np.zeros(shape, dtype=int)
arr[i_all, j_all] = c_all
return arr
def build_discrete_lut(colors):
"""Build ImageJ lookup table for list of discrete colors.
If the values to label are in the range 0..N, N + 1 colors should be
provided (zero value is usually black). Color values should be understood
by `sns.color_palette` (e.g., "blue", (1, 0, 0), or "#0000ff").
"""
    try:
        import seaborn as sns
        colors = sns.color_palette(colors)
    except Exception:
        # fall back to the raw color tuples if seaborn is unavailable or cannot parse them
        pass
colors = 255 * np.array(colors)
# try to match ImageJ LUT rounding convention
m = len(colors)
n = int(256 / m)
p = m - (256 - n * m)
color_index_1 = list(np.repeat(range(0, p), n))
color_index_2 = list(np.repeat(range(p, m), n + 1))
color_index = color_index_1 + color_index_2
return colors_to_imagej_lut(colors[color_index, :])
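# Usage sketch (hedged): a LUT for label values 0..2, with 0 mapped to black.
# lut = build_discrete_lut(['black', 'green', 'red'])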
def bitmap_draw_line(image,coords,width=1,dashed=False):
"""Draw horizontal line, returning an image of same shape.
Dashed if requested.
"""
import PIL.ImageDraw
    if (len(coords) > 2) and (dashed is not False):
        raise ValueError('Drawing a dashed line between more than 2 points not supported.')
    if (coords[0][1] != coords[1][1]) and (dashed is not False):
        raise ValueError('Drawing a dashed non-horizontal line not supported')
if image.dtype==np.uint16:
mode='I;16'
fill = 2**16-1
elif image.dtype==np.uint8:
mode='L'
fill = 2**8-1
else:
mode='1'
fill = True
img = PIL.Image.new(mode, image.shape[:-3:-1])
draw = PIL.ImageDraw.Draw(img,mode=mode)
if dashed:
y = coords[0][1]
if not isinstance(dashed,list):
dashed = [100,50] # dash, gap
xs = []
x = coords[0][0]
counter = count(start=0,step=1)
while x<coords[1][0]:
xs.append(x)
c = next(counter)
if c%2==0:
x+=dashed[0]
else:
x+=dashed[1]
xs.append(coords[1][0])
for x_0,x_1 in zip(xs[::2],xs[1::2]):
draw.line([(x_0,y),(x_1,y)],width=width,fill=fill)
else:
draw.line(coords,width=width,fill=fill)
return np.array(img)
def bitmap_text_overlay(image,anchor_point,text,size=10,font=VISITOR_FONT):
"""Draw text in the shape of the given image.
"""
import PIL.ImageDraw
if image.dtype==np.uint16:
mode='L' # PIL has a bug with drawing text on uint16 images
elif image.dtype==np.uint8:
mode='L'
else:
mode='1'
img = PIL.Image.new(mode, image.shape[:-3:-1])
draw = PIL.ImageDraw.Draw(img)
if isinstance(font,PIL.ImageFont.FreeTypeFont):
FONT = font
if FONT.size != size:
warnings.warn(f'Size of supplied FreeTypeFont object is {FONT.size}, '
f'but input argument size = {size}.'
)
else:
FONT = load_truetype(truetype=font,size=size)
offset = FONT.getoffset(text)
draw.text(np.array(anchor_point)-np.array(offset),text,font=FONT,fill='white')
if image.dtype==np.uint16:
return skimage.img_as_uint(np.array(img))
else:
return np.array(img,dtype=image.dtype)
def bitmap_line(s,crop=True):
"""Draw text using Visitor font (characters are 5x5 pixels).
"""
import PIL.Image
import PIL.ImageDraw
img = PIL.Image.new("RGBA", (len(s) * 8, 10), (0, 0, 0))
draw = PIL.ImageDraw.Draw(img)
draw.text((0, 0), s, (255, 255, 255), font=VISITOR_FONT)
draw = PIL.ImageDraw.Draw(img)
n = np.array(img)[2:7, :, 0]
    if (n.sum() == 0) or (not crop):
return n
return (n[:, :np.where(n.any(axis=0))[0][-1] + 1] > 0).astype(int)
def bitmap_lines(lines, spacing=1,crop=True):
"""Draw multiple lines of text from a list of strings.
"""
bitmaps = [bitmap_line(x,crop=crop) for x in lines]
height = 5
shapes = np.array([x.shape for x in bitmaps])
shape = (height + 1) * len(bitmaps), shapes[:, 1].max()
output = np.zeros(shape, dtype=int)
for i, bitmap in enumerate(bitmaps):
start, end = i * (height + 1), (i + 1) * (height + 1) - 1
output[start:end, :bitmap.shape[1]] = bitmap
return output[:-1, :]
def colors_to_imagej_lut(lut_values):
"""ImageJ header expects 256 red values, then 256 green values, then
256 blue values.
"""
return tuple(np.array(lut_values).T.flatten().astype(int))
def build_GRMC():
import seaborn as sns
colors = (0, 1, 0), (1, 0, 0), (1, 0, 1), (0, 1, 1)
lut = []
for color in colors:
lut.append([0, 0, 0, 1])
lut.extend(sns.dark_palette(color, n_colors=64 - 1))
lut = np.array(lut)[:, :3]
RGCM = np.zeros((256, 3), dtype=int)
RGCM[:len(lut)] = (lut * 255).astype(int)
return tuple(RGCM.T.flatten())
def add_rect_bounds(df, width=10, ij='ij', bounds_col='bounds'):
arr = []
for i,j in df[list(ij)].values.astype(int):
arr.append((i - width, j - width, i + width, j + width))
return df.assign(**{bounds_col: arr})
def make_sq_bounds(
df,
input_bounds=['bounds_0','bounds_1','bounds_2','bounds_3'],
bounds_col='bounds'):
def split_pad(pad):
return (pad//2,pad//2+pad%2)
arr = []
for bounds in df[input_bounds].values.astype(int):
width,height = (bounds[2]-bounds[0]),(bounds[3]-bounds[1])
diff = height-width
pad_width, pad_height = split_pad(np.clip(diff,0,None)),split_pad(np.clip(-diff,0,None))
arr.append(tuple(bounds+np.array([-pad_width[0],-pad_height[0],pad_width[1],pad_height[1]])))
return df.assign(**{bounds_col: arr})
# BASE LABELING
colors = (0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 0, 1), (0, 1, 1)
GRMC = build_discrete_lut(colors)
def add_base_codes(df_reads, bases, offset, col):
n = len(df_reads[col].iloc[0])
df = (df_reads[col].str.extract('(.)'*n)
.applymap(bases.index)
.rename(columns=lambda x: 'c{0}'.format(x+1))
)
return pd.concat([df_reads, df + offset], axis=1)
def annotate_bases(df_reads, col='barcode', bases='GTAC', offset=1, **kwargs):
"""
from ops.annotate import add_base_codes, GRMC
labels = annotate_bases(df_reads)
# labels = annotate_bases(df_cells, col='cell_barcode_0')
data = read('process/10X_A1_Tile-7.log.tif')
labeled = join_stacks(data, (labels[:, None], '.a'))
luts = GRAY, GREEN, RED, MAGENTA, CYAN, GRMC
save('test/labeled', labeled, luts=luts)
"""
df_reads = add_base_codes(df_reads, bases, offset, col)
n = len(df_reads[col].iloc[0])
cycles = ['c{0}'.format(i+1) for i in range(n)]
labels = np.array([annotate_points(df_reads, c, **kwargs) for c in cycles])
return labels
|
import os
import sys
import time
import datetime
import slackclient as slck
CH_NAME = 0
CH_ID = 1
MSG_CONTENT = 0
MSG_CH = 1
MSG_CREATOR = 2
MSG_REACTS = 3
MSG_LINK = 3
class OnThisDay:
def __init__(self):
self.now = datetime.datetime.now()
# print(self.now)
self.client = slck.SlackClient(os.getenv('SLACK_BOT_TOKEN'))
print("Slack bot authenticated!\nrequesting channel list...")
self.channel_list = self.list_channels()
print("{num} channels received!\nQuerying channels...".format(
num=len(self.channel_list)
))
# print(self.channel_list)
self.messages_ch_list = self.list_messages("year")
if len(self.messages_ch_list) == 0:
self.messages_ch_list = self.list_messages("month")
if len(self.messages_ch_list) > 0:
self.message = self.messages_ch_list[self.max_emoji_msg()]
self.decorated_message = self.decorate_msg()
self.post_msg()
else:
msg = "DAMN! You guys should be talking more..."
channel_id = 'random'
for ch in self.channel_list:
if ch[CH_NAME] == 'random':
channel_id = ch[CH_ID]
break
self.client.api_call(method="chat.postMessage",
channel=channel_id,
text=msg,
username="Mnemosyne"
)
def list_channels(self):
"""
lists all the channels of the workspace
:return:
ch_list
type: list of tuple (channel name, channel id)
"""
ch_list = []
ch_dict = self.client.api_call(method="channels.list")
try:
for ch in ch_dict["channels"]:
ch_list.append((ch["name"], ch["id"]))
except KeyError:
print("Authentication failed!\nPlease check your OAuth environment variable.")
sys.exit(1)
return ch_list
def list_messages(self, time_frame):
"""
lists all the messages for the day of all channels
:param time_frame: (str) specifies if the data is to be collected for past year or past month
:return:
msg_list
type: list of tuple (msg, msg_channel, msg_creator, msg_reacts)
"""
list_msgs = []
for ch in self.channel_list:
for x in range(6):
try:
ch_hist = self.client.api_call(method="channels.history",
channel=ch[CH_ID],
count=1000,
inclusive=True,
latest=self.time("end", time_frame, x),
oldest=self.time("start", time_frame, x)
)
print("Data for {ch} fetched under {iter} {frame} old time-frame with {num} messages".format(
ch=ch[CH_NAME],
frame=time_frame,
iter=x+1,
num=len(ch_hist.get("messages", []))
))
if ch_hist is not None:
for msg in ch_hist.get("messages", []):
if msg["type"] == "message":
is_birthday_msg = False
content = msg.get("text", "false")
if "Here's what was trending" not in content:
user = msg.get("user", "user detection failed")
reacts = msg.get("reactions", [])
reacts_count = 0
for reaction in reacts:
if reaction.get('name', 'none') == "birthday":
is_birthday_msg = True
break
reacts_count += reaction.get('count', 0)
if not is_birthday_msg:
list_msgs.append((content, ch[CH_NAME], user, reacts_count))
except ValueError:
print("Day doesn't exist in the current - {iter} month".format(
iter=x+1
))
return list_msgs
def max_emoji_msg(self):
"""
finds the index of the message with the highest number of reactions
:return:
msg_index
type: int
"""
max_reacts = 0
msg_index = 0
for index, msg in enumerate(self.messages_ch_list):
# print(msg)
if msg[MSG_REACTS] > max_reacts:
msg_index = index
max_reacts = msg[MSG_REACTS]
return msg_index
def decorate_msg(self):
"""
        Using the Slack API search method, returns richer metadata for the selected message
:return:
decorated_msg
type: (content, channel, creator, link)
"""
msg_metadata = self.client.api_call('search.messages',
query=self.message[MSG_CONTENT],
)
# print(msg_metadata)
if msg_metadata["ok"]:
index = 0
while True:
msg_metadata = msg_metadata["messages"]['matches'][index]
if msg_metadata["username"] == "mnemosyne":
index = index+1
continue
decorated_msg = (
msg_metadata["text"],
msg_metadata["channel"]["name"],
msg_metadata["username"],
msg_metadata["permalink"]
)
break
else:
print("message decoration failed!")
sys.exit(1)
return decorated_msg
def post_msg(self):
"""
sends the selected message to the 'random' slack channel
:return:
NONE
"""
channel_id = 'random'
for ch in self.channel_list:
if ch[CH_NAME] == 'random':
channel_id = ch[CH_ID]
break
msg = "Here's what was trending *OnThisDay*!\n\n>"\
+ self.decorated_message[MSG_CONTENT]+"\n\n-- by @" + \
self.decorated_message[MSG_CREATOR] + " in #" + \
self.decorated_message[MSG_CH] + "\n\n" + \
self.decorated_message[MSG_LINK]
response = self.client.api_call(method="chat.postMessage",
channel=channel_id,
text=msg,
link_names=True,
username="Mnemosyne"
)
if response["ok"]:
print("Yay, nostalgia spread!")
else:
print("failed to invoke memories!")
def time(self, type_of, time_frame, iter_value):
"""
        converts the current day in a past year/month, at 00:01 or 23:59, into a timestamp recognised by Slack.
        :param type_of: whether the start time or the end time is required
        :param time_frame: specifies whether the year or the month is to be decreased
        :param iter_value: specifies the amount by which the year/month is to be decreased
        :return:
            start_time
                type: float (Unix timestamp)
        """
if time_frame == "year":
year = str(self.now.year - 1 - iter_value)
month = self._convert(self.now.month)
else:
year = str(self.now.year)
month = self._convert(self.now.month - 1 - iter_value)
day = self._convert(self.now.day)
time_str = day+"/"+month+"/"+year
if type_of == "start":
time_str = time_str+" 00:01"
else:
time_str = time_str+" 23:59"
# print(time_str)
start_time = time.mktime(datetime.datetime.strptime(time_str, "%d/%m/%Y %H:%M").timetuple())
return start_time
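        # e.g. on 17 May 2020 with time_frame="year", iter_value=0 and type_of="start",
        # the string parsed above is "17/05/2019 00:01" (illustrative date).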
@staticmethod
def _convert(num):
"""
converts "9" to "09"
:param num: integer to be converted
:return:
str_num: string with modified integer
"""
if num < 10:
str_num = "0"+str(num)
else:
str_num = str(num)
return str_num
if __name__ == "__main__":
OnThisDay()
|
import math
import itertools
import sys
from pathlib import Path
import itertools
import math
import sys
from pathlib import Path
sys.path.append(str(Path("../../")))
from roadscene2vec.scene_graph.nodes import Node
#from roadscene2vec.scene_graph.nodes import ObjectNode
#This class extracts relations for every pair of entities in a scene
class RelationExtractor:
def __init__(self, config):
self.conf = config
self.actors = config.relation_extraction_settings["ACTOR_NAMES"]
self.rels = config.relation_extraction_settings["RELATION_NAMES"]
self.wanted_directional_relation_dict = {(i[0],i[1]):i[2] for i in config.relation_extraction_settings["directional_relation_list"]}
self.wanted_proximity_relation_dict = {(i[0],i[1]):i[2] for i in config.relation_extraction_settings["proximity_relation_list"]}
self.proximity_rels = self.conf.relation_extraction_settings["PROXIMITY_THRESHOLDS"]
self.directional_rels = config.relation_extraction_settings["DIRECTIONAL_THRESHOLDS"]
self.relational_colors = {i[0]:i[1] for i in config.relation_extraction_settings["RELATION_COLORS"]}
#import pdb; pdb.set_trace()
self.LANE_THRESHOLD = self.conf.relation_extraction_settings['LANE_THRESHOLD'] # feet. if object's center is more than this distance away from ego's center, build left or right lane relation
# feet. if object's center is within this distance of ego's center, build middle lane relation
#self.CENTER_LANE_THRESHOLD = self.conf.relation_extraction_settings['CENTER_LANE_THRESHOLD']
def get_actor_type(self, actor):
for actor_ in range(len(self.actors)):
if actor.label == self.actors[actor_]:
return self.actors[actor_], actor_ #return the actor type along with its index in the ACTOR_NAMES list
elif actor.label.lower() == self.actors[actor_]:
return self.actors[actor_], actor_
elif f"{self.actors[actor_].upper()}_NAMES" in self.conf.relation_extraction_settings:
for actor_names in self.conf.relation_extraction_settings[f"{self.actors[actor_].upper()}_NAMES"]: #go through different names of actor type (ie Tesla for type car)
if actor_names in actor.label:
return self.actors[actor_], actor_
elif actor_names in actor.label.lower():
return self.actors[actor_], actor_
raise NameError("Actor name not found for actor with name: " + actor.attr["name"])
def get_config(self):
return self.conf
#takes in two entities and extracts all relations between those two entities. extracted relations are bidirectional
def extract_relations(self, actor1, actor2):
type1 ,_ = self.get_actor_type(actor1)
type2 ,_= self.get_actor_type(actor2)
relations_list = []
if (type1,type2) in self.wanted_proximity_relation_dict.keys():
relations_list += self.extract_distance_relations_actor1_actor2(actor1, actor2, type1, type2) #always pass in order that they are defined in the list
if (type1,type2) in self.wanted_directional_relation_dict.keys():
relations_list += self.extract_directional_relation_actor1_actor2(actor1, actor2, type1, type2) #always pass in order that they are defined in the list
return relations_list
def extract_relative_lanes(self, scene_graph): #keep as you will always need to add lanes
if self.conf.dataset_type == "carla":
scene_graph.left_lane = Node("lane_left", {"curr":"lane_left"}, "lane", self.actors.index("lane")) #change actor.lane to just lane
scene_graph.right_lane = Node("lane_right", {"curr":"lane_right"}, "lane", self.actors.index("lane"))
scene_graph.middle_lane = Node("lane_middle", {"curr":"lane_middle"}, "lane", self.actors.index("lane"))
elif self.conf.dataset_type == "image":
scene_graph.left_lane = Node('Left Lane', {}, "lane", self.actors.index("lane"))
scene_graph.right_lane = Node('Right Lane', {}, "lane", self.actors.index("lane"))
scene_graph.middle_lane = Node('Middle Lane', {}, "lane", self.actors.index("lane"))
scene_graph.add_node(scene_graph.left_lane)
scene_graph.add_node(scene_graph.right_lane)
scene_graph.add_node(scene_graph.middle_lane)
#if "isIn" in self.directional_rels:
scene_graph.add_relation([scene_graph.left_lane, "isIn", scene_graph.road_node]) #if we assume lanes and roads must be in graph, then just check to see if isIn in the wanted relations?
scene_graph.add_relation([scene_graph.right_lane, "isIn", scene_graph.road_node])
scene_graph.add_relation([scene_graph.middle_lane, "isIn", scene_graph.road_node])
scene_graph.add_relation([scene_graph.egoNode, "isIn", scene_graph.middle_lane])
# else:
# raise ValueError("isIn relation absent from config")
def add_mapping_to_relative_lanes(self, scene_graph, object_node): #leave this in if we can assume that there will always be lanes
if self.conf.dataset_type == "carla":
_, ego_y = self.rotate_coords(scene_graph, scene_graph.egoNode.attr['location'][0], scene_graph.egoNode.attr['location'][1]) #NOTE: X corresponds to forward/back displacement and Y corresponds to left/right displacement
_, new_y = self.rotate_coords(scene_graph, object_node.attr['location'][0], object_node.attr['location'][1])
y_diff = new_y - ego_y
if y_diff < -self.LANE_THRESHOLD:
scene_graph.add_relation([object_node, "isIn", scene_graph.left_lane])
elif y_diff > self.LANE_THRESHOLD:
scene_graph.add_relation([object_node, "isIn", scene_graph.right_lane])
elif y_diff <= self.LANE_THRESHOLD and y_diff >= -self.LANE_THRESHOLD: #check
scene_graph.add_relation([object_node, "isIn", scene_graph.middle_lane])
# elif abs(y_diff) <= self.CENTER_LANE_THRESHOLD:
# scene_graph.add_relation([object_node, "isIn", scene_graph.middle_lane])
elif self.conf.dataset_type == "image":
if object_node.attr['rel_location_x'] < -self.LANE_THRESHOLD:
scene_graph.add_relation([object_node, "isIn", scene_graph.left_lane])
elif object_node.attr['rel_location_x'] > self.LANE_THRESHOLD:
scene_graph.add_relation([object_node, "isIn", scene_graph.right_lane])
# elif abs(object_node.attr['rel_location_x']) <= self.CENTER_LANE_THRESHOLD:
# scene_graph.add_relation([object_node, "isIn", scene_graph.middle_lane])
elif object_node.attr['rel_location_x'] <= self.LANE_THRESHOLD and object_node.attr['rel_location_x'] >= -self.LANE_THRESHOLD:
scene_graph.add_relation([object_node, "isIn", scene_graph.middle_lane])
def extract_semantic_relations(self, scene_graph):
for node1, node2 in itertools.combinations(scene_graph.g.nodes, 2):
if node1.name != node2.name and (node1.name != "Root Road" and node2.name != "Root Road"): #dont build self-relations
scene_graph.add_relations(self.extract_relations(node1, node2))
#copied from get_node_embeddings(). rotates coordinates to be relative to ego vector.
def rotate_coords(self, scene_graph, x, y):
new_x = (x*scene_graph.ego_cos_term) + (y*scene_graph.ego_sin_term)
new_y = ((-x)*scene_graph.ego_sin_term) + (y*scene_graph.ego_cos_term)
return new_x, new_y
#~~~~~~~~~specific relations for each pair of actors possible~~~~~~~~~~~~
#actor 1 corresponds to the first actor in the function name and actor2 the second
def extract_distance_relations_actor1_actor2(self, actor1, actor2, type1, type2):
relation_list = []
if self.euclidean_distance(actor1, actor2) <= self.wanted_proximity_relation_dict[(type1,type2)]:
relation_list += self.create_proximity_relations(actor1, actor2)
relation_list += self.create_proximity_relations(actor2, actor1)
return relation_list
return relation_list
def extract_directional_relation_actor1_actor2(self, actor1, actor2, type1, type2):
relation_list = []
if self.euclidean_distance(actor1, actor2) <= self.wanted_directional_relation_dict[(type1,type2)]:
# One of these relations get overwritten in the visualizer for some reason...
relation_list += self.extract_directional_relation(actor1, actor2)
relation_list += self.extract_directional_relation(actor2, actor1)
return relation_list
return relation_list
#~~~~~~~~~~~~~~~~~~UTILITY FUNCTIONS~~~~~~~~~~~~~~~~~~~~~~
#return euclidean distance between actors
def euclidean_distance(self, actor1, actor2):
if self.conf.dataset_type == "carla":
l1 = actor1.attr['location']
l2 = actor2.attr['location']
distance = math.sqrt((l1[0] - l2[0])**2 + (l1[1]- l2[1])**2 + (l1[2] - l2[2])**2)
elif self.conf.dataset_type == "image":
l1 = (actor1.attr['location_x'], actor1.attr['location_y'])
l2 = (actor2.attr['location_x'], actor2.attr['location_y'])
distance = math.sqrt((l1[0] - l2[0])**2 + (l1[1] - l2[1])**2)
# print(actor1, actor2, distance)
return distance
#check if an actor is in a certain lane
    def in_lane(self, actor1, actor2):
        if 'lane_idx' in actor1.attr.keys():
            # calculate the distance between actor1 and actor2
            # if it is below 3.5 then they have an isIn relation.
            # if actor1 is ego: if actor2 is not equal to the ego lane's index then it is an invading relation.
            if actor1.attr['lane_idx'] == actor2.attr['lane_idx']:
                return True
            if "invading_lane" in actor1.attr and actor1.attr['invading_lane'] == actor2.attr['lane_idx']:
                return True
            if "orig_lane_idx" in actor1.attr and actor1.attr['orig_lane_idx'] == actor2.attr['lane_idx']:
                return True
        return False
def create_proximity_relations(self, actor1, actor2): #how
for relation in self.proximity_rels:
if self.euclidean_distance(actor1, actor2) <= relation[1]:
return [[actor1,relation[0], actor2]]
return []
def extract_directional_relation(self, actor1, actor2):
relation_list = []
if self.conf.dataset_type == "carla":
# gives directional relations between actors based on their 2D absolute positions.
x1, y1 = math.cos(math.radians(actor1.attr['rotation'][0])), math.sin(math.radians(actor1.attr['rotation'][0]))
x2, y2 = actor2.attr['location'][0] - actor1.attr['location'][0], actor2.attr['location'][1] - actor1.attr['location'][1]
x2, y2 = x2 / math.sqrt(x2**2+y2**2), y2 / math.sqrt(x2**2+y2**2)
degree = math.degrees(math.atan2(y2, x2)) - \
math.degrees(math.atan2(y1, x1))
elif self.conf.dataset_type == "image":
x1 = math.cos(math.radians(0))
y1 = math.sin(math.radians(0))
x2 = actor2.attr['location_x'] - actor1.attr['location_x']
y2 = actor2.attr['location_y'] - actor1.attr['location_y']
            norm = math.sqrt(x2**2 + y2**2)
            x2 /= norm
            y2 /= norm
degree = math.degrees(math.atan2(y1, x1)) - \
math.degrees(math.atan2(y2, x2))
if degree < 0:
degree += 360
degree %= 360
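        # `degree` is now the bearing of actor2 relative to actor1's heading, normalised to
        # [0, 360); each configured directional relation owns one or more degree ranges and is
        # emitted when the bearing falls inside one of them.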
# if degree < 0:
# degree = 0
for direction_rel in self.directional_rels:
list_of_ranges = direction_rel[1]
for ranges in list_of_ranges:
if degree >= ranges[0] and degree <= ranges[1]:
relation_list.append([actor2, direction_rel[0], actor1])
if self.conf.dataset_type == "carla":
if actor2.attr['lane_idx'] < actor1.attr['lane_idx']: # actor2 to the left of actor1
relation_list.append([actor2, "toLeftOf", actor1])
elif actor2.attr['lane_idx'] > actor1.attr['lane_idx']: # actor2 to the right of actor1
relation_list.append([actor2, "toRightOf", actor1])
elif self.conf.dataset_type == "image":
# if abs(actor2.attr['location_x'] - actor1.attr['location_x']) <= self.CENTER_LANE_THRESHOLD:
# pass
if (actor2.attr['location_x'] - actor1.attr['location_x']) <= self.LANE_THRESHOLD and (actor2.attr['location_x'] - actor1.attr['location_x']) >= -self.LANE_THRESHOLD: #if in the same lane, don't want left or right relations to be built
pass
# actor2 to the left of actor1
elif actor2.attr['location_x'] < actor1.attr['location_x']:
relation_list.append([actor2, "toLeftOf", actor1])
# actor2 to the right of actor1
elif actor2.attr['location_x'] > actor1.attr['location_x']:
relation_list.append([actor2, "toRightOf", actor1])
# disable rear relations help the inference.
return relation_list
|
# -*- coding: utf-8 -*-
"""
This file is originally from the csvsort project:
https://bitbucket.org/richardpenman/csvsort
MongoDB Modifications:
1. add the quoting=quoting argument to csv.reader()
"""
import csv
import heapq
import os
import sys
import tempfile
from optparse import OptionParser
csv.field_size_limit(sys.maxsize)
class CsvSortError(Exception):
pass
def csvsort(input_filename,
columns,
output_filename=None,
max_size=100,
has_header=True,
delimiter=',',
show_progress=False,
quoting=csv.QUOTE_MINIMAL):
"""Sort the CSV file on disk rather than in memory.
The merge sort algorithm is used to break the file into smaller sub files
Args:
input_filename: the CSV filename to sort.
columns: a list of columns to sort on (can be 0 based indices or header
keys).
output_filename: optional filename for sorted file. If not given then
            the input file will be overwritten.
max_size: the maximum size (in MB) of CSV file to load in memory at
once.
has_header: whether the CSV contains a header to keep separated from
sorting.
delimiter: character used to separate fields, default ','.
show_progress (Boolean): A flag whether or not to show progress.
The default is False, which does not print any merge information.
quoting: How much quoting is needed in the final CSV file. Default is
csv.QUOTE_MINIMAL.
"""
with open(input_filename) as input_fp:
reader = csv.reader(input_fp, delimiter=delimiter, quoting=quoting)
if has_header:
header = next(reader)
else:
header = None
columns = parse_columns(columns, header)
filenames = csvsplit(reader, max_size, quoting)
if show_progress:
print('Merging %d splits' % len(filenames))
for filename in filenames:
memorysort(filename, columns, quoting)
sorted_filename = mergesort(filenames, columns, quoting)
# XXX make more efficient by passing quoting, delimiter, and moving result
# generate the final output file
with open(output_filename or input_filename, 'w') as output_fp:
writer = csv.writer(output_fp, delimiter=delimiter, quoting=quoting)
if header:
writer.writerow(header)
with open(sorted_filename) as sorted_fp:
for row in csv.reader(sorted_fp, quoting=quoting):
writer.writerow(row)
os.remove(sorted_filename)
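# Usage sketch (hedged: 'people.csv' and its 'last_name' header are illustrative).
# csvsort('people.csv', ['last_name'], output_filename='people_sorted.csv', max_size=50)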
def parse_columns(columns, header):
"""check the provided column headers
"""
for i, column in enumerate(columns):
if isinstance(column, int):
if header:
if column >= len(header):
raise CsvSortError(
'Column index is out of range: "{}"'.format(column))
else:
# find index of column from header
if header is None:
raise CsvSortError(
'CSV needs a header to find index of this column name:' +
' "{}"'.format(column))
else:
if column in header:
columns[i] = header.index(column)
else:
raise CsvSortError(
'Column name is not in header: "{}"'.format(column))
return columns
def csvsplit(reader, max_size, quoting):
"""Split into smaller CSV files of maximum size and return the filenames.
"""
max_size = max_size * 1024 * 1024 # convert to bytes
writer = None
current_size = 0
split_filenames = []
# break CSV file into smaller merge files
for row in reader:
if writer is None:
ntf = tempfile.NamedTemporaryFile(delete=False, mode='w')
writer = csv.writer(ntf, quoting=quoting)
split_filenames.append(ntf.name)
writer.writerow(row)
current_size += sys.getsizeof(row)
if current_size > max_size:
writer = None
current_size = 0
return split_filenames
def memorysort(filename, columns, quoting):
"""Sort this CSV file in memory on the given columns
"""
with open(filename) as input_fp:
rows = [row for row in csv.reader(input_fp, quoting=quoting)]
rows.sort(key=lambda row: get_key(row, columns))
with open(filename, 'w') as output_fp:
writer = csv.writer(output_fp, quoting=quoting)
for row in rows:
writer.writerow(row)
def get_key(row, columns):
"""Get sort key for this row
"""
return [row[column] for column in columns]
def decorated_csv(filename, columns, quoting):
"""Iterator to sort CSV rows
"""
with open(filename) as fp:
for row in csv.reader(fp, quoting=quoting):
yield get_key(row, columns), row
def mergesort(sorted_filenames, columns, quoting, nway=2):
"""Merge these 2 sorted csv files into a single output file
"""
merge_n = 0
while len(sorted_filenames) > 1:
merge_filenames, sorted_filenames = \
sorted_filenames[:nway], sorted_filenames[nway:]
with tempfile.NamedTemporaryFile(delete=False, mode='w') as output_fp:
writer = csv.writer(output_fp, quoting=quoting)
merge_n += 1
for _, row in heapq.merge(*[decorated_csv(filename, columns, quoting)
for filename in merge_filenames]):
writer.writerow(row)
sorted_filenames.append(output_fp.name)
for filename in merge_filenames:
os.remove(filename)
return sorted_filenames[0]
def main():
parser = OptionParser()
parser.add_option(
'-c',
'--column',
dest='columns',
action='append',
help='column of CSV to sort on')
parser.add_option(
'-s',
'--size',
dest='max_size',
type='float',
default=100,
help='maximum size of each split CSV file in MB (default 100)')
parser.add_option(
'-n',
'--no-header',
dest='has_header',
action='store_false',
default=True,
help='set CSV file has no header')
parser.add_option(
'-d',
'--delimiter',
default=',',
help='set CSV delimiter (default ",")')
args, input_files = parser.parse_args()
if not input_files:
parser.error('What CSV file should be sorted?')
elif not args.columns:
parser.error('Which columns should be sorted on?')
else:
        # interpret backslash escape sequences (e.g. "\t") in the delimiter
        args.delimiter = args.delimiter.encode().decode('unicode_escape')
args.columns = [int(column) if column.isdigit() else column
for column in args.columns]
csvsort(
input_files[0],
columns=args.columns,
max_size=args.max_size,
has_header=args.has_header,
delimiter=args.delimiter)
if __name__ == '__main__':
main()
|
from typing import Callable, Optional, Tuple
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.bend_euler import bend_euler
from gdsfactory.components.grating_coupler_elliptical_trenches import grating_coupler_te
from gdsfactory.components.straight import straight
from gdsfactory.cross_section import strip
from gdsfactory.port import select_ports_optical
from gdsfactory.routing.get_input_labels import get_input_labels
from gdsfactory.routing.route_fiber_array import route_fiber_array
from gdsfactory.routing.sort_ports import sort_ports_x
from gdsfactory.types import ComponentFactory, CrossSectionFactory
@gf.cell_without_validator
def add_fiber_array(
component: Component,
grating_coupler: Component = grating_coupler_te,
straight: ComponentFactory = straight,
bend: ComponentFactory = bend_euler,
gc_port_name: str = "o1",
gc_port_labels: Optional[Tuple[str, ...]] = None,
component_name: Optional[str] = None,
select_ports: Callable = select_ports_optical,
cross_section: CrossSectionFactory = strip,
get_input_labels_function: Optional[Callable] = get_input_labels,
layer_label: Optional[Tuple[int, int]] = (66, 0),
**kwargs,
) -> Component:
"""Returns component with optical IO (tapers, south routes and grating_couplers).
Args:
component: to connect
grating_coupler: grating coupler instance, function or list of functions
        bend: bend factory (defaults to bend_euler)
gc_port_name: grating coupler input port name 'W0'
component_name: for the label
taper: taper function name or dict
get_input_labels_function: function to get input labels for grating couplers
        get_input_label_text_loopback_function: function to get input label text for loopbacks
get_input_label_text_function
straight: straight
fanout_length: None # if None, automatic calculation of fanout length
max_y0_optical: None
with_loopback: True, adds loopback structures
straight_separation: 4.0
list_port_labels: None, adds TM labels to port indices in this list
connected_port_list_ids: None # only for type 0 optical routing
nb_optical_ports_lines: 1
force_manhattan: False
excluded_ports:
grating_indices: None
routing_straight: None
routing_method: get_route
optical_routing_type: None: auto, 0: no extension, 1: standard, 2: check
gc_rotation: -90
layer_label: LAYER.LABEL
input_port_indexes: [0]
.. plot::
:include-source:
import gdsfactory as gf
gf.config.set_plot_options(show_subports=False)
c = gf.components.crossing()
cc = gf.routing.add_fiber_array(
component=c,
optical_routing_type=2,
grating_coupler=gf.components.grating_coupler_elliptical_te,
with_loopback=False
)
cc.plot()
"""
get_input_labels_function = None if gc_port_labels else get_input_labels_function
component = gf.call_if_func(component)
grating_coupler = (
grating_coupler() if callable(grating_coupler) else grating_coupler
)
if not component.ports:
return component
if isinstance(grating_coupler, list):
gc = grating_coupler[0]
else:
gc = grating_coupler
gc = gf.call_if_func(gc)
if gc_port_name not in gc.ports:
raise ValueError(f"gc_port_name={gc_port_name} not in {gc.ports.keys()}")
component_name = component_name or component.get_parent_name()
component_new = Component()
component_new.component = component
optical_ports = select_ports(component.ports)
optical_ports_names = list(optical_ports.keys())
if not optical_ports:
return component
elements, io_gratings_lines, ports = route_fiber_array(
component=component,
grating_coupler=grating_coupler,
bend=bend,
straight=straight,
gc_port_name=gc_port_name,
component_name=component_name,
cross_section=cross_section,
select_ports=select_ports,
get_input_labels_function=get_input_labels_function,
layer_label=layer_label,
**kwargs,
)
if len(elements) == 0:
return component
for e in elements:
component_new.add(e)
for io_gratings in io_gratings_lines:
component_new.add(io_gratings)
component_new.add_ref(component)
for pname, p in component.ports.items():
if p.name not in optical_ports_names:
component_new.add_port(pname, port=p)
ports = sort_ports_x(ports)
if gc_port_labels:
for gc_port_label, port in zip(gc_port_labels, ports):
component_new.add_label(
text=gc_port_label, layer=layer_label, position=port.midpoint
)
for i, io_row in enumerate(io_gratings_lines):
for j, io in enumerate(io_row):
ports = io.get_ports_list(prefix="vertical")
if ports:
port = ports[0]
component_new.add_port(f"{port.name}_{i}{j}", port=port)
component_new.copy_child_info(component)
return component_new
def demo_te_and_tm():
c = gf.Component()
w = gf.components.straight()
wte = add_fiber_array(
component=w, grating_coupler=gf.components.grating_coupler_elliptical_te
)
wtm = add_fiber_array(
component=w, grating_coupler=gf.components.grating_coupler_elliptical_tm
)
c.add_ref(wte)
wtm_ref = c.add_ref(wtm)
wtm_ref.movey(wte.size_info.height)
return c
if __name__ == "__main__":
# test_type0()
gcte = gf.components.grating_coupler_te
gctm = gf.components.grating_coupler_tm
# from pprint import pprint
layer_label = gf.LAYER.TEXT
layer_label = (66, 5)
# cc = demo_tapers()
# cc = test_type1()
# pprint(cc.get_json())
# c = gf.components.coupler(gap=0.2, length=5.6)
# c = gf.components.straight()
# c = gf.components.mmi2x2()
# c = gf.components.ring_single()
# c = gf.components.straight_heater_metal()
c = gf.components.spiral(direction="NORTH")
cc = add_fiber_array(
component=c,
# optical_routing_type=0,
# optical_routing_type=1,
# optical_routing_type=2,
# layer_label=layer_label,
# get_route_factory=route_fiber_single,
# get_route_factory=route_fiber_array,
grating_coupler=[gcte, gctm, gcte, gctm],
auto_widen=True,
# layer=(2, 0),
gc_port_labels=["loop_in", "in", "out", "loop_out"],
)
cc.show()
|
#CONFIGURATION PARAMETERS
#------------------------------
#Mustafa et al (2014) - Structured Mathematical Modeling, Bifurcation, and Simulation for the Bioethanol Fermentation Process Using Zymomonas mobilis. doi:10.1021/ie402361b
##### General parameters #####
#Use SCIPY (True) or Scikit.Odes (False) for IVP solving
SHOULD_USE_SCIPY=False
#Use heuristic methods (True) or local optimization (False) for NLP incidental problem solving
USE_HEURISTIC=True
#Control verbosity of the output
IS_VERBOSE=False
#Set random seed for reproducibility
RANDOM_SEED=12345
#Initial time
T0=0.
#End time
TF=50.
IS_STOCHASTIC=1.
#Control and prediction time interval
DT = 1.
#Number of time intervals
NINT=int((TF-T0)/DT)
#Set-point for product (bioethanol)
PSET=65.
#Lower bound for the manipulated variable (D_in)
DIN_LOWER_BOUND=0.
#Upper bound for the manipulated variable (D_in)
DIN_UPPER_BOUND=0.1
#Number of generations for optimization
N_GEN=500
#Population size for metaheuristic algorithms
POP_SIZE=100
#Yx=1.
#ms=2.16
#k1=16.
##### Model parameters #####
mu_max = 0.23
mu_maxd = 0.22
alpha = 1.74
beta = 2.5
Ks = 20.
phi = 0.2
Kss = 150.
Ksp = 9.5
Kssp = 200.
mp = 1.9
ms = 3.5
Pc = 250.
Pcd = 350.
Yxs = 0.03
Yxp = 0.375
Din = 0.06
S0 = 150.
P0 = 0.
Xnv0 = 0.
Xd0 = 0.
Xv0 = 2.5
#Deviation for uniform sampling
UNIFORM_DEV = .2
YXP_MEAN = 0.375
MP_MEAN = 1.1
MU_MAX_MEAN = 0.23
MU_MAXD_MEAN = 0.22
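#Illustrative sketch (not part of the original configuration): one way the time
#grid and the uniformly sampled uncertain parameters implied by the settings
#above could be built. The helper names (rng, time_grid, sample_uncertain) are
#hypothetical and shown only for clarity.
import numpy as np
rng = np.random.default_rng(RANDOM_SEED)
#Discretize [T0, TF] into NINT control/prediction intervals
time_grid = np.linspace(T0, TF, NINT + 1)
def sample_uncertain(mean, dev=UNIFORM_DEV, size=POP_SIZE):
    #Uniform sampling within +/- dev (20%) of the nominal value
    return rng.uniform(mean * (1 - dev), mean * (1 + dev), size=size)
yxp_samples = sample_uncertain(YXP_MEAN)
mu_max_samples = sample_uncertain(MU_MAX_MEAN)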
|
from __future__ import division, print_function, unicode_literals
path = []
def reindex(*args,**kwargs):
pass
|
from typing import Optional
import discord
import asyncpg
from discord.ext import commands
from .utils.pagination import create_paginated_embed
class Tags(commands.Cog):
"""Productivity's tag system."""
def __init__(self, bot:commands.Bot) -> None:
self.bot = bot
self.emoji = "🏷️ "
async def delete_check(self, ctx:commands.Context, tag_name) -> bool:
query = """
SELECT * FROM tags
WHERE tag_name = $1 AND guild_id = $2;
"""
async with self.bot.db.acquire() as connection:
async with connection.transaction():
fetched = await connection.fetchrow(query, tag_name, ctx.guild.id)
                return fetched is not None and (fetched['user_id'] == ctx.author.id or ctx.author.guild_permissions.manage_messages)
@commands.group(invoke_without_command=True)
@commands.cooldown(1, 5, commands.BucketType.user)
async def tag(self, ctx, *, tag:str):
"""A tag system!"""
async with self.bot.db.acquire() as connection:
async with connection.transaction():
try:
query = """
SELECT * FROM tags
WHERE tag_name = $1 AND guild_id = $2;
"""
tag = await connection.fetchrow(query, tag, ctx.guild.id)
return await ctx.send(tag['tag_content'])
except TypeError:
return await ctx.send("Tag not found.")
@tag.command(description="Create a tag!", aliases=['add'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def create(self, ctx, name, *, content):
try:
query = """
INSERT INTO tags (user_id, guild_id, tag_name, tag_content)
VALUES ($1, $2, $3, $4);
"""
await self.bot.db.execute(query, ctx.author.id, ctx.guild.id, name, content)
await ctx.send("Succesfully created the tag!")
except Exception as e:
await ctx.send(e)
await ctx.send("An error has occurred whilst creating the tag")
@tag.command(description="Start your use of creating tags")
@commands.cooldown(1, 5, commands.BucketType.user)
async def start(self, ctx):
try:
query = """
INSERT INTO tag_users (user_id, username)
VALUES ($1, $2);
"""
await self.bot.db.execute(query, ctx.author.id, ctx.author.name)
await ctx.send("Successfully started your use of our tag system!")
except Exception:
await ctx.send("You are already in our database!")
@tag.command(description="Delete a tag!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def delete(self, ctx, *, tag:str):
check = await self.delete_check(ctx, tag)
if check:
try:
query = """
DELETE FROM tags
WHERE tag_name = $1 AND guild_id = $2;
"""
await self.bot.db.execute(query, tag, ctx.guild.id)
await ctx.send("Successfully deleted tag!")
            except Exception:
                await ctx.send("An error has occurred while attempting to delete the tag.")
else:
await ctx.send("You do not have permission to delete this tag!")
@commands.command(description="Look at all of the tags a member has!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def tags(self, ctx, member:Optional[discord.Member]=None):
member = member or ctx.author
async with self.bot.db.acquire() as connection:
async with connection.transaction():
query = """
SELECT * FROM tags
WHERE user_id = $1 AND guild_id = $2;
"""
tags = await connection.fetch(query, member.id, ctx.guild.id)
paginate = create_paginated_embed(ctx, tags, 'tag_name', f"{member}'s tags", member.avatar_url, member.name)
await paginate.start(ctx)
@tag.command(description="Edit a tag!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def edit(self, ctx, old_tag, new_name, *, new_content):
query = """
UPDATE tags
SET tag_name = $1, tag_content = $2
WHERE user_id = $3 AND tag_name = $4 AND guild_id = $5;
"""
try:
await self.bot.db.execute(query, new_name, new_content, ctx.author.id, old_tag, ctx.guild.id)
return await ctx.send("Successfully edited tag!")
except Exception:
return await ctx.send(
"""
                An error occurred while editing the tag;
                this is likely because you don't own this tag or it doesn't exist.
"""
)
@tag.command(description="View information about a tag!")
@commands.cooldown(1, 5, commands.BucketType.user)
async def info(self, ctx, *, tag:str):
async with self.bot.db.acquire() as connection:
async with connection.transaction():
query = """
SELECT * FROM tags
WHERE guild_id = $1 AND tag_name = $2;
"""
try:
tag_info = await connection.fetchrow(query, ctx.guild.id, tag)
owner = ctx.guild.get_member(tag_info['user_id'])
embed = discord.Embed(title=tag_info['tag_name'])
embed.add_field(name="Owner", value=owner.mention)
embed.set_author(name=owner, icon_url=owner.avatar_url)
return await ctx.send(embed=embed)
except TypeError:
return await ctx.send("Tag not found.")
def setup(bot:commands.Bot):
    bot.add_cog(Tags(bot))
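# Setup sketch (assumption, not part of the original cog): the queries above imply
# roughly the following asyncpg schema and pool wiring. The DSN and extension path
# below are hypothetical placeholders.
#
#   CREATE TABLE tags (
#       user_id     BIGINT,
#       guild_id    BIGINT,
#       tag_name    TEXT,
#       tag_content TEXT
#   );
#   CREATE TABLE tag_users (
#       user_id  BIGINT PRIMARY KEY,
#       username TEXT
#   );
#
#   bot.db = await asyncpg.create_pool(dsn="postgres://user:pass@localhost/productivity")
#   bot.load_extension("cogs.tags")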
import numpy as np
import os
import pickle
class DataLoader(object):
def __init__(self, dataset='bookcorpus', doc_num=16000, save_gap=200, batch_size = 1024):
self.data_names = ['input_ids','token_type_ids','attention_mask','masked_lm_labels','next_sentence_label']
self.data = {'input_ids':[],
'token_type_ids':[],
'attention_mask':[],
'masked_lm_labels':[],
'next_sentence_label':[]}
self.batch_size=batch_size
self.batch_data = {'input_ids':[],
'token_type_ids':[],
'attention_mask':[],
'masked_lm_labels':[],
'next_sentence_label':[]}
self.cur_batch_data = {'input_ids':[],
'token_type_ids':[],
'attention_mask':[],
'masked_lm_labels':[],
'next_sentence_label':[]}
self.load_data(dataset=dataset, doc_num=doc_num, save_gap=save_gap)
def load_data(self, dataset='bookcorpus', doc_num=16000, save_gap=200):
print('Loading preprocessed dataset %s...'%dataset)
data_dir = './preprocessed_data/%s/'%dataset
for i in range(0,doc_num,save_gap):
start, end = i, i+save_gap-1
if end > doc_num-1:
end = doc_num-1
range_name = '_%d_%d.npy'%(start,end)
print(start,end)
for data_name in self.data_names:
#print(data_dir+data_name+range_name)
self.data[data_name].append(np.load(data_dir+data_name+range_name))
for data_name in self.data_names:
self.data[data_name] = np.concatenate(self.data[data_name],axis=0)
self.data_len = self.data['input_ids'].shape[0]
print(self.data['input_ids'].shape)
print('Successfully loaded dataset %s!'%dataset)
def make_epoch_data(self):
for i in range(0, self.data_len, self.batch_size):
start = i
end = start + self.batch_size
if end > self.data_len:
end = self.data_len
if end-start != self.batch_size:
break
for data_name in self.data_names:
self.batch_data[data_name].append(self.data[data_name][start:end])
self.batch_num = len(self.batch_data['input_ids'])
def get_batch(self, idx):
if idx >= self.batch_num:
assert False
for data_name in self.data_names:
self.cur_batch_data[data_name] = self.batch_data[data_name][idx]
return self.cur_batch_data.copy()
def align(self, arr, length):
ori_len = len(arr)
if length > ori_len:
return arr + [0] * (length - ori_len)
else:
return arr[:length]
class DataLoader4Glue(object):
def __init__(self, task_name='sst-2', batch_size = 1024, datatype='train'):
self.data_names = ['input_ids','token_type_ids','attention_mask','label_ids']
self.data = {'input_ids':[],
'token_type_ids':[],
'attention_mask':[],
'label_ids':[]}
self.batch_size=batch_size
self.batch_data = {'input_ids':[],
'token_type_ids':[],
'attention_mask':[],
'label_ids':[]}
self.cur_batch_data = {'input_ids':[],
'token_type_ids':[],
'attention_mask':[],
'label_ids':[]}
self.load_data(task_name=task_name, datatype=datatype)
def load_data(self, task_name='sst-2', datatype='train'):
print('Loading preprocessed dataset %s...'%task_name)
cached_train_features_file = os.path.join('./preprocessed_data/glue','%s_%s_features'%(task_name,datatype),)
try:
with open(cached_train_features_file, "rb") as reader:
self.data = pickle.load(reader)
print("Loaded pre-processed features from {}".format(
cached_train_features_file))
        except Exception:
print("Did not find pre-processed features from {}".format(
cached_train_features_file))
print("Please run process_glue_data.py first!")
assert False
self.data_len = self.data['input_ids'].shape[0]
self.num_labels = np.max(self.data['label_ids'])+1
print(self.data['input_ids'].shape)
print('Successfully loaded GLUE dataset %s for %s!'%(task_name,datatype))
def make_epoch_data(self):
for i in range(0, self.data_len, self.batch_size):
start = i
end = start + self.batch_size
if end > self.data_len:
end = self.data_len
if end-start != self.batch_size:
break
for data_name in self.data_names:
self.batch_data[data_name].append(self.data[data_name][start:end])
self.batch_num = len(self.batch_data['input_ids'])
def get_batch(self, idx):
if idx >= self.batch_num:
assert False
for data_name in self.data_names:
self.cur_batch_data[data_name] = self.batch_data[data_name][idx]
        return self.cur_batch_data.copy()
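# Minimal usage sketch (assumption, not in the original file): how a training loop
# might consume these loaders, provided the preprocessed feature files already
# exist under ./preprocessed_data/.
if __name__ == '__main__':
    loader = DataLoader4Glue(task_name='sst-2', batch_size=32, datatype='train')
    loader.make_epoch_data()
    for idx in range(loader.batch_num):
        batch = loader.get_batch(idx)
        print(idx, batch['input_ids'].shape)  # each value is a (32, seq_len) array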
from my.core import warnings
warnings.medium('my.reading.polar is deprecated! Use my.polar instead!')
from ..polar import *
|
# © 2015-2018, ETH Zurich, Institut für Theoretische Physik
# Author: Dominik Gresch <[email protected]>
import sys
import fsc.export
# This should never appear in any serious code ;)
# To out-manoeuver pickle's caching, and force re-loading phasemap
def test_all_doc():
old_name = "phasemap"
new_name = "hoopy_phasemap"
for key in list(sys.modules.keys()):
# move previous phasemap to hoopy_phasemap
if key.startswith(old_name):
new_key = key.replace(old_name, new_name)
sys.modules[new_key] = sys.modules[key]
del sys.modules[key]
fsc.export.test_doc()
try:
import phasemap # pylint: disable=import-outside-toplevel,unused-import
finally:
# reset to the previous phasemap -- just doing import breaks pickle
for key in list(sys.modules.keys()):
if key.startswith(old_name):
del sys.modules[key]
for key in list(sys.modules.keys()):
if key.startswith(new_name):
new_key = key.replace(new_name, old_name)
sys.modules[new_key] = sys.modules[key]
del sys.modules[key]
|
import numpy as np
import tensorflow as tf
import time
import os
import matplotlib.pyplot as plt
import matplotlib as mpt
import colorsys as cls
import statistics as stat
from sklearn.model_selection import train_test_split
import csv
import pickle
from mmd import rbf_mmd2, median_pairwise_distance, mix_rbf_mmd2_and_ratio
import Synth_data as sd
class RGAN:
def generator(self, z, c=None):
with tf.variable_scope("generator") as scope:
# each step of the generator takes a random seed + the conditional embedding
# repeated_encoding = tf.tile(c, [1, tf.shape(z)[1]])
# repeated_encoding = tf.reshape(repeated_encoding, [tf.shape(z)[0], tf.shape(z)[1],
# cond_dim])
# generator_input = tf.concat([repeated_encoding, z], 2)
cell = tf.contrib.rnn.LSTMCell(num_units=hidden_units_g, state_is_tuple=True)
rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
cell=cell,
dtype=tf.float32,
sequence_length=[seq_length] * batch_size,
inputs=z)
rnn_outputs_2d = tf.reshape(rnn_outputs, [-1, hidden_units_g])
logits_2d = tf.matmul(rnn_outputs_2d, W_out_G) + b_out_G
output_2d = tf.nn.tanh(logits_2d)
output_3d = tf.reshape(output_2d, [-1, seq_length, num_generated_features])
return output_3d
def discriminator(self, x, c=None, reuse=False):
with tf.variable_scope("discriminator") as scope:
# correct?
if reuse:
scope.reuse_variables()
# each step of the generator takes one time step of the signal to evaluate +
# its conditional embedding
# repeated_encoding = tf.tile(c, [1, tf.shape(x)[1]])
# repeated_encoding = tf.reshape(repeated_encoding, [tf.shape(x)[0], tf.shape(x)[1],
# cond_dim])
# decoder_input = tf.concat([repeated_encoding, x], 2)
cell = tf.contrib.rnn.LSTMCell(num_units=self.hidden_units_d, state_is_tuple=True,
reuse=tf.get_variable_scope().reuse)
rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
cell=cell,
dtype=tf.float32,
inputs=x)
rnn_outputs_flat = tf.reshape(rnn_outputs, [-1, self.hidden_units_g])
logits = tf.matmul(rnn_outputs_flat, W_out_D) + b_out_D
# logits = tf.einsum('ijk,km', rnn_outputs, W_out_D) + b_out_D
output = tf.nn.sigmoid(logits)
return output, logits
# Latent Space Sampler
def sample_Z(self, batch_size, seq_length, latent_dim, use_time=False, use_noisy_time=False):
sample = np.float32(np.random.normal(size=[batch_size, seq_length, latent_dim]))
if use_time:
print('WARNING: use_time has different semantics')
sample[:, :, 0] = np.linspace(0, 1.0 / seq_length, num=seq_length)
return sample
def train_generator(self, batch_idx, offset):
# update the generator
for g in range(G_rounds):
_, G_loss_curr = self.sess.run([G_solver, G_loss],
                                           feed_dict={self.Z: self.sample_Z(self.batch_size, self.seq_length, self.latent_dim)})
return G_loss_curr
def train_discriminator(self, batch_idx, offset):
# update the discriminator
for d in range(D_rounds):
# using same input sequence for both the synthetic data and the real one,
# probably it is not a good idea...
X_mb = self.get_batch(train_seqs, batch_idx + d + offset, batch_size)
_, D_loss_curr = self.sess.run([D_solver, D_loss],
feed_dict={self.X: X_mb,
self.Z: self.sample_Z(batch_size, seq_length, latent_dim, use_time=use_time)})
return D_loss_curr
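    def get_batch(self, data, batch_idx, batch_size):
        # Assumed helper: get_batch is referenced above and in fit() but is not
        # defined anywhere in this file; a plain slicing version is sketched here.
        return data[batch_idx * batch_size:(batch_idx + 1) * batch_size]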
def __init__(self):
        self.lr = 0.1
        # store the sizes on the instance so the other methods can reach them
        self.batch_size = batch_size = 30
        self.seq_length = seq_length = 100
        self.num_generated_features = num_generated_features = 1
        self.hidden_units_d = 100
        self.hidden_units_g = 100
        self.latent_dim = latent_dim = 10  # dimension of the random latent space
        # cond_dim = train_targets.shape[1] # dimension of the condition
        self.Z = Z = tf.placeholder(tf.float32, [batch_size, seq_length, latent_dim])
        W_out_G = tf.Variable(tf.truncated_normal([self.hidden_units_g, num_generated_features]))
        b_out_G = tf.Variable(tf.truncated_normal([num_generated_features]))
        self.X = X = tf.placeholder(tf.float32, [batch_size, seq_length, num_generated_features])
        W_out_D = tf.Variable(tf.truncated_normal([self.hidden_units_d, 1]))
        b_out_D = tf.Variable(tf.truncated_normal([1]))
        self.G_sample = G_sample = self.generator(Z)
D_real, D_logit_real = self.discriminator(X)
D_fake, D_logit_fake = self.discriminator(G_sample, reuse=True)
generator_vars = [v for v in tf.trainable_variables() if v.name.startswith('generator')]
discriminator_vars = [v for v in tf.trainable_variables() if v.name.startswith('discriminator')]
D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real,
labels=tf.ones_like(D_logit_real)))
D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake,
labels=tf.zeros_like(D_logit_fake)))
D_loss = D_loss_real + D_loss_fake
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake,
labels=tf.ones_like(D_logit_fake)))
D_solver = tf.train.GradientDescentOptimizer(learning_rate=self.lr).minimize(D_loss, var_list=discriminator_vars)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=generator_vars)
self.sess = tf.Session()
self.saver = tf.train.Saver()
self.init_op = tf.global_variables_initializer()
self.sess.run(self.init_op)
def fit(self, X, a):
if not os.path.isdir(experiment_id):
os.mkdir(experiment_id)
X_mb_vis = self.get_batch(train_seqs, 0, batch_size)
# plot the ouput from the same seed
        vis_z = self.sample_Z(self.batch_size, self.seq_length, self.latent_dim)
vis_sample = self.sess.run(G_sample, feed_dict={Z: vis_z})
# Denormalizing vis_sample
# vis_sample = ReadInput.denormalize(vis_sample)
sd.save_plot_sample(vis_sample[0:7], '0', 'first_sample_data', path=experiment_id)
# visualise some real samples
vis_real = np.float32(vali_seqs[np.random.choice(len(vali_seqs), size=batch_size), :, :])
# Denormalizing vis_real
# vis_real = ReadInput.denormalize(vis_real)
        sd.save_plot_sample(vis_real[0:7], '0', 'real_data', path=experiment_id)
# trace = open('./experiments/traces/' + identifier + '.trace.csv', 'w')
# fields_names = ['D_loss', 'G_loss']
# writer = csv.DictWriter(trace, fieldnames= fields_names)
# trace.write('epoch D_loss G_loss time\n')
print('epoch\tD_loss\tG_loss\ttime\n')
samples, peak_times, mean_peak_times, magnitude_peaks, mean_magnitudes = sd.continuous_input(case=2, tipe='periodic')
sd.save_plot_sample(samples[0:7], '0', 'first_sample_data', path='test', show=True)
train_seqs, vali_test = train_test_split(samples, test_size=0.4)
vali_seqs, test_seqs = train_test_split(vali_test, test_size=0.6)
print("data loaded.")
# training config
num_epochs = 12
D_rounds = 1 # number of rounds of discriminator training
G_rounds = 3 # number of rounds of generator training
use_time = False # use one latent dimension as time
experiment_id = 'RGAN'
        identifier = experiment_id  # a string label is assumed here; the original assigned the builtin id
num_epochs = 200
d_costs = []
g_costs = []
t0 = time.time()
for num_epoch in range(num_epochs):
# we use D_rounds + G_rounds batches in each iteration
for batch_idx in range(0, int(len(train_seqs) / self.batch_size) - (D_rounds + G_rounds), D_rounds + G_rounds):
# we should shuffle the data instead
if num_epoch % 2 == 0:
G_loss_curr = self.train_generator(batch_idx, 0)
D_loss_curr = self.train_discriminator(batch_idx, G_rounds)
else:
D_loss_curr = self.train_discriminator(batch_idx, 0)
G_loss_curr = self.train_generator(batch_idx, D_rounds)
d_costs.append(D_loss_curr)
g_costs.append(G_loss_curr)
# plt.clf()
# plt.plot(d_costs, label='discriminator cost')
# plt.plot(g_costs, label='generator cost')
# plt.legend()
# plt.savefig(experiment_id + '/cost_vs_iteration.png')
t = time.time() - t0
print(num_epoch, '\t', D_loss_curr, '\t', G_loss_curr, '\t', t)
# record/visualise
# writer.writerow({'D_loss': D_loss_curr, 'G_loss': G_loss_curr})
# trace.flush()
# if num_epoch % 10 == 0:
# trace.flush()
vis_sample = self.sess.run(self.G_sample, feed_dict={self.Z: vis_z})
sd.save_plot_sample(vis_sample[0:7], '_' + '_generated' + "_epoch" + str(num_epoch).zfill(4),
identifier, path=experiment_id)
# plotting.vis_sine_waves(vis_sample, seq_length, identifier=identifier, idx=num_epoch + 1)
return None
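# Training-schedule note (added for clarity): each outer iteration of fit() consumes
# D_rounds + G_rounds mini-batches; on even epochs the generator is updated first
# ([G, G, G, D] with the defaults above), on odd epochs the discriminator goes first
# ([D, G, G, G]).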
|
import datetime
from receptor.serde import dumps, loads
def test_date_serde():
o = {"now": datetime.datetime.utcnow()}
serialized = dumps(o)
deserialized = loads(serialized)
assert deserialized == o
|
# -*- coding: utf-8 -*-
from cryptography.fernet import Fernet
from pathlib import Path
REGION = 'EO'
PLATFORM = 'CUCM'
ROLE = 'rwx'
PATH = Path(r'C:\shared\API\credentials')
server = PATH / REGION / PLATFORM / ('fqdn' + '.txt')
def file(role):
username = PATH / REGION / PLATFORM / ('user_' + role + '.txt')
keyhash = PATH / REGION / PLATFORM / ('key_' + role + '.txt')
hash = PATH / REGION / PLATFORM / ('hash_' + role + '.txt')
return username, keyhash, hash
def crypto(keyhash, hash):
with open(keyhash, 'rb') as file_key:
for line_key in file_key:
key = line_key
cipher_suite = Fernet(key)
with open(hash, 'rb') as file_hash:
for line_hash in file_hash:
encryptedpwd = line_hash
uncipher_text = (cipher_suite.decrypt(encryptedpwd))
pwd = bytes(uncipher_text).decode("utf-8")
return pwd
def read(file):
datalist = []
for line in open(file):
data = line.strip('\n')
datalist.append(data)
return datalist
def main():
print('username:', read(file(ROLE)[0])[0])
print('password:', crypto(file(ROLE)[1], file(ROLE)[2]))
print('server:', read(server)[0])
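# Companion sketch (assumption, not part of the original script): how the
# key_<role>.txt and hash_<role>.txt files read above could have been produced.
# The plaintext argument is a placeholder.
def write_credentials(role, plaintext_password):
    username_file, keyhash, hashfile = file(role)
    key = Fernet.generate_key()
    token = Fernet(key).encrypt(plaintext_password.encode('utf-8'))
    keyhash.write_bytes(key)
    hashfile.write_bytes(token)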
if __name__ == '__main__':
main()
|
class PcPointer(object):
NEXT_ADDR = 0
STOP = -1
JUMP = 1
JUMPI = 2
def __init__(self, status, addr=None, cond=None):
self.status = status
self.addr = addr
self.condition = cond
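# Usage sketch (assumption, not from the original file): how an interpreter's main
# loop might consume a PcPointer returned by an instruction handler; advance() is
# a hypothetical helper.
def advance(pc, pointer):
    if pointer.status == PcPointer.STOP:
        return None                                   # halt execution
    if pointer.status == PcPointer.JUMP:
        return pointer.addr                           # unconditional jump
    if pointer.status == PcPointer.JUMPI:
        return pointer.addr if pointer.condition else pc + 1
    return pc + 1                                     # NEXT_ADDR: fall through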
|
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill, intent_handler, FallbackSkill
from mycroft.util.log import LOG
from mycroft.audio import wait_while_speaking
import feedparser
import hashlib
import datetime
__author__ = 'BreziCode'
MONTHS = {
'Jan': "01",
'Feb': "02",
'Mar': "03",
'Apr': "04",
'May': "05",
'Jun': "06",
'Jul': "07",
'Aug': "08",
'Sep': "09",
'Oct': "10",
'Nov': "11",
'Dec': "12"
}
class MyEpisodes(MycroftSkill):
def __init__(self):
super(MyEpisodes, self).__init__(name="MyEpisodes")
self.unacquired = {}
self.unwatched = {}
self.shows = {}
def initialize(self):
if "useWatched" not in self.settings:
self.settings["useWatched"] = False
@intent_handler(IntentBuilder("query")
.require("check")
.require("episodes"))
def handle_query_intent(self, message):
if not self.isConfigured():
return
self.speak_dialog("querying")
self.updateUnacquired()
if self.settings.get("useWatched"):
self.updateUnwatched()
type = "unacquired"
if self.unacquired['totalCnt'] == 0:
self.speak_dialog('noNewEpisodes', data={'type': type})
return
if self.unacquired['airingTodayCnt'] > 0:
self.speak_dialog('unacquiredEpisodesWithAiringToday', data={
'total': self.unacquired['totalCnt'], 'plural': 's' if self.unacquired['totalCnt'] > 1 else '', 'airingToday': self.unacquired['airingTodayCnt']})
else:
self.speak_dialog('unacquiredEpisodes', data={
'total': self.unacquired['totalCnt'], 'plural': 's' if self.unacquired['totalCnt'] > 1 else ''})
self.speakEpisodesDetails(self.unacquired['episodes2speak'])
wait_while_speaking()
if self.settings.get("useWatched") and self.unwatched['totalCnt'] > 0:
self.speak_dialog("unwatchedEpisodes", data={
'total': self.unwatched['totalCnt'], 'plural': 's' if self.unwatched['totalCnt'] > 1 else '', 'airingToday': self.unacquired['airingTodayCnt']})
def stop(self):
return True
def speakEpisodesDetails(self, eps):
if self.ask_yesno("details") == 'yes':
self.speak(''.join(eps))
else:
self.speak_dialog('ok')
def processFeed(self, feed):
episodes = {}
tmp_episodes = {}
totalCnt = 0
airingTodayCnt = 0
if len(feed.entries) > 0 and 'guid' in feed.entries[0]:
for entry in feed.entries:
epMeta = {}
if 'guid' not in entry:
self.log.error("Error parsing episode ")
self.log.error(entry)
break
epGuidArr = entry.guid.split('-')
if(len(epGuidArr) != 3):
self.log.error("Error parsing episode "+entry.guid)
continue
showId = epGuidArr[0]
season = int(epGuidArr[1])
episode = int(epGuidArr[2])
epMeta['episode'] = episode
# episodeId = entry.guid
epTitleArray = entry.title.split('][')
if(len(epTitleArray) != 4):
self.log.error("Could not get show and episode titles")
continue
else:
showName = epTitleArray[0].replace('[', '').strip()
                    if showId not in self.shows:
self.shows[showId] = showName
epMeta['epTitle'] = epTitleArray[2].strip()
airDate = epTitleArray[3].replace(
']', '').strip().split('-')
airDate[1] = MONTHS[airDate[1]]
epMeta['epAirDate'] = '-'.join(airDate)
epMeta['epAirDate'] = datetime.datetime.strptime(
epMeta['epAirDate'], "%d-%m-%Y").date()
if epMeta['epAirDate'] == datetime.datetime.now().date():
airingTodayCnt = airingTodayCnt + 1
epMeta['airingToday'] = True
else:
epMeta['airingToday'] = False
if showId not in episodes:
episodes[showId] = {}
tmp_episodes[showId] = {}
if season not in episodes[showId]:
episodes[showId][season] = {}
tmp_episodes[showId][season] = []
if episode not in episodes[showId][season]:
episodes[showId][season][episode] = epMeta
tmp_episodes[showId][season].append(episode)
totalCnt = totalCnt + 1
else:
self.log.debug('No episodes in feed')
self.log.debug(feed)
episodes2speak = []
if totalCnt > 0:
for showId in tmp_episodes:
episodes2speak.append("%s " % self.shows[showId])
for season in tmp_episodes[showId]:
episodes2speak.append("season %s, " % season)
season = tmp_episodes[showId][season]
season.sort()
startEp = season[0]
i = 1
endEp = startEp
seq = []
while i < len(season):
if season[i] == (endEp + 1):
endEp = season[i]
else:
seq.append(self._speakEpRange(startEp,endEp))
startEp = season[i]
endEp = startEp
i = i + 1
seq.append(self._speakEpRange(startEp,endEp))
if len(seq) == 1:
episodes2speak.append(seq[0])
else:
cnt = 0
for sq in seq:
if cnt > 0 :
if cnt < len(seq)-1:
sq = ", %s" % sq
else:
sq = " and %s " % sq
cnt = cnt + 1
episodes2speak.append(sq)
episodes2speak.append(', ')
return {
'episodes': episodes,
'episodes2speak': episodes2speak,
'totalCnt': totalCnt,
'airingTodayCnt': airingTodayCnt,
'updatedAt': datetime.datetime.now().date()
}
def _speakEpRange(self, minEp, maxEp):
if minEp == maxEp:
return "episode %s" % minEp
elif maxEp == (minEp + 1):
return "episodes %s and %s" % (minEp, maxEp)
else:
return "episodes %s through %s" % (minEp, maxEp)
def updateUnacquired(self):
self.log.debug("Updating unacquired episodes list")
if not self.isConfigured():
return False
feed = self.getFeed("unacquired")
if feed:
self.log.debug("Got %s items from unacquired feed" %
(len(feed.entries)))
self.unacquired = self.processFeed(feed)
def updateUnwatched(self):
self.log.debug("Updating unwatched episodes list")
if not self.isConfigured():
return False
feed = self.getFeed("unwatched")
if feed:
self.log.debug("Got %s items from unwatched feed" %
(len(feed.entries)))
self.unwatched = self.processFeed(feed)
def getFeed(self, type):
self.log.debug("Requesting feed")
if not self.isConfigured():
return False
user = self.settings.get("username")
pwHash = hashlib.md5(self.settings.get(
"password").encode()).hexdigest()
feedURL = "http://www.myepisodes.com/rss.php?feed=" + \
type+"&uid=" + user+"&pwdmd5="+pwHash+"&showignored=0"
self.log.debug("Using feed URL: %s" % (feedURL))
feed = feedparser.parse(feedURL)
        if feed.status != 200:
            self.log.error(
                "Error getting RSS feed. Reply HTTP code: %s" % feed.status)
self.speak_dialog('errorHTTPCode')
elif feed.bozo:
self.log.error("Error parsing RSS feed.")
if hasattr(feed, 'bozo_exception'):
self.log.exception(feed.bozo_exception)
self.speak_dialog('errorParseFeed')
else:
return feed
def isConfigured(self):
if 'username' not in self.settings or 'password'not in self.settings:
self.log.error("Skill not configured")
self.speak_dialog("notSetUp")
return False
return True
def create_skill():
return MyEpisodes()
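# Feed format assumed by processFeed() above (reconstructed from the parsing logic,
# not from MyEpisodes documentation): each RSS entry is expected to carry
#   entry.guid  == "<showId>-<season>-<episode>"                e.g. "12345-2-7"
#   entry.title == "[ Show ][ ... ][ Ep title ][ DD-Mon-YYYY ]"  e.g. "[ My Show ][ 02x07 ][ Pilot ][ 05-Feb-2024 ]"
# which yields showId "12345", season 2, episode 7 and an air date of 2024-02-05.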
|
#############################################################################
# RADIA Python Example #1: Magnetic field created by rectangular parallelepiped with constant magnetization over volume
# v 0.02
#############################################################################
from __future__ import print_function #Python 2.7 compatibility
import radia as rad
help(rad.ObjRecMag)
help(rad.ObjThckPgn)
help(rad.ObjPolyhdr)
help(rad.ObjMltExtPgn)
help(rad.ObjMltExtRtg)
help(rad.ObjMltExtTri)
help(rad.ObjArcPgnMag)
help(rad.ObjCylMag)
help(rad.ObjFullMag)
help(rad.ObjRecCur)
help(rad.ObjArcCur)
help(rad.ObjRaceTrk)
help(rad.ObjFlmCur)
help(rad.ObjBckg)
help(rad.ObjCnt)
help(rad.ObjAddToCnt)
help(rad.ObjCntStuf)
help(rad.ObjCntSize)
help(rad.ObjCutMag)
help(rad.ObjDivMag)
help(rad.ObjDivMagPln)
help(rad.ObjDivMagCyl)
help(rad.ObjDpl)
help(rad.ObjGeoVol)
help(rad.ObjGeoLim)
help(rad.ObjDegFre)
help(rad.ObjM)
help(rad.ObjCenFld)
help(rad.ObjSetM)
help(rad.ObjScaleCur)
help(rad.ObjDrwAtr)
help(rad.ObjDrwOpenGL)
help(rad.TrfTrsl)
help(rad.TrfRot)
help(rad.TrfPlSym)
help(rad.TrfInv)
help(rad.TrfCmbL)
help(rad.TrfCmbR)
help(rad.TrfMlt)
help(rad.TrfOrnt)
help(rad.TrfZerPara)
help(rad.TrfZerPerp)
help(rad.MatStd)
help(rad.MatLin)
help(rad.MatSatIsoFrm)
help(rad.MatSatIsoTab)
help(rad.MatSatLamFrm)
help(rad.MatSatLamTab)
help(rad.MatSatAniso)
help(rad.MatApl)
help(rad.MatMvsH)
help(rad.RlxPre)
help(rad.RlxMan)
help(rad.RlxAuto)
help(rad.RlxUpdSrc)
help(rad.Solve)
help(rad.Fld)
help(rad.FldLst)
help(rad.FldInt)
help(rad.FldPtcTrj)
help(rad.FldEnr)
help(rad.FldEnrFrc)
help(rad.FldEnrTrq)
help(rad.FldFrc)
help(rad.FldFrcShpRtg)
help(rad.FldFocPot)
help(rad.FldFocKickPer)
help(rad.FldCmpCrt)
help(rad.FldCmpPrc)
help(rad.FldUnits)
help(rad.FldLenRndSw)
help(rad.FldLenTol)
help(rad.FldShimSig)
help(rad.UtiDmp)
help(rad.UtiDmpPrs)
help(rad.UtiDel)
help(rad.UtiDelAll)
help(rad.UtiVer)
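# Minimal example sketch matching the header comment (typical Radia usage is assumed
# here; it is not part of the help() listing above): field of a uniformly magnetized
# rectangular parallelepiped evaluated at one observation point.
mag = rad.ObjRecMag([0, 0, 0], [10, 20, 30], [0, 0, 1])  # center [mm], sizes [mm], magnetization [T]
B = rad.Fld(mag, 'b', [5, 5, 25])                        # B vector [T] at the observation point [mm]
print('B =', B)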
|
#
# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime, timedelta
from django.db.models import Q, F, Case, When, Count, BooleanField
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from bridge.vars import USER_ROLES, PRIORITY, SAFE_VERDICTS, UNSAFE_VERDICTS, ASSOCIATION_TYPE
from bridge.utils import get_templated_text
from jobs.models import Job, JobHistory, UserRole
from marks.models import ReportSafeTag, ReportUnsafeTag
from reports.models import ReportRoot, ReportComponent, ReportSafe, ReportUnsafe, ReportUnknown, ComponentResource
from jobs.utils import SAFES, UNSAFES, TITLES, get_resource_data, JobAccess, get_user_time
from service.models import SolvingProgress
from service.utils import GetJobsProgresses
DATE_COLUMNS = {
'date', 'tasks:start_ts', 'tasks:finish_ts', 'subjobs:start_sj', 'subjobs:finish_sj', 'start_date', 'finish_date'
}
TASKS_COLUMNS = [
'tasks', 'tasks:pending', 'tasks:processing', 'tasks:finished', 'tasks:error', 'tasks:cancelled',
'tasks:total', 'tasks:solutions', 'tasks:total_ts', 'tasks:start_ts', 'tasks:finish_ts',
'tasks:progress_ts', 'tasks:expected_time_ts'
]
SUBJOBS_COLUMNS = [
'subjobs', 'subjobs:total_sj', 'subjobs:start_sj', 'subjobs:finish_sj',
'subjobs:progress_sj', 'subjobs:expected_time_sj'
]
class Header:
def __init__(self, columns, titles):
self.columns = columns
self.titles = titles
self._max_depth = self.__max_depth()
def head_struct(self):
col_data = []
for d in range(1, self._max_depth + 1):
col_data.append(self.__cellspan_level(d))
# For checkboxes
col_data[0].insert(0, {'column': '', 'rows': self._max_depth, 'columns': 1, 'title': ''})
return col_data
def __max_depth(self):
max_depth = 0
if len(self.columns):
max_depth = 1
for col in self.columns:
depth = len(col.split(':'))
if depth > max_depth:
max_depth = depth
return max_depth
def __title(self, column):
if column in self.titles:
return self.titles[column]
return column
def __cellspan_level(self, lvl):
# Get first lvl identifiers of all table columns.
# Example: 'a:b:c:d:e' (lvl=3) -> 'a:b:c'
# Example: 'a:b' (lvl=3) -> ''
        # Then collect each single identifier and its count, skipping ''.
# Example: [a, a, a, b, '', c, c, c, c, '', '', c, d, d] ->
# [(a, 3), (b, 1), (c, 4), (c, 1), (d, 2)]
columns_of_lvl = []
prev_col = ''
cnt = 0
for col in self.columns:
col_start = ''
col_parts = col.split(':')
if len(col_parts) >= lvl:
col_start = ':'.join(col_parts[:lvl])
if col_start == prev_col:
cnt += 1
else:
if prev_col != '':
columns_of_lvl.append([prev_col, cnt])
cnt = 1
else:
if prev_col != '':
columns_of_lvl.append([prev_col, cnt])
cnt = 0
prev_col = col_start
if len(prev_col) > 0 and cnt > 0:
columns_of_lvl.append([prev_col, cnt])
# Collecting data of cell span for columns.
columns_data = []
for col in columns_of_lvl:
nrows = self._max_depth - lvl + 1
for column in self.columns:
if column.startswith(col[0] + ':') and col[0] != column:
nrows = 1
break
columns_data.append({'column': col[0], 'rows': nrows, 'columns': col[1], 'title': self.__title(col[0])})
return columns_data
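# Usage sketch for Header (illustration only; the column ids and titles below are
# hypothetical):
#   Header(['tasks:pending', 'tasks:total', 'date'],
#          {'tasks': 'Tasks', 'tasks:pending': 'Pending',
#           'tasks:total': 'Total', 'date': 'Date'}).head_struct()
# returns two header rows: the first spans 'Tasks' over two columns and 'Date' over
# both rows (plus the leading checkbox cell), the second row holds the 'Pending'
# and 'Total' leaf cells.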
class TableTree:
no_mark = _('Without marks')
total = _('Total')
all_columns = ['role', 'author', 'date', 'status', 'unsafe'] + \
list("unsafe:{0}".format(u) for u in UNSAFES) + \
['safe'] + list("safe:{0}".format(s) for s in SAFES) + \
TASKS_COLUMNS + SUBJOBS_COLUMNS + \
['problem', 'problem:total', 'resource', 'tag', 'tag:safe', 'tag:unsafe', 'identifier', 'format',
'version', 'parent_id', 'priority', 'start_date', 'finish_date', 'solution_wall_time', 'operator']
def __init__(self, user, view):
self._user = user
self.view = view
# Columns for view
self.selected_columns = self.__selected()
self.available_columns = self.__available()
# Get jobs tree to visualise (just structure) and set of accessed jobs
self._tree, self._job_ids, self._roots = self.__get_jobs_tree()
self._core = self.__get_core_reports()
# Some titles are collected during __get_columns()
self._titles = TITLES
# Should be after we get the tree because columns depends on what jobs are in the tree
self._columns = self.__get_columns()
self.header = Header(self._columns, self._titles).head_struct()
# Collecting data for tables cells
self._values_data = {}
self.values = self.__get_values()
# Table footer data
self.footer_title_length, self.footer = self.__get_footer()
def __column_title(self, column):
col_parts = column.split(':')
column_starts = []
for i in range(0, len(col_parts)):
column_starts.append(':'.join(col_parts[:(i + 1)]))
titles = []
for col_st in column_starts:
titles.append(TITLES[col_st])
concated_title = titles[0]
for i in range(1, len(titles)):
concated_title = '{0}/{1}'.format(concated_title, titles[i])
return concated_title
def __selected(self):
return list({'value': col, 'title': self.__column_title(col)}
for col in self.view['columns'] if col in self.all_columns)
def __available(self):
return list({'value': col, 'title': self.__column_title(col)} for col in self.all_columns)
def __view_filters(self):
filters = {}
unfilters = {}
if 'title' in self.view:
filters['name__' + self.view['title'][0]] = self.view['title'][1]
if 'change_author' in self.view:
if self.view['change_author'][0] == 'is':
filters['change_author__id'] = int(self.view['change_author'][1])
else:
unfilters['change_author__id'] = int(self.view['change_author'][1])
if 'change_date' in self.view:
limit_time = now() - timedelta(**{self.view['change_date'][2]: int(self.view['change_date'][1])})
if self.view['change_date'][0] == 'older':
filters['change_date__lt'] = limit_time
elif self.view['change_date'][0] == 'younger':
filters['change_date__gt'] = limit_time
if 'status' in self.view:
filters['status__in'] = self.view['status']
if 'format' in self.view:
if self.view['format'][0] == 'is':
filters['format'] = int(self.view['format'][1])
elif self.view['format'][0] == 'isnot':
unfilters['format'] = int(self.view['format'][1])
if 'priority' in self.view:
if self.view['priority'][0] == 'e':
filters['solvingprogress__priority'] = self.view['priority'][1]
elif self.view['priority'][0] == 'me':
priorities = []
for pr in PRIORITY:
priorities.append(pr[0])
if pr[0] == self.view['priority'][1]:
filters['solvingprogress__priority__in'] = priorities
break
elif self.view['priority'][0] == 'le':
priorities = []
for pr in reversed(PRIORITY):
priorities.append(pr[0])
if pr[0] == self.view['priority'][1]:
filters['solvingprogress__priority__in'] = priorities
break
if 'finish_date' in self.view:
filters['solvingprogress__finish_date__month__' + self.view['finish_date'][0]] = \
int(self.view['finish_date'][1])
filters['solvingprogress__finish_date__year__' + self.view['finish_date'][0]] = \
int(self.view['finish_date'][2])
if 'weight' in self.view:
filters['weight__in'] = self.view['weight']
return filters, unfilters
def __get_jobs_tree(self):
# Job order parameter
jobs_order = 'id'
if 'order' in self.view and len(self.view['order']) == 2:
if self.view['order'][1] == 'title':
jobs_order = 'name'
elif self.view['order'][1] == 'date':
jobs_order = 'change_date'
elif self.view['order'][1] == 'start':
jobs_order = 'solvingprogress__start_date'
elif self.view['order'][1] == 'finish':
jobs_order = 'solvingprogress__finish_date'
if self.view['order'][0] == 'up':
jobs_order = '-' + jobs_order
# Jobs tree structure
tree_struct = {}
for job in Job.objects.only('id', 'parent_id'):
tree_struct[job.id] = job.parent_id
# Filters for jobs
filters, unfilters = self.__view_filters()
filters = Q(**filters)
for unf_v in unfilters:
filters &= ~Q(**{unf_v: unfilters[unf_v]})
# Jobs' ids with view access
accessed = JobAccess(self._user).can_view_jobs(filters)
# Add parents without access to show the tree structure
jobs_in_tree = set(accessed)
for j_id in accessed:
parent = tree_struct[j_id]
while parent is not None:
jobs_in_tree.add(parent)
parent = tree_struct[parent]
# Get ordered list of jobs
jobs_list = list(j.id for j in Job.objects.filter(id__in=jobs_in_tree).order_by(jobs_order).only('id'))
# Function collects children tree for specified job id (p_id)
def get_job_children(p_id):
children = []
for oj_id in jobs_list:
if tree_struct[oj_id] == p_id:
children.append({'id': oj_id, 'parent': p_id})
children.extend(get_job_children(oj_id))
return children
        # Get roots' ids for DB request optimization
roots = dict((r_id, j_id) for r_id, j_id in ReportRoot.objects.filter(job_id__in=accessed)
.values_list('id', 'job_id'))
return get_job_children(None), accessed, roots
def __get_core_reports(self):
cores = {}
for report_id, root_id in ReportComponent.objects.filter(root_id__in=self._roots, parent=None)\
.values_list('id', 'root_id'):
cores[self._roots[root_id]] = report_id
return cores
def __get_columns(self):
columns = ['name']
extend_action = {
'safe': lambda: ['safe:' + postfix for postfix in SAFES],
'unsafe': lambda: ['unsafe:' + postfix for postfix in UNSAFES],
'tag': lambda: self.__safe_tags_columns() + self.__unsafe_tags_columns(),
'tag:safe': self.__safe_tags_columns,
'tag:unsafe': self.__unsafe_tags_columns,
'resource': self.__resource_columns,
'problem': self.__unknowns_columns,
'tasks': lambda: TASKS_COLUMNS[1:],
'subjobs': lambda: SUBJOBS_COLUMNS[1:]
}
for col in self.view['columns']:
if col in self.all_columns:
if col in extend_action:
columns.extend(extend_action[col]())
else:
columns.append(col)
return columns
def __tags_columns(self, tags_model, tags_type):
tags_data = {}
for tag in tags_model.objects.filter(report__root_id__in=self._roots, report__parent=None) \
.values('tag__tag', 'tag_id'):
tag_id = 'tag:{0}:tag_{1}'.format(tags_type, tag['tag_id'])
if tag_id not in tags_data:
tags_data[tag_id] = tag['tag__tag']
self._titles[tag_id] = tag['tag__tag']
return list(sorted(tags_data, key=tags_data.get))
def __safe_tags_columns(self):
return self.__tags_columns(ReportSafeTag, 'safe')
def __unsafe_tags_columns(self):
return self.__tags_columns(ReportUnsafeTag, 'unsafe')
def __resource_columns(self):
# Get filters
filters = {'report__root_id__in': self._roots}
if 'resource_component' in self.view:
filters['component__name__' + self.view['resource_component'][0]] = self.view['resource_component'][1]
# Get resource columns and fill its titles (components' names)
resource_columns = []
for c_id, c_name in ComponentResource.objects.filter(**filters).exclude(component=None)\
.values_list('component_id', 'component__name').distinct().order_by('component__name'):
column = 'resource:component_{0}'.format(c_id)
self._titles[column] = c_name
resource_columns.append(column)
resource_columns.append('resource:total')
return resource_columns
def __unknowns_columns(self):
# Get queryset for unknowns
queryset = ReportUnknown.objects.filter(root_id__in=self._roots)
if 'problem_component' in self.view:
queryset = queryset.filter(**{
'component__name__' + self.view['problem_component'][0]: self.view['problem_component'][1]
})
# Is unknown mark unconfirmed
unconfirmed = Case(When(markreport_set__type=ASSOCIATION_TYPE[2][0], then=True),
default=False, output_field=BooleanField())
queryset = queryset.values('component_id').annotate(unconfirmed=unconfirmed)\
.values_list('markreport_set__problem_id', 'markreport_set__problem__name',
'component_id', 'component__name', 'unconfirmed')
if 'problem_problem' in self.view:
queryset = queryset.filter(**{
'markreport_set__problem__name__' + self.view['problem_problem'][0]: self.view['problem_problem'][1]
})
queryset = queryset.distinct().order_by('component__name', 'markreport_set__problem__name')
columns = []
prev_col_c_id = None # Previous component
        has_unmarked = False  # Does component "prev_col_c_id" have unmarked unknowns
for p_id, p_name, c_id, c_name, unconfirmed in queryset:
# Add unmarked column (if there are unmarked unknowns)
# and total column for previous component
if prev_col_c_id is not None and prev_col_c_id != c_id:
if has_unmarked:
unmarked_column = 'problem:pr_component_{0}:no_mark'.format(prev_col_c_id)
columns.append(unmarked_column)
self._titles[unmarked_column] = self.no_mark
has_unmarked = False
total_column = 'problem:pr_component_{0}:total'.format(prev_col_c_id)
columns.append(total_column)
self._titles[total_column] = self.total
prev_col_c_id = c_id
if p_id is None or unconfirmed:
# We will add unmarked column at the end together with total
has_unmarked = True
else:
column = 'problem:pr_component_{0}:problem_{1}'.format(c_id, p_id)
self._titles[column] = p_name
columns.append(column)
self._titles['problem:pr_component_{0}'.format(c_id)] = c_name
if prev_col_c_id is not None:
if has_unmarked:
unmarked_column = 'problem:pr_component_{0}:no_mark'.format(prev_col_c_id)
columns.append(unmarked_column)
self._titles[unmarked_column] = self.no_mark
total_column = 'problem:pr_component_{0}:total'.format(prev_col_c_id)
columns.append(total_column)
self._titles[total_column] = self.total
columns.append('problem:total')
return columns
def __get_values(self):
self.__init_values_data()
self.__collect_jobdata()
self.__collect_verdicts()
if any(x.startswith('problem:pr_component_') for x in self._columns):
self.__collect_unknowns()
if any(x.startswith('tag:safe:') for x in self._columns):
self.__collect_safe_tags()
if any(x.startswith('tag:unsafe:') for x in self._columns):
self.__collect_unsafe_tags()
if any(x.startswith('resource:') for x in self._columns):
self.__collect_resourses()
if 'role' in self._columns:
self.__collect_roles()
progress_columns = {'priority', 'solutions', 'start_date', 'finish_date', 'solution_wall_time', 'operator'}\
| set(TASKS_COLUMNS) | set(SUBJOBS_COLUMNS)
if any(x in progress_columns for x in self._columns):
self.__collect_progress_data()
table_rows = []
for job in self._tree:
row_values = []
col_id = 0
for col in self._columns:
col_id += 1
cell_value = '-' if job['id'] in self._job_ids else ''
href = None
if job['id'] in self._values_data and col in self._values_data[job['id']]:
if isinstance(self._values_data[job['id']][col], tuple):
cell_value = self._values_data[job['id']][col][0]
if cell_value != 0:
href = self._values_data[job['id']][col][1]
else:
cell_value = self._values_data[job['id']][col]
if col in DATE_COLUMNS:
if self._user.extended.data_format == 'hum' and isinstance(cell_value, datetime):
cell_value = get_templated_text('{% load humanize %}{{ date|naturaltime }}', date=cell_value)
row_values.append({
'id': '__'.join(col.split(':')) + ('__%d' % col_id),
'value': cell_value, 'href': href
})
table_rows.append({
'id': job['id'], 'parent': job['parent'],
'black': job['id'] not in self._job_ids,
'values': row_values
})
return table_rows
def __init_values_data(self):
for j_id, name in Job.objects.values_list('id', 'name'):
self._values_data[j_id] = {'name': name}
def __collect_jobdata(self):
for j in Job.objects.filter(id__in=self._job_ids).select_related('change_author', 'parent'):
self._values_data[j.id].update({
'identifier': j.identifier, 'name': (j.name, reverse('jobs:job', args=[j.id])),
'format': j.format, 'version': j.version, 'date': j.change_date, 'status': j.get_status_display()
})
if j.id in self._core:
self._values_data[j.id]['status'] = (self._values_data[j.id]['status'],
reverse('reports:component', args=[self._core[j.id]]))
if j.parent is not None:
self._values_data[j.id]['parent_id'] = j.parent.identifier
if j.change_author is not None:
self._values_data[j.id]['author'] = (j.change_author.get_full_name(),
reverse('users:show_profile', args=[j.change_author_id]))
def __get_safes_without_confirmed(self):
# Collect safes data
safe_columns_map = {
SAFE_VERDICTS[0][0]: 'safe:unknown',
SAFE_VERDICTS[1][0]: 'safe:incorrect',
SAFE_VERDICTS[2][0]: 'safe:missed_bug',
SAFE_VERDICTS[3][0]: 'safe:inconclusive',
SAFE_VERDICTS[4][0]: 'safe:unassociated'
}
for r_id, v, number in ReportSafe.objects.filter(root_id__in=self._roots)\
.values('root_id').annotate(number=Count('id')).values_list('root_id', 'verdict', 'number'):
j_id = self._roots[r_id]
safes_url = reverse('reports:safes', args=[self._core[j_id]])
self._values_data[j_id][safe_columns_map[v]] = (number, '%s?verdict=%s' % (safes_url, v))
if 'safe:total' not in self._values_data[j_id]:
self._values_data[j_id]['safe:total'] = [0, safes_url]
self._values_data[j_id]['safe:total'][0] += number
# Fix total data
for j_id in self._values_data:
if 'safe:total' in self._values_data[j_id]:
self._values_data[j_id]['safe:total'] = tuple(self._values_data[j_id]['safe:total'])
def __get_unsafes_without_confirmed(self):
# Collect unsafes data
unsafe_columns_map = {
UNSAFE_VERDICTS[0][0]: 'unsafe:unknown',
UNSAFE_VERDICTS[1][0]: 'unsafe:bug',
UNSAFE_VERDICTS[2][0]: 'unsafe:target_bug',
UNSAFE_VERDICTS[3][0]: 'unsafe:false_positive',
UNSAFE_VERDICTS[4][0]: 'unsafe:inconclusive',
UNSAFE_VERDICTS[5][0]: 'unsafe:unassociated'
}
for r_id, v, number in ReportUnsafe.objects.filter(root_id__in=self._roots)\
.values('root_id').annotate(number=Count('id')).values_list('root_id', 'verdict', 'number'):
j_id = self._roots[r_id]
unsafes_url = reverse('reports:unsafes', args=[self._core[j_id]])
self._values_data[j_id][unsafe_columns_map[v]] = (number, '%s?verdict=%s' % (unsafes_url, v))
if 'unsafe:total' not in self._values_data[j_id]:
self._values_data[j_id]['unsafe:total'] = [0, unsafes_url]
self._values_data[j_id]['unsafe:total'][0] += number
# Fix total data
for j_id in self._values_data:
if 'unsafe:total' in self._values_data[j_id]:
self._values_data[j_id]['unsafe:total'] = tuple(self._values_data[j_id]['unsafe:total'])
def __get_safes_with_confirmed(self):
# Collect safes data
safe_columns_map = {
SAFE_VERDICTS[0][0]: 'safe:unknown',
SAFE_VERDICTS[1][0]: 'safe:incorrect',
SAFE_VERDICTS[2][0]: 'safe:missed_bug',
SAFE_VERDICTS[3][0]: 'safe:inconclusive',
SAFE_VERDICTS[4][0]: 'safe:unassociated'
}
for r_id, v, total, confirmed in ReportSafe.objects.filter(root_id__in=self._roots)\
.values('root_id').annotate(total=Count('id'), confirmed=Count(Case(When(has_confirmed=True, then=1))))\
.values_list('root_id', 'verdict', 'total', 'confirmed'):
j_id = self._roots[r_id]
url = reverse('reports:safes', args=[self._core[j_id]])
if v == SAFE_VERDICTS[4][0]:
self._values_data[j_id]['safe:unassociated'] = (total, '%s?verdict=%s' % (url, v))
else:
self._values_data[j_id][safe_columns_map[v]] = '{0} ({1})'.format(
'<a href="{0}?verdict={1}&confirmed=1">{2}</a>'.format(url, v, confirmed) if confirmed > 0 else 0,
'<a href="{0}?verdict={1}">{2}</a>'.format(url, v, total) if total > 0 else 0
)
if 'safe:total' not in self._values_data[j_id]:
self._values_data[j_id]['safe:total'] = [0, 0]
self._values_data[j_id]['safe:total'][0] += confirmed
self._values_data[j_id]['safe:total'][1] += total
# Fix total data
for j_id in self._values_data:
if 'safe:total' in self._values_data[j_id]:
url = reverse('reports:safes', args=[self._core[j_id]])
confirmed, total = self._values_data[j_id]['safe:total']
self._values_data[j_id]['safe:total'] = '{0} ({1})'.format(
'<a href="{0}?confirmed=1">{1}</a>'.format(url, confirmed) if confirmed > 0 else 0,
'<a href="{0}">{1}</a>'.format(url, total) if total > 0 else 0
)
def __get_unsafes_with_confirmed(self):
unsafe_columns_map = {
UNSAFE_VERDICTS[0][0]: 'unsafe:unknown',
UNSAFE_VERDICTS[1][0]: 'unsafe:bug',
UNSAFE_VERDICTS[2][0]: 'unsafe:target_bug',
UNSAFE_VERDICTS[3][0]: 'unsafe:false_positive',
UNSAFE_VERDICTS[4][0]: 'unsafe:inconclusive',
UNSAFE_VERDICTS[5][0]: 'unsafe:unassociated'
}
# Collect unsafes
for r_id, v, total, confirmed in ReportUnsafe.objects.filter(root_id__in=self._roots)\
.values('root_id').annotate(total=Count('id'), confirmed=Count(Case(When(has_confirmed=True, then=1))))\
.values_list('root_id', 'verdict', 'total', 'confirmed'):
j_id = self._roots[r_id]
url = reverse('reports:unsafes', args=[self._core[j_id]])
if v == UNSAFE_VERDICTS[5][0]:
self._values_data[j_id]['unsafe:unassociated'] = (total, '%s?verdict=%s' % (url, v))
else:
self._values_data[j_id][unsafe_columns_map[v]] = '{0} ({1})'.format(
'<a href="{0}?verdict={1}&confirmed=1">{2}</a>'.format(url, v, confirmed) if confirmed > 0 else 0,
'<a href="{0}?verdict={1}">{2}</a>'.format(url, v, total) if total > 0 else 0
)
if 'unsafe:total' not in self._values_data[j_id]:
self._values_data[j_id]['unsafe:total'] = [0, 0]
self._values_data[j_id]['unsafe:total'][0] += confirmed
self._values_data[j_id]['unsafe:total'][1] += total
# Fix total data
for j_id in self._values_data:
if 'unsafe:total' in self._values_data[j_id]:
url = reverse('reports:unsafes', args=[self._core[j_id]])
confirmed, total = self._values_data[j_id]['unsafe:total']
self._values_data[j_id]['unsafe:total'] = '{0} ({1})'.format(
'<a href="{0}?confirmed=1">{1}</a>'.format(url, confirmed) if confirmed > 0 else 0,
'<a href="{0}">{1}</a>'.format(url, total) if total > 0 else 0
)
def __collect_verdicts(self):
if any(col.startswith('safe:') for col in self._columns):
if 'hidden' in self.view and 'confirmed_marks' in self.view['hidden']:
self.__get_safes_without_confirmed()
else:
self.__get_safes_with_confirmed()
if any(col.startswith('unsafe:') for col in self._columns):
if 'hidden' in self.view and 'confirmed_marks' in self.view['hidden']:
self.__get_unsafes_without_confirmed()
else:
self.__get_unsafes_with_confirmed()
# Total unknowns numbers
if 'problem:total' in self._columns:
for r_id, total in ReportUnknown.objects.filter(root_id__in=self._roots) \
.values('root_id').annotate(total=Count('id')).values_list('root_id', 'total'):
j_id = self._roots[r_id]
self._values_data[j_id]['problem:total'] = (total, reverse('reports:unknowns', args=[self._core[j_id]]))
def __collect_unknowns(self):
# Queryset for marked/unmarked unknowns
unconfirmed = Case(When(markreport_set__type=ASSOCIATION_TYPE[2][0], then=True),
default=False, output_field=BooleanField())
queryset = ReportUnknown.objects.filter(root_id__in=self._roots).values('root_id')\
.annotate(number=Count('id', distinct=True), unconfirmed=unconfirmed)\
.values_list('root_id', 'component_id', 'markreport_set__problem_id', 'number', 'unconfirmed')
unmarked = {}
# Marked unknowns
for r_id, c_id, p_id, number, unconfirmed in queryset:
if p_id is None or unconfirmed:
if (r_id, c_id) not in unmarked:
unmarked[(r_id, c_id)] = 0
unmarked[(r_id, c_id)] += number
else:
job_id = self._roots[r_id]
url = '{0}?component={1}&problem={2}'.format(
reverse('reports:unknowns', args=[self._core[job_id]]), c_id, p_id)
self._values_data[job_id]['problem:pr_component_{0}:problem_{1}'.format(c_id, p_id)] = (number, url)
# Unmarked unknowns
for r_id, c_id in unmarked:
job_id = self._roots[r_id]
url = '{0}?component={1}&problem=0'.format(reverse('reports:unknowns', args=[self._core[job_id]]), c_id)
self._values_data[job_id]['problem:pr_component_{0}:no_mark'.format(c_id)] = (unmarked[(r_id, c_id)], url)
# Total unknowns for each component
for r_id, c_id, total in ReportUnknown.objects.filter(root_id__in=self._roots)\
.values('component_id').annotate(total=Count('id')).values_list('root_id', 'component_id', 'total'):
job_id = self._roots[r_id]
url = '{0}?component={1}'.format(reverse('reports:unknowns', args=[self._core[job_id]]), c_id)
self._values_data[job_id]['problem:pr_component_{0}:total'.format(c_id)] = (total, url)
def __collect_safe_tags(self):
for st in ReportSafeTag.objects.filter(report__root_id__in=self._roots, report__parent=None)\
.annotate(root_id=F('report__root_id')):
self._values_data[self._roots[st.root_id]]['tag:safe:tag_' + str(st.tag_id)] = (
st.number, '%s?tag=%s' % (reverse('reports:safes', args=[st.report_id]), st.tag_id)
)
def __collect_unsafe_tags(self):
for ut in ReportUnsafeTag.objects.filter(report__root_id__in=self._roots, report__parent=None)\
.annotate(root_id=F('report__root_id')):
self._values_data[self._roots[ut.root_id]]['tag:unsafe:tag_' + str(ut.tag_id)] = (
ut.number, '%s?tag=%s' % (reverse('reports:unsafes', args=[ut.report_id]), ut.tag_id)
)
def __collect_resourses(self):
data_format = self._user.extended.data_format
accuracy = self._user.extended.accuracy
for cr in ComponentResource.objects.filter(report__root_id__in=self._roots, report__parent=None)\
.annotate(root_id=F('report__root_id')):
job_id = self._roots[cr.root_id]
rd = get_resource_data(data_format, accuracy, cr)
resourses_value = "%s %s %s" % (rd[0], rd[1], rd[2])
if cr.component_id is None:
self._values_data[job_id]['resource:total'] = resourses_value
else:
self._values_data[job_id]['resource:component_' + str(cr.component_id)] = resourses_value
def __collect_roles(self):
user_role = self._user.extended.role
is_author = set()
for fv in JobHistory.objects.filter(job_id__in=self._job_ids, version=1, change_author_id=self._user.id)\
.only('job_id'):
is_author.add(fv.job_id)
global_roles = {}
for fv in JobHistory.objects.filter(job_id__in=self._job_ids, version=F('job__version'))\
.only('job_id', 'global_role'):
global_roles[fv.job_id] = fv.get_global_role_display()
job_user_roles = {}
for ur in UserRole.objects\
.filter(user=self._user, job__job_id__in=self._job_ids, job__version=F('job__job__version'))\
.only('job__job_id', 'role'):
job_user_roles[ur.job.job_id] = ur.get_role_display()
for j_id in self._job_ids:
if j_id in is_author:
self._values_data[j_id]['role'] = _('Author')
elif user_role == USER_ROLES[2][0]:
self._values_data[j_id]['role'] = USER_ROLES[2][1]
elif j_id in job_user_roles:
self._values_data[j_id]['role'] = job_user_roles[j_id]
else:
self._values_data[j_id]['role'] = global_roles[j_id]
def __collect_progress_data(self):
jobs_with_progress = set()
progresses = GetJobsProgresses(self._user, self._job_ids).table_data()
for j_id in progresses:
self._values_data[j_id].update(progresses[j_id])
for progress in SolvingProgress.objects.filter(job_id__in=self._job_ids):
self._values_data[progress.job_id].update({
'priority': progress.get_priority_display(),
'tasks:total': progress.tasks_total,
'tasks:cancelled': progress.tasks_cancelled,
'tasks:error': progress.tasks_error,
'tasks:finished': progress.tasks_finished,
'tasks:processing': progress.tasks_processing,
'tasks:pending': progress.tasks_pending,
'tasks:solutions': progress.solutions
})
if progress.start_date is not None:
self._values_data[progress.job_id]['start_date'] = progress.start_date
if progress.finish_date is not None:
self._values_data[progress.job_id]['finish_date'] = progress.finish_date
self._values_data[progress.job_id]['solution_wall_time'] = get_user_time(
self._user, int((progress.finish_date - progress.start_date).total_seconds() * 1000)
)
jobs_with_progress.add(progress.job_id)
for root in ReportRoot.objects.filter(job_id__in=self._job_ids).select_related('user'):
self._values_data[root.job_id]['operator'] = (
root.user.get_full_name(), reverse('users:show_profile', args=[root.user_id])
)
def __get_footer(self):
# Must be the same lists as lists in jobtree.js
countable = {
'tasks:pending', 'tasks:processing', 'tasks:finished', 'tasks:error',
'tasks:cancelled', 'tasks:total', 'tasks:solutions', 'tasks:total_ts', 'subjobs:total_sj'
}
        countable_prefixes = {'safe:', 'unsafe:', 'tag:', 'problem:'}
# Footer title length
foot_length = 1
for col in self._columns:
            if col in countable or any(col.startswith(prefix) for prefix in countable_prefixes):
break
foot_length += 1
else:
foot_length = None
# Footer columns
footer = []
if foot_length is not None and len(self.values) > 0:
f_len = len(self.values[0]['values'])
for i in range(foot_length - 1, f_len):
footer.append(self.values[0]['values'][i]['id'])
return foot_length, footer
|
def isEven(number):
    # generate the list of even numbers from 0 up to 2*(number - 1)
    evenNumbers = []
    for i in range(number):
        evenNumbers.append(i * 2)
    if number in evenNumbers:
        return True
    else:
        return False
print(isEven(100)) |
# ORTHOGONAL COLLOCATION METHOD
# -------------------------------
# import package/module
import numpy as np
class OrCoClass:
# class vars
# constants
# Approximate Solution
# ---------------------
# y = d1 + d2*x^2 + d3*x^4 + d4*x^6 + ... + d[N+1]*x^2N
# Define Collocation Points
# -------------------------
# x1,x2,x3,x4
# x1 = 0
x1 = 0.28523
x2 = 0.76505
x3 = 1
# 6 points [spherical shape]
# x1 = 0
# x1 = 0.215353
# x2 = 0.420638
# x3 = 0.606253
# x4 = 0.763519
# x5 = 0.885082
# x6 = 0.965245
# x7 = 1
# initial boundary condition
X0 = 0
# last boundary condition
Xn = 1
# collocation points
Xc = np.array([x1, x2, x3])
# 6 points
# Xc = np.array([x1, x2, x3, x4, x5, x6, x7])
# 5 points
# Xc = np.array([x1, x2, x3, x4, x5, x6])
# collocation + boundary condition points
# 4 points [symmetric 3 points]
N = np.size(Xc)
# collocation points number
Nc = N - 1
def __init__(self, odeNo):
self.odeNo = odeNo
@property
def odeNo(self):
return self._odeNo
@odeNo.setter
def odeNo(self, val):
self._odeNo = val
    def fQ(self, j, Xc):
'''
Q matrix
'''
return Xc**(2*j)
    def fC(self, j, Xc):
'''
C matrix
'''
if j == 0:
return 0
else:
return (2*j)*(Xc**(2*j-1))
    def fD(self, j, Xc):
'''
D matrix
'''
if j == 0:
return 0
if j == 1:
return 2
else:
return 2*j*(2*j-1)*(Xc**(2*j-2))
def buildMatrix(self):
'''
build Q,C,D matrix
'''
# try/except
try:
# number of OC points
N = OrCoClass.N
# residual matrix shape
residualMatrixShape = (self.odeNo*N, self.odeNo*N)
# rhs
rhsMatrixShape = self.odeNo*N
# fdydt
fdydtShape = self.odeNo*N
# Evaluate Solution at Collocation Points
# ----------------------------------------
# point x1
# y(1) = d1 + d2*x(1)^2 + d3*x(1)^4 + d4*x(1)^6
# point x2
# y(2) = d1 + d2*x(2)^2 + d3*x(2)^4 + d4*x(2)^6
# point x3
# y(1) = d1 + d2*x(3)^2 + d3*x(3)^4 + d4*x(3)^6
# define Q matrix
Q = np.zeros((N, N))
for i in range(N):
for j in range(N):
Q[i][j] = self.fQ(j, OrCoClass.Xc[i])
# y = Q*d
# d = y/Q = y*[Q inverse]
# Evaluate First Derivative at Collocation Points
# ------------------------------------------------
# point x1
# dy(1) = 0 + 2*d2*x1 + 4*d3*x1^3 + ...
# dy = [dy1 dy2 dy3 dy4];
# C0 = [
# 0 1 2*x1 3*x1^2;
# 0 1 2*x2 3*x2^2;
# 0 1 2*x3 3*x3^2;
# 0 1 2*x4 3*x4^2
# ]
# define C matrix
C = np.zeros((N, N))
for i in range(N):
for j in range(N):
C[i][j] = self.fC(j, OrCoClass.Xc[i])
# d = [d1 d2 d3 d4];
# y' = A*y
# Q inverse
invQ = np.linalg.inv(Q)
# A matrix
A = np.dot(C, invQ)
# Evaluate Second Derivative at Collocation Points
# ------------------------------------------------
# point x1
# ddy(1) = 0 + 2*d2 + 12*d3*x1^2 + ...
# ddy = [ddy1 ddy2 ddy3 ddy4];
# D0 = [
# 0 0 2 6*x1;
# 0 0 2 6*x2;
# 0 0 2 6*x3;
# 0 0 2 6*x4
# ]
# define D matrix
D = np.zeros((N, N))
for i in range(N):
for j in range(N):
D[i][j] = self.fD(j, OrCoClass.Xc[i])
# print("D Matrix: ", D)
# d = [d1 d2 d3 d4];
# y'' = B*y
# B matrix
            B = np.dot(D, invQ)
            # return the collocation operator matrices for later use
            return Q, A, B
        except Exception as e:
            raise
# test
myClass = OrCoClass(2)
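# Illustrative usage sketch (not part of the original script): with buildMatrix()
# returning the collocation matrices as above, the derivative operators for the
# 3-point grid can be inspected directly.
Q_mat, A_mat, B_mat = myClass.buildMatrix()
print("first-derivative operator A:\n", A_mat)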
|
from litex.build.generic_platform import Subsignal, Pins, IOStandard, Misc
class QMTechDaughterboard:
"""
the QMTech daughterboard contains standard peripherals
and can be used with a number of different FPGA core boards
source: https://www.aliexpress.com/item/1005001829520314.html
"""
def __init__(self, io_standard) -> None:
"""
because the board can be used with FPGAs core boards from
different vendors, the constructor needs the vendor specific IOStandard
"""
self.io = [
("serial", 0,
Subsignal("rx", Pins("J2:15")),
Subsignal("tx", Pins("J2:16")),
io_standard
),
("user_led", 0, Pins("J2:40"), io_standard),
("user_led", 1, Pins("J2:39"), io_standard),
("user_led", 2, Pins("J2:38"), io_standard),
("user_led", 3, Pins("J2:37"), io_standard),
("user_led", 4, Pins("J2:36"), io_standard),
("user_btn", 0, Pins("J3:7"), io_standard),
("user_btn", 1, Pins("J2:44"), io_standard),
("user_btn", 2, Pins("J2:43"), io_standard),
("user_btn", 3, Pins("J2:42"), io_standard),
("user_btn", 4, Pins("J2:41"), io_standard),
# GMII Ethernet
("eth_clocks", 0,
Subsignal("tx", Pins("J3:22")),
Subsignal("gtx", Pins("J3:29")),
Subsignal("rx", Pins("J3:37")),
io_standard
),
("eth", 0,
# rst is hardwired on the board
#Subsignal("rst_n", Pins("-")),
Subsignal("int_n", Pins("J3:26")),
Subsignal("mdio", Pins("J3:15")),
Subsignal("mdc", Pins("J3:16")),
Subsignal("rx_dv", Pins("J3:42")),
Subsignal("rx_er", Pins("J3:32")),
Subsignal("rx_data", Pins("J3:41 J3:40 J3:39 J3:38 J3:36 J3:35 J3:34 J3:33")),
Subsignal("tx_en", Pins("J3:28")),
Subsignal("tx_er", Pins("J3:17")),
Subsignal("tx_data", Pins("J3:27 J3:25 J3:24 J3:23 J3:21 J3:20 J3:19 J3:18")),
Subsignal("col", Pins("J3:31")),
Subsignal("crs", Pins("J3:30")),
io_standard
),
# Seven Segment
("seven_seg_ctl", 0, Pins("J2:33"), io_standard),
("seven_seg_ctl", 1, Pins("J2:27"), io_standard),
("seven_seg_ctl", 2, Pins("J2:35"), io_standard),
("seven_seg", 0, Pins("J2:31 J2:26 J2:28 J2:32 J2:34 J2:29 J2:25 J2:30"), io_standard),
# VGA
("vga", 0,
Subsignal("hsync_n", Pins("J3:44")),
Subsignal("vsync_n", Pins("J3:43")),
Subsignal("r", Pins("J3:57 J3:56 J3:59 J3:58 J3:60")),
Subsignal("g", Pins("J3:51 J3:50 J3:53 J3:52 J3:54 J3:55")),
Subsignal("b", Pins("J3:46 J3:45 J3:48 J3:47 J3:49")),
io_standard
),
# PullUp resistors are on the board, so we don't need them in the FPGA
("sdcard", 0,
Subsignal("data", Pins("J3:10 J3:9 J3:14 J3:13")),
Subsignal("cmd", Pins("J3:12")),
Subsignal("clk", Pins("J3:11")),
Subsignal("cd", Pins("J3:8")),
Misc("SLEW=FAST"),
io_standard,
),
]
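        # Usage sketch (illustrative, not part of the original file): a core-board
        # platform would typically instantiate this with its vendor-specific
        # IOStandard and register the resources, e.g.
        #   daughterboard = QMTechDaughterboard(IOStandard("LVCMOS33"))
        #   platform.add_extension(daughterboard.io)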
connectors = [
("pmoda", "J2:17 J2:19 J2:21 J2:23 J2:18 J2:20 J2:22 J2:24"), #J10
("pmodb", "J2:7 J2:9 J2:11 J2:13 J2:8 J2:10 J2:12 J2:14"), #J11
("J1", {
3: "J2:60",
4: "J2:59",
5: "J2:58",
6: "J2:57",
7: "J2:56",
8: "J2:55",
9: "J2:54",
10: "J2:53",
11: "J2:52",
12: "J2:51",
13: "J2:50",
14: "J2:49",
15: "J2:48",
16: "J2:47",
17: "J2:46",
18: "J2:45"
}),
] |
"""
Segment imports
"""
from .segmentation import Segmentation
from .tabular import Tabular
from .textractor import Textractor
from .tokenizer import Tokenizer
|
'''
registry
'''
import urllib
import urllib2
import json
import base64
def get_catalog_token():
scope = "registry:catalog:*"
return get_token(scope)
def get_repository_token(repository_fullname):
scope = "repository:%s:*" % repository_fullname
return get_token(scope)
def get_token(scope):
url="https://authgate-dev.cloudappl.com/v2/token"
query_parameters = {}
query_parameters["service"] = "token-service"
query_parameters["scope"] = scope
if query_parameters:
query_string = urllib.urlencode(query_parameters)
url = '%s?%s' % (url, query_string)
print(url)
req = urllib2.Request(url = url)
req.add_header("Authorization", "Basic "+base64.standard_b64encode("%s:%s" %("admin", "keadmin")))
res = urllib2.urlopen(req)
res = res.read()
return res
if __name__ == "__main__":
print get_catalog_token() |
class ModelNotFittedError(Exception):
"""
It is raised when a method or attribute is requested that requires the model
to be trained (such as .predict() or .score())
"""
pass
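# Usage sketch (illustrative, not part of the original module): an estimator can
# guard its inference methods with this exception, e.g.
#
#   if not getattr(self, "_is_fitted", False):
#       raise ModelNotFittedError("Call .fit() before .predict() or .score()")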
|
M = float(input("Digite o valor de uma área em metros quadrados: "))
A = M * 0.000247
print("Este mesmo valor em Acres é {} Acres".format(A)) |
import argparse
from pathlib import Path
from lib.model import SuperResolutionModel
import utils
def main(args: argparse.Namespace):
data_root = Path(args.dataset)
source_size = (args.source_size, args.source_size)
target_size = (args.target_size, args.target_size)
train_generator = utils.data.Dataset([
str(data_root.joinpath('91')),
str(data_root.joinpath('291')),
],
batch=128,
size=target_size,
target_transforms=[
utils.transform.Resize(),
utils.transform.Crop(target_size),
], source_transforms=[
utils.transform.Resize(source_size),
utils.transform.Resize(target_size),
]
)
val_generator = utils.data.Dataset(
str(data_root.joinpath('Set5')),
size=(args.target_size, args.target_size),
batch=1,
target_transforms=[
utils.transform.Crop(target_size),
], source_transforms=[
utils.transform.Resize(scale=.5),
utils.transform.Resize(scale=2.),
]
)
model = SuperResolutionModel(mode=args.mode)
model.train(train_generator=train_generator,
val_generator=val_generator,
epochs=args.epoch, config=args)
model.save('model.hdf5')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train or evaluate the super resolution model')
parser.add_argument("command", metavar="<command>",
choices=['train', 'eval'],
help="'train' or 'eval'")
parser.add_argument("--mode", default='cnn', choices=['cnn', 'rnn'],
help="Select mode for training model")
parser.add_argument("--dataset", type=str, default='./data', required=False,
help="Dataset root directory")
parser.add_argument("--epoch", type=int, default=10000, required=False,
help="Epoch for training")
parser.add_argument("--interval", type=int, default=100, required=False)
parser.add_argument("--source-size", type=int, default=16, required=False)
parser.add_argument("--target-size", type=int, default=32, required=False)
parser.add_argument("--lr", type=float, default=.001, required=False)
parser.add_argument("--lr-decay", type=float, default=.0, required=False)
parser.add_argument("--log", type=str, default='./logs', required=False,
help="Logging directory")
parser.add_argument("--seed", type=int, default=42, required=False,
help="The answer to life the universe and everything")
arguments = parser.parse_args()
utils.init(arguments.seed)
main(arguments)
|
# ! are the imports needed if they are defined in main?
import arcpy
import os
import shutil
from helpers import *
# ! are we using tni at all? EHHHH NOT REALLY
# Compute Transit Need Index (TNI) based on the 2003 service standards for each census blockgroup.
# Use the minority, income, age and car ownership data computed in prior functions as inputs, and
# add a feature class indicating TNI to the final output gdb (final_gdb_loc)
def tni(year, root_dir, final_gdb_loc):
arcpy.env.overwriteOutput = True
# set a working gdb
gdb = f"TransitNeedIndex{year}.gdb"
replaceGDB(root_dir, gdb)
gdb_loc = os.path.join(root_dir,gdb)
# define input feature classes, generated from prior functions
minority_fc = os.path.join(final_gdb_loc, f'Minority{year}_final')
medhhinc_fc = os.path.join(final_gdb_loc, f'MedHHInc{year}_final')
senior_fc = os.path.join(final_gdb_loc, f'Senior{year}_final')
NoCar_fc = os.path.join(final_gdb_loc, f"NoCar{year}_Final")
arcpy.env.workspace = os.path.join(root_dir, gdb)
arcpy.ClearWorkspaceCache_management()
# MAke a working feature class from a copy of the minority fc. Define minority TNI fields and calculate them
TNI_Minority = arcpy.conversion.FeatureClassToFeatureClass(in_features=minority_fc, out_path=arcpy.env.workspace, out_name=f"TNI_Minority{year}")
arcpy.management.AddFields(in_table=TNI_Minority, field_description=[["TNI_Minority", "DOUBLE"],["PopDens", "DOUBLE"],["RegPopDens", "DOUBLE"],["TNI_Pop", "DOUBLE"]])
# ! should this use percentage rather than density? If we use this later on I can adjust (if it should be adjusted)
# THE OLD WAY WAS USING DENSITY, BUT IT IS REALLY THE SAME THING IN THIS CASE PEOPLE PER SQUARE MILE.
# PERCENTAGE IS FASTER
# Process: Calculate Field (6) (Calculate Field) (management)
arcpy.management.CalculateField(in_table=TNI_Minority, field="PopDens", expression="!TPOP! / !SqMiles!", expression_type="PYTHON3", code_block="", field_type="TEXT")
arcpy.management.CalculateField(in_table=TNI_Minority, field="RegPopDens", expression="!RegTPOP! / !RegSqMiles!", expression_type="PYTHON3", code_block="", field_type="TEXT")
arcpy.management.CalculateField(in_table=TNI_Minority, field="TNI_Minority", expression="!MinorityDens! / !RegMinorityDens!", expression_type="PYTHON3", code_block="", field_type="TEXT")
arcpy.management.CalculateField(in_table=TNI_Minority, field="TNI_Pop", expression="!PopDens! / !RegPopDens!", expression_type="PYTHON3", code_block="", field_type="TEXT")
# copy income fc, define TNI fields, and join to minority working fc.
# note that median income is used directly in TNI calcs.
TNI_MedHHInc = arcpy.conversion.FeatureClassToFeatureClass(in_features=os.path.join(final_gdb_loc, medhhinc_fc), out_path=gdb_loc, out_name=f"TNI_MedHHInc{year}")[0]
    arcpy.management.AddFields(in_table=TNI_MedHHInc, field_description=[["TNI_MedInc", "DOUBLE"]])
TNI_Minority_MedHHInc_Join = arcpy.management.JoinField(in_data=TNI_Minority, in_field="GEOID", join_table=TNI_MedHHInc, join_field="GEOID", fields=["RegMedHHInc", "MedHHInc", "TNI_MedInc"])[0]
# same as above, with senior
TNI_Senior = arcpy.conversion.FeatureClassToFeatureClass(in_features=os.path.join(final_gdb_loc, senior_fc), out_path=gdb_loc, out_name=f"TNI_Senior{year}")[0]
arcpy.management.AddField(in_table=TNI_Senior, field_name="TNI_Senior", field_type="DOUBLE")
arcpy.management.CalculateField(in_table=TNI_Senior, field="TNI_Senior", expression="!SeniorDens! / !RegSeniorDens!", expression_type="PYTHON3", code_block="", field_type="DOUBLE")
TNI_Join = arcpy.management.JoinField(in_data=TNI_Minority_MedHHInc_Join, in_field="GEOID", join_table=TNI_Senior, join_field="GEOID", fields=["TSenior", "SeniorDens", "RegSeniorDens", "TNI_Senior"])[0]
# Same as above, with zero car households
TNI_NoCar = arcpy.conversion.FeatureClassToFeatureClass(in_features=os.path.join(final_gdb_loc, NoCar_fc), out_path=gdb_loc, out_name="TNI_NoCar",)[0]
arcpy.management.AddField(in_table=TNI_NoCar, field_name="TNI_NoCar", field_type="DOUBLE")[0]
arcpy.management.AddField(in_table=TNI_NoCar, field_name="TNI_LowCar", field_type="DOUBLE")[0]
arcpy.management.CalculateField(in_table=TNI_NoCar, field="TNI_NoCar", expression="!NoCarDens! / !RegNoCarDens!", expression_type="PYTHON3", field_type="DOUBLE")[0]
arcpy.management.CalculateField(in_table=TNI_NoCar, field="TNI_LowCar", expression="!LowCarDens! / !RegLowCarDens!", expression_type="PYTHON3", code_block="", field_type="TEXT")[0]
TNI_Join = arcpy.management.JoinField(in_data=TNI_Join, in_field="GEOID", join_table=TNI_NoCar, join_field="GEOID", fields=["TNoCar", "NoCarDens", "RegNoCarDens", "TNI_NoCar", "TLowCar", "LowCarDens", "RegLowCarDens", "TNI_LowCar"])[0]
# Create and calculate the TNI
arcpy.management.AddField(in_table=TNI_Join, field_name="TNI", field_type="DOUBLE")
arcpy.management.CalculateField(in_table=TNI_Join, field="TNI", expression="(!TNI_MedInc!*3.5)+(!TNI_Minority!*1)+(!TNI_Senior!*1)+(!TNI_LowCar!*1.5)+(!TNI_Pop!*2)", expression_type="PYTHON3", field_type="TEXT")
# compare each blockgroup's TNI to the regional TNI
# Determine the regional mean and standard deviation TNI, then join to each blockgroup.
# Finally, define each regions need (Very Low to High) based on how it compares to regional TNI
TNI_Join_Dissolve = arcpy.management.Dissolve(in_features=TNI_Join, out_feature_class=f"{TNI_Join}_dissolve", dissolve_field=[], statistics_fields=[["TNI", "STD"], ["TNI", "MEAN"]], multi_part="MULTI_PART", unsplit_lines="DISSOLVE_LINES")[0]
TNI_Join_Dissolve_SpJoin = arcpy.analysis.SpatialJoin(target_features=TNI_Join, join_features=TNI_Join_Dissolve, out_feature_class=f'{TNI_Join_Dissolve}_SpJoin', join_operation="JOIN_ONE_TO_ONE", join_type="KEEP_ALL")[0]
arcpy.management.AddField(in_table=TNI_Join_Dissolve_SpJoin, field_name="Propensity", field_type="DOUBLE")[0]
arcpy.management.CalculateField(in_table=TNI_Join_Dissolve_SpJoin, field="Propensity", expression="ifBlock(!TNI!,!STD_TNI!,!MEAN_TNI!)", expression_type="PYTHON3", code_block='''def ifBlock(TNI, STD_TNI, MEAN_TNI):
if TNI < (MEAN_TNI-(STD_TNI*1.5)):
return \"VL\"
elif TNI > (MEAN_TNI-(STD_TNI*1.5)) and TNI < (MEAN_TNI-(STD_TNI*.5)):
return \"L\"
elif TNI > (MEAN_TNI-(STD_TNI*.5)) and TNI < (MEAN_TNI+(STD_TNI*.5)):
return \"A\"
elif TNI > (MEAN_TNI+(STD_TNI*.5)) and TNI < (MEAN_TNI+(STD_TNI*1.5)):
return \"H\"
elif TNI > (MEAN_TNI+(STD_TNI*1.5)):
return \"VH\"
else:
return \"ERROR\"
''', field_type="TEXT")[0]
# create TNI feature classes within output gdb's
arcpy.conversion.FeatureClassToFeatureClass(in_features=TNI_Join_Dissolve_SpJoin, out_path=gdb_loc, out_name=f"TNI{year}_Final")[0]
arcpy.conversion.FeatureClassToFeatureClass(in_features=TNI_Join_Dissolve_SpJoin, out_path=final_gdb_loc, out_name=f"TNI{year}_Final")[0]
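# Illustrative call sketch (the year and paths below are hypothetical, not from the
# original script):
#   tni(2019, root_dir=r"C:\GIS\TransitNeed", final_gdb_loc=r"C:\GIS\TransitNeed\Final.gdb")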
|
import ConfigParser
class Config(object):
def load(self, filename):
config = ConfigParser.SafeConfigParser()
config.read(filename)
self.mafen_host = config.get('mafen', 'host')
self.mafen_port = config.getint('mafen', 'port')
self.verbose = config.getboolean('mafen', 'verbose')
self.auth_host = config.get('auth', 'host')
self.auth_port = config.getint('auth', 'port')
self.cert_path = config.get('auth', 'cert_path')
self.game_host = config.get('game', 'host')
self.game_port = config.getint('game', 'port') |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import mkeventd
try:
mkeventd_enabled = config.mkeventd_enabled
except:
mkeventd_enabled = False
def paint_mkeventd(what, row, tags, custom_vars):
# show for services based on the mkevents active check
command = row[what + '_check_command']
if what != 'service' or not command.startswith('check_mk_active-mkevents'):
return
if '!' not in command:
return
host = None
app = None
# Extract parameters from check_command:
args = command.split('!')[1].split()
if not args:
return
# Handle -a and -H options. Sorry for the hack. We currently
# have no better idea
if len(args) >= 2 and args[0] == '-H':
args = args[2:] # skip two arguments
if len(args) >= 1 and args[0] == '-a':
args = args[1:]
if len(args) >= 1:
if args[0] == '$HOSTNAME$':
host = row['host_name']
elif args[0] == '$HOSTADDRESS$':
host = row['host_address']
else:
host = args[0]
# If we have no host then the command line from the check_command seems
# to be garbled. Better show nothing in this case.
if not host:
return
# It is possible to have a central event console, this is the default case.
# Another possible architecture is to have an event console in each site in
# a distributed environment. For the later case the base url need to be
# constructed here
site = html.site_status[row['site']]["site"]
url_prefix = ''
if getattr(config, 'mkeventd_distributed', False):
url_prefix = site['url_prefix'] + 'check_mk/'
url_vars = [
("view_name", "ec_events_of_monhost"),
("site", row["site"]),
("host", row["host_name"]),
]
title = _('Events of Host %s') % (row["host_name"])
if len(args) >= 2:
app = args[1].strip('\'').replace("\\\\", "\\")
title = _('Events of Application "%s" on Host %s') % (app, host)
url_vars.append(("event_application", app))
url = 'view.py?' + html.urlencode_vars(url_vars)
return 'mkeventd', title, url_prefix + url
if mkeventd_enabled:
multisite_icons.append({
'columns': [ 'check_command' ],
'host_columns': [ 'address', 'name' ],
'paint': paint_mkeventd,
})
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import paho.mqtt.client as mqtt
import time, threading, random
# client, user and device details
serverUrl = "mqtt.cumulocity.com"
clientId = "my_mqtt_python_client"
device_name = "My Python MQTT device"
tenant = "<<tenant_ID>>"
username = "<<username>>"
password = "<<password>>"
receivedMessages = []
# display all incoming messages
def on_message(client, userdata, message):
print("Received operation " + str(message.payload))
if (message.payload.startswith("510")):
print("Simulating device restart...")
publish("s/us", "501,c8y_Restart");
print("...restarting...")
time.sleep(1)
publish("s/us", "503,c8y_Restart");
print("...done...")
# send temperature measurement
def sendMeasurements():
try:
print("Sending temperature measurement...")
publish("s/us", "211," + str(random.randint(10, 20)))
thread = threading.Timer(7, sendMeasurements)
thread.daemon=True
thread.start()
while True: time.sleep(100)
except (KeyboardInterrupt, SystemExit):
print("Received keyboard interrupt, quitting ...")
# publish a message
def publish(topic, message, waitForAck = False):
mid = client.publish(topic, message, 2)[1]
if (waitForAck):
while mid not in receivedMessages:
time.sleep(0.25)
def on_publish(client, userdata, mid):
receivedMessages.append(mid)
# connect the client to Cumulocity and register a device
client = mqtt.Client(clientId)
client.username_pw_set(tenant + "/" + username, password)
client.on_message = on_message
client.on_publish = on_publish
client.connect(serverUrl)
client.loop_start()
publish("s/us", "100," + device_name + ",c8y_MQTTDevice", True)
publish("s/us", "110,S123456789,MQTT test model,Rev0.1")
publish("s/us", "114,c8y_Restart")
print("Device registered successfully!")
client.subscribe("s/ds")
sendMeasurements()
|
# coding: utf-8
import tools
from dropdownlist import DropDownList
class ThemeRoller(DropDownList):
""" based one http://jsfiddle.net/gyoshev/Gxpfy/"""
data_text_field = "name"
data_value_field = "value"
height = 500
data_source = tools.name_value_pairs([
[ "Black" , "black" ],
[ "MaterialBlack", "materialblack" ],
[ "MetroBlack" , "metroblack" ],
[ "Office365" , "office365" ],
[ "Uniform" , "uniform" ],
[ "Nova" , "nova" ],
[ "Moonlight" , "moonlight" ],
[ "Meego" , "meego" ],
[ "Material" , "material" ],
[ "HighContrast" , "highcontrast" ],
[ "Flat" , "flat" ],
[ "Fiori" , "fiori" ],
[ "Bootstrap" , "bootstrap" ],
[ "Blue Opal" , "blueopal" ],
[ "Default" , "default" ],
[ "Metro" , "metro" ],
[ "Silver" , "silver" ]])
def on_change(self, e):
theme = self.value() or 'default'
self.change_theme(theme)
def change_theme(self, theme):
__pragma__('js', '{}', '''
var body_bg = {'moonlight' : '#414550',
'metroblack' : 'black',
'materialblack': '#363636' }
var doc = document,
kendoLinks = $("link[href*='kendo.']", doc.getElementsByTagName("head")[0]),
commonLink = kendoLinks.filter("[href*='kendo.common']"),
skinLink = kendoLinks.filter(":not([href*='kendo.common'])"),
href = location.href,
skinRegex = /kendo\.\w+(\.min)?\.css/i,
extension = skinLink.attr("rel") === "stylesheet" ? ".css" : ".less",
url = commonLink.attr("href").replace(skinRegex, "kendo." + theme + "$1" + extension),
exampleElement = $("#example");
function replaceTheme() {
var oldSkinName = $(doc).data("kendoSkin"),
newLink;
//if ($.browser.msie) {
// newLink = doc.createStyleSheet(url);
//} else {
newLink = skinLink.eq(0).clone().attr("href", url);
//}
newLink.insertBefore(skinLink[0]);
skinLink.remove();
$(doc.documentElement).removeClass("k-" + oldSkinName).addClass("k-" + theme);
// rework Site.css:
var bg = '#fff'
var bg2 = '#eee'
if (theme.indexOf('black') > -1 ||
theme.indexOf('contrast') > -1 ||
theme.indexOf('moonlight') > -1
) {
bg = '#222';
bg2 = '#777';
}
var body = body_bg[theme] || '#fff'
$('body').css({'background-color': body})
// styles of dashboards:
$('.section-white' ).css({'background-color': bg})
$('#main-section-header').css({'background-color': bg})
$('#main-section' ).css({'background-color': bg2})
}
replaceTheme();
''')
|
from __init__ import *
from mc2pdf import MCprocessing
from datamanage import DataIO
from montecarlo import MonteCarlo
from analytical_solutions import AnalyticalSolution, gaussian
from mc2pdf import MCprocessing
from pdfsolver import PdfGrid
from visualization import Visualize
from Learning import PDElearn
import pdb
import time
runmc = 0
makepdf = 0
learn = 1
case = 'advection_reaction_analytical'
plot=True
x_range = [-2.0, 3.0]
nx = 200
tmax = .5
nt = 50
num_realizations = 30000
initial_distribution = 'gaussians'
source = 'quadratic'
ka = 1.0
kr = 1.0
coeffs = [ka, kr]
#[[0.5, 0.1], [0.45, 0.03], [0.8, 0.1], [0.2, 0.01]]
mu = 0.5
mu_var = 0.1
sig = 0.45
sig_var = 0.03
amp = 0.8
amp_var = 0.1
shift = 0.2
shift_var = 0.01
params = [[mu, mu_var], [sig, sig_var], [amp, amp_var], [shift, shift_var]]
MC = MonteCarlo(case=case, num_realizations=num_realizations, coeffs=coeffs, source=source, x_range=x_range, tmax=tmax, nx=nx, nt=nt)
samples = MC.sampleInitialCondition(initial_distribution, params=params)
if plot:
MC.plot_extremes(samples)
savenameMC = MC.multiSolve(samples, params)
print(savenameMC)
if makepdf:
# BUILD PDF
nu = 200
u_margin = -1e-10 # SAVE IT!
bandwidth = 'scott'
distribution = 'PDF'
plot = False
save = True
t0 = time.time()
MCprocess = MCprocessing(savenameMC, case=case)
fu, gridvars, ICparams, savenamepdf = MCprocess.buildKDE(nu, distribution=distribution, plot=plot, save=save, u_margin=u_margin, bandwidth=bandwidth)
print(savenamepdf)
print('Build KDE took t = ', time.time()-t0, ' s')
if learn:
# LEARN
plot = False
save = True
# Adjust Size
pt = 1
px = 1
pu = 1
mu = [20, 0]
mx = [0, 0]
mt = [0, 0]
adjustgrid = {'mu':mu, 'mx':mx, 'mt':mt, 'pu':pu, 'px':px, 'pt':pt}
comments = ''
feature_opt = '1storder'
trainratio = 0.8
nzthresh = 1e-190
coeforder = 2
variableCoef = True
variableCoefBasis = 'simple_polynomial'
print_rfeiter = True
shuffle = False
normalize = True
maxiter = 10000
use_rfe = True
rfe_alpha = 0.1
RegCoef = 0.000005
LassoType = 'LassoCV'
cv = 5
criterion = 'bic'
if "savenamepdf" not in locals():
        # If no PDF file was just built above, fall back to a previously saved one
savenamepdf = 'advection_reaction_analytical_388_128.npy'
dataman = DataIO(case)
fu, gridvars, ICparams = dataman.loadSolution(savenamepdf, array_opt='marginal')
grid = PdfGrid(gridvars)
fu = grid.adjust(fu, adjustgrid)
if plot:
s = 10
V = Visualize(grid)
V.plot_fu3D(fu)
V.plot_fu(fu, dim='t', steps=s)
V.plot_fu(fu, dim='x', steps=s)
V.show()
difflearn = PDElearn(grid=grid, fu=fu, ICparams=ICparams, scase=case, trainratio=trainratio, verbose=True)
output = difflearn.fit_sparse(feature_opt=feature_opt, variableCoef=variableCoef, variableCoefBasis=variableCoefBasis, \
variableCoefOrder=coeforder, use_rfe=use_rfe, rfe_alpha=rfe_alpha, nzthresh=nzthresh, maxiter=maxiter, \
LassoType=LassoType, RegCoef=RegCoef, cv=cv, criterion=criterion, print_rfeiter=print_rfeiter, shuffle=shuffle, \
basefile=savenamepdf, adjustgrid=adjustgrid, save=save, normalize=normalize, comments=comments)
d = DataIO(case, directory=LEARNDIR)
learndata, pdfdata, mcdata = d.readLearningResults(savenamepdf.split('.')[0]+'.txt', PDFdata=True, MCdata=True, display=False)
|
import abc
import functools
import itertools
import os
import signal
import sys
import warnings
import psutil
from py import std
class XProcessInfo:
def __init__(self, path, name):
self.name = name
self.controldir = path.ensure(name, dir=1)
self.logpath = self.controldir.join("xprocess.log")
self.pidpath = self.controldir.join("xprocess.PID")
self.pid = int(self.pidpath.read()) if self.pidpath.check() else None
def terminate(self, *, kill_proc_tree=True, timeout=20):
"""Recursively terminates process tree.
This is the default behavior unless explicitly disabled by setting
kill_proc_tree keyword-only parameter to false when calling
``XProcessInfo.terminate``.
:param kill_proc_tree: Enable/disable recursive process tree
termination. Defaults to True.
:param timeout: Maximum time in seconds to wait on process termination.
When timeout is reached after sending SIGTERM, this
method will attempt to SIGKILL the process and
return ``-1`` in case the operation times out again.
return codes:
0 no work to do
1 terminated
-1 failed to terminate
"""
if not self.pid:
return 0
try:
parent = psutil.Process(self.pid)
except psutil.NoSuchProcess:
return 0
try:
kill_list = [parent]
if kill_proc_tree:
kill_list += parent.children(recursive=True)
for p in kill_list:
p.send_signal(signal.SIGTERM)
_, alive = psutil.wait_procs(kill_list, timeout=timeout)
for p in alive:
p.send_signal(signal.SIGKILL)
_, alive = psutil.wait_procs(kill_list, timeout=timeout)
if alive:
return -1
except psutil.Error:
return -1
return 1
def isrunning(self):
if self.pid is None:
return False
try:
proc = psutil.Process(self.pid)
except psutil.NoSuchProcess:
return False
return proc.is_running()
class XProcess:
def __init__(self, config, rootdir, log=None):
self.config = config
self.rootdir = rootdir
class Log:
def debug(self, msg, *args):
if args:
print(msg % args)
else:
print(msg)
self.log = log or Log()
def getinfo(self, name):
""" return Process Info for the given external process. """
return XProcessInfo(self.rootdir, name)
def ensure(self, name, preparefunc, restart=False):
"""returns (PID, logfile) from a newly started or already
running process.
@param name: name of the external process, used for caching info
across test runs.
@param preparefunc:
A subclass of ProcessStarter.
@param restart: force restarting the process if it is running.
@return: (PID, logfile) logfile will be seeked to the end if the
server was running, otherwise seeked to the line after
where the waitpattern matched.
"""
from subprocess import Popen, STDOUT
info = self.getinfo(name)
if not restart and not info.isrunning():
restart = True
if restart:
if info.pid is not None:
info.terminate()
controldir = info.controldir.ensure(dir=1)
# controldir.remove()
preparefunc = CompatStarter.wrap(preparefunc)
starter = preparefunc(controldir, self)
args = [str(x) for x in starter.args]
self.log.debug("%s$ %s", controldir, " ".join(args))
stdout = open(str(info.logpath), "wb", 0)
kwargs = {"env": starter.env}
if sys.platform == "win32":
kwargs["startupinfo"] = sinfo = std.subprocess.STARTUPINFO()
if sys.version_info >= (2, 7):
sinfo.dwFlags |= std.subprocess.STARTF_USESHOWWINDOW
sinfo.wShowWindow |= std.subprocess.SW_HIDE
else:
kwargs["close_fds"] = True
kwargs["preexec_fn"] = os.setpgrp # no CONTROL-C
popen = Popen(
args, cwd=str(controldir), stdout=stdout, stderr=STDOUT, **kwargs
)
info.pid = pid = popen.pid
info.pidpath.write(str(pid))
self.log.debug("process %r started pid=%s", name, pid)
stdout.close()
f = info.logpath.open()
if not restart:
f.seek(0, 2)
else:
if not starter.wait(f):
raise RuntimeError("Could not start process %s" % name)
self.log.debug("%s process startup detected", name)
logfiles = self.config.__dict__.setdefault("_extlogfiles", {})
logfiles[name] = f
self.getinfo(name)
return info.pid, info.logpath
def _infos(self):
return (self.getinfo(p.basename) for p in self.rootdir.listdir())
def _xkill(self, tw):
ret = 0
for info in self._infos():
termret = info.terminate()
ret = ret or (termret == 1)
status = {
1: "TERMINATED",
-1: "FAILED TO TERMINATE",
0: "NO PROCESS FOUND",
}[termret]
tmpl = "{info.pid} {info.name} {status}"
tw.line(tmpl.format(**locals()))
return ret
def _xshow(self, tw):
for info in self._infos():
running = "LIVE" if info.isrunning() else "DEAD"
tmpl = "{info.pid} {info.name} {running} {info.logpath}"
tw.line(tmpl.format(**locals()))
return 0
class ProcessStarter:
"""
Describes the characteristics of a process to start, waiting
for a process to achieve a started state.
"""
env = None
"""
The environment in which to invoke the process.
"""
def __init__(self, control_dir, process):
self.control_dir = control_dir
self.process = process
@abc.abstractproperty
def args(self):
"The args to start the process"
@abc.abstractproperty
def pattern(self):
"The pattern to match when the process has started"
def wait(self, log_file):
"Wait until the process is ready."
lines = map(self.log_line, self.filter_lines(self.get_lines(log_file)))
return any(std.re.search(self.pattern, line) for line in lines)
def filter_lines(self, lines):
# only consider the first non-empty 50 lines
non_empty_lines = (x for x in lines if x.strip())
return itertools.islice(non_empty_lines, 50)
def log_line(self, line):
self.process.log.debug(line)
return line
def get_lines(self, log_file):
while True:
line = log_file.readline()
if not line:
std.time.sleep(0.1)
yield line
class CompatStarter(ProcessStarter):
"""
A compatibility ProcessStarter to handle legacy preparefunc
and warn of the deprecation.
"""
# Define properties to satisfy the abstract property, though
# they will be overridden at the instance.
pattern = None
args = None
def __init__(self, preparefunc, control_dir, process):
self.prep(*preparefunc(control_dir))
super().__init__(control_dir, process)
def prep(self, wait, args, env=None):
"""
Given the return value of a preparefunc, prepare this
CompatStarter.
"""
self.pattern = wait
self.env = env
self.args = args
# wait is a function, supersedes the default behavior
if callable(wait):
self.wait = lambda lines: wait()
@classmethod
def wrap(cls, starter_cls):
"""
If starter_cls is not a ProcessStarter, assume it's the legacy
preparefunc and return it bound to a CompatStarter.
"""
if isinstance(starter_cls, type) and issubclass(starter_cls, ProcessStarter):
return starter_cls
depr_msg = "Pass a ProcessStarter for preparefunc"
warnings.warn(depr_msg, DeprecationWarning, stacklevel=3)
return functools.partial(CompatStarter, starter_cls)
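# Illustrative sketch (not part of this module): a test fixture would typically
# subclass ProcessStarter and hand it to XProcess.ensure(), e.g.
#
#   class RedisStarter(ProcessStarter):
#       pattern = "Ready to accept connections"   # log line marking startup
#       args = ["redis-server", "--port", "6379"]
#
#   pid, logfile = xprocess.ensure("redis", RedisStarter)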
|
from sklearn.svm import SVC
from ml_config import MachineLearningConfig
from ml_validation import AccuracyValidation
config = MachineLearningConfig()
image_data, target_data = config.read_training_data(config.training_data[0])
# kernel can be linear, rbf, etc.
svc_model = SVC(kernel='linear', probability=True)
svc_model.fit(image_data, target_data)
#config.save_model(svc_model, 'SVC_model')
###############################################
# for validation and testing purposes
###############################################
validate = AccuracyValidation()
validate.split_validation(svc_model, image_data, target_data, True)
validate.cross_validation(svc_model, 3, image_data,
target_data)
###############################################
# end of validation and testing
############################################### |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('weather', '0004_auto_20171023_1719'),
]
operations = [
migrations.AlterField(
model_name='departmentwarnings',
name='warningname',
field=models.CharField(max_length=200, db_index=True),
),
]
|
# -*- coding: utf-8 -*-
import os
from keras.callbacks import *
from config import ModelConfig
from callbacks import SWA
class BaseModel(object):
def __init__(self, config: ModelConfig):
self.config = config
self.callbacks = []
self.model = self.build()
def add_model_checkpoint(self):
self.callbacks.append(ModelCheckpoint(
filepath=os.path.join(self.config.checkpoint_dir,
'{}.hdf5'.format(self.config.exp_name)),
monitor=self.config.checkpoint_monitor,
save_best_only=self.config.checkpoint_save_best_only,
save_weights_only=self.config.checkpoint_save_weights_only,
mode=self.config.checkpoint_save_weights_mode,
verbose=self.config.checkpoint_verbose
))
print('Logging Info - Callback Added: ModelCheckPoint...')
def add_early_stopping(self):
self.callbacks.append(EarlyStopping(
monitor=self.config.early_stopping_monitor,
mode=self.config.early_stopping_mode,
patience=self.config.early_stopping_patience,
verbose=self.config.early_stopping_verbose
))
print('Logging Info - Callback Added: EarlyStopping...')
def add_swa(self, swa_start: int=5):
self.callbacks.append(SWA(self.build(), self.config.checkpoint_dir, self.config.exp_name,
swa_start=swa_start))
print('Logging Info - Callback Added: SWA with constant lr...')
def init_callbacks(self):
if 'modelcheckpoint' in self.config.callbacks_to_add:
self.add_model_checkpoint()
if 'earlystopping' in self.config.callbacks_to_add:
self.add_early_stopping()
if 'swa' in self.config.callbacks_to_add:
self.add_swa(swa_start=self.config.swa_start)
def build(self):
raise NotImplementedError
def fit(self, x_train, y_train, x_valid, y_valid):
raise NotImplementedError
def predict(self, x):
raise NotImplementedError
def score(self, x, y):
raise NotImplementedError
def load_weights(self, filename: str):
self.model.load_weights(filename)
def load_model(self, filename: str):
# we only save model's weight instead of the whole model
self.model.load_weights(filename)
def load_best_model(self):
print('Logging Info - Loading model checkpoint: %s.hdf5' % self.config.exp_name)
self.load_model(os.path.join(self.config.checkpoint_dir, f'{self.config.exp_name}.hdf5'))
print('Logging Info - Model loaded')
def load_swa_model(self):
print(f'Logging Info - Loading SWA model checkpoint: {self.config.exp_name}_swa.hdf5')
self.load_model(os.path.join(self.config.checkpoint_dir,
f'{self.config.exp_name}_swa.hdf5'))
print('Logging Info - SWA Model loaded')
def summary(self):
self.model.summary()
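# Illustrative subclass sketch (not part of the original module): a concrete model
# only has to implement build() plus the fit/predict/score hooks; the callbacks
# configured above then become available through init_callbacks().
#
#   class MLPModel(BaseModel):
#       def build(self):
#           from keras.models import Sequential
#           from keras.layers import Dense
#           model = Sequential([Dense(64, activation='relu', input_dim=100), Dense(1)])
#           model.compile(optimizer='adam', loss='mse')
#           return model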
|
#
# Version: 31-jan-2018
#
# In this version we use plot= to denote the plotfile name. In the "au" tools the "figfile="
# keyword is used. In imview() they use the "out=". Go figure for standardization, but we
# should probably assume something standard.
#
# Also: plot=None could be used to not show a plot?
#
import numpy as np
import matplotlib.pyplot as plt
# example figures made:
# plot1('test1/tp.ms','aver_12.ms', 'aver_07.ms',11.0,plot='figures/plot1.png')
# plot1('test1/tp.ms','aver_12.ms', 'aver_07.ms',200.0,plot='figures/plot1a.png')
def plot1(ms0=None, ms7=None, ms12=None, uvmax = 5.0, kwave=True, stride=1, plot='plot1.png'):
""" Plotting several MS in a U-V plot
ms0: TP (but could be any)
ms7: 7m (single MS)
ms12: 12m (single MS)
kwave: True means converted to klambda, False means native (meters)
stride: Take every stride'd point to plot
"""
def get_stride(uv, stride=1):
if stride == 1:
return uv
(u,v) = uv
idx = range(0,len(u),stride)
return (u[idx],v[idx])
(w,h) = plt.figaspect(1.0)
plt.figure(figsize=(w,h))
plt.xlim(-uvmax, uvmax)
plt.ylim(-uvmax, uvmax)
if ms0 != None:
(u0,v0) = get_stride(qtp_getuv(ms0,kwave),stride)
plt.scatter(u0, v0, c='b',s=1)
if ms7 != None:
(u7,v7) = get_stride(qtp_getuv(ms7,kwave),stride)
plt.scatter(u7, v7, c='g',s=20)
if ms12 != None:
(u12,v12) = get_stride(qtp_getuv(ms12,kwave),stride)
plt.scatter(u12,v12,c='r',s=60)
if kwave:
plt.xlabel("u (k$\lambda$)")
plt.ylabel("v (k$\lambda$)")
else:
plt.xlabel("u (meter)")
plt.ylabel("v (meter)")
plt.savefig(plot)
plt.show()
def plot1a(mslist, uvmax = 5.0, kwave=True, stride=1, plot='plot1a.png'):
""" Plotting several MS as a heat map in a U-V plot
mslist: List of MS
kwave: True means converted to klambda, False means native (meters)
stride: Take every stride'd point to plot
@todo CASA's matplotlib doesn't seem to have hist2d()
"""
def get_stride(uv, stride=1):
if stride == 1:
return uv
(u,v) = uv
idx = range(0,len(u),stride)
return (u[idx],v[idx])
from matplotlib.colors import LogNorm
(w,h) = plt.figaspect(1.0)
plt.figure(figsize=(w,h))
plt.xlim(-uvmax, uvmax)
plt.ylim(-uvmax, uvmax)
u = np.array([])
v = np.array([])
for ms in mslist:
(u0,v0) = get_stride(qtp_getuv(ms,kwave),stride)
u = np.append(u, u0)
v = np.append(v, v0)
# casa's plt doesn't have hist2d yet
#plt.hist2d(u,v,bins=300, norm=LogNorm())
#plt.colorbar()
if kwave:
plt.xlabel("u (k$\lambda$)")
plt.ylabel("v (k$\lambda$)")
else:
plt.xlabel("u (meter)")
plt.ylabel("v (meter)")
plt.savefig(plot)
plt.show()
# since this fails, write the (u,v)'s to a file and use a more modern python
if True:
np.savetxt("plot1a.tab",(u,v))
# (u,v) = np.loadtxt("plot1a.tab")
def plot1b(tab, uvmax = 5.0, bins=256, kwave=True, plot='plot1b.png'):
""" Plotting several MS as a heat map in a U-V plot
tab: ascii table from loadtxt/savetxt via plot1a()
kwave: True means converted to klambda, False means native (meters)
@todo CASA's matplotlib doesn't seem to have hist2d()
"""
(u,v) = np.loadtxt(tab)
print u.min(),v.min(),u.max(),v.max()
u = np.append(u,-u)
v = np.append(v,-v)
from matplotlib.colors import LogNorm
(w,h) = plt.figaspect(1.0)
plt.figure(figsize=(w,h))
plt.hist2d(u,v,bins=bins, norm=LogNorm())
# plt.colorbar()
plt.xlim(-uvmax, uvmax)
plt.ylim(-uvmax, uvmax)
if kwave:
plt.xlabel("u (k$\lambda$)")
plt.ylabel("v (k$\lambda$)")
else:
plt.xlabel("u (meter)")
plt.ylabel("v (meter)")
plt.savefig(plot)
plt.show()
def plot2(plot2file, f1=None, f2=None, plot='plot2.png'):
""" Plotting flux as function of channel for various situations
This is normally used to build up composite plots
"""
plt.figure()
_tmp = imstat(plot2file,axes=[0,1])
if 'flux' in _tmp:
flux = _tmp['flux']/1000.0
totalflux = imstat(plot2file)['flux'][0]/1000.0
else:
flux = _tmp['sum']/1000.0
totalflux = imstat(plot2file)['sum'][0]/1000.0
rms = _tmp['rms']/1000.0
chan = np.arange(len(flux))
plt.plot(chan,flux,c='r',label='TP image')
if f1 != None:
plt.plot(chan,f1,c='g')
if f2 != None:
plt.plot(chan,f2,c='b')
zero = 0.0 * flux
plt.plot(chan,zero,c='black')
plt.ylabel('Flux/1000')
plt.xlabel('Channel')
plt.title('%s Total flux/1000: %f' % (plot2file,totalflux))
plt.legend()
plt.savefig(plot)
plt.show()
return flux
def plot2a(f, title='Flux Comparison', plot='plot2a.png'):
""" Plotting flux as function of channel for various situations
f = list of equal sized arrays of fluxes
Also prints out the flux sums (sans the km/s factor we don't know)
"""
plt.figure()
chan = np.arange(len(f[0]))
for (fi,n) in zip(f,range(len(f))):
plt.plot(chan,fi,label='%d' % (n+1))
print "Sum[%d]: %g Jy (* unknown km/s)" % (n+1,fi.sum())
zero = 0.0 * f[0]
plt.plot(chan,zero,c='black')
plt.ylabel('Flux')
plt.xlabel('Channel')
plt.title(title)
plt.legend()
plt.savefig(plot)
plt.show()
return
def plot3(mslist, log=True, kwave=True, plot='plot3.png'):
""" Plotting several MS in a UVD - AMP plot
mlist: list of MS
log: logaritmic scale for AMP's
kwave: True means converted to klambda, False means native (meters)
This routine will probably run out of memory for large files, it needs to stream and collect
due to keeping nchan
"""
def my_getamp(ms, log=True):
tb.open(ms)
data = np.abs(tb.getcol('DATA')[0,:,:]) # -> data[nchan,nvis]
amp = data.max(axis=0)
tb.close()
if log: amp = np.log10(amp)
print "AMP min/max = ",amp.min(),amp.max()
return amp
colors = ['r', 'g', 'b']
plt.figure()
if type(mslist) == str:
mslist = [mslist]
for (ms,c) in zip(mslist,colors):
if iscasa(ms):
print "Processing ",ms
(u0,v0) = qtp_getuv(ms,kwave)
uvd = np.sqrt(u0*u0+v0*v0)
amp = my_getamp(ms,log)
plt.scatter(uvd,amp,c=c,label=ms)
else:
print "Skipping ",ms
if kwave:
plt.xlabel("uvdistance (k$\lambda$)")
else:
plt.xlabel("uvdistance (meter)")
if log:
plt.ylabel("log(amp[channel_max])")
else:
plt.ylabel("amp[channel_max]")
plt.legend()
plt.savefig(plot)
plt.show()
def plot4(mslist, bin=None, kwave=True, plot='plot4.png'):
""" Plotting several MS in a UVD - WEIGHT plot
mslist: list of MS
bin: if given, this is the binsize in kLambda for ring weight density
kwave: True in kLambda, False in native meters
"""
def my_getwt(ms):
tb.open(ms)
data = tb.getcol('WEIGHT')[:,:] # -> data[npol,nvis]
tb.close()
return data
colors = ['r', 'g', 'b']
plt.figure()
if type(mslist) == str:
mslist = [mslist]
for (ms,c) in zip(mslist,colors):
if iscasa(ms):
print "Processing ",ms
(u0,v0) = qtp_getuv(ms,kwave)
uvd = np.sqrt(u0*u0+v0*v0) # in kLambda (or meters)
wt = my_getwt(ms)
print "PJT",wt.shape
if bin == None:
# only do the first pol
plt.scatter(uvd,wt[0,:],c=c,label=ms)
# plt.scatter(uvd,wt[1,:],c=c,label=ms)
else:
uvbins = np.arange(0.0,uvd.max() + bin, bin)
#uvbins = np.arange(2.0,6.0,1.0)
print uvbins
print "UVD max",uvd.max()
wt = wt[0,:]
digit = np.digitize(uvd,uvbins)
if True:
# weight density
wt_bin = [wt[digit == i].sum() for i in range(1,len(uvbins))]
print wt_bin
print len(uvbins),len(digit),len(wt_bin)
# @todo check if i'm not off by 1/2 bin
uvarea = np.diff(uvbins*uvbins)
wt_bin = wt_bin / uvarea
else:
# mean weight per uvbin
wt_bin = [wt[digit == i].mean() for i in range(1,len(uvbins))]
print wt_bin
print len(uvbins),len(digit),len(wt_bin)
wt_bin = np.log10(wt_bin)
plt.plot(uvbins[1:],wt_bin,drawstyle='steps-mid')
else:
print "Skipping ",ms
if kwave:
plt.xlabel("uvdistance (k$\lambda$)")
else:
plt.xlabel("uvdistance (meter)")
if bin == None:
plt.ylabel("weight[channel_max]")
else:
plt.ylabel("weight density")
plt.legend()
plt.savefig(plot)
plt.show()
def plot5(image, box=None, plot='plot5.png'):
""" Plotting min,max,rms as function of channel
box xmin,ymin,xmax,ymax defaults to whole area
A useful way to check the the mean RMS at the first
or last 10 channels is:
imstat(image,axes=[0,1])['rms'][:10].mean()
imstat(image,axes=[0,1])['rms'][-10:].mean()
"""
plt.figure()
_tmp = imstat(image,axes=[0,1],box=box)
fmin = _tmp['min']
fmax = _tmp['max']
frms = _tmp['rms']
chan = np.arange(len(fmin))
f = 0.5 * (fmax - fmin) / frms
plt.plot(chan,fmin,c='r',label='min')
plt.plot(chan,fmax,c='g',label='max')
plt.plot(chan,frms,c='b',label='rms')
plt.plot(chan,f, c='black', label='<peak>/rms')
zero = 0.0 * frms
plt.plot(chan,zero,c='black')
plt.ylabel('Flux')
plt.xlabel('Channel')
plt.title('%s Min/Max/RMS' % (image))
plt.legend()
plt.savefig(plot)
plt.show()
def plot6(imlist, bins=50, range=None, log=False, alpha=[1, 0.3, 0.1], box=None, plot='plot6.png'):
""" Plotting histograms on top of each other, nice for comparison
imlist list of images
box='xmin,ymin,xmax,ymax' is the only syntax allowed here
"""
def mybox(box):
a = box.split(',')
if len(a) != 4:
return (0,0,0,0)
xmin = int(a[0])
ymin = int(a[1])
xmax = int(a[2])
ymax = int(a[3])
return (xmin,ymin,xmax,ymax)
plt.figure()
for (i,a) in zip(imlist,alpha):
data = ia.open(i)
if box == None:
data = ia.getchunk().ravel()
else:
(xmin,ymin,xmax,ymax) = mybox(box)
if xmin==0 and xmax==0:
print "Warning: bad box ",box
data = ia.getchunk().ravel()
else:
data = ia.getchunk([xmin,ymin],[xmax,ymax]).ravel()
ia.close()
plt.hist(data,bins=bins,range=range,log=log,alpha=a)
plt.savefig(plot)
plt.show()
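# Example calls (illustrative, analogous to the plot1 examples above; the MS names
# are hypothetical):
# plot3(['aver_12.ms', 'aver_07.ms'], log=True, kwave=True, plot='figures/plot3.png')
# plot4(['aver_12.ms', 'aver_07.ms'], bin=0.5, kwave=True, plot='figures/plot4.png')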
|
import random
from itertools import izip
from stdnet.test import TestCase
from examples.models import SimpleModel
class TestManager(TestCase):
def setUp(self):
self.orm.register(SimpleModel)
def unregister(self):
self.orm.unregister(SimpleModel)
def testGetOrCreate(self):
v,created = SimpleModel.objects.get_or_create(code = 'test')
self.assertTrue(created)
self.assertEqual(v.code,'test')
v2,created = SimpleModel.objects.get_or_create(code = 'test')
self.assertFalse(created)
self.assertEqual(v,v2) |
#!/usr/bin/env python3
"""
Script to copy the model files to deployment directory
Author: Megan McGee
Date: October 7, 2021
"""
from flask import Flask, session, jsonify, request
import pandas as pd
import numpy as np
import pickle
import os
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import json
import shutil
# Load config.json and correct path variable
with open('config.json','r') as f:
config = json.load(f)
dataset_csv_path = os.path.join(config['output_folder_path'])
model_path = os.path.join(config['output_model_path'])
prod_deployment_path = os.path.join(config['prod_deployment_path'])
# function for deployment
def store_model_into_pickle():
'''
Copy the model pickle file, the latest score text file, and the text file
listing the data files ingested into the deployment directory
'''
# copy model pickle file
shutil.copy(
os.path.join(os.getcwd(), model_path,'trainedmodel.pkl'),
os.path.join(os.getcwd(), prod_deployment_path,'trainedmodel.pkl')
)
# copy latest score text file
shutil.copy(
os.path.join(os.getcwd(), model_path,'latestscore.txt'),
os.path.join(os.getcwd(), prod_deployment_path,'latestscore.txt')
)
# copy list of ingested data files
shutil.copy(
os.path.join(os.getcwd(), dataset_csv_path,'ingestedfiles.txt'),
os.path.join(os.getcwd(), prod_deployment_path,'ingestedfiles.txt')
)
if __name__ == '__main__':
store_model_into_pickle()
|
import os
site_env = os.getenv('SITE_TYPE', 'local')
if site_env == 'staging':
from .staging import *
else:
from .production import *
|
# coding=ascii
from __future__ import absolute_import, division, print_function
import os
import copy
import json
import csv
import re
import unittest
import tempfile
import shutil
from ..conform import (
GEOM_FIELDNAME, X_FIELDNAME, Y_FIELDNAME,
csv_source_to_csv, find_source_path, row_transform_and_convert,
row_fxn_regexp, row_smash_case, row_round_lat_lon, row_merge,
row_extract_and_reproject, row_convert_to_out, row_fxn_join, row_fxn_format,
row_fxn_prefixed_number, row_fxn_postfixed_street,
row_fxn_postfixed_unit,
row_fxn_remove_prefix, row_fxn_remove_postfix, row_fxn_chain,
row_fxn_first_non_empty,
row_canonicalize_unit_and_number, conform_smash_case, conform_cli,
convert_regexp_replace, conform_license,
conform_attribution, conform_sharealike, normalize_ogr_filename_case,
OPENADDR_CSV_SCHEMA, is_in, geojson_source_to_csv, check_source_tests
)
class TestConformTransforms (unittest.TestCase):
"Test low level data transform functions"
def test_row_smash_case(self):
r = row_smash_case(None, {"UPPER": "foo", "lower": "bar", "miXeD": "mixed"})
self.assertEqual({"upper": "foo", "lower": "bar", "mixed": "mixed"}, r)
def test_conform_smash_case(self):
d = { "conform": { "street": [ "U", "l", "MiXeD" ], "number": "U", "lat": "Y", "lon": "x",
"city": { "function": "join", "fields": ["ThIs","FiELd"], "separator": "-" },
"district": { "function": "regexp", "field": "ThaT", "pattern": ""},
"postcode": { "function": "join", "fields": ["MiXeD", "UPPER"], "separator": "-" } } }
r = conform_smash_case(d)
self.assertEqual({ "conform": { "street": [ "u", "l", "mixed" ], "number": "u", "lat": "y", "lon": "x",
"city": {"fields": ["this", "field"], "function": "join", "separator": "-"},
"district": { "field": "that", "function": "regexp", "pattern": ""},
"postcode": { "function": "join", "fields": ["mixed", "upper"], "separator": "-" } } },
r)
def test_row_convert_to_out(self):
d = { "conform": { "street": "s", "number": "n" } }
r = row_convert_to_out(d, {"s": "MAPLE LN", "n": "123", X_FIELDNAME: "-119.2", Y_FIELDNAME: "39.3"})
self.assertEqual({"LON": "-119.2", "LAT": "39.3", "UNIT": None, "NUMBER": "123", "STREET": "MAPLE LN",
"CITY": None, "REGION": None, "DISTRICT": None, "POSTCODE": None, "ID": None}, r)
def test_row_merge(self):
d = { "conform": { "street": [ "n", "t" ] } }
r = row_merge(d, {"n": "MAPLE", "t": "ST", "x": "foo"}, 'street')
self.assertEqual({"OA:street": "MAPLE ST", "x": "foo", "t": "ST", "n": "MAPLE"}, r)
d = { "conform": { "city": [ "n", "t" ] } }
r = row_merge(d, {"n": "Village of", "t": "Stanley", "x": "foo"}, 'city')
self.assertEqual({"OA:city": "Village of Stanley", "x": "foo", "t": "Stanley", "n": "Village of"}, r)
def test_row_fxn_join(self):
"New fxn join"
c = { "conform": {
"number": {
"function": "join",
"fields": ["a1"]
},
"street": {
"function": "join",
"fields": ["b1","b2"],
"separator": "-"
}
} }
d = { "a1": "va1", "b1": "vb1", "b2": "vb2" }
e = copy.deepcopy(d)
e.update({ "OA:number": "va1", "OA:street": "vb1-vb2" })
d = row_fxn_join(c, d, "number", c["conform"]["number"])
d = row_fxn_join(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
d = { "a1": "va1", "b1": "vb1", "b2": None}
e = copy.deepcopy(d)
e.update({ "OA:number": "va1", "OA:street": "vb1" })
d = row_fxn_join(c, d, "number", c["conform"]["number"])
d = row_fxn_join(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
def test_row_fxn_format(self):
c = { "conform": {
"number": {
"function": "format",
"fields": ["a1", "a2", "a3"],
"format": "$1-$2-$3"
},
"street": {
"function": "format",
"fields": ["b1", "b2", "b3"],
"format": "foo $1$2-$3 bar"
}
} }
d = {"a1": "12.0", "a2": "34", "a3": "56", "b1": "1", "b2": "B", "b3": "3"}
e = copy.deepcopy(d)
d = row_fxn_format(c, d, "number", c["conform"]["number"])
d = row_fxn_format(c, d, "street", c["conform"]["street"])
self.assertEqual(d.get("OA:number", ""), "12-34-56")
self.assertEqual(d.get("OA:street", ""), "foo 1B-3 bar")
d = copy.deepcopy(e)
d["a2"] = None
d["b3"] = None
d = row_fxn_format(c, d, "number", c["conform"]["number"])
d = row_fxn_format(c, d, "street", c["conform"]["street"])
self.assertEqual(d.get("OA:number", ""), "12-56")
self.assertEqual(d.get("OA:street", ""), "foo 1B bar")
def test_row_fxn_chain(self):
c = { "conform": {
"number": {
"function": "chain",
"functions": [
{
"function": "format",
"fields": ["a1", "a2", "a3"],
"format": "$1-$2-$3"
},
{
"function": "remove_postfix",
"field": "OA:number",
"field_to_remove": "b1"
}
]
}
} }
d = {"a1": "12", "a2": "34", "a3": "56 UNIT 5", "b1": "UNIT 5"}
e = copy.deepcopy(d)
d = row_fxn_chain(c, d, "number", c["conform"]["number"])
self.assertEqual(d.get("OA:number", ""), "12-34-56")
d = copy.deepcopy(e)
d["a2"] = None
d = row_fxn_chain(c, d, "number", c["conform"]["number"])
self.assertEqual(d.get("OA:number", ""), "12-56")
def test_row_fxn_chain_nested(self):
c = { "conform": {
"number": {
"function": "chain",
"variable": "foo",
"functions": [
{
"function": "format",
"fields": ["a1", "a2"],
"format": "$1-$2"
},
{
"function": "chain",
"variable": "bar",
"functions": [
{
"function": "format",
"fields": ["foo", "a3"],
"format": "$1-$2"
},
{
"function": "remove_postfix",
"field": "bar",
"field_to_remove": "b1"
}
]
}
]
}
} }
d = {"a1": "12", "a2": "34", "a3": "56 UNIT 5", "b1": "UNIT 5"}
e = copy.deepcopy(d)
d = row_fxn_chain(c, d, "number", c["conform"]["number"])
self.assertEqual(d.get("OA:number", ""), "12-34-56")
d = copy.deepcopy(e)
d["a2"] = None
d = row_fxn_chain(c, d, "number", c["conform"]["number"])
self.assertEqual(d.get("OA:number", ""), "12-56")
def test_row_fxn_regexp(self):
"Regex split - replace"
c = { "conform": {
"number": {
"function": "regexp",
"field": "ADDRESS",
"pattern": "^([0-9]+)(?:.*)",
"replace": "$1"
},
"street": {
"function": "regexp",
"field": "ADDRESS",
"pattern": "(?:[0-9]+ )(.*)",
"replace": "$1"
}
} }
d = { "ADDRESS": "123 MAPLE ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123", "OA:street": "MAPLE ST" })
d = row_fxn_regexp(c, d, "number", c["conform"]["number"])
d = row_fxn_regexp(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex split - no replace - good match"
c = { "conform": {
"number": {
"function": "regexp",
"field": "ADDRESS",
"pattern": "^([0-9]+)"
},
"street": {
"function": "regexp",
"field": "ADDRESS",
"pattern": "(?:[0-9]+ )(.*)"
}
} }
d = { "ADDRESS": "123 MAPLE ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123", "OA:street": "MAPLE ST" })
d = row_fxn_regexp(c, d, "number", c["conform"]["number"])
d = row_fxn_regexp(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"regex split - no replace - bad match"
c = { "conform": {
"number": {
"function": "regexp",
"field": "ADDRESS",
"pattern": "^([0-9]+)"
},
"street": {
"function": "regexp",
"field": "ADDRESS",
"pattern": "(fake)"
}
} }
d = { "ADDRESS": "123 MAPLE ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123", "OA:street": "" })
d = row_fxn_regexp(c, d, "number", c["conform"]["number"])
d = row_fxn_regexp(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
def test_transform_and_convert(self):
d = { "conform": { "street": ["s1", "s2"], "number": "n", "lon": "y", "lat": "x" }, "fingerprint": "0000" }
r = row_transform_and_convert(d, { "n": "123", "s1": "MAPLE", "s2": "ST", X_FIELDNAME: "-119.2", Y_FIELDNAME: "39.3" })
self.assertEqual({"STREET": "MAPLE ST", "UNIT": "", "NUMBER": "123", "LON": "-119.2", "LAT": "39.3",
"CITY": None, "REGION": None, "DISTRICT": None, "POSTCODE": None, "ID": None,
'HASH': 'eee8eb535bb20a03'}, r)
d = { "conform": { "street": ["s1", "s2"], "number": "n", "lon": "y", "lat": "x" }, "fingerprint": "0000" }
r = row_transform_and_convert(d, { "n": "123", "s1": "MAPLE", "s2": "ST", X_FIELDNAME: "-119.2", Y_FIELDNAME: "39.3" })
self.assertEqual({"STREET": "MAPLE ST", "UNIT": "", "NUMBER": "123", "LON": "-119.2", "LAT": "39.3",
"CITY": None, "REGION": None, "DISTRICT": None, "POSTCODE": None, "ID": None,
'HASH': 'eee8eb535bb20a03'}, r)
d = { "conform": { "number": {"function": "regexp", "field": "s", "pattern": "^(\\S+)" }, "street": { "function": "regexp", "field": "s", "pattern": "^(?:\\S+ )(.*)" }, "lon": "y", "lat": "x" }, "fingerprint": "0000" }
r = row_transform_and_convert(d, { "s": "123 MAPLE ST", X_FIELDNAME: "-119.2", Y_FIELDNAME: "39.3" })
self.assertEqual({"STREET": "MAPLE ST", "UNIT": "", "NUMBER": "123", "LON": "-119.2", "LAT": "39.3",
"CITY": None, "REGION": None, "DISTRICT": None, "POSTCODE": None, "ID": None,
'HASH': 'eee8eb535bb20a03'}, r)
def test_row_canonicalize_unit_and_number(self):
r = row_canonicalize_unit_and_number({}, {"NUMBER": "324 ", "STREET": " OAK DR.", "UNIT": "1"})
self.assertEqual("324", r["NUMBER"])
self.assertEqual("OAK DR.", r["STREET"])
self.assertEqual("1", r["UNIT"])
# Tests for integer conversion
for e, a in (("324", " 324.0 "),
("", ""),
("3240", "3240"),
("INVALID", "INVALID"),
("324.5", "324.5")):
r = row_canonicalize_unit_and_number({}, {"NUMBER": a, "STREET": "", "UNIT": ""})
self.assertEqual(e, r["NUMBER"])
def test_row_canonicalize_street_and_no_number(self):
r = row_canonicalize_unit_and_number({}, {"NUMBER": None, "STREET": " OAK DR.", "UNIT": None})
self.assertEqual("", r["NUMBER"])
self.assertEqual("OAK DR.", r["STREET"])
self.assertEqual("", r["UNIT"])
def test_row_canonicalize_street_with_no_unit_number(self):
r = row_canonicalize_unit_and_number({}, {"NUMBER": None, "STREET": " OAK DR.", "UNIT": None})
self.assertEqual("", r["NUMBER"])
self.assertEqual("OAK DR.", r["STREET"])
self.assertEqual("", r["UNIT"])
def test_row_round_lat_lon(self):
r = row_round_lat_lon({}, {"LON": "39.14285717777", "LAT": "-121.20"})
self.assertEqual({"LON": "39.1428572", "LAT": "-121.2"}, r)
for e, a in (( "" , ""),
( "39.3" , "39.3"),
( "39.3" , "39.3000000"),
( "-39.3" , "-39.3000"),
( "39.1428571", "39.142857143"),
( "139.1428572", "139.142857153"),
( "39.1428572", "39.142857153"),
( "3.1428572", "3.142857153"),
( "0.1428572", "0.142857153"),
("-139.1428572","-139.142857153"),
( "-39.1428572", "-39.142857153"),
( "-3.1428572", "-3.142857153"),
( "-0.1428572", "-0.142857153"),
( "39.1428572", "39.142857153"),
( "0" , " 0.00"),
( "-0" , "-0.00"),
( "180" , "180.0"),
("-180" , "-180")):
r = row_round_lat_lon({}, {"LAT": a, "LON": a})
self.assertEqual(e, r["LON"])
def test_row_extract_and_reproject(self):
# CSV lat/lon column names
d = { "conform" : { "lon": "longitude", "lat": "latitude", "format": "csv" }, 'protocol': 'test' }
r = row_extract_and_reproject(d, {"longitude": "-122.3", "latitude": "39.1"})
self.assertEqual({Y_FIELDNAME: "39.1", X_FIELDNAME: "-122.3"}, r)
# non-CSV lat/lon column names
d = { "conform" : { "lon": "x", "lat": "y", "format": "" }, 'protocol': 'test' }
r = row_extract_and_reproject(d, {X_FIELDNAME: "-122.3", Y_FIELDNAME: "39.1" })
self.assertEqual({X_FIELDNAME: "-122.3", Y_FIELDNAME: "39.1"}, r)
# reprojection
d = { "conform" : { "srs": "EPSG:2913", "format": "" }, 'protocol': 'test' }
r = row_extract_and_reproject(d, {X_FIELDNAME: "7655634.924", Y_FIELDNAME: "668868.414"})
self.assertAlmostEqual(-122.630842186650796, float(r[X_FIELDNAME]))
self.assertAlmostEqual(45.481554393851063, float(r[Y_FIELDNAME]))
d = { "conform" : { "lon": "X", "lat": "Y", "srs": "EPSG:2913", "format": "" }, 'protocol': 'test' }
r = row_extract_and_reproject(d, {X_FIELDNAME: "", Y_FIELDNAME: ""})
self.assertEqual("", r[X_FIELDNAME])
self.assertEqual("", r[Y_FIELDNAME])
# commas in lat/lon columns (eg Iceland)
d = { "conform" : { "lon": "LONG_WGS84", "lat": "LAT_WGS84", "format": "csv" }, 'protocol': 'test' }
r = row_extract_and_reproject(d, {"LONG_WGS84": "-21,77", "LAT_WGS84": "64,11"})
self.assertEqual({Y_FIELDNAME: "64.11", X_FIELDNAME: "-21.77"}, r)
def test_row_fxn_prefixed_number_and_postfixed_street_no_units(self):
"Regex prefixed_number and postfix_street - both fields present"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123 MAPLE ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123", "OA:street": "MAPLE ST" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - no number"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "MAPLE ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "", "OA:street": "MAPLE ST" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - empty input"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "" }
e = copy.deepcopy(d)
e.update({ "OA:number": "", "OA:street": "" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - no spaces after number"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123MAPLE ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "", "OA:street": "123MAPLE ST" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - excess whitespace"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": " \t 123 \t MAPLE ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123", "OA:street": "MAPLE ST" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_number - ordinal street w/house number"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "12 3RD ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "12", "OA:street": "3RD ST" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_number - ordinal street w/o house number"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "3RD ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "", "OA:street": "3RD ST" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_number - combined house number and suffix"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123A 3RD ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123A", "OA:street": "3RD ST" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_number - hyphenated house number and suffix"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123-A 3RD ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123-A", "OA:street": "3RD ST" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_number - queens-style house number"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123-45 3RD ST" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123-45", "OA:street": "3RD ST" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_number - should be case-insenstive"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123-a 3rD St" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123-a", "OA:street": "3rD St" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - should honor space+1/2"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123 1/2 3rD St" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123 1/2", "OA:street": "3rD St" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - should honor hyphen+1/2"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123-1/2 3rD St" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123-1/2", "OA:street": "3rD St" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - should honor space+1/3"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123 1/3 3rD St" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123 1/3", "OA:street": "3rD St" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - should honor hyphen+1/3"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123-1/3 3rD St" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123-1/3", "OA:street": "3rD St" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - should honor space+1/4"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123 1/4 3rD St" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123 1/4", "OA:street": "3rD St" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - should honor hyphen+1/4"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123-1/4 3rD St" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123-1/4", "OA:street": "3rD St" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - should honor space+3/4"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123 3/4 3rD St" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123 3/4", "OA:street": "3rD St" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"Regex prefixed_number and postfixed_street - should honor hyphen+3/4"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123-3/4 3rD St" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123-3/4", "OA:street": "3rD St" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"contains unit but may_contain_units is not present"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123 MAPLE ST UNIT 3" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123", "OA:street": "MAPLE ST UNIT 3" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"contains unit but may_contain_units is explicitly false"
c = { "conform": {
"number": {
"function": "prefixed_number",
"field": "ADDRESS"
},
"street": {
"function": "postfixed_street",
"may_contain_units": False,
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "123 MAPLE ST UNIT 3" }
e = copy.deepcopy(d)
e.update({ "OA:number": "123", "OA:street": "MAPLE ST UNIT 3" })
d = row_fxn_prefixed_number(c, d, "number", c["conform"]["number"])
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
def test_row_fxn_prefixed_number_and_postfixed_street_may_contain_units(self):
"UNIT-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST UNIT 3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"APARTMENT-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST APARTMENT 3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"APT-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST APT 3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"APT.-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST APT. 3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"SUITE-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST SUITE 3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"STE-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST STE 3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"STE.-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST STE. 3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"BUILDING-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST BUILDING 3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"BLDG-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST BLDG 3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"BLDG.-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST BLDG. 3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"LOT-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST LOT 3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"#-style unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST #3" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"no unit"
c = { "conform": {
"street": {
"function": "postfixed_street",
"field": "ADDRESS",
"may_contain_units": True
}
} }
d = { "ADDRESS": "123 MAPLE ST" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_postfixed_street(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
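# postfixed_unit captures a trailing unit designator (Unit, Apartment, Apt, Suite,
# Ste, Building, Bldg, Lot, or a # marker) only when the keyword appears as a whole
# word; otherwise it yields an empty string.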
def test_row_fxn_postfixed_unit(self):
"postfixed_unit - UNIT-style"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Unit 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "Unit 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - UNIT is word ending"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street runit 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - APARTMENT-style"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Apartment 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "Apartment 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - APT-style"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Apt 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "Apt 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - APT is word ending"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street rapt 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - APT.-style"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Apt. 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "Apt. 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - SUITE-style"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Suite 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "Suite 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - STE-style"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Ste 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "Ste 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - STE is word ending"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Haste 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - STE.-style"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Ste. 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "Ste. 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - BUILDING-style"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Building 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "Building 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - BLDG-style"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Bldg 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "Bldg 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - BLDG.-style"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Bldg. 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "Bldg. 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - LOT-style"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street Lot 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "Lot 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - LOT is word ending"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street alot 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - #-style with spaces"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street # 300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "# 300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - #-style without spaces"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street #300" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "#300" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
"postfixed_unit - no unit"
c = { "conform": {
"unit": {
"function": "postfixed_unit",
"field": "ADDRESS"
}
} }
d = { "ADDRESS": "Main Street" }
e = copy.deepcopy(d)
e.update({ "OA:unit": "" })
d = row_fxn_postfixed_unit(c, d, "unit", c["conform"]["unit"])
self.assertEqual(e, d)
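# remove_prefix / remove_postfix strip the value of field_to_remove from the start
# or end of the source field; when there is no exact match (or the value is empty)
# the field is copied through unchanged.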
def test_row_fxn_remove_prefix(self):
"remove_prefix - field_to_remove is a prefix"
c = { "conform": {
"street": {
"function": "remove_prefix",
"field": "ADDRESS",
"field_to_remove": "PREFIX"
}
} }
d = { "ADDRESS": "123 MAPLE ST", "PREFIX": "123" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_remove_prefix(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"remove_prefix - field_to_remove is not a prefix"
c = { "conform": {
"street": {
"function": "remove_prefix",
"field": "ADDRESS",
"field_to_remove": "PREFIX"
}
} }
d = { "ADDRESS": "123 MAPLE ST", "PREFIX": "NOT THE PREFIX VALUE" }
e = copy.deepcopy(d)
e.update({ "OA:street": "123 MAPLE ST" })
d = row_fxn_remove_prefix(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"remove_prefix - field_to_remove value is empty string"
c = { "conform": {
"street": {
"function": "remove_prefix",
"field": "ADDRESS",
"field_to_remove": "PREFIX"
}
} }
d = { "ADDRESS": "123 MAPLE ST", "PREFIX": "" }
e = copy.deepcopy(d)
e.update({ "OA:street": "123 MAPLE ST" })
d = row_fxn_remove_prefix(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
def test_row_fxn_remove_postfix(self):
"remove_postfix - field_to_remove is a postfix"
c = { "conform": {
"street": {
"function": "remove_postfix",
"field": "ADDRESS",
"field_to_remove": "POSTFIX"
}
} }
d = { "ADDRESS": "MAPLE ST UNIT 5", "POSTFIX": "UNIT 5" }
e = copy.deepcopy(d)
e.update({ "OA:street": "MAPLE ST" })
d = row_fxn_remove_postfix(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"remove_postfix - field_to_remove is not a postfix"
c = { "conform": {
"street": {
"function": "remove_postfix",
"field": "ADDRESS",
"field_to_remove": "POSTFIX"
}
} }
d = { "ADDRESS": "123 MAPLE ST", "POSTFIX": "NOT THE POSTFIX VALUE" }
e = copy.deepcopy(d)
e.update({ "OA:street": "123 MAPLE ST" })
d = row_fxn_remove_postfix(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"remove_postfix - field_to_remove value is empty string"
c = { "conform": {
"street": {
"function": "remove_postfix",
"field": "ADDRESS",
"field_to_remove": "POSTFIX"
}
} }
d = { "ADDRESS": "123 MAPLE ST", "POSTFIX": "" }
e = copy.deepcopy(d)
e.update({ "OA:street": "123 MAPLE ST" })
d = row_fxn_remove_postfix(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
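# first_non_empty copies the first listed field whose value is non-empty after
# trimming whitespace; if every candidate is empty, no output attribute is set.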
def test_row_first_non_empty(self):
"first_non_empty - fields array is empty"
c = { "conform": {
"street": {
"function": "first_non_empty",
"fields": []
}
} }
d = { }
e = copy.deepcopy(d)
e.update({ })
d = row_fxn_first_non_empty(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"first_non_empty - both fields are non-empty"
c = { "conform": {
"street": {
"function": "first_non_empty",
"fields": ["FIELD1", "FIELD2"]
}
} }
d = { "FIELD1": "field1 value", "FIELD2": "field2 value" }
e = copy.deepcopy(d)
e.update({ "OA:street": "field1 value" })
d = row_fxn_first_non_empty(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"first_non_empty - first field is null"
c = { "conform": {
"street": {
"function": "first_non_empty",
"fields": ["FIELD1", "FIELD2"]
}
} }
d = { "FIELD1": None, "FIELD2": "field2 value" }
e = copy.deepcopy(d)
e.update({ "OA:street": "field2 value" })
d = row_fxn_first_non_empty(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"first_non_empty - first field is 0-length string"
c = { "conform": {
"street": {
"function": "first_non_empty",
"fields": ["FIELD1", "FIELD2"]
}
} }
d = { "FIELD1": "", "FIELD2": "field2 value" }
e = copy.deepcopy(d)
e.update({ "OA:street": "field2 value" })
d = row_fxn_first_non_empty(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"first_non_empty - first field is trimmable to a 0-length string"
c = { "conform": {
"street": {
"function": "first_non_empty",
"fields": ["FIELD1", "FIELD2"]
}
} }
d = { "FIELD1": " \t ", "FIELD2": "field2 value" }
e = copy.deepcopy(d)
e.update({ "OA:street": "field2 value" })
d = row_fxn_first_non_empty(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
"first_non_empty - all field values are trimmable to a 0-length string"
c = { "conform": {
"street": {
"function": "first_non_empty",
"fields": ["FIELD1", "FIELD2"]
}
} }
d = { "FIELD1": " \t ", "FIELD2": " \t " }
e = copy.deepcopy(d)
e.update({ })
d = row_fxn_first_non_empty(c, d, "street", c["conform"]["street"])
self.assertEqual(e, d)
class TestConformCli (unittest.TestCase):
"Test the command line interface creates valid output files from test input"
def setUp(self):
self.testdir = tempfile.mkdtemp(prefix='openaddr-testPyConformCli-')
self.conforms_dir = os.path.join(os.path.dirname(__file__), 'conforms')
def tearDown(self):
shutil.rmtree(self.testdir)
def _run_conform_on_source(self, source_name, ext):
"Helper method to run a conform on the named source. Assumes naming convention."
with open(os.path.join(self.conforms_dir, "%s.json" % source_name)) as file:
source_definition = json.load(file)
source_path = os.path.join(self.conforms_dir, "%s.%s" % (source_name, ext))
dest_path = os.path.join(self.testdir, '%s-conformed.csv' % source_name)
rc = conform_cli(source_definition, source_path, dest_path)
return rc, dest_path
def test_unknown_conform(self):
# Test that the conform tool does something reasonable with unknown conform sources
self.assertEqual(1, conform_cli({}, 'test', ''))
self.assertEqual(1, conform_cli({'conform': {}}, 'test', ''))
self.assertEqual(1, conform_cli({'conform': {'format': 'broken'}}, 'test', ''))
def test_lake_man(self):
rc, dest_path = self._run_conform_on_source('lake-man', 'shp')
self.assertEqual(0, rc)
with open(dest_path) as fp:
reader = csv.DictReader(fp)
self.assertEqual(OPENADDR_CSV_SCHEMA, reader.fieldnames)
rows = list(reader)
self.assertAlmostEqual(float(rows[0]['LAT']), 37.802612637607439)
self.assertAlmostEqual(float(rows[0]['LON']), -122.259249687194824)
self.assertEqual(6, len(rows))
self.assertEqual(rows[0]['NUMBER'], '5115')
self.assertEqual(rows[0]['STREET'], 'FRUITED PLAINS LN')
self.assertEqual(rows[1]['NUMBER'], '5121')
self.assertEqual(rows[1]['STREET'], 'FRUITED PLAINS LN')
self.assertEqual(rows[2]['NUMBER'], '5133')
self.assertEqual(rows[2]['STREET'], 'FRUITED PLAINS LN')
self.assertEqual(rows[3]['NUMBER'], '5126')
self.assertEqual(rows[3]['STREET'], 'FRUITED PLAINS LN')
self.assertEqual(rows[4]['NUMBER'], '5120')
self.assertEqual(rows[4]['STREET'], 'FRUITED PLAINS LN')
self.assertEqual(rows[5]['NUMBER'], '5115')
self.assertEqual(rows[5]['STREET'], 'OLD MILL RD')
def test_lake_man_gdb(self):
rc, dest_path = self._run_conform_on_source('lake-man-gdb', 'gdb')
self.assertEqual(0, rc)
with open(dest_path) as fp:
reader = csv.DictReader(fp)
self.assertEqual(OPENADDR_CSV_SCHEMA, reader.fieldnames)
rows = list(reader)
self.assertAlmostEqual(float(rows[0]['LAT']), 37.802612637607439)
self.assertAlmostEqual(float(rows[0]['LON']), -122.259249687194824)
self.assertEqual(6, len(rows))
self.assertEqual(rows[0]['NUMBER'], '5115')
self.assertEqual(rows[0]['STREET'], 'FRUITED PLAINS LN')
self.assertEqual(rows[1]['NUMBER'], '5121')
self.assertEqual(rows[1]['STREET'], 'FRUITED PLAINS LN')
self.assertEqual(rows[2]['NUMBER'], '5133')
self.assertEqual(rows[2]['STREET'], 'FRUITED PLAINS LN')
self.assertEqual(rows[3]['NUMBER'], '5126')
self.assertEqual(rows[3]['STREET'], 'FRUITED PLAINS LN')
self.assertEqual(rows[4]['NUMBER'], '5120')
self.assertEqual(rows[4]['STREET'], 'FRUITED PLAINS LN')
self.assertEqual(rows[5]['NUMBER'], '5115')
self.assertEqual(rows[5]['STREET'], 'OLD MILL RD')
def test_lake_man_split(self):
rc, dest_path = self._run_conform_on_source('lake-man-split', 'shp')
self.assertEqual(0, rc)
with open(dest_path) as fp:
rows = list(csv.DictReader(fp))
self.assertEqual(rows[0]['NUMBER'], '915')
self.assertEqual(rows[0]['STREET'], 'EDWARD AVE')
self.assertEqual(rows[1]['NUMBER'], '3273')
self.assertEqual(rows[1]['STREET'], 'PETER ST')
self.assertEqual(rows[2]['NUMBER'], '976')
self.assertEqual(rows[2]['STREET'], 'FORD BLVD')
self.assertEqual(rows[3]['NUMBER'], '7055')
self.assertEqual(rows[3]['STREET'], 'ST ROSE AVE')
self.assertEqual(rows[4]['NUMBER'], '534')
self.assertEqual(rows[4]['STREET'], 'WALLACE AVE')
self.assertEqual(rows[5]['NUMBER'], '531')
self.assertEqual(rows[5]['STREET'], 'SCOFIELD AVE')
def test_lake_man_merge_postcode(self):
rc, dest_path = self._run_conform_on_source('lake-man-merge-postcode', 'shp')
self.assertEqual(0, rc)
with open(dest_path) as fp:
rows = list(csv.DictReader(fp))
self.assertEqual(rows[0]['NUMBER'], '35845')
self.assertEqual(rows[0]['STREET'], 'EKLUTNA LAKE RD')
self.assertEqual(rows[1]['NUMBER'], '35850')
self.assertEqual(rows[1]['STREET'], 'EKLUTNA LAKE RD')
self.assertEqual(rows[2]['NUMBER'], '35900')
self.assertEqual(rows[2]['STREET'], 'EKLUTNA LAKE RD')
self.assertEqual(rows[3]['NUMBER'], '35870')
self.assertEqual(rows[3]['STREET'], 'EKLUTNA LAKE RD')
self.assertEqual(rows[4]['NUMBER'], '32551')
self.assertEqual(rows[4]['STREET'], 'EKLUTNA LAKE RD')
self.assertEqual(rows[5]['NUMBER'], '31401')
self.assertEqual(rows[5]['STREET'], 'EKLUTNA LAKE RD')
def test_lake_man_merge_postcode2(self):
rc, dest_path = self._run_conform_on_source('lake-man-merge-postcode2', 'shp')
self.assertEqual(0, rc)
with open(dest_path) as fp:
rows = list(csv.DictReader(fp))
self.assertEqual(rows[0]['NUMBER'], '85')
self.assertEqual(rows[0]['STREET'], 'MAITLAND DR')
self.assertEqual(rows[1]['NUMBER'], '81')
self.assertEqual(rows[1]['STREET'], 'MAITLAND DR')
self.assertEqual(rows[2]['NUMBER'], '92')
self.assertEqual(rows[2]['STREET'], 'MAITLAND DR')
self.assertEqual(rows[3]['NUMBER'], '92')
self.assertEqual(rows[3]['STREET'], 'MAITLAND DR')
self.assertEqual(rows[4]['NUMBER'], '92')
self.assertEqual(rows[4]['STREET'], 'MAITLAND DR')
self.assertEqual(rows[5]['NUMBER'], '92')
self.assertEqual(rows[5]['STREET'], 'MAITLAND DR')
def test_lake_man_shp_utf8(self):
rc, dest_path = self._run_conform_on_source('lake-man-utf8', 'shp')
self.assertEqual(0, rc)
with open(dest_path, encoding='utf-8') as fp:
rows = list(csv.DictReader(fp))
self.assertEqual(rows[0]['STREET'], u'PZ ESPA\u00d1A')
def test_lake_man_shp_epsg26943(self):
rc, dest_path = self._run_conform_on_source('lake-man-epsg26943', 'shp')
self.assertEqual(0, rc)
with open(dest_path) as fp:
rows = list(csv.DictReader(fp))
self.assertAlmostEqual(float(rows[0]['LAT']), 37.802612637607439)
self.assertAlmostEqual(float(rows[0]['LON']), -122.259249687194824)
def test_lake_man_shp_noprj_epsg26943(self):
rc, dest_path = self._run_conform_on_source('lake-man-epsg26943-noprj', 'shp')
self.assertEqual(0, rc)
with open(dest_path) as fp:
rows = list(csv.DictReader(fp))
self.assertAlmostEqual(float(rows[0]['LAT']), 37.802612637607439)
self.assertAlmostEqual(float(rows[0]['LON']), -122.259249687194824)
# TODO: add tests for non-ESRI GeoJSON sources
def test_lake_man_split2(self):
"An ESRI-to-CSV like source"
rc, dest_path = self._run_conform_on_source('lake-man-split2', 'csv')
self.assertEqual(0, rc)
with open(dest_path) as fp:
rows = list(csv.DictReader(fp))
self.assertEqual(rows[0]['NUMBER'], '1')
self.assertEqual(rows[0]['STREET'], 'Spectrum Pointe Dr #320')
self.assertEqual(rows[1]['NUMBER'], '')
self.assertEqual(rows[1]['STREET'], '')
self.assertEqual(rows[2]['NUMBER'], '300')
self.assertEqual(rows[2]['STREET'], 'E Chapman Ave')
self.assertEqual(rows[3]['NUMBER'], '1')
self.assertEqual(rows[3]['STREET'], 'Spectrum Pointe Dr #320')
self.assertEqual(rows[4]['NUMBER'], '1')
self.assertEqual(rows[4]['STREET'], 'Spectrum Pointe Dr #320')
self.assertEqual(rows[5]['NUMBER'], '1')
self.assertEqual(rows[5]['STREET'], 'Spectrum Pointe Dr #320')
def test_nara_jp(self):
"Test case from jp-nara.json"
rc, dest_path = self._run_conform_on_source('jp-nara', 'csv')
self.assertEqual(0, rc)
with open(dest_path) as fp:
rows = list(csv.DictReader(fp))
self.assertEqual(rows[0]['NUMBER'], '2543-6')
self.assertAlmostEqual(float(rows[0]['LON']), 135.955104)
self.assertAlmostEqual(float(rows[0]['LAT']), 34.607832)
self.assertEqual(rows[0]['STREET'], u'\u91dd\u753a')
self.assertEqual(rows[1]['NUMBER'], '202-6')
def test_lake_man_3740(self):
"CSV in an oddball SRS"
rc, dest_path = self._run_conform_on_source('lake-man-3740', 'csv')
self.assertEqual(0, rc)
with open(dest_path) as fp:
rows = list(csv.DictReader(fp))
self.assertAlmostEqual(float(rows[0]['LAT']), 37.802612637607439, places=5)
self.assertAlmostEqual(float(rows[0]['LON']), -122.259249687194824, places=5)
self.assertEqual(rows[0]['NUMBER'], '5')
self.assertEqual(rows[0]['STREET'], u'PZ ESPA\u00d1A')
def test_lake_man_gml(self):
"GML XML files"
rc, dest_path = self._run_conform_on_source('lake-man-gml', 'gml')
self.assertEqual(0, rc)
with open(dest_path) as fp:
rows = list(csv.DictReader(fp))
self.assertEqual(6, len(rows))
self.assertAlmostEqual(float(rows[0]['LAT']), 37.802612637607439)
self.assertAlmostEqual(float(rows[0]['LON']), -122.259249687194824)
self.assertEqual(rows[0]['NUMBER'], '5115')
self.assertEqual(rows[0]['STREET'], 'FRUITED PLAINS LN')
class TestConformMisc(unittest.TestCase):
def setUp(self):
self.testdir = tempfile.mkdtemp(prefix='openaddr-TestConformMisc-')
def tearDown(self):
shutil.rmtree(self.testdir)
def test_convert_regexp_replace(self):
''' Check that $-style replacement templates convert to Python backreference syntax.
'''
crr = convert_regexp_replace
self.assertEqual(crr('$1'), r'\1')
self.assertEqual(crr('$9'), r'\9')
self.assertEqual(crr('$b'), '$b')
self.assertEqual(crr('$1yo$1'), r'\1yo\1')
self.assertEqual(crr('$9yo$9'), r'\9yo\9')
self.assertEqual(crr('$byo$b'), '$byo$b')
self.assertEqual(crr('$1 yo $1'), r'\1 yo \1')
self.assertEqual(crr('$9 yo $9'), r'\9 yo \9')
self.assertEqual(crr('$b yo $b'), '$b yo $b')
self.assertEqual(crr('$11'), r'\11')
self.assertEqual(crr('$99'), r'\99')
self.assertEqual(crr('$bb'), '$bb')
self.assertEqual(crr('$11yo$11'), r'\11yo\11')
self.assertEqual(crr('$99yo$99'), r'\99yo\99')
self.assertEqual(crr('$bbyo$bb'), '$bbyo$bb')
self.assertEqual(crr('$11 yo $11'), r'\11 yo \11')
self.assertEqual(crr('$99 yo $99'), r'\99 yo \99')
self.assertEqual(crr('$bb yo $bb'), '$bb yo $bb')
self.assertEqual(crr('${1}1'), r'\g<1>1')
self.assertEqual(crr('${9}9'), r'\g<9>9')
self.assertEqual(crr('${9}b'), r'\g<9>b')
self.assertEqual(crr('${b}b'), '${b}b')
self.assertEqual(crr('${1}1yo${1}1'), r'\g<1>1yo\g<1>1')
self.assertEqual(crr('${9}9yo${9}9'), r'\g<9>9yo\g<9>9')
self.assertEqual(crr('${9}byo${9}b'), r'\g<9>byo\g<9>b')
self.assertEqual(crr('${b}byo${b}b'), '${b}byo${b}b')
self.assertEqual(crr('${1}1 yo ${1}1'), r'\g<1>1 yo \g<1>1')
self.assertEqual(crr('${9}9 yo ${9}9'), r'\g<9>9 yo \g<9>9')
self.assertEqual(crr('${9}b yo ${9}b'), r'\g<9>b yo \g<9>b')
self.assertEqual(crr('${b}b yo ${b}b'), '${b}b yo ${b}b')
self.assertEqual(crr('${11}1'), r'\g<11>1')
self.assertEqual(crr('${99}9'), r'\g<99>9')
self.assertEqual(crr('${99}b'), r'\g<99>b')
self.assertEqual(crr('${bb}b'), '${bb}b')
self.assertEqual(crr('${11}1yo${11}1'), r'\g<11>1yo\g<11>1')
self.assertEqual(crr('${99}9yo${99}9'), r'\g<99>9yo\g<99>9')
self.assertEqual(crr('${99}byo${99}b'), r'\g<99>byo\g<99>b')
self.assertEqual(crr('${bb}byo${bb}b'), '${bb}byo${bb}b')
self.assertEqual(crr('${11}1yo${11}1'), r'\g<11>1yo\g<11>1')
self.assertEqual(crr('${99}9 yo ${99}9'), r'\g<99>9 yo \g<99>9')
self.assertEqual(crr('${99}b yo ${99}b'), r'\g<99>b yo \g<99>b')
self.assertEqual(crr('${bb}b yo ${bb}b'), '${bb}b yo ${bb}b')
self.assertEqual(re.sub(r'hello (world)', crr('goodbye $1'), 'hello world'), 'goodbye world')
self.assertEqual(re.sub(r'(hello) (world)', crr('goodbye $2'), 'hello world'), 'goodbye world')
self.assertEqual(re.sub(r'he(ll)o', crr('he$1$1o'), 'hello'), 'hellllo')
def test_find_shapefile_source_path(self):
shp_conform = {"conform": { "format": "shapefile" } }
self.assertEqual("foo.shp", find_source_path(shp_conform, ["foo.shp"]))
self.assertEqual("FOO.SHP", find_source_path(shp_conform, ["FOO.SHP"]))
self.assertEqual("xyzzy/FOO.SHP", find_source_path(shp_conform, ["xyzzy/FOO.SHP"]))
self.assertEqual("foo.shp", find_source_path(shp_conform, ["foo.shp", "foo.prj", "foo.shx"]))
self.assertEqual(None, find_source_path(shp_conform, ["nope.txt"]))
self.assertEqual(None, find_source_path(shp_conform, ["foo.shp", "bar.shp"]))
shp_file_conform = {"conform": { "format": "shapefile", "file": "foo.shp" } }
self.assertEqual("foo.shp", find_source_path(shp_file_conform, ["foo.shp"]))
self.assertEqual("foo.shp", find_source_path(shp_file_conform, ["foo.shp", "bar.shp"]))
self.assertEqual("xyzzy/foo.shp", find_source_path(shp_file_conform, ["xyzzy/foo.shp", "xyzzy/bar.shp"]))
shp_poly_conform = {"conform": { "format": "shapefile-polygon" } }
self.assertEqual("foo.shp", find_source_path(shp_poly_conform, ["foo.shp"]))
broken_conform = {"conform": { "format": "broken" }}
self.assertEqual(None, find_source_path(broken_conform, ["foo.shp"]))
def test_find_gdb_source_path(self):
shp_conform = {"conform": { "format": "gdb" } }
self.assertEqual("foo.gdb", find_source_path(shp_conform, ["foo.gdb"]))
self.assertEqual("FOO.GDB", find_source_path(shp_conform, ["FOO.GDB"]))
self.assertEqual("xyzzy/FOO.GDB", find_source_path(shp_conform, ["xyzzy/FOO.GDB"]))
self.assertEqual("foo.gdb", find_source_path(shp_conform, ["foo.gdb", "foo.prj", "foo.shx"]))
self.assertEqual(None, find_source_path(shp_conform, ["nope.txt"]))
self.assertEqual(None, find_source_path(shp_conform, ["foo.gdb", "bar.gdb"]))
shp_file_conform = {"conform": { "format": "gdb", "file": "foo.gdb" } }
self.assertEqual("foo.gdb", find_source_path(shp_file_conform, ["foo.gdb"]))
self.assertEqual("foo.gdb", find_source_path(shp_file_conform, ["foo.gdb", "bar.gdb"]))
self.assertEqual("xyzzy/foo.gdb", find_source_path(shp_file_conform, ["xyzzy/foo.gdb", "xyzzy/bar.gdb"]))
def test_find_geojson_source_path(self):
geojson_conform = {"protocol": "notESRI", "conform": {"format": "geojson"}}
self.assertEqual("foo.json", find_source_path(geojson_conform, ["foo.json"]))
self.assertEqual("FOO.JSON", find_source_path(geojson_conform, ["FOO.JSON"]))
self.assertEqual("xyzzy/FOO.JSON", find_source_path(geojson_conform, ["xyzzy/FOO.JSON"]))
self.assertEqual("foo.json", find_source_path(geojson_conform, ["foo.json", "foo.prj", "foo.shx"]))
self.assertEqual(None, find_source_path(geojson_conform, ["nope.txt"]))
self.assertEqual(None, find_source_path(geojson_conform, ["foo.json", "bar.json"]))
def test_find_esri_source_path(self):
# test that the legacy ESRI/GeoJSON style works
old_conform = {"protocol": "ESRI", "conform": {"format": "geojson"}}
self.assertEqual("foo.csv", find_source_path(old_conform, ["foo.csv"]))
# test that the new ESRI/CSV style works
new_conform = {"protocol": "ESRI", "conform": {"format": "csv"}}
self.assertEqual("foo.csv", find_source_path(new_conform, ["foo.csv"]))
def test_find_csv_source_path(self):
csv_conform = {"conform": {"format": "csv"}}
self.assertEqual("foo.csv", find_source_path(csv_conform, ["foo.csv"]))
csv_file_conform = {"conform": {"format": "csv", "file":"bar.txt"}}
self.assertEqual("bar.txt", find_source_path(csv_file_conform, ["license.pdf", "bar.txt"]))
self.assertEqual("aa/bar.txt", find_source_path(csv_file_conform, ["license.pdf", "aa/bar.txt"]))
self.assertEqual(None, find_source_path(csv_file_conform, ["foo.txt"]))
def test_find_xml_source_path(self):
c = {"conform": {"format": "xml"}}
self.assertEqual("foo.gml", find_source_path(c, ["foo.gml"]))
c = {"conform": {"format": "xml", "file": "xyzzy/foo.gml"}}
self.assertEqual("xyzzy/foo.gml", find_source_path(c, ["xyzzy/foo.gml", "bar.gml", "foo.gml"]))
self.assertEqual("/tmp/foo/xyzzy/foo.gml", find_source_path(c, ["/tmp/foo/xyzzy/foo.gml"]))
def test_normalize_ogr_filename_case1(self):
filename = os.path.join(self.testdir, 'file.geojson')
with open(filename, 'w') as file:
file.write('yo')
self.assertEqual(normalize_ogr_filename_case(filename), filename)
self.assertTrue(os.path.exists(normalize_ogr_filename_case(filename)))
def test_normalize_ogr_filename_case2(self):
filename = os.path.join(self.testdir, 'file.GeoJSON')
with open(filename, 'w') as file:
file.write('yo')
self.assertNotEqual(normalize_ogr_filename_case(filename), filename)
self.assertTrue(os.path.exists(normalize_ogr_filename_case(filename)))
def test_normalize_ogr_filename_case3(self):
filename = os.path.join(self.testdir, 'file.shp')
with open(filename, 'w') as file:
file.write('yo')
for otherbase in ('file.shx', 'file.dbf', 'file.prj'):
othername = os.path.join(self.testdir, otherbase)
with open(othername, 'w') as other:
other.write('yo')
self.assertEqual(normalize_ogr_filename_case(filename), filename)
self.assertTrue(os.path.exists(normalize_ogr_filename_case(filename)))
self.assertTrue(os.path.exists(os.path.join(self.testdir, 'file.shx')))
self.assertTrue(os.path.exists(os.path.join(self.testdir, 'file.dbf')))
self.assertTrue(os.path.exists(os.path.join(self.testdir, 'file.prj')))
def test_normalize_ogr_filename_case4(self):
filename = os.path.join(self.testdir, 'file.Shp')
with open(filename, 'w') as file:
file.write('yo')
for otherbase in ('file.Shx', 'file.Dbf', 'file.Prj'):
othername = os.path.join(self.testdir, otherbase)
with open(othername, 'w') as other:
other.write('yo')
self.assertNotEqual(normalize_ogr_filename_case(filename), filename)
self.assertTrue(os.path.exists(normalize_ogr_filename_case(filename)))
self.assertTrue(os.path.exists(os.path.join(self.testdir, 'file.shx')))
self.assertTrue(os.path.exists(os.path.join(self.testdir, 'file.dbf')))
self.assertTrue(os.path.exists(os.path.join(self.testdir, 'file.prj')))
def test_normalize_ogr_filename_case5(self):
filename = os.path.join(self.testdir, 'file.SHP')
with open(filename, 'w') as file:
file.write('yo')
for otherbase in ('file.SHX', 'file.DBF', 'file.PRJ'):
othername = os.path.join(self.testdir, otherbase)
with open(othername, 'w') as other:
other.write('yo')
self.assertNotEqual(normalize_ogr_filename_case(filename), filename)
self.assertTrue(os.path.exists(normalize_ogr_filename_case(filename)))
self.assertTrue(os.path.exists(os.path.join(self.testdir, 'file.shx')))
self.assertTrue(os.path.exists(os.path.join(self.testdir, 'file.dbf')))
self.assertTrue(os.path.exists(os.path.join(self.testdir, 'file.prj')))
def test_is_not_in(self):
self.assertFalse(is_in('foo', []), 'Should not match an empty list')
self.assertFalse(is_in('foo', ['bar']), 'Should not match')
self.assertTrue(is_in('foo', ['foo']), 'Should be a simple match')
self.assertTrue(is_in('Foo', ['foo']), 'Should be a case-insensitive match')
self.assertFalse(is_in('foo/bar', ['bar']), 'Should not match in a directory')
self.assertTrue(is_in('foo/bar', ['foo']), 'Should match a directory name')
self.assertTrue(is_in('Foo/bar', ['foo']), 'Should match a directory case-insensitively')
self.assertFalse(is_in('foo/bar/baz', ['baz']), 'Should not match in a nested directory')
self.assertTrue(is_in('foo/bar', ['foo/bar']), 'Should match a directory path')
self.assertTrue(is_in('foo/bar/baz', ['foo/bar']), 'Should match a directory path')
self.assertTrue(is_in('foo/bar/baz', ['foo']), 'Should match a directory path')
self.assertTrue(is_in('Foo/bar/baz', ['foo']), 'Should match a directory path case-insensitively')
self.assertTrue(is_in('foo/Bar', ['foo/bar']), 'Should match a directory path case-insensitively')
self.assertTrue(is_in('foo/Bar/baz', ['foo/bar']), 'Should match a directory path case-insensitively')
def test_geojson_source_to_csv(self):
''' Convert a GeoJSON source to CSV and verify the extracted coordinates and attributes.
'''
geojson_path = os.path.join(os.path.dirname(__file__), 'data/us-pa-bucks.geojson')
csv_path = os.path.join(self.testdir, 'us-tx-waco.csv')
geojson_source_to_csv(geojson_path, csv_path)
with open(csv_path, encoding='utf8') as file:
row = next(csv.DictReader(file))
self.assertAlmostEqual(float(row[X_FIELDNAME]), -74.98335721879076)
self.assertAlmostEqual(float(row[Y_FIELDNAME]), 40.054962450263616)
self.assertEqual(row['PARCEL_NUM'], '02-022-003')
class TestConformCsv(unittest.TestCase):
"Fixture to create real files to test csv_source_to_csv()"
# Test strings: an ASCII CSV file (with 1 row) and a Unicode CSV file,
# along with expected outputs. These are Unicode strings; test code needs
# to convert the input to bytes with the tested encoding.
_ascii_header_in = u'STREETNAME,NUMBER,LATITUDE,LONGITUDE'
_ascii_row_in = u'MAPLE ST,123,39.3,-121.2'
_ascii_header_out = u'STREETNAME,NUMBER,{X_FIELDNAME},{Y_FIELDNAME}'.format(**globals())
_ascii_row_out = u'MAPLE ST,123,-121.2,39.3'
_unicode_header_in = u'STRE\u00c9TNAME,NUMBER,\u7def\u5ea6,LONGITUDE'
_unicode_row_in = u'\u2603 ST,123,39.3,-121.2'
_unicode_header_out = u'STRE\u00c9TNAME,NUMBER,{X_FIELDNAME},{Y_FIELDNAME}'.format(**globals())
_unicode_row_out = u'\u2603 ST,123,-121.2,39.3'
def setUp(self):
self.testdir = tempfile.mkdtemp(prefix='openaddr-testPyConformCsv-')
def tearDown(self):
shutil.rmtree(self.testdir)
def _convert(self, conform, src_bytes):
"Convert a CSV source (list of byte strings) and return output as a list of unicode strings"
self.assertNotEqual(type(src_bytes), type(u''))
src_path = os.path.join(self.testdir, "input.csv")
with open(src_path, "w+b") as file:
file.write(b'\n'.join(src_bytes))
dest_path = os.path.join(self.testdir, "output.csv")
csv_source_to_csv(conform, src_path, dest_path)
with open(dest_path, 'rb') as file:
return [s.decode('utf-8').strip() for s in file]
def test_simple(self):
c = { "conform": { "format": "csv", "lat": "LATITUDE", "lon": "LONGITUDE" }, 'protocol': 'test' }
d = (self._ascii_header_in.encode('ascii'),
self._ascii_row_in.encode('ascii'))
r = self._convert(c, d)
self.assertEqual(self._ascii_header_out, r[0])
self.assertEqual(self._ascii_row_out, r[1])
def test_utf8(self):
c = { "conform": { "format": "csv", "lat": u"\u7def\u5ea6", "lon": u"LONGITUDE" }, 'protocol': 'test' }
d = (self._unicode_header_in.encode('utf-8'),
self._unicode_row_in.encode('utf-8'))
r = self._convert(c, d)
self.assertEqual(self._unicode_header_out, r[0])
self.assertEqual(self._unicode_row_out, r[1])
def test_csvsplit(self):
c = { "conform": { "csvsplit": ";", "format": "csv", "lat": "LATITUDE", "lon": "LONGITUDE" }, 'protocol': 'test' }
d = (self._ascii_header_in.replace(',', ';').encode('ascii'),
self._ascii_row_in.replace(',', ';').encode('ascii'))
r = self._convert(c, d)
self.assertEqual(self._ascii_header_out, r[0])
self.assertEqual(self._ascii_row_out, r[1])
unicode_conform = { "conform": { "csvsplit": u";", "format": "csv", "lat": "LATITUDE", "lon": "LONGITUDE" }, 'protocol': 'test' }
r = self._convert(unicode_conform, d)
self.assertEqual(self._ascii_row_out, r[1])
def test_csvencoded_utf8(self):
c = { "conform": { "encoding": "utf-8", "format": "csv", "lat": u"\u7def\u5ea6", "lon": u"LONGITUDE" }, 'protocol': 'test' }
d = (self._unicode_header_in.encode('utf-8'),
self._unicode_row_in.encode('utf-8'))
r = self._convert(c, d)
self.assertEqual(self._unicode_header_out, r[0])
self.assertEqual(self._unicode_row_out, r[1])
def test_csvencoded_shift_jis(self):
c = { "conform": { "encoding": "shift-jis", "format": "csv", "lat": u"\u7def\u5ea6", "lon": u"LONGITUDE" }, 'protocol': 'test' }
d = (u'\u5927\u5b57\u30fb\u753a\u4e01\u76ee\u540d,NUMBER,\u7def\u5ea6,LONGITUDE'.encode('shift-jis'),
u'\u6771 ST,123,39.3,-121.2'.encode('shift-jis'))
r = self._convert(c, d)
self.assertEqual(r[0], u'\u5927\u5b57\u30fb\u753a\u4e01\u76ee\u540d,NUMBER,{X_FIELDNAME},{Y_FIELDNAME}'.format(**globals()))
self.assertEqual(r[1], u'\u6771 ST,123,-121.2,39.3')
def test_headers_minus_one(self):
c = { "conform": { "headers": -1, "format": "csv", "lon": "COLUMN4", "lat": "COLUMN3" }, 'protocol': 'test' }
d = (u'MAPLE ST,123,39.3,-121.2'.encode('ascii'),)
r = self._convert(c, d)
self.assertEqual(r[0], u'COLUMN1,COLUMN2,{X_FIELDNAME},{Y_FIELDNAME}'.format(**globals()))
self.assertEqual(r[1], u'MAPLE ST,123,-121.2,39.3')
def test_headers_and_skiplines(self):
c = {"conform": { "headers": 2, "skiplines": 2, "format": "csv", "lon": "LONGITUDE", "lat": "LATITUDE" }, 'protocol': 'test' }
d = (u'HAHA,THIS,HEADER,IS,FAKE'.encode('ascii'),
self._ascii_header_in.encode('ascii'),
self._ascii_row_in.encode('ascii'))
r = self._convert(c, d)
self.assertEqual(self._ascii_header_out, r[0])
self.assertEqual(self._ascii_row_out, r[1])
def test_perverse_header_name_and_case(self):
# This is an example inspired by the hipsters in us-or-portland
# Conform says lowercase but the actual header is uppercase.
# Also the columns are named X and Y in the input
c = {"conform": {"lon": "x", "lat": "y", "number": "n", "street": "s", "format": "csv"}, 'protocol': 'test'}
d = (u'n,s,X,Y'.encode('ascii'),
u'3203,SE WOODSTOCK BLVD,-122.629314,45.479425'.encode('ascii'))
r = self._convert(c, d)
self.assertEqual(r[0], u'n,s,{X_FIELDNAME},{Y_FIELDNAME}'.format(**globals()))
self.assertEqual(r[1], u'3203,SE WOODSTOCK BLVD,-122.629314,45.479425')
def test_srs(self):
# This is an example inspired by the hipsters in us-or-portland
c = {"conform": {"lon": "x", "lat": "y", "srs": "EPSG:2913", "number": "n", "street": "s", "format": "csv"}, 'protocol': 'test'}
d = (u'n,s,X,Y'.encode('ascii'),
u'3203,SE WOODSTOCK BLVD,7655634.924,668868.414'.encode('ascii'))
r = self._convert(c, d)
self.assertEqual(r[0], u'n,s,{X_FIELDNAME},{Y_FIELDNAME}'.format(**globals()))
self.assertEqual(r[1], u'3203,SE WOODSTOCK BLVD,-122.6308422,45.4815544')
def test_too_many_columns(self):
"Check that we don't barf on input with too many columns in some rows"
c = { "conform": { "format": "csv", "lat": "LATITUDE", "lon": "LONGITUDE" }, 'protocol': 'test' }
d = (self._ascii_header_in.encode('ascii'),
self._ascii_row_in.encode('ascii'),
u'MAPLE ST,123,39.3,-121.2,EXTRY'.encode('ascii'))
r = self._convert(c, d)
self.assertEqual(2, len(r))
self.assertEqual(self._ascii_header_out, r[0])
self.assertEqual(self._ascii_row_out, r[1])
def test_esri_csv(self):
# Test that our ESRI-emitted CSV is converted correctly.
c = { "protocol": "ESRI", "conform": { "format": "geojson", "lat": "theseare", "lon": "ignored" } }
d = (u'STREETNAME,NUMBER,OA:x,OA:y'.encode('ascii'),
u'MAPLE ST,123,-121.2,39.3'.encode('ascii'))
r = self._convert(c, d)
self.assertEqual(self._ascii_header_out, r[0])
self.assertEqual(self._ascii_row_out, r[1])
def test_esri_csv_no_lat_lon(self):
# Test that the ESRI path works even without lat/lon tags. See issue #91
c = { "protocol": "ESRI", "conform": { "format": "geojson" } }
d = (u'STREETNAME,NUMBER,OA:x,OA:y'.encode('ascii'),
u'MAPLE ST,123,-121.2,39.3'.encode('ascii'))
r = self._convert(c, d)
self.assertEqual(self._ascii_header_out, r[0])
self.assertEqual(self._ascii_row_out, r[1])
class TestConformLicense (unittest.TestCase):
def test_license_string(self):
''' Test that simple license strings are converted correctly.
'''
self.assertIsNone(conform_license(None))
self.assertEqual(conform_license('CC-BY-SA'), 'CC-BY-SA')
self.assertEqual(conform_license('http://example.com'), 'http://example.com')
self.assertEqual(conform_license(u'\xa7 unicode \xa7'), u'\xa7 unicode \xa7')
def test_license_dictionary(self):
''' Test that simple license strings are converted correctly.
'''
self.assertIsNone(conform_license({}))
self.assertEqual(conform_license({'text': 'CC-BY-SA'}), 'CC-BY-SA')
self.assertEqual(conform_license({'url': 'http://example.com'}), 'http://example.com')
self.assertEqual(conform_license({'text': u'\xa7 unicode \xa7'}), u'\xa7 unicode \xa7')
license = {'text': 'CC-BY-SA', 'url': 'http://example.com'}
self.assertIn(license['text'], conform_license(license))
self.assertIn(license['url'], conform_license(license))
def test_attribution(self):
''' Test combinations of attribution data.
'''
attr_flag1, attr_name1 = conform_attribution(None, None)
self.assertIs(attr_flag1, False)
self.assertIsNone(attr_name1)
attr_flag2, attr_name2 = conform_attribution({}, None)
self.assertIs(attr_flag2, False)
self.assertIsNone(attr_name2)
attr_flag3, attr_name3 = conform_attribution(None, '')
self.assertIs(attr_flag3, False)
self.assertIsNone(attr_name3)
attr_flag4, attr_name4 = conform_attribution({}, '')
self.assertIs(attr_flag4, False)
self.assertIsNone(attr_name4)
attr_flag5, attr_name5 = conform_attribution(None, u'Joe Bl\xf6')
self.assertIs(attr_flag5, True)
self.assertEqual(attr_name5, u'Joe Bl\xf6')
attr_flag6, attr_name6 = conform_attribution({}, u'Joe Bl\xf6')
self.assertIs(attr_flag6, True)
self.assertEqual(attr_name6, u'Joe Bl\xf6')
attr_flag7, attr_name7 = conform_attribution({'attribution': False}, u'Joe Bl\xf6')
self.assertIs(attr_flag7, False)
        self.assertIsNone(attr_name7)
attr_flag8, attr_name8 = conform_attribution({'attribution': True}, u'Joe Bl\xf6')
self.assertIs(attr_flag8, True)
self.assertEqual(attr_name8, u'Joe Bl\xf6')
attr_flag9, attr_name9 = conform_attribution({'attribution': None}, u'Joe Bl\xf6')
self.assertIs(attr_flag9, True)
self.assertEqual(attr_name9, u'Joe Bl\xf6')
attr_flag10, attr_name10 = conform_attribution({'attribution': False, 'attribution name': u'Joe Bl\xf6'}, None)
self.assertIs(attr_flag10, False)
        self.assertIsNone(attr_name10)
attr_flag11, attr_name11 = conform_attribution({'attribution': True, 'attribution name': u'Joe Bl\xf6'}, None)
self.assertIs(attr_flag11, True)
self.assertEqual(attr_name11, u'Joe Bl\xf6')
attr_flag12, attr_name12 = conform_attribution({'attribution': None, 'attribution name': u'Joe Bl\xf6'}, None)
self.assertIs(attr_flag12, True)
self.assertEqual(attr_name12, u'Joe Bl\xf6')
attr_flag13, attr_name13 = conform_attribution({'attribution': None, 'attribution name': u'Joe Bl\xf6'}, 'Jon Snow')
self.assertIs(attr_flag13, True)
self.assertEqual(attr_name13, u'Joe Bl\xf6')
attr_flag14, attr_name14 = conform_attribution({'attribution': None, 'attribution name': False}, None)
self.assertIs(attr_flag14, True)
self.assertEqual(attr_name14, 'False')
def test_sharealike(self):
        ''' Test combinations of share-alike data.
'''
for undict in (None, False, True, 'this', 'that'):
self.assertIs(conform_sharealike(undict), None, '{} should be None'.format(undict))
for value1 in (False, 'No', 'no', 'false', 'False', 'n', 'f', None, ''):
dict1 = {'share-alike': value1}
self.assertIs(conform_sharealike(dict1), False, 'sa:{} should be False'.format(repr(value1)))
for value2 in (True, 'Yes', 'yes', 'true', 'True', 'y', 't'):
dict2 = {'share-alike': value2}
self.assertIs(conform_sharealike(dict2), True, 'sa:{} should be True'.format(repr(value2)))
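# The assertions above pin down the observable behaviour of conform_sharealike().
# What follows is only a minimal sketch consistent with those assertions, not the
# actual openaddr implementation: non-dict inputs map to None, and the
# 'share-alike' value is reduced to a boolean by its leading letter.
def _conform_sharealike_sketch(conform):
    if not isinstance(conform, dict):
        return None
    value = conform.get('share-alike')
    if value is True:
        return True
    if isinstance(value, str) and value[:1].lower() in ('y', 't'):
        return True
    return False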
class TestConformTests (unittest.TestCase):
def test_good_tests(self):
        ''' Test that sources with good or implied tests pass, with no message.
        '''
filenames = ['cz-countrywide-good-tests.json', 'cz-countrywide-implied-tests.json']
for filename in filenames:
with open(os.path.join(os.path.dirname(__file__), 'sources', filename)) as file:
source = json.load(file)
result, message = check_source_tests(source)
self.assertIs(result, True, 'Tests should pass in {}'.format(filename))
self.assertIsNone(message, 'No message expected from {}'.format(filename))
def test_bad_tests(self):
        ''' Test that a source with failing tests is reported as failing, with a message.
        '''
with open(os.path.join(os.path.dirname(__file__), 'sources', 'cz-countrywide-bad-tests.json')) as file:
source = json.load(file)
result, message = check_source_tests(source)
self.assertIs(result, False, 'Tests should fail in {}'.format(file.name))
self.assertIn('address with /-delimited number', message, 'A message is expected from {}'.format(file.name))
def test_no_tests(self):
        ''' Test that sources with no tests or disabled tests yield no result and no message.
        '''
filenames = ['cz-countrywide-no-tests.json', 'cz-countrywide-disabled-tests.json']
for filename in filenames:
with open(os.path.join(os.path.dirname(__file__), 'sources', filename)) as file:
source = json.load(file)
result, message = check_source_tests(source)
self.assertIsNone(result, 'Tests should not exist in {}'.format(filename))
self.assertIsNone(message, 'No message expected from {}'.format(filename))
# Join first and last names
# Write a function that receives two lists, one with first names and another with the corresponding last names, and returns a new list with the first and last names joined into a single string. Put exactly one space between the first name and the last name.
# Your function must be named junta_nome_sobrenome.
def junta_nome_sobrenome(nomes, sobrenomes):
    # Pair each first name with the last name at the same index.
    nomes_sobrenomes = []
    i = 0
    while i < len(nomes):
        nome_sobrenome = nomes[i] + " " + sobrenomes[i]
        nomes_sobrenomes.append(nome_sobrenome)
        i += 1
    return nomes_sobrenomes
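# A quick usage sketch for junta_nome_sobrenome; the sample names below are
# hypothetical and only illustrate the expected output format.
if __name__ == "__main__":
    nomes = ["Ana", "Bruno"]
    sobrenomes = ["Silva", "Costa"]
    print(junta_nome_sobrenome(nomes, sobrenomes))  # ['Ana Silva', 'Bruno Costa']

# An equivalent, arguably more idiomatic version using zip(); it assumes the
# two lists have the same length, as the exercise statement implies.
def junta_nome_sobrenome_zip(nomes, sobrenomes):
    return [nome + " " + sobrenome for nome, sobrenome in zip(nomes, sobrenomes)]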