metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "Jiyvn/pyautotest",
"score": 2
} |
#### File: pyautotest/app/appbase.py
```python
from typing import Union

from appium.webdriver.common.mobileby import MobileBy
appPackage = {
'fake_location': 'com.lerist.fakelocation',
'chrome': "com.android.chrome",
'settings': 'com.android.settings',
'gmail': 'com.google.android.gm',
}
appActivity = {
'fake_location': '.ui.activity.MainActivity',
'chrome': "com.google.android.apps.chrome.Main",
'settings': '.Settings',
'gmail': '.ConversationListActivityGmail',
'gmail_wel': '.welcome.WelcomeTourActivity',
}
appBundleId = {
'chrome': 'com.google.chrome.ios',
'gmail': 'com.google.Gmail',
'safari': 'com.apple.mobilesafari',
'settings': 'com.apple.Preferences',
'appstore': 'com.apple.AppStore',
}
android_base = {
'text': lambda value: (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format(value)),
'text_contains': lambda value: (
MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().textContains("{0}")'.format(value)),
'text_view': lambda value: (MobileBy.XPATH, '//android.widget.TextView[@text="{0}"]'.format(value)),
'button': lambda value: (
MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().className("android.widget.Button").text("{0}")'.format(value)),
'edittext': lambda value: (MobileBy.XPATH, '//android.widget.EditText[@text="{0}"]'.format(value)),
'desc_contains': lambda value: (
MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().descriptionContains("{0}")'.format(value)), # content-desc attribute
}
ios_base = {
'value': lambda v: (MobileBy.XPATH, '//*[@value="{}"]'.format(v)),
'value_contains': lambda v: (MobileBy.XPATH, '//*[contains(@value,"{}")]'.format(v)),
'name': lambda v: (MobileBy.XPATH, '//*[@name="{}"]'.format(v)),
'name_contains': lambda v: (MobileBy.XPATH, '//*[contains(@name,"{}")]'.format(v)),
'btn_name': lambda v: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeButton" AND name=="{}"'.format(v)),
'btn_name_contains': lambda v: (
MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeButton" AND name CONTAINS "{}"'.format(v)),
'switch_name': lambda v: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeSwitch" AND name=="{}"'.format(v)),
'switch_name_contains': lambda v: (
MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeSwitch" AND name CONTAINS "{}"'.format(v)),
'cell_name': lambda v: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeCell" AND name=="{}"'.format(v)),
'cell_name_contains': lambda v: (
MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeCell" AND name CONTAINS "{}"'.format(v)),
}
'''
Keys on the iOS keyboard are case-sensitive. The special keys shift, delete, more, space, @, ., Return,
and Next keyboard (switches the input language, e.g. between Chinese and English) are case-insensitive.
'''
ios_keyboard = {
'done': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label MATCHES "Done|完成"`]'),
'key': lambda k: (MobileBy.ACCESSIBILITY_ID, '{0}'.format(k))
}
def scrollable(locators: Union[list, str]):
if isinstance(locators, list):
return (MobileBy.ANDROID_UIAUTOMATOR, 'new UiScrollable(new UiSelector().scrollable(true).instance(0)).scrollIntoView(new UiSelector().%s)' % '.'.join(locators))
elif isinstance(locators, str):
return (MobileBy.ANDROID_UIAUTOMATOR, 'new UiScrollable(new UiSelector().scrollable(true).instance(0)).scrollIntoView(new UiSelector().%s)' % locators)
def selector(locators: Union[list, str]):
if isinstance(locators, list):
return (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().%s' % '.'.join(locators))
elif isinstance(locators, str):
return (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().%s' % locators)
settings = {
'nothing_en': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("%s")' % 'Nothing'),
'nothing_zh': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("%s")' % '无'),
'None': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("%s")' % 'None'),
'fake_location': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('Fake Location')),
'select_mock_app_en': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().textContains("%s")' % 'Mock'),
'select_mock_app_zh': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().textContains("%s")' % '模拟位置'),
'select_text_zh': '选择模拟位置信息应用',
'select_text_en': 'Select mock location app',
# ********* iOS ************
'ios_setting_page_title': (MobileBy.XPATH, '//XCUIElementTypeStaticText[@name="Settings"]'),
'ios_setting_search_field': (MobileBy.ACCESSIBILITY_ID, 'Search'),
# Bluetooth page element: its value attribute is 1 when Bluetooth is on and 0 when it is off
'ios_bluetooth': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name=="{1}"'.format('XCUIElementTypeSwitch','Bluetooth')),
'ios_bluetooth2': (MobileBy.XPATH, '//XCUIElementTypeSwitch[@name="Bluetooth"]'),
# Settings home page element: the On/Off item; tapping it opens the Bluetooth page
'ios_bluetooth_item': (MobileBy.XPATH, '//XCUIElementTypeStaticText[@name="Bluetooth"]/following-sibling::XCUIElementTypeStaticText[1]'),
'ios_setting_items': lambda x: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeCell" AND name=="{0}"'.format(x)),
'ios_setting_toggles': lambda x: (MobileBy.XPATH, '//XCUIElementTypeSwitch[@name="{0}"]'.format(x)),
'ios_setting_wifi': (MobileBy.XPATH, '//XCUIElementTypeSwitch[@name="Wi-Fi"]'),
'ios_back_to_current_app': (MobileBy.ACCESSIBILITY_ID, 'breadcrumb'), # remember to pass approach = 'p' for this one
'ios_setting_items_title': lambda x: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeOther" AND name=="{0}"'.format(x)),
# 'ios_general': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name=="{1}"'.format('XCUIElementTypeCell', 'General')),
'ios_general': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name=="{1}"'.format('XCUIElementTypeCell', '通用')),
'ios_date&time': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name=="{1}"'.format('XCUIElementTypeCell', 'Date & Time')),
'ios_profile&devicemanagement': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name CONTAINS "{1}"'.format('XCUIElementTypeCell', 'Device Management')),
'ios_trust_app_btn': lambda x: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeStaticText" AND value=="Trust “{0}”"'.format(x)), # e.g. Trust “Fisher-Price, Inc.”
'ios_trust_app_dialog_title': lambda x: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeStaticText" AND value CONTAINS "Trust “iPhone Distribution: {0}”"'.format(x)), # e.g. Trust “iPhone Distribution: Fisher-Price, Inc.” Apps on This iPhone
'ios_trust_btn': (MobileBy.ACCESSIBILITY_ID, 'Trust'),
# 24-hour time switch: its value is 0 when the device is in 12-hour mode, 1 otherwise
'ios_24hr': (MobileBy.IOS_PREDICATE, 'type=="{0}" AND name=="{1}"'.format('XCUIElementTypeCell','24-Hour Time')),
'ios_24hr_x': (MobileBy.XPATH, '//XCUIElementTypeCell[@name="24-Hour Time"]'),
}
app_store = {
'continue': (MobileBy.IOS_PREDICATE, 'label == "继续" AND name == "继续" AND type == "XCUIElementTypeButton"'), # "Continue" pop-up page
'allow_when_using': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "使用App时允许"`]'),
'app_item': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "App"`]'),
'account': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "我的帐户"`]'),
'search_item': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "搜索"`]'),
'search_field': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeNavigationBar[`name == "搜索"`]/XCUIElementTypeSearchField'),
'keyboard_continue': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "继续"`]'), # appears after tapping the search field
'keyboard_search_btn': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`name == "Search"`]'), # appears after tapping the search field
'progress_circle': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeActivityIndicator[`label MATCHES "正在载入|进行中"`]'), # indicator shown while search results load
'retry': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "重试"`]'), # shown when the search fails
'app': lambda a: (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label CONTAINS "{}"`]'.format(a)), # Fisher-Price® Smart Connect™
'navigate_search_btn': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeNavigationBar[`name == "搜索"`]'), # app detail page
'reload_btn': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "重新下载"`]'), # app detail page
'get_btn': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "获取"`]'), # app detail page, first-time download
'upgrade_btn': (MobileBy.IOS_CLASS_CHAIN, ''),
'in_process': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "正在载入"`]'), # circular progress button
'downloading': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "正在下载"`]'), # pause button
'open_app': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "打开"`]'),
}
notification = {
'ios_notification': lambda msg_title: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeScrollView" AND (name CONTAINS "{0}" OR label CONTAINS "{0}")'.format(msg_title)), # notifications from one app; multiple notifications are usually stacked and must be tapped to expand. Pass the name value, normally the app title of the expanded messages, e.g. SMART CONNECT
'ios_nt_msg': lambda c: (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeButton" AND name=="NotificationCell" AND label CONTAINS "{}"'.format(c)), # (swipe right to delete) e.g. the message: Animal projection on your My Child's Deluxe Soother is turning off soon.
'ios_nt_clear': lambda msg_title: (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label BEGINSWITH "{}"`]/XCUIElementTypeButton[`label == "Clear"`][1]'.format(msg_title)), # Clear button revealed by swiping a message to the left
'ios_nt_clear_all': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "Clear All"`]'), # Clear All button revealed by swiping a message to the left
'ios_clear_all_btn': (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeButton" AND name=="clear-button" AND value=="Notification Center"'), # button (x) that clears notifications from all apps
'ios_clear_btn': lambda app: (MobileBy.XPATH, '//XCUIElementTypeStaticText[@name="{}"]/../following::XCUIElementTypeButton[@name="clear-button"]'.format(app)), # button (x) that clears one app's notifications; it has no label value, e.g. Smart Connect
'ios_confirm_clear': (MobileBy.IOS_PREDICATE, 'type=="XCUIElementTypeButton" AND name=="clear-button" AND label=="Confirm Clear"'), # Clear button shown after tapping x; generic
}
camera = {
# in-app camera capture screen
'ios_capture': (MobileBy.ACCESSIBILITY_ID, 'PhotoCapture'),
'ios_cancel_capture': (MobileBy.ACCESSIBILITY_ID, 'Cancel'),
'ios_switch_camera': (MobileBy.ACCESSIBILITY_ID, 'FrontBackFacingCameraChooser'),
# the flash has three values: Automatic, On, Off; it cannot be set with send_keys, so tap the Flash icon and pick from the submenu below
'ios_flash_light': (MobileBy.ACCESSIBILITY_ID, 'Flash'),
'ios_flash_auto': (MobileBy.ACCESSIBILITY_ID, 'Auto'),
'ios_flash_on': (MobileBy.ACCESSIBILITY_ID, 'On'),
'ios_flash_off': (MobileBy.ACCESSIBILITY_ID, 'Off'),
# preview screen
'ios_retake': (MobileBy.ACCESSIBILITY_ID, 'Retake'),
'ios_use': (MobileBy.ACCESSIBILITY_ID, 'Use Photo'),
# crop screen
'ios_crop_use': (MobileBy.ACCESSIBILITY_ID, 'useButton'),
'ios_crop_cancel': (MobileBy.ACCESSIBILITY_ID, 'cancelButton'),
}
albums = {
# in-app photo album screen (photo picker)
'ios_cancel': (MobileBy.ACCESSIBILITY_ID, 'Cancel'),
# individual albums; just pass the album name. The system albums are usually Camera Roll, Recently Added, Screenshots, plus albums created by each app
'ios_albums': lambda x: (MobileBy.ACCESSIBILITY_ID, '{0}'.format(x)),
'ios_back_btn': (MobileBy.ACCESSIBILITY_ID, 'Photos'), # the back button in the top-left corner, used to return to the album list
# photos in an album; the newest photo is usually last, so last() can be used, or pass a number to get the Nth photo, e.g. 200 returns the 200th photo in the album
# note that only a limited number of photos are rendered at a time; tapping a photo that is not yet rendered first scrolls the album to it, and a second tap is needed to actually select it
'ios_photos_by_position': lambda x: (MobileBy.XPATH, '//XCUIElementTypeCollectionView[@name="PhotosGridView"]/XCUIElementTypeCell[{0}]'.format(x)),
# crop screen
'ios_crop_use': (MobileBy.ACCESSIBILITY_ID, 'useButton'),
'ios_crop_cancel': (MobileBy.ACCESSIBILITY_ID, 'cancelButton'),
}
fake_location = {
'menu_btn': (MobileBy.ACCESSIBILITY_ID, 'Open navigation drawer'),
'start_to_fake': (MobileBy.ID, 'com.lerist.fakelocation:id/f_fakeloc_tv_service_switch'),
'add_btn': (MobileBy.ID, 'com.lerist.fakelocation:id/fab'),
'current_coords': (MobileBy.ID, 'com.lerist.fakelocation:id/f_fakeloc_tv_current_latlong'),
'running_mode': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('运行模式')),
'no_root_mode': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().textContains("{0}")'.format('NOROOT')),
'root_mode': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().textContains("{0}")'.format('ROOT(推荐)')),
'permission_allow': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('允许')),
'permission_allow_id': (MobileBy.ID, 'com.android.packageinstaller:id/dialog_container'),
'title_choose_location': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('选择位置')),
'search_btn': (MobileBy.ID, 'com.lerist.fakelocation:id/m_item_search'),
'search_box': (MobileBy.ID, 'com.lerist.fakelocation:id/l_search_panel_et_input'),
'confirm_btn': (MobileBy.ID, 'com.lerist.fakelocation:id/a_map_btn_done'),
'back_btn': (MobileBy.ACCESSIBILITY_ID, '转到上一层级'),
'update_next_time': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('下次再说')),
'forward_toset': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('前往设置')),
'get_permission': (MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("{0}")'.format('前往授权')),
# common elements: dialogs and their confirm/cancel buttons
'native_dialog': (MobileBy.ID, 'android:id/parentPanel'),
'prompt_dialog': (MobileBy.ID, 'com.lerist.fakelocation:id/parentPanel'),
'dialog_confirm_btn': (MobileBy.ID, 'android:id/button1'),
'dialog_cancel_btn': (MobileBy.ID, 'android:id/button2'),
}
```
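A brief usage sketch of the locator helpers defined above. It only builds locator tuples, so no Appium driver is needed; the import path `app.appbase` is an assumption based on the file header.
```python
from app.appbase import android_base, selector, scrollable  # assumed import path, per the file header

# A ready-made UiAutomator locator for a button labelled "OK".
ok_button = android_base['button']('OK')

# selector() joins several UiSelector clauses with '.' into one locator.
allow_btn = selector(['className("android.widget.Button")', 'textContains("Allow")'])

# scrollable() wraps the same clauses in a UiScrollable so the element is scrolled into view first.
allow_btn_scrolled = scrollable(['className("android.widget.Button")', 'textContains("Allow")'])

print(ok_button)
print(allow_btn_scrolled)
```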
#### File: pyautotest/app/cli.py
```python
import argparse
class appOptions:
show_devices = '--show-devices'
clean_report = '--clean-report'
device_config = '--device-config'
global_config = '--global-config'
test_case = '--test-case'
tests_dir = '--tests-dir'
device = '--device'
test = '--test'
service_address = '--service-address'
bp = '--bp'
disable_screenshot = '--disable-screenshot'
output_dir = '--output-dir'
separate = '--separate'
allure_report = '--allure-report'
clean = '--clean'
log_level = '--pyauto-log-level'
# log_file = '--pyauto-log-file'
class Parser(object):
def __init__(self, parser=None, attach=True):
self.options = None
self.parser = parser or argparse.ArgumentParser()
if attach:
self.addoption()
def addoption(self):
self.add_help_option()
# configuration files
self.add_config_option()
# test device
self.add_device_option()
# test modules
self.add_tests_option()
# log configuration
self.add_log_option()
# output
self.add_output_option()
# appium
self.add_appium_option()
# testing
self.add_testing_option()
def parse_arg(self, op=None):
self.options = self.parser.parse_args(op)
return self.options
def parse_known_args(self, op):
return self.parser.parse_known_args(op)
def add_config_option(self):
# configuration files
self.parser.add_argument(
appOptions.device_config,
type=str,
help='device configuration file'
)
self.parser.add_argument(
appOptions.global_config,
type=str,
help='global configuration file'
)
self.parser.add_argument(
appOptions.test_case,
type=str,
help='Test case file'
)
def add_device_option(self):
# device to run on: a device name; passing ios/android picks the default iOS/Android device, and omitting it picks the "default" device
self.parser.add_argument(
appOptions.device,
type=str,
help='device to test on, such as ios, android, <device>'
)
def add_tests_option(self):
# test cases (modules) to run: ios/android/...
self.parser.add_argument(
appOptions.test,
nargs='*',
help='Test case to run, such as: ios, android, <dir>/<test_case.py>'
)
self.parser.add_argument(
appOptions.tests_dir,
type=str,
help='directory containing the test cases'
)
def add_testing_option(self):
self.parser.add_argument(
appOptions.disable_screenshot,
action='store_true',
help='Disable device screenshot',
)
def add_log_option(self):
# log configuration
self.parser.add_argument(
appOptions.log_level,
type=str,
help='pyautotest log level',
)
def add_output_option(self):
# report
self.parser.add_argument(
appOptions.output_dir,
type=str,
help='test report directory'
)
self.parser.add_argument(
appOptions.separate,
action='store_true',
help='separate report directory each run',
)
self.parser.add_argument(
appOptions.allure_report,
action='store_true',
help='generate allure report',
)
self.parser.add_argument(
appOptions.clean,
action='store_true',
help='--clean for allure report command',
)
def add_appium_option(self):
# appium
self.parser.add_argument(
appOptions.service_address,
type=str,
help='Appium service address'
)
self.parser.add_argument(
appOptions.bp,
type=str,
help='WebDriverAgent port or Bootstrap port'
)
def add_help_option(self):
self.parser.add_argument(
appOptions.show_devices,
action='store_true',
help='show available devices in device.yml',
)
self.parser.add_argument(
appOptions.clean_report,
action='store_true',
help='clean reports, excluding logs',
)
class pytestOption(object):
def __init__(self, parser):
self.parser = parser
def add_config_option(self):
# configuration files
self.parser.addoption(
'--device-config',
type=str,
help='device configuration file'
)
self.parser.addoption(
'--global-config',
type=str,
help='global configuration file'
)
self.parser.addoption(
'--test-case',
type=str,
help='Test case file'
)
self.parser.addoption(
'--data',
type=str,
help='Data file'
)
def add_device_option(self):
# device to run on: a device name; passing ios/android picks the default iOS/Android device, and omitting it picks the "default" device
self.parser.addoption(
'--device',
type=str,
help='device to test on, such as ios, android, <device>'
)
self.parser.addoption(
'--system-port',
type=str,
help='android desired capabilities - systemPort'
)
self.parser.addoption(
'--platform',
type=str,
help='testing device platform, such as ios/android'
)
def add_case_option(self):
# test cases (modules) to run: ios/android/bunny/...
self.parser.addoption(
'--test',
type=str,
help='Test case to run, such as: ios, android, <test_case.py>'
)
def add_log_option(self):
# log configuration
self.parser.addoption(
'--pyauto-log-file',
type=str,
help='pyautotest log file',
)
def add_output_option(self):
# report
self.parser.addoption(
'--output-dir',
type=str,
help='output directory'
)
def add_appium_option(self):
# appium
self.parser.addoption(
'--service-address',
type=str,
help='Appium server host'
)
self.parser.addoption(
'--port',
type=str,
help='Appium server port'
)
self.parser.addoption(
'--bp',
type=str,
help='WebDriverAgent Port or Bootstrap Port'
)
def add_attachment_option(self):
self.parser.addoption(
'--disable-screenshot',
action='store_true',
help='Disable screenshot',
)
```
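A minimal sketch of driving the Parser above from code; the option values shown are illustrative only, not project defaults, and the import path `app.cli` is assumed from the file header.
```python
from app.cli import Parser, appOptions  # assumed import path, per the file header

parser = Parser()  # attach=True wires up all option groups in __init__
opts = parser.parse_arg([
    appOptions.device, 'ios',
    appOptions.test, 'ios',
    appOptions.output_dir, './reports',
    appOptions.allure_report,
])
print(opts.device, opts.test, opts.output_dir, opts.allure_report)
```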
#### File: app/utils/pathz.py
```python
import os
import shutil
from pathlib import Path
class pathz(object):
def __init__(self, f=None):
self.f = f
@staticmethod
def makedirs(dpath):
if not os.path.exists(dpath):
os.makedirs(dpath)
@staticmethod
def touch(fpath, repc=False):
if repc:
if os.path.exists(fpath):
pathz.delete(fpath)
if os.path.exists(fpath):
raise FileExistsError(
"Cannot create a file when that file already exists: '{}'".format(fpath)
)
# Path().touch() won't replace fpath and won't raise error
Path(fpath).touch()
@staticmethod
def delete(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
@staticmethod
def copy(src, dst, repc=False):
"""
:param src:
:param dst:
:param repc:
:return:
"""
if repc:
if os.path.exists(dst):
pathz.delete(dst)
if os.path.isdir(src):
# shutil.copytree raise err if dst dir exists
shutil.copytree(src, dst)
else:
# shutil.copy directly replace dst file if exists
if os.path.exists(dst):
raise FileExistsError(
"Cannot create a file when that file already exists: '{}'".format(dst)
)
shutil.copy(src, dst)
```
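A short usage sketch for pathz; the paths below are examples only, and the import path follows the file header.
```python
from app.utils.pathz import pathz  # assumed import path, per the file header

pathz.makedirs('./tmp_demo')                          # create the directory if it is missing
pathz.touch('./tmp_demo/a.txt')                       # create an empty file; raises if it already exists
pathz.copy('./tmp_demo/a.txt', './tmp_demo/b.txt')    # copy; raises FileExistsError if b.txt exists
pathz.copy('./tmp_demo/a.txt', './tmp_demo/b.txt', repc=True)  # replace the existing destination
pathz.delete('./tmp_demo')                            # remove the whole directory tree
```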
#### File: tests/ios/test_sample.py
```python
import sys
from app import logger
class TestIOSSample:
def test_print_ios(self, device):
logger.info(sys._getframe().f_code.co_name)
logger.info(f"device info: {device}")
```
#### File: pyautotest/unittests/test_mobile.py
```python
import time
import unittest
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from app.appbase import settings
from app.core.drivers.mobile import Mobile
class MobileTest(unittest.TestCase):
driver = None
@classmethod
def setUpClass(cls):
android_set = {
'LG_G6': {
'deviceName': 'LG_G6',
'devicePlatform': 'Android',
'platformVersion': '7.0',
'udid': 'VS988b3876fa0', # adb devices //android SDK
'appPackage': 'com.android.settings',
'appActivity': '.Settings',
}
}
ios_set = {
'iphone7': {
'automationName': 'XCUITest',
'platformName': 'iOS',
'platformVersion': '13.1.3',
'deviceName': 'iphone7',
'udid': '7524ead2a9a14eab947b648258ba4e02c5c12604', # idevice_id -l // brew install libimobiledevice --HEAD
'bundleId': 'com.apple.Preferences',
# 'startIWDP': True,
'launchTimeout': 60000,
'newCommandTimeout': 3600,
}
}
cls.param = {
'p': '4723',
'bp': '4724',
'system_port': '8100'
}
cls.desired_caps = ios_set['iphone7']
cls.driver = webdriver.Remote("http://127.0.0.1:" + cls.param['p'] + "/wd/hub", cls.desired_caps)
@classmethod
def tearDownClass(cls):
cls.driver.quit()
def test_bluetooth(self):
settings = {
'bluetooth': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeCell[`label == "蓝牙"`]'),
}
bt = {
'back_button': (MobileBy.IOS_CLASS_CHAIN, '**/XCUIElementTypeButton[`label == "设置"`]'),
'bluetooth_switch': (MobileBy.IOS_PREDICATE, 'type == "XCUIElementTypeSwitch" AND label == "蓝牙"'),
}
page = Mobile(self.driver)
page.container(settings)
print(page.bluetooth.text)
# assert page.bluetooth.text == '打开'
page.bluetooth.click()
page.wait(1).container(bt)
assert page.bluetooth_switch.found() is True
page.bluetooth_switch.click()
page.back_button.click()
page.wait(1).container(settings)
page.timeout(3)
page['bluetooth'].click()
page.wait(1).container(bt)
page['bluetooth_switch'].found.click()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jiyzhang/bert-ner",
"score": 2
} |
#### File: jiyzhang/bert-ner/preprocess.py
```python
import numpy as np
from pathlib import Path
import os
from data import fwords, ftags, DATADIR, build_hanzi_vocab, build_tag_vocab, build_hanzi_embedding
path = DATADIR / 'boson_ann_NONB'
files = os.listdir(path)
files_txt = [i for i in files if i.endswith('.txt')]
files = [i[0:-4] for i in files_txt]
sent_file = DATADIR / "sent.txt"
tag_file = DATADIR / "tag.txt"
def append_sents_tags(sent_f, tag_f, file):
"""
读取brat annotation tool的标注结果,转换为如下格式:
句子: 以"。"为分隔符,一个句子一行。句子内的字之间以"|"来分割
标签:对应于每一个句子的标签,长度与句子相同,不同字的标签之间通过"|"分割
:param sent_f: 输出句子文件的 file handler
:param tag_f: 输出标签文件的file handler
:param file: 文件名,不含扩展名。用于拼接处句子文件名(.txt)和标签文件名(.ann)
:return:
"""
sent_file = path / (file + ".txt")
tag_file = path / (file + ".ann")
print(file)
with Path(sent_file).open() as f_s, Path(tag_file).open() as f_t:
# full text of the announcement, a single line
sent_content = f_s.readline().strip()
sent_size = len(sent_content)
tags_content = list(['O'] * sent_size)
tag_lines = f_t.readlines()
for tag_line in tag_lines:
alist = tag_line.split()
if len(alist) > 5:
# ignore English company names
#new_008739b9378aa74c793113e7335f6638
"""
T28 COMPANY 727 746 Golden Ares Limited
T29 COMPANY 747 768 Gingko Avenue Limited
T30 COMPANY 769 795 Mission Excellence Limited
T31 PERSON 796 799 王培强
T32 COMPANY 800 815 福建平潭自贸区崇德投资合伙企业
T33 COMPANY 822 839 Sagacious Limited
T34 PERSON 840 842 刘承
T35 PERSON 844 847 周小童
T38 COMPANY 1006 1016 神州优车股份有限公司
T1 COMPANY 410 414 中国结算
T2 COMPANY 700 726 Star Vantage(China)Limited
"""
continue
(_, tag, start, end, entity) = tag_line.split()
#print("{},{}, total size: {}".format(start, end, sent_size))
start = int(start)
end = int(end)
# print(line_arr[end])
# print(line_arr[start:end] )
if tag == "PERSON":
tags_content[start] = "B-PER" # B-PER
for i in range(start + 1, end):
tags_content[i] = "I-PER"
#tags_content[start + 1: end] = "I-PER" # I-PER
if tag == "COMPANY":
tags_content[start] = "B-ORG" # B-ORG
for i in range(start + 1, end):
tags_content[i] = "I-ORG"
#tags_content[start + 1: end] = "I-ORG" # B-ORG
# split into sentences on "。"
sent_arr = sent_content.split("。")
sent_len = 0
sent_acc = 0
startpos = 0
for sentence in sent_arr:
sent_len = len(sentence)
sent_str_to_write = "|".join(list(sentence))
sent_f.write(sent_str_to_write)
sent_f.write("\n")
tag_str_to_write = "|".join(tags_content[startpos: startpos + sent_len])
tag_f.write(tag_str_to_write)
tag_f.write("\n")
# print("sent_len: {}, tag_len: {}".format(len(sent_splitted.split()), len(tag_str.split())))
# if len(sent_splitted.split()) != len(tag_str.split()):
# print(sent_splitted)
# print(tag_str)
## split() can yield unequal lengths because sent may contain consecutive spaces.
#assert len(sent_splitted.split()) == len(tag_str.split()), "the length of sent and tag don't match"
assert(len(sentence) == len(tags_content[startpos: startpos + sent_len]))
startpos = startpos + sent_len + 1 # skip the "。"
if __name__ == '__main__':
# generate sent.txt and tag.txt
with Path(sent_file).open("w") as sent_f, Path(tag_file).open("w") as tag_f:
for i in files:
append_sents_tags(sent_f, tag_f, i)
# split the contents of sent.txt and tag.txt into train, valid, and test
# output file names:
#test.tags.txt
#test.words.txt
#train.tags.txt
#train.words.txt
#valid.tags.txt
#valid.words.txt
with Path(sent_file).open("r") as sent_f, Path(tag_file).open("r") as tag_f:
sent_lines = sent_f.readlines()
tag_lines = tag_f.readlines()
total_size = len(sent_lines)
train_size = total_size * 0.7
valid_size = total_size * 0.2
test_size = total_size - train_size - valid_size
i = 0
f_train_words = Path(fwords("train")).open("w")
f_train_tags = Path(ftags("train")).open("w")
f_valid_words = Path(fwords("valid")).open("w")
f_valid_tags = Path(ftags("valid")).open("w")
f_test_words = Path(fwords("test")).open("w")
f_test_tags = Path(ftags("test")).open("w")
for s, t in zip(sent_lines, tag_lines):
if len(s.strip()) != 0:
if i < train_size:
f_train_words.write(s)
#f_train_words.write("\n")
f_train_tags.write(t)
#f_train_tags.write("\n")
elif i < train_size + valid_size:
f_valid_words.write(s)
#f_valid_words.write("\n")
f_valid_tags.write(t)
#f_valid_tags.write("\n")
else:
f_test_words.write(s)
#f_test_words.write("\n")
f_test_tags.write(t)
#f_test_tags.write("\n")
i = i + 1
f_train_words.close()
f_train_tags.close()
f_valid_words.close()
f_valid_tags.close()
f_test_words.close()
f_test_tags.close()
# build the Chinese-character vocabulary and the tag vocabulary
# writes vocab.words.txt
build_hanzi_vocab()
# writes vocab.tags.txt
build_tag_vocab()
# build the embedding for the character vocabulary from the Chinese word-vector table
# sgns.npz
build_hanzi_embedding()
``` |
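A worked example (with made-up data) of the offset-to-BIO conversion that append_sents_tags performs: brat supplies character offsets per entity, which are expanded into one tag per character and then joined with "|".
```python
# Hypothetical annotation data; the real input comes from the .txt/.ann pairs above.
sent = "王培强持有神州优车股份有限公司股份。"
annotations = [("PERSON", 0, 3), ("COMPANY", 5, 15)]  # (tag, start, end) character offsets

tags = ["O"] * len(sent)
for tag, start, end in annotations:
    prefix = "PER" if tag == "PERSON" else "ORG"
    tags[start] = "B-" + prefix          # first character of the entity
    for i in range(start + 1, end):
        tags[i] = "I-" + prefix          # remaining characters of the entity

print("|".join(sent))   # one line of sent.txt: characters joined by "|"
print("|".join(tags))   # the aligned line of tag.txt: tags joined by "|"
```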
{
"source": "jiz148/3d-converter",
"score": 3
} |
#### File: 3d-converter/common/matrix_transform.py
```python
from .vtk_points_loader import VtkPointLoader
import numpy
TOTAL_NUM_OF_PICS = 65
TOTAL_NUM_OF_PIXELS = 31200000
def add_vtk_points_from_plane_list(pic_set: list, trans_set: list, same_shape=True, scale_factor=(1.0, 1.0)):
"""
Adds atk points from set of planes and its transformation matrix set
@param pic_set: <list> plane set
@param trans_set: <list> transformation matrix set
@param same_shape: <bool> whether pictures are in same shape
@param scale_factor: <Tuple> scale factor for scaling the matrix
@return: <VtkPointLoad> Point load, a vtk object
"""
vtk_point_loader = VtkPointLoader()
# get first flatten matrix
flattened_matrix = get_plane_matrix_flatten(pic_set[0])
# create scaling matrix
scale_matrix = _create_scale_matrix(scale_factor)
# put planes into the vtk object
print('Creating data...')
loop, volume_list, colors_list = 0, [], []
for pic, trans in zip(pic_set, trans_set):
plane_matrix = flattened_matrix if same_shape is True else get_plane_matrix_flatten(pic)
colors = pic.reshape([int(pic.size/3), 3]).tolist()
volume = numpy.dot(plane_matrix, trans.transpose()).dot(scale_matrix) * -1
volume = numpy.delete(volume, 3, 1).tolist()
volume_list = volume_list + volume
colors_list = colors_list + colors
loop += 1
_print_progress_bar(loop, len(trans_set))
# set colors to grayscale
print('-Done Creating data')
print('changing colors to grayscale...')
for i in range(len(colors_list)):
colors_list[i] = 0.2989 * colors_list[i][0] + 0.5870 * colors_list[i][1] + 0.1140 * colors_list[i][2]
print('-Done changing colors to grayscale')
vtk_point_loader.add_points_from_list(volume_list, colors_list)
vtk_point_loader.summarize()
return vtk_point_loader
def get_plane_matrix_flatten(plane):
"""
Gets a matrix in the form of
| X0 Y0 0 1 X0 Y1 0 1 ... ... ... ... Xi Yi 0 1 |
@param plane: plane
@return: <numpy.array> Matrix
"""
trans_list = []
for y in range(plane.shape[0]):
for x in range(plane.shape[1]):
trans_list.append(numpy.array([x, y, 0, 1]))
return numpy.array(trans_list)
def plane_pt_to_3d_point(pos, trans_matrix):
"""
Transform plane point position eg. P(x, y, 0, 1) to 3d point eg. P'(X, Y, Z, 1)
@param pos: <array_like> position
@param trans_matrix: <numpy.array> transformation matrix
@return: <Tuple> 3d position
"""
orig_matrix = numpy.array(pos, ndmin=2)
return numpy.dot(orig_matrix, trans_matrix.transpose())
def plane_set_to_3d(pic_set: list, trans_set: list, same_shape=True, scale_factor=(1.0, 1.0)):
"""
Transform plane set to 3d graph and put it to the space matrix by transformation matrices
Number of transformation matrices is same with number of planes
@param pic_set: set of picture planes
@param trans_set: set of transformation matrices
@param same_shape: if the planes has same shape
@param scale_factor: <Tuple> scale factor for scaling the matrix
@return:
"""
# get first flatten matrix
flattened_matrix = get_plane_matrix_flatten(pic_set[0])
volume_list, colors_list = [], []
# create scaling matrix
scale_matrix = _create_scale_matrix(scale_factor)
# put coordinates and colors into lists
print('building list...\n')
for pic, trans in zip(pic_set, trans_set):
plane_matrix = flattened_matrix if same_shape is True else get_plane_matrix_flatten(pic)
colors = pic.reshape([int(pic.size/3), 3]).tolist()
volume = numpy.dot(plane_matrix, trans.transpose()).dot(scale_matrix) * -1
volume = numpy.delete(volume, 3, 1).tolist()
volume_list = volume_list + volume
colors_list = colors_list + colors
print('-Done building list\n')
print('building space...\n')
space, min_x, min_y, min_z = _build_space(volume_list)
print('-Done building space\n')
print('inserting colors\n')
for volume, colors in zip(volume_list, colors_list):
# normalize coordinates
x = volume[0] + min_x * -1
y = volume[1] + min_y * -1
z = volume[2] + min_z * -1
# change colors to grayscale
gray_scale_color = 0.2989 * colors[0] + 0.5870 * colors[1] + 0.1140 * colors[2]
space[int(y), int(x), int(z)] = gray_scale_color
print('-Done inserting colors\n')
return space
def _build_space(coordinate_set):
"""
Builds an empty space to place planes
@param coordinate_set: set of planes
@return: <numpy.array> empty space
"""
coordinate_set_lists = numpy.array(coordinate_set).transpose().tolist()
max_x, max_y, max_z = max(coordinate_set_lists[0]), max(coordinate_set_lists[1]), max(coordinate_set_lists[2])
min_x, min_y, min_z = min(coordinate_set_lists[0]), min(coordinate_set_lists[1]), min(coordinate_set_lists[2])
y = int(max_y) + 2 if min_y >= 0 else int(max_y) + int(min_y) * -1 + 2
x = int(max_x) + 2 if min_x >= 0 else int(max_x) + int(min_x) * -1 + 2
z = int(max_z) + 2 if min_z >= 0 else int(max_z) + int(min_z) * -1 + 2
space = numpy.zeros((y, x, z, 1), float)
return space, min_x if min_x < 0 else 0, min_y if min_y < 0 else 0, min_z if min_z < 0 else 0
def _create_scale_matrix(scale_factors):
"""
Creates scale matrix for scaling
"""
return numpy.array([[scale_factors[0], 0, 0, 0],
[0, scale_factors[1], 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
def _print_progress_bar(iteration, total, decimals=1, length=100, fill='█'):
"""
Print progress bar for CLI
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
length = int(length * iteration // total)
bar = fill * length
print('\r|%s| %s%%' % (bar, percent), end='\r')
if iteration == total:
print()
```
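A small self-contained check of the homogeneous-coordinate step used by add_vtk_points_from_plane_list and plane_set_to_3d (without the scaling matrix and the final sign flip); the import path common.matrix_transform is assumed from the file header.
```python
import numpy
from common.matrix_transform import get_plane_matrix_flatten  # assumed import path

plane = numpy.zeros((2, 2, 3))          # a dummy 2x2 RGB "picture"
flat = get_plane_matrix_flatten(plane)  # rows: [0,0,0,1], [1,0,0,1], [0,1,0,1], [1,1,0,1]

# 4x4 homogeneous translation by (10, 20, 30).
trans = numpy.array([[1, 0, 0, 10],
                     [0, 1, 0, 20],
                     [0, 0, 1, 30],
                     [0, 0, 0, 1]], dtype=float)

volume = numpy.dot(flat, trans.transpose())  # apply the transform to every pixel position
volume = numpy.delete(volume, 3, 1)          # drop the homogeneous coordinate
print(volume)                                # every point shifted by (10, 20, 30)
```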
#### File: 3d-converter/common/vtk_points_loader.py
```python
import vtk
from common.hole_filler import HoleFiller
DEFAULT_COLOR = {"r": 255, "g": 255, "b": 255}
DEFAULT_SAVE_PATH = '/home/bioprober/gh/3d-converter/tests/data/my_poly_result_try.vtk'
class VtkPointLoader:
def __init__(self, z_min=-1000.0, z_max=1000.0):
self.vtk_poly_data = vtk.vtkPolyData()
self.clear_points()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(self.vtk_poly_data)
mapper.SetColorModeToDefault()
mapper.SetScalarRange(z_min, z_max)
mapper.SetScalarVisibility(1)
self.vtk_actor = vtk.vtkActor()
self.vtk_actor.SetMapper(mapper)
def add_points(self, point, c=DEFAULT_COLOR):
"""
Adds points to the vtk point loader
@param point: <list> point to be added, list of [x, y, z]
@param c: <dictionary> dictionary of {"r": 1, "g": 2, "b": 3}
"""
point_id = self.vtk_points.InsertNextPoint(point[:])
self.vtk_depth.InsertNextValue(point[2])
self.vtk_cells.InsertNextCell(1)
self.vtk_cells.InsertCellPoint(point_id)
# setup colors
self.colors.InsertNextTuple3(c.get("r"), c.get("g"), c.get("b"))
# self.colors.InsertNextTuple3(255, 255, 255)
def add_points_from_list(self, points, colors=DEFAULT_COLOR):
"""
Adds points to the vtk point loader from list
@param points: <list> list of point coordinates
@param colors: <list> list of color list [r, g, b],
needs to be the same length with colors
"""
print('started scattering\n')
for point, c in zip(points, colors):
# change to [0, 0, 0] if want rgb
if c != 0.0:
point_id = self.vtk_points.InsertNextPoint(point[:])
# self.vtk_depth.InsertNextValue(point[2])
self.vtk_cells.InsertNextCell(1)
self.vtk_cells.InsertCellPoint(point_id)
# setup colors
# self.colors.InsertNextTuple3(c[0], c[1], c[2]) # for rgb
self.colors.InsertNextTuple([c]) # for grayscale
print('-Done scattering\n')
def add_points_from_3d_array(self, space):
"""
Adds points to the vtk point loader from numpy array
There are there parts of codes to be comment out to run without hole-filling
@param space: <numpy.array> numpy 3d array with all color data
"""
print('space shape: ', space.shape)
print('started scattering points...')
looped = 0
# comment out following 1 line to run without hole filling
# hole_filler = HoleFiller(space, 7)
for y in range(space.shape[0]):
for x in range(space.shape[1]):
for z in range(space.shape[2]):
if space[y, x, z, :][0] != 0.0:
point_id = self.vtk_points.InsertNextPoint([x, y, z])
# self.vtk_depth.InsertNextValue(space[y, x, z][2])
self.vtk_cells.InsertNextCell(1)
self.vtk_cells.InsertCellPoint(point_id)
self.colors.InsertNextTuple([space[y, x, z, :][0].item()]) # for grayscale
looped += 1
# comment out following else part to run without hole filling
# else:
# hole_filler.derive_points((x, y, z))
# looped += 1
# comment out following else part to run without hole filling
# print('processed ', looped, ' points', end='\r')
# comment out following 2 lines to run without hole filling
# point_list, color_list = hole_filler.summarize()
# self.add_points_from_list(point_list, color_list)
print('-Done scattering points')
def summarize(self):
"""
finalize the cells and points in the polydata
"""
self.vtk_poly_data.SetPoints(self.vtk_points)
self.vtk_poly_data.SetVerts(self.vtk_cells)
self.vtk_poly_data.GetPointData().SetScalars(self.colors)
self.vtk_cells.Modified()
self.vtk_points.Modified()
# self.vtk_depth.Modified()
self.vtk_poly_data.Modified()
def clear_points(self):
self.vtk_points = vtk.vtkPoints()
self.vtk_cells = vtk.vtkCellArray()
# self.vtk_depth = vtk.vtkDoubleArray()
self.colors = vtk.vtkUnsignedCharArray()
self.colors.SetName('Colors')
self.colors.SetNumberOfComponents(1)
# self.vtk_depth.SetName('DepthArray')
self.vtk_poly_data.SetPoints(self.vtk_points)
self.vtk_poly_data.SetVerts(self.vtk_cells)
# self.vtk_poly_data.GetPointData().SetScalars(self.vtk_depth)
# self.vtk_poly_data.GetPointData().SetActiveScalars('DepthArray')
def dump(self, save_name=DEFAULT_SAVE_PATH):
vtk_writer = vtk.vtkPolyDataWriter()
vtk_writer.SetInputData(self.vtk_poly_data)
vtk_writer.SetFileName(save_name)
vtk_writer.Update()
```
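A usage sketch for VtkPointLoader with grayscale intensities; the output path is illustrative and the import path follows the file header.
```python
from common.vtk_points_loader import VtkPointLoader  # assumed import path

loader = VtkPointLoader()
points = [[0, 0, 0], [1, 0, 0], [0, 1, 0]]
intensities = [80.0, 160.0, 240.0]      # one grayscale value per point (0.0 would be skipped)
loader.add_points_from_list(points, intensities)
loader.summarize()                      # push points, cells and colors into the vtkPolyData
loader.dump('./demo_points.vtk')        # write a legacy .vtk polydata file
```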
#### File: 3d-converter/misc/trans_writer.py
```python
import numpy
import re
SAVE_PATH = '/home/bioprober/gh/3d-converter/tests/data/'
class TransWriter:
def __init__(self, filename):
self.filename = filename
self.info = ''
def config(self, axis, degree, steps=65, move=(0, 0, 0)):
"""
Configs self.info string by given parameters
@param axis: <Tuple> formed by a tuple of two coordinates ((x, y, z), (x, y, z)), axis to be rotate about
@param degree: <float> final rotation along the axis
@param steps <int> total steps it takes, same with the number of rows in position file
@param move: <Tuple> final movement along x, y, z axis, (x, y, z)
"""
print('Configuring position information...')
# get id array for first element
id_arr = numpy.array(list(range(int(steps))))
move_x_arr = move_y_arr = move_z_arr = radian_arr = numpy.zeros(len(id_arr)).tolist()
# get move arrays
if int(move[0]/steps) != 0:
move_x_arr = list(numpy.arange(0, move[0], move[0]/steps))
if int(move[1]/steps) != 0:
move_y_arr = list(numpy.arange(0, move[1], move[1]/steps))
if int(move[2]/steps) != 0:
move_z_arr = list(numpy.arange(0, move[2], move[2]/steps))
# get radian arrays
if int(degree/steps) != 0:
degrees = list(numpy.arange(0, degree, degree/steps))
radian_arr = numpy.radians(degrees)
# Calculate the rotated arrays and save their strings to self.info
for i, x, y, z, theta in zip(id_arr, move_x_arr, move_y_arr, move_z_arr, radian_arr):
move_matrix = self._get_move_matrix(x, y, z)
x_1, y_1, z_1, x_2, y_2, z_2 = axis[0][0], axis[0][1], axis[0][2], axis[1][0], axis[1][1], axis[1][2]
t, t_inverse = self._get_t_and_inverse(x_1, y_1, z_1)
rx, rx_inverse, ry, ry_inverse = self._get_r_x_y_and_inverse(x_1, y_1, z_1, x_2, y_2, z_2)
rz = self._get_r_z(theta)
trans = move_matrix.dot(t_inverse).dot(rx_inverse).dot(ry_inverse).dot(rz).dot(ry).dot(rx).dot(t)
s = ' '.join(str(item) for item in trans)
s = re.sub(r'[\[\]]', '', s)
self.info += str(i) + ' ' + s + '\n'
print('-Done configuring position information')
def save_to_file(self):
"""
Save the string in self.info to file
"""
print('writing to file {}...'.format(self.filename))
file = open(self.filename, 'w')
file.write(self.info)
file.close()
print('-Done writing to file')
@staticmethod
def _calculate_unit_vector_elements(x_1, y_1, z_1, x_2, y_2, z_2):
"""
Calculates the elements in unit vector
@return: <Tuple> unit x, y, z
"""
if (x_2 - x_1) == (y_2 - y_1) == (z_2 - z_1) == 0:
return 0, 1, 0
mag = numpy.sqrt((x_2 - x_1) ** 2 + (y_2 - y_1) ** 2 + (z_2 - z_1) ** 2)
return (x_2 - x_1) / mag, (y_2 - y_1 ** 2) / mag if (y_2 - y_1 ** 2) / mag != 0 else 1, (z_2 - z_1) / mag
@staticmethod
def _get_move_matrix(x, y, z):
"""
Gets move matrix
@return: move matrix
"""
return numpy.array([[1, 0, 0, x],
[0, 1, 0, y],
[0, 0, 1, z],
[0, 0, 0, 1]])
def _get_r_x_y_and_inverse(self, x_1, y_1, z_1, x_2, y_2, z_2):
"""
Gets the Rx, Rx inverse, Ry, Ry inverse matrices
@return: <numpy.array> Rx, Rx inverse, Ry, Ry inverse matrices
"""
a, b, c = self._calculate_unit_vector_elements(x_1, y_1, z_1, x_2, y_2, z_2)
d = numpy.sqrt(b**2 + c**2)
rx = numpy.array([[1, 0, 0, 0],
[0, c/d, -b/d, 0],
[0, b/d, c/d, 0],
[0, 0, 0, 1]])
rx_inverse = numpy.array([[1, 0, 0, 0],
[0, c/d, b/d, 0],
[0, -b/d, c/d, 0],
[0, 0, 0, 1]])
ry = numpy.array([[d, 0, -a, 0],
[0, 1, 0, 0],
[a, 0, d, 0],
[0, 0, 0, 1]])
ry_inverse = numpy.array([[d, 0, a, 0],
[0, 1, 0, 0],
[-a, 0, d, 0],
[0, 0, 0, 1]])
return rx, rx_inverse, ry, ry_inverse
@staticmethod
def _get_r_z(theta):
"""
Gets Rz matrix
@param theta: theta radian of rotation
@return: Rz matrix
"""
return numpy.array([[numpy.cos(theta), -numpy.sin(theta), 0, 0],
[numpy.sin(theta), numpy.cos(theta), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
@staticmethod
def _get_t_and_inverse(x, y, z):
"""
Gets T and T inverse matrices
@param: x
@param: y
@param: z
@return: <Tuple> T and T inverse
"""
t = numpy.array([[1, 0, 0, -x],
[0, 1, 0, -y],
[0, 0, 1, -z],
[0, 0, 0, 1]])
t_inverse = numpy.array([[1, 0, 0, x],
[0, 1, 0, y],
[0, 0, 1, z],
[0, 0, 0, 1]])
return t, t_inverse
def run():
print('---Transformation Information CLI---\n')
print('Please enter following information...\n')
x_1 = float(input('axis parameter x1: '))
y_1 = float(input('axis parameter y1: '))
z_1 = float(input('axis parameter z1: '))
x_2 = float(input('axis parameter x2: '))
y_2 = float(input('axis parameter y2: '))
z_2 = float(input('axis parameter z2: '))
degree = float(input('rotation degree: '))
steps = int(input('steps: '))
move_x = int(input('move along x: '))
move_y = int(input('move along y: '))
move_z = int(input('move along z: '))
filename = input('Please enter the file name to save file: ')
filename = SAVE_PATH + filename
axis = ((x_1, y_1, z_1), (x_2, y_2, z_2))
move = (move_x, move_y, move_z)
writer = TransWriter(filename)
writer.config(axis, degree, steps, move)
writer.save_to_file()
if __name__ == '__main__':
run()
``` |
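A quick sanity check of the matrix chain built in TransWriter.config: with a zero rotation angle the rotation factors cancel, so the composed transform should reduce to the pure translation. The axis and file name below are illustrative, and the import path misc.trans_writer is assumed from the file header.
```python
import numpy
from misc.trans_writer import TransWriter  # assumed import path

w = TransWriter('/tmp/demo_positions.txt')           # illustrative file name; nothing is written here
move = TransWriter._get_move_matrix(5, 0, 0)         # translate 5 units along x
t, t_inv = TransWriter._get_t_and_inverse(0, 0, 0)   # rotation axis passes through the origin
rx, rx_inv, ry, ry_inv = w._get_r_x_y_and_inverse(0, 0, 0, 0, 1, 0)  # axis along +y
rz = TransWriter._get_r_z(0.0)                       # zero rotation -> identity

trans = move.dot(t_inv).dot(rx_inv).dot(ry_inv).dot(rz).dot(ry).dot(rx).dot(t)
print(numpy.allclose(trans, move))                   # True: the rotation factors cancel out
```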
{
"source": "jiz148/Battle-of-Nine-Kings",
"score": 2
} |
#### File: Battle-of-Nine-Kings/nk/main.py
```python
def run():
print('hello world')
if __name__ == "__main__":
run()
``` |
{
"source": "jiz148/blueprint_editor",
"score": 3
} |
#### File: operation/implement/multiply.py
```python
def generate_code(json_dict, lang='python'):
result = ''
multiply_1, multiply_2, output = parse_json(json_dict)
if lang == 'python':
result = "{} = {} * {}".format(output, multiply_1, multiply_2)
return result
def parse_json(json_dict):
"""
@param json_dict: should have keys: multiply_1, multiply_2, output
@return: strings of multiply_1, multiply_2, output
"""
try:
return str(json_dict['multiply_1']), str(json_dict['multiply_2']), str(json_dict['output'])
except Exception:
raise KeyError('Error while parsing: Multiply')
if __name__ == '__main__':
print(generate_code({'multiply_1': 'a', 'multiply_2': 'b', 'output': 'asd'}))
``` |
{
"source": "jiz148/CatYesNo",
"score": 3
} |
#### File: ml/classifier/utils.py
```python
import h5py
import numpy as np
import os
from matplotlib import pyplot as plt
PWD = os.path.dirname(os.path.realpath(__file__))
def load_data():
"""
"""
train_dataset = load_train_datasets()
# print(type(train_dataset))
train_set_x_orig = np.array(train_dataset['train_set_x'][:]) # your train set features
train_set_y_orig = np.array(train_dataset['train_set_y'][:]) # your train set labels
test_dataset = load_test_datasets()
test_set_x_orig = np.array(test_dataset['test_set_x'][:]) # your test set features
test_set_y_orig = np.array(test_dataset['test_set_y'][:]) # your test set labels
classes = np.array(test_dataset['list_classes'][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def load_train_datasets():
"""
"""
data_train_h5 = os.path.join(PWD, 'datasets', 'train_catvnoncat.h5')
train_dataset = h5py.File(data_train_h5, 'r')
return train_dataset
def load_test_datasets():
"""
"""
data_tests_h5 = os.path.join(PWD, 'datasets', 'test_catvnoncat.h5')
test_dataset = h5py.File(data_tests_h5, 'r')
return test_dataset
def load_parameters():
"""
"""
data_dir = os.path.dirname(os.path.realpath(__file__))
saved_parameters = os.path.join(data_dir, 'datasets', 'saved_parameters.npy')
print('\nloading saved parameters: {}'.format(saved_parameters))
parameters = np.load(saved_parameters).item()
return parameters
def print_mislabeled_images(classes, X, y, p):
"""
"""
a = p + y
mislabeled_indices = np.asarray(np.where(a == 1))
plt.rcParams['figure.figsize'] = (40.0, 40.0) # set default size of plots
num_images = len(mislabeled_indices[0])
for i in range(num_images):
index = mislabeled_indices[1][i]
plt.subplot(2, num_images, i + 1)
plt.imshow(X[:, index].reshape(64, 64, 3), interpolation='nearest')
plt.axis('off')
plt.title(
"Prediction: " + classes[int(p[0, index])].decode("utf-8") + " \n Class: " + classes[y[0, index]].decode(
"utf-8"))
def print_pypath():
"""
Print out Python path.
"""
import sys
print('\nPYTHONPATH')
print('.'*80)
for p in sys.path:
print(p)
print('.' * 80)
def save_parameters(parameters):
"""
"""
data_dir = os.path.dirname(os.path.realpath(__file__))
saved_parameters = os.path.join(data_dir, 'datasets', 'saved_parameters.npy')
print('\nsaving parameters: {} ...'.format(saved_parameters))
np.save(saved_parameters, parameters, allow_pickle=True, fix_imports=True)
``` |
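A minimal sketch of using these helpers to prepare training data; it assumes the catvnoncat .h5 files are present under classifier/datasets/ as the loaders expect, and the import path follows the file header.
```python
from ml.classifier.utils import load_data  # assumed import path, per the file header

train_x, train_y, test_x, test_y, classes = load_data()
print(train_x.shape, train_y.shape)        # e.g. (m_train, 64, 64, 3) and (1, m_train)

# Flatten each 64x64x3 image into a column vector and scale pixel values to [0, 1].
train_x_flat = train_x.reshape(train_x.shape[0], -1).T / 255.
test_x_flat = test_x.reshape(test_x.shape[0], -1).T / 255.
print(train_x_flat.shape)                  # (12288, m_train)
```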
{
"source": "jiz148/CS543-project-1",
"score": 3
} |
#### File: backend/kafka_util/producer.py
```python
import requests
import threading
from kafka import KafkaProducer
API_DICT = {
"country_level": {
"url": "https://api.covidactnow.org/v2/states.timeseries.json",
"params": {'apiKey': '<KEY>'},
},
"state_level": {
"url": "https://api.covidactnow.org/v2/state/{}.timeseries.json",
"params": {'apiKey': '<KEY>'},
}
}
COUNTRY_TOPIC_NAME = 'country_level'
STATE_TOPIC_NAME = 'state_level'
def fetch_raw(url, params):
html = None
print('Processing..{}'.format(url))
try:
r = requests.get(url, params=params)
if r.status_code == 200:
print('Responded 200')
html = r.text
except Exception as ex:
print('Exception while accessing raw html')
print(str(ex))
finally:
return html.strip() if html else None
def publish_message(producer_instance, topic_name, key, value):
try:
key_bytes = bytes(key, encoding='utf-8')
value_bytes = bytes(value, encoding='utf-8')
producer_instance.send(topic_name, key=key_bytes, value=value_bytes)
producer_instance.flush()
print('Message published successfully.')
except Exception as ex:
print('Exception in publishing message')
print(str(ex))
def connect_kafka_producer():
_producer = None
try:
_producer = KafkaProducer(bootstrap_servers=['localhost:9092'], api_version=(0, 10))
except Exception as ex:
print('Exception while connecting Kafka')
print(str(ex))
finally:
return _producer
def produce(state=None):
kafka_producer = connect_kafka_producer()
# country_level_value = None
# produce state-level topic
if state:
state_url = API_DICT[STATE_TOPIC_NAME]["url"].format(state.upper())
state_params = API_DICT[STATE_TOPIC_NAME]["params"]
state_value = fetch_raw(state_url, state_params)
print(state_value)
publish_message(kafka_producer, STATE_TOPIC_NAME, 'latest_data', state_value)
# produce country_level topic
# country_url = API_DICT[COUNTRY_TOPIC_NAME]["url"]
# country_params = API_DICT[COUNTRY_TOPIC_NAME]["params"]
# country_value = fetch_raw(country_url, country_params)
# publish_message(kafka_producer, COUNTRY_TOPIC_NAME, 'latest_data', country_value)
if __name__ == "__main__":
thread_2 = threading.Thread(
target=produce,
args=('NJ',),
)
thread_2.start()
``` |
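A companion sketch for reading back what produce() publishes; it assumes the same local broker at localhost:9092 and the kafka-python package used above.
```python
from kafka import KafkaConsumer

consumer = KafkaConsumer(
    'state_level',                        # STATE_TOPIC_NAME in the producer
    bootstrap_servers=['localhost:9092'],
    auto_offset_reset='earliest',
    consumer_timeout_ms=10000,            # stop iterating if nothing arrives for 10 seconds
)
for message in consumer:
    print(message.key, len(message.value), 'bytes of timeseries JSON')
```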
{
"source": "jiz148/medical_app",
"score": 4
} |
#### File: backend_app/common/db.py
```python
import sqlite3
class SqliteDBMS:
def __init__(self, db_dir):
"""
SQLite dbms class
@param db_dir: database directory
"""
self.conn = sqlite3.connect(db_dir)
self.cursor = self.conn.cursor()
def add(self, table, col_to_val):
"""
@param table: name of table
@param col_to_val: dict of column name to value
@return: <boolean> success
"""
cols_str = str(tuple(col_to_val.keys())).replace("'", '')
vals_str = str(tuple(col_to_val.values()))
sql = """INSERT INTO {} {} VALUES {}""".format(table, cols_str, vals_str)
# print('sql: ', sql)
try:
self.cursor.execute(sql)
self.conn.commit()
return True
except Exception as e:
self.conn.rollback()
print('error when adding to db')
print(e)
return False
def close(self):
"""
Close the database
"""
self.conn.commit()
self.conn.close()
def delete(self, index):
"""
@param index: index to delete
@return: <boolean> success
"""
pass
def query(self, query_str):
"""
@param query_str: <str> sql query
@return: <boolean> success
<dict> result
"""
try:
self.cursor.execute(query_str)
return True, self.cursor.fetchall()
except Exception as e:
print('error when querying db')
print(e)
return False, None
def update(self, db_index, col_to_val=None):
"""
@param db_index: index of row
@param col_to_val: dict of column name to new value
@return: <boolean> success
"""
if col_to_val is None:
col_to_val = {}
pass
if __name__ == '__main__':
dbms = SqliteDBMS('../../data/WinNBQ.db3')
table = 'User'
input_data = {
'username': 'jin',
'password': '<PASSWORD>',
'email': '<EMAIL>',
'birthday': '05-16-1763',
'gender': 'Male',
}
print(dbms.add(table, input_data))
sql_str = """select * from User limit 5"""
print(dbms.query(sql_str))
dbms.close()
```
#### File: backend_app/resources/users.py
```python
import os
from flask_restful import Resource
from flask import request
from backend.backend_app.common.db import SqliteDBMS
from backend.backend_app.common.send_email import send_email
from backend.backend_app.resources.path import DATA_FILE_PATH
from backend.backend_app.resources.contents import FORGET_MY_PASSWORD_SUBJECT, FORGET_MY_PASSWORD_CONTENT
TECHNICAL_EMAIL = '<EMAIL>'
EMAIL_PASS = os.getenv('MD_EMAIL_PASS')
SMTP_SERVER = 'smtp.office365.com'
SMTP_PORT = 587
class UsersRegister(Resource):
def get(self):
pass
def post(self):
dbms = SqliteDBMS(DATA_FILE_PATH)
data = request.get_json()
username = data["username"]
password = data["password"]
email = data["email"]
birthday = data["birthday"]
gender = data["gender"]
phone = data["phone"]
query_str_username = "select * from User where username ='"+username+"'"
obj1 = dbms.query(query_str_username)
query_str_email = "select * from User where email ='"+email+"'"
obj2 = dbms.query(query_str_email)
if obj1[1] != [] or obj2[1] != []:
# json conversion required
return {'success': 0, 'msg': 'Username or email already existed'}
table = 'User'
col_to_val = {
# "id": id,
"username": username,
"password": password,
"email": email,
"birthday": birthday,
"gender": gender,
"phone": phone
}
dbms.add(table, col_to_val)
dbms.close()
return {'success': 1, 'msg': 'Success'}
class UsersLogin(Resource):
def get(self):
pass
def post(self):
dbms = SqliteDBMS(DATA_FILE_PATH)
data = request.get_json()
username = data["username"]
password = data["password"]
query_str = "select * from User where username = '"+username+"' and password ='"+password+"'"
obj = dbms.query(query_str)
if obj[1] == []:
# error it does not match
return {'success': 0, 'msg': 'Invalid username or password'}
dbms.close()
return {'success': 1, 'msg': 'Success'}
class UserForgetPassword(Resource):
def get(self):
dbms = SqliteDBMS(DATA_FILE_PATH)
data = request.get_json()
user_email = data['email']
query_str_email = "select username from User where email ='" + user_email + "'"
query_result = dbms.query(query_str_email)
if not query_result[1]:
return {'success': 0, 'msg': 'Email does not exist'}
username = query_result[1][0][0]
email_content = FORGET_MY_PASSWORD_CONTENT.format(username)
try:
send_email(sender=TECHNICAL_EMAIL,
password=EMAIL_PASS,
receiver=user_email,
subject=FORGET_MY_PASSWORD_SUBJECT,
content=email_content,
smtp_server=SMTP_SERVER,
port=SMTP_PORT)
return {'success': 1, 'msg': 'Success'}
except Exception as e:
print(e)
return {'success': 0, 'msg': 'Error when sending email'}
class UserChangePassword(Resource):
def post(self):
dbms = SqliteDBMS(DATA_FILE_PATH)
data = request.get_json()
username = data["username"]
new_password = data["password"]
pass
```
#### File: backend_app/models/findings.py
```python
from backend_sqlalchemy.backend_app.common.serializer import json_serial
from backend_sqlalchemy.backend_app.db import db
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import relationship
base = automap_base()
class FindingsModel(base):
__tablename__ = 'Findings'
visits = relationship('Visit', secondary='VisitToFinding')
diseases = relationship('Diseases', secondary='Stats', viewonly=True)
entity_FID2 = relationship('FindingsModel',
secondary='FindingsRel',
primaryjoin='FindingsModel.FID==FindingsRel.FID1',
secondaryjoin='FindingsModel.FID==FindingsRel.FID2',
backref='FID2',
viewonly=True)
entity_FID1 = relationship('FindingsModel',
secondary='FindingsRel',
primaryjoin='FindingsModel.FID==FindingsRel.FID2',
secondaryjoin='FindingsModel.FID==FindingsRel.FID1',
backref='FID1',
viewonly=True)
def __repr__(self):
return "<FindingsObject:FID="+str(self.FID)+" Name="+self.Name+">"
def as_dict(self):
return {c.name: json_serial(getattr(self, c.name)) for c in self.__table__.columns}
base.prepare(db.engine, reflect=True)
```
#### File: backend_app/models/user.py
```python
from backend_sqlalchemy.backend_app.db import db
from backend_sqlalchemy.backend_app.db import app
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import relationship
from passlib.hash import pbkdf2_sha256
# defining a custom hash function to be used for encrypting the password
custom_hash = pbkdf2_sha256.using(salt=bytes(app.secret_key, 'utf-8')).using(rounds=10000).hash
base = automap_base()
class UserModel(base):
__tablename__ = 'User'
visits = relationship('Visit', foreign_keys='Visit.uid')
def encrypt_password(self):
self.password = custom_hash(self.password)
def new_password_encrypted(self, password):
self.password = custom_hash(password)
base.prepare(db.engine, reflect=True)
```
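A small round-trip sketch of the pbkdf2_sha256 scheme behind custom_hash; the salt and password below are illustrative, not the application's real secret key.
```python
from passlib.hash import pbkdf2_sha256

demo_hash = pbkdf2_sha256.using(salt=b'demo-secret-key!', rounds=10000).hash  # mirrors custom_hash
stored = demo_hash('correct horse battery staple')

print(pbkdf2_sha256.verify('correct horse battery staple', stored))  # True
print(pbkdf2_sha256.verify('wrong password', stored))                # False
```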
#### File: backend_app/resources/stats.py
```python
from backend_sqlalchemy.backend_app.models.diseases import DiseasesModel
from backend_sqlalchemy.backend_app.db import db
from backend_sqlalchemy.backend_app.models.stats import StatsModel
def get_stats():
"""
Get stats
"""
all_stats = db.session.query(StatsModel.DID, StatsModel.FID, StatsModel.Sen)
g_stats = {}
for stat in all_stats:
g_stats[(stat.DID, stat.FID)] = stat.Sen
print(g_stats)
```
#### File: medical_app/frontend/queries.py
```python
from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/main', methods=['POST'])
def query_main():
return render_template('main.html')
@app.route('/login', methods=['POST'])
def query_login():
return render_template('login.html')
@app.route('/disclaimer', methods=['POST'])
def query_disclaimer():
return render_template('disclaimer.html')
@app.route('/startregistration', methods=['POST'])
def query_startregistration():
return render_template('register.html')
@app.route('/', methods=['GET'])
def index():
return render_template('login.html')
#return render_template('index.html')
``` |
{
"source": "jiz148/parking-lot",
"score": 4
} |
#### File: parking_lot/common/parking_lot.py
```python
from .car import Car
class ParkingLot:
def __init__(self, slot_amount: int):
"""
@param slot_amount: number of slots in the parking lot
"""
self._slot_amount = slot_amount
self._car_list = [None] * self._slot_amount
print('Created a parking lot with {} slots'.format(self._slot_amount))
def is_full(self):
"""
Checks if the parking lot is full
@return: <bool> whether the parking lot is full
"""
return len([i for i, val in enumerate(self._car_list) if val is not None]) == len(self._car_list)
def leave(self, slot_num: int):
"""
Leaves the car according to the slot number
@param slot_num: <int> slot_number to leave car
"""
try:
slot_num = int(slot_num)
self._car_list[slot_num - 1] = None
print('Slot number {} is free'.format(slot_num))
except (IndexError, TypeError, ValueError):
print('Please enter a valid slot number that is in this parking lot.')
def park(self, registration_id: str, colour: str):
"""
Parks car into the parking lot
@param registration_id: <str> registration id of the car
@param colour: <str> colour of the car
"""
# check if the parking lot is full
car = Car(registration_id, colour)
if self.is_full():
print('Sorry, parking lot is full.\n')
return
# get first non-None index
first_available = [i for i in range(len(self._car_list)) if self._car_list[i] is None][0]
self._car_list[first_available] = car
# print out the parked place, starting from 1 to n
print('Allocated slot number: {}'.format(first_available + 1))
def registration_numbers_for_cars_with_colour(self, colour: str):
"""
Gets the registration numbers for cars with input colour
@param colour: colour to be queried
@return <list> of registration numbers
"""
result_list = []
for car in self._car_list:
if car is not None and car.get_colour() == colour:
result_list.append(car.get_plate_number())
if not result_list:
print('Not found')
else:
print(', '.join(result_list))
return result_list
def status(self):
"""
Shows the status of our parking lot
"""
print('Slot No. Registration No Colour\n')
for i, car in list(enumerate(self._car_list)):
if car is not None:
print('{} {} {}'.format(i + 1, car.get_plate_number(), car.get_colour()))
def slot_numbers_for_cars_with_colour(self, colour: str):
"""
Gets slot number of cars with input colour
@return: <list> of slot numbers
"""
result_list = []
for i, car in list(enumerate(self._car_list)):
if car is not None and car.get_colour() == colour:
result_list.append(str(i + 1))
if not result_list:
print('Not found')
else:
print(', '.join(result_list))
return result_list
def slot_number_for_registration_number(self, registration_num: str):
"""
Gets slot number for cars with input registration number
@return:
"""
result_list = []
for i, car in list(enumerate(self._car_list)):
if car is not None and car.get_plate_number() == registration_num:
result_list.append(str(i + 1))
if not result_list:
print('Not found')
else:
print(', '.join(result_list))
return result_list
``` |
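A usage sketch for ParkingLot mirroring a typical session; the import path parking_lot.common.parking_lot is assumed from the file header, and the plate numbers are made up.
```python
from parking_lot.common.parking_lot import ParkingLot  # assumed import path

lot = ParkingLot(3)                                   # "Created a parking lot with 3 slots"
lot.park('KA-01-HH-1234', 'White')                    # "Allocated slot number: 1"
lot.park('KA-01-HH-9999', 'Black')                    # "Allocated slot number: 2"
lot.leave(1)                                          # "Slot number 1 is free"
lot.park('KA-01-BB-0001', 'White')                    # the freed slot 1 is reused first
lot.status()
lot.registration_numbers_for_cars_with_colour('White')
lot.slot_number_for_registration_number('KA-01-HH-9999')
```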
{
"source": "jiz148/personal-page",
"score": 2
} |
#### File: personal-page/blog/owners.py
```python
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
class OwnerListView(ListView):
"""
Sub-class the List View for owners
"""
pass
class OwnerDetailView(DetailView):
"""
Sub-class the Detail View for owners
"""
pass
class OwnerCreateView(LoginRequiredMixin, CreateView):
"""
Sub-class the Create View for owners
"""
# Saves the form instance, sets the current object for the view, and redirects to get_success_url().
def form_valid(self, form):
o = form.save(commit=False)
o.owner = self.request.user
o.save()
return super().form_valid(form)
class OwnerUpdateView(LoginRequiredMixin, UpdateView):
"""
Sub-class the Update View for owners
"""
pass
class OwnerDeleteView(LoginRequiredMixin, DeleteView):
"""
Sub-class the Delete View for owners
restrict a User from deleting other user's data.
"""
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(owner=self.request.user)
```
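For context, a hedged sketch of how one of these mixin views could be wired to a concrete model; `Article`, the template path, and the URL name are hypothetical and not taken from this repository:
```python
# Hypothetical subclass illustrating how OwnerDeleteView is meant to be used.
from django.urls import reverse_lazy
from .models import Article  # assumed model with an `owner` ForeignKey to the User model

class ArticleDeleteView(OwnerDeleteView):
    model = Article
    template_name = 'articles/article_confirm_delete.html'  # hypothetical template
    success_url = reverse_lazy('articles:all')  # hypothetical URL name
```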
#### File: huffman/utils/huffman.py
```python
import copy
class BinaryMinHeap:
"""
easy binary heap for nodes with values
"""
def __init__(self, graph):
self.size = 0
self.heap = []
self.graph = {}
self.build_heap(graph)
def extract_min(self):
self.size -= 1
result = self.heap.pop(0)
if not self.is_empty():
last = self.heap.pop(-1)
self.heap.insert(0, last)
self._sift_down(0)
return result
def insert(self, key, value):
"""
Insert key value pair to graph and insert to heap
"""
self.size += 1
self.graph[key] = {
'value': value
}
self.heap.append(key)
self._bubble_up(self.size - 1)
def is_empty(self):
        return self.size == 0
def build_heap(self, graph):
"""
O(n)
"""
self.size = 0
self.graph = copy.deepcopy(graph)
for key in graph.keys():
self.heap.append(key)
self.size += 1
if not self.is_empty():
for i in range(int((len(self.heap) - 1) / 2), -1, -1):
self._sift_down(i)
def _bubble_up(self, index):
if index > 0:
parent_index = int((index - 1) / 2)
if self.graph[self.heap[index]].get('value') < self.graph[self.heap[parent_index]].get('value'):
self.heap[index], self.heap[parent_index] = self.heap[parent_index], self.heap[index]
self._bubble_up(parent_index)
def _sift_down(self, index):
if index < int(self.size / 2):
child_1_index = (index + 1) * 2 - 1
child_2_index = (index + 1) * 2
try:
if self.graph[self.heap[index]].get('value') > self.graph[self.heap[child_1_index]].get('value') or \
self.graph[self.heap[index]].get('value') > self.graph[self.heap[child_2_index]].get('value'):
if self.graph[self.heap[child_1_index]].get('value') > \
self.graph[self.heap[child_2_index]].get('value'):
self.heap[index], self.heap[child_2_index] = self.heap[child_2_index], self.heap[index]
self._sift_down(child_2_index)
else:
self.heap[index], self.heap[child_1_index] = self.heap[child_1_index], self.heap[index]
self._sift_down(child_1_index)
except IndexError:
if self.graph[self.heap[index]].get('value') > self.graph[self.heap[child_1_index]].get('value'):
self.heap[index], self.heap[child_1_index] = self.heap[child_1_index], self.heap[index]
self._sift_down(child_1_index)
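# Minimal usage sketch of BinaryMinHeap (illustrative frequencies, not part of the original module):
#   heap = BinaryMinHeap({'a': {'value': 3}, 'b': {'value': 1}, 'c': {'value': 2}})
#   heap.extract_min()  # -> 'b', the key with the smallest 'value'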
def huffman_encoding(input_string):
"""
Huffman Encoding to generated encoded string
@param input_string: targeted string
@return: <string> encoded string
<dict> frequency dictionary
"""
freq_dict = _get_freq(input_string)
binary_tree = copy.deepcopy(freq_dict)
result_string = ''
if len(binary_tree) == 1:
for char in input_string:
result_string += '0'
return result_string
binary_tree, root = _construct_huffman_tree(binary_tree)
for char in input_string:
result_string += binary_tree_dfs(binary_tree, char, root, '')
return _bit_string_to_byte_string(result_string), freq_dict
def huffman_decoding(input_string, freq_dict):
"""
Decoder of Huffman encoded string.
@param input_string: Huffman encoded string
@param freq_dict: dictionary of char to frequency
@return: <str> decoded string
"""
binary_tree, root = _construct_huffman_tree(freq_dict)
result_string = ''
i = 0
input_string = _byte_string_to_bit_string(input_string)
while i < len(input_string):
node = root
while binary_tree[node].get('left') or binary_tree[node].get('right'):
node = binary_tree[node]['left'] if input_string[i] == '0' else binary_tree[node]['right']
i += 1
result_string += str(node)
return result_string
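# Minimal round-trip sketch (illustrative input, not part of the original module):
#   encoded, freqs = huffman_encoding('abracadabra')
#   huffman_decoding(encoded, freqs)  # expected to recover 'abracadabra'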
def _construct_huffman_tree(binary_tree):
"""
Constructs a huffman tree by a frequency table
@return: <dict> huffman tree, <int> or <string> root node
"""
binary_heap = BinaryMinHeap(binary_tree)
i = 0
while not len(binary_heap.heap) < 2:
# setting i as key
i += 1
# extract first two min nodes
left_node_key = binary_heap.extract_min()
right_node_key = binary_heap.extract_min()
# make a new node with two nodes above as children
binary_tree[i] = {}
binary_tree[i]['left'] = left_node_key
binary_tree[i]['right'] = right_node_key
binary_tree[i]['value'] = binary_tree[left_node_key].get('value') + binary_tree[right_node_key].get('value')
# add new node to binary heap
binary_heap.insert(i, binary_tree[i]['value'])
return binary_tree, i
def binary_tree_dfs(tree, char, tree_node_key, search_string):
"""
Assuming binary tree to be complete
@param tree: tree to search
@param char: char to search
@param tree_node_key: binary tree node
@param search_string: current search string, e.g. '010'
@return: a string of e.g. '0101101' as 0 being left and 1 being right
"""
if not tree[tree_node_key].get('left') and not tree[tree_node_key].get('right'):
return search_string if char == tree_node_key else None
left_node_key = tree[tree_node_key].get('left')
left_string = binary_tree_dfs(tree, char, left_node_key, search_string + '0')
right_node_key = tree[tree_node_key].get('right')
right_string = binary_tree_dfs(tree, char, right_node_key, search_string + '1')
return left_string if not right_string else right_string
def _get_freq(input_string):
"""
@param input_string: string of different chars
    @return: <dict> mapping each char to {'value': frequency}
"""
result_dict = {}
for char in input_string:
if not result_dict.get(char):
result_dict[char] = {'value': 1}
else:
result_dict[char]['value'] += 1
return result_dict
def _bit_string_to_byte_string(input_string):
"""
Converts 0's and 1's to an ascii byte-string
@param input_string: any type of string which consists of only 0's and 1's
@return: <str> ascii byte-string
"""
result_string = ''
for i in range(0, len(input_string), 8):
current_len = 8 if i + 8 < len(input_string) else len(input_string) - i
current_byte = input_string[i: i + current_len]
# convert to int first
byte_int = int(current_byte, 2)
        # convert the int to an ascii chr
byte_chr = chr(byte_int)
result_string += byte_chr
return result_string.encode('UTF-8')
def _byte_string_to_bit_string(byte_string):
"""
Converts byte_string (b_string) to a string of 0's and 1's
@param byte_string: <b_string> byte-string
    @return: <str> bit-string, i.e. just a string of 0's and 1's
"""
result_string = ''
byte_string = byte_string.decode('UTF-8')
for ch in byte_string:
binary = bin(ord(ch))[2:]
        # pad to an 8-digit binary string
        if len(binary) <= 8:
            binary = '0' * (8 - len(binary)) + binary
result_string += binary
# for last 8 characters, convert 8-digit binary to original binary, b/c of the design of encoding
i = len(result_string) - 8
while i < len(result_string):
if result_string[i] == '0':
result_string = result_string[:i] + result_string[i + 1:]
else:
break
return result_string
def write_binary_to_file(byte_string, file_name):
with open(file_name, 'wb') as f:
f.write(byte_string)
def read_binary_file(file_name):
result_string = b''
with open(file_name, "rb") as f:
byte = f.read(1)
result_string += byte
while byte:
byte = f.read(1)
result_string += byte
return result_string
``` |
{
"source": "jiz148/py-test",
"score": 3
} |
#### File: jinchi/demo/foobar.py
```python
import os
def check_env(env_var_name):
"""
Check and return the type of an environment variable.
supported types:
None
Integer
String
@param env_var_name: environment variable name
@return: string of the type name.
"""
try:
val = os.getenv(env_var_name)
if val is None:
return 'None'
    except Exception:
        return 'None'
    try:
        int(val)
return 'Integer'
except ValueError:
return 'String'
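# Illustrative checks (the environment variable names are hypothetical):
#   os.environ['DEMO_PORT'] = '8080'
#   check_env('DEMO_PORT')        # -> 'Integer'
#   check_env('UNSET_VARIABLE')   # -> 'None'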
``` |
{
"source": "jiz148/rift-projects",
"score": 4
} |
#### File: interview/common/string_interviews.py
```python
def is_isomorphic(word_1: str, word_2: str):
"""
Example:
Input: s = "egg", t = "add"
Output: true
@param word_1: <str> word 1
@param word_2: <str> word 2
@return: <bool> whether two words are isomorphic
"""
return len(set(word_1)) == len(set(word_2)) == len(set(zip(word_1, word_2)))
```
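A quick usage sketch of `is_isomorphic` (the import path follows the file header above; the sample words are illustrative):
```python
from interview.common.string_interviews import is_isomorphic

print(is_isomorphic('egg', 'add'))  # True: e->a, g->d is a consistent one-to-one mapping
print(is_isomorphic('foo', 'bar'))  # False: 'o' would have to map to both 'a' and 'r'
```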
#### File: rift-projects/tests/test_common_generic_interviews.py
```python
import unittest
from interview.common.generic_interviews import binary_to_decimal
class TestStringInterviews(unittest.TestCase):
def setUp(self):
pass
    def test_binary_to_decimal(self):
"""
test interview.common.generic_interviews.binary_to_decimal
"""
tests = [{
"string": "0",
"returned": 0,
}, {
"string": "111",
"returned": 7,
}, {
"string": "11111",
"returned": 31,
}, {
"string": "1010101",
"returned": 85,
}, {
"string": "11111111",
"returned": 255,
}, {
"string": "111111110101",
"returned": 4085,
}, {
"string": "00000111",
"returned": 7,
}, {
"string": "1111111111111111111111111111111",
"returned": 2147483647,
}]
for test in tests:
binary_string = test.get("string")
expected = test.get("returned")
returned = binary_to_decimal(binary_string)
self.assertEqual(expected, returned)
```
#### File: rift-projects/tests/test_util_node.py
```python
import unittest
from interview.utils.node import Node
class TestNode(unittest.TestCase):
def setUp(self):
self.example_node = 'node_1'
self.example_value = 'example_value'
self.example_link_node = 'example_link_node'
self.example_node_with_properties = Node(value=self.example_value, link_node=self.example_link_node)
@staticmethod
def _create_node():
"""
Create a Node instance
@return: <Node> Node instance
"""
return Node('example value')
def test_set_link_node(self):
"""
test interview.utils.node :: Node :: set_link_node
"""
tests = [{
"node": self.example_node,
"returned": self.example_node,
}]
for test in tests:
node = test.get("node")
expected = test.get("returned")
returned_node = self._create_node()
returned_node.set_link_node(node)
returned = returned_node._link_node
self.assertEqual(expected, returned)
def test_get_link_node(self):
"""
test interview.utils.node :: Node :: get_link_node
"""
expected = self.example_link_node
returned = self.example_node_with_properties.get_link_node()
self.assertEqual(expected, returned)
def test_get_value(self):
"""
test interview.utils.node :: Node :: get_value
"""
expected = self.example_value
returned = self.example_node_with_properties.get_value()
self.assertEqual(expected, returned)
```
#### File: rift-projects/tests/test_utils_stack.py
```python
import unittest
from interview.utils.stack import Stack
class TestStack(unittest.TestCase):
def setUp(self):
self.example_basic_stack = Stack()
self.example_empty_item_stack = Stack()
pass
def test_has_space(self):
"""
test interview.utils.stack :: Stack :: _has_space
"""
# when size < limit
self.example_basic_stack.size = 999
returned = self.example_basic_stack._has_space()
self.assertTrue(returned)
# when size >= limit
self.example_basic_stack.size = 1000
returned = self.example_basic_stack._has_space()
self.assertFalse(returned)
def test_is_empty(self):
"""
test interview.utils.stack :: Stack :: is_empty
"""
# when stack is empty
self.example_basic_stack.size = 0
returned = self.example_basic_stack.is_empty()
self.assertTrue(returned)
# when stack is not empty
self.example_basic_stack.size = 1
returned = self.example_basic_stack.is_empty()
self.assertFalse(returned)
def test_peek(self):
"""
test interview.utils.stack :: Stack :: peek
"""
self.example_empty_item_stack.reset_from_list([1, 2, 3, 4, 5])
expected = 1
returned = self.example_empty_item_stack.peek()
self.assertEqual(expected, returned)
# when size = 0
expected = None
self.example_empty_item_stack = Stack()
returned = self.example_empty_item_stack.peek()
self.assertEqual(expected, returned)
def test_pop(self):
"""
test interview.utils.stack :: Stack :: pop
"""
self.example_empty_item_stack.reset_from_list([1, 2, 3, 4, 5])
expected, expected_list = 1, [2, 3, 4, 5]
returned = self.example_empty_item_stack.pop()
returned_list = self.example_empty_item_stack.to_list()
self.assertEqual((expected, expected_list), (returned, returned_list))
# when size = 0
self.example_empty_item_stack = Stack()
expected, expected_list = None, []
returned = self.example_empty_item_stack.pop()
returned_list = self.example_empty_item_stack.to_list()
self.assertEqual((expected, expected_list), (returned, returned_list))
def test_push(self):
"""
test interview.utils.stack :: Stack :: push
"""
self.example_empty_item_stack.reset_from_list([1, 2, 3, 4, 5])
expected = [9, 1, 2, 3, 4, 5]
self.example_empty_item_stack.push(9)
returned = self.example_empty_item_stack.to_list()
self.assertEqual(expected, returned)
# when stack does not have space
self.example_empty_item_stack.reset_from_list([1, 2, 3, 4, 5])
expected = [1, 2, 3, 4, 5]
self.example_empty_item_stack.limit = 5
self.example_empty_item_stack.push(9)
returned = self.example_empty_item_stack.to_list()
self.assertEqual(expected, returned)
``` |
{
"source": "jiz148/Stock-Game",
"score": 3
} |
#### File: stock_game/common/world.py
```python
from player import Player
class World:
def __init__(self):
self.player_list = []
self.stock_list = []
def add_player(self):
"""
Adds a player
"""
pass
def add_stock(self):
"""
Adds a stock
"""
pass
``` |
{
"source": "jiz148/traffic-sign-detection",
"score": 3
} |
#### File: traffic-sign-detection/german-traffic-sign-recognition/gui.py
```python
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from PIL import ImageTk, Image
import numpy as np
# load the trained model to classify sign
from keras.models import load_model
model = load_model('traffic_classifier.h5')
# dictionary to label all traffic signs class.
classes = {1: 'Speed limit (20km/h)',
2: 'Speed limit (30km/h)',
3: 'Speed limit (50km/h)',
4: 'Speed limit (60km/h)',
5: 'Speed limit (70km/h)',
6: 'Speed limit (80km/h)',
7: 'End of speed limit (80km/h)',
8: 'Speed limit (100km/h)',
9: 'Speed limit (120km/h)',
10: 'No passing',
11: 'No passing veh over 3.5 tons',
12: 'Right-of-way at intersection',
13: 'Priority road',
14: 'Yield',
15: 'Stop',
16: 'No vehicles',
17: 'Veh > 3.5 tons prohibited',
18: 'No entry',
19: 'General caution',
20: 'Dangerous curve left',
21: 'Dangerous curve right',
22: 'Double curve',
23: 'Bumpy road',
24: 'Slippery road',
25: 'Road narrows on the right',
26: 'Road work',
27: 'Traffic signals',
28: 'Pedestrians',
29: 'Children crossing',
30: 'Bicycles crossing',
31: 'Beware of ice/snow',
32: 'Wild animals crossing',
33: 'End speed + passing limits',
34: 'Turn right ahead',
35: 'Turn left ahead',
36: 'Ahead only',
37: 'Go straight or right',
38: 'Go straight or left',
39: 'Keep right',
40: 'Keep left',
41: 'Roundabout mandatory',
42: 'End of no passing',
43: 'End no passing vehicle with a weight greater than 3.5 tons'
}
# initialise GUI
top = tk.Tk()
top.geometry('800x600')
top.title('Traffic sign classification')
top.configure(background='#CDCDCD')
label = Label(top, background='#CDCDCD', font=('arial', 15, 'bold'))
sign_image = Label(top)
def classify(file_path):
global label_packed
image = Image.open(file_path)
image = image.resize((30,30))
image = np.expand_dims(image, axis=0)
image = np.array(image)
pred_x = model.predict([image])
pred = np.argmax(pred_x, axis=1)[0]
sign = classes[pred+1]
label.configure(foreground='#011638', text=sign)
def show_classify_button(file_path):
classify_b = Button(top, text="Classify Image", command=lambda: classify(file_path), padx=10, pady=5)
classify_b.configure(background='#364156', foreground='white', font=('arial', 10, 'bold'))
classify_b.place(relx=0.79, rely=0.46)
def upload_image():
try:
file_path = filedialog.askopenfilename()
uploaded = Image.open(file_path)
uploaded.thumbnail(((top.winfo_width()/2.25), (top.winfo_height()/2.25)))
im = ImageTk.PhotoImage(uploaded)
sign_image.configure(image=im)
sign_image.image = im
label.configure(text='')
show_classify_button(file_path)
    except Exception:
pass
upload = Button(top, text="Upload an image", command=upload_image, padx=10, pady=5)
upload.configure(background='#364156', foreground='white', font=('arial', 10, 'bold'))
upload.pack(side=BOTTOM, pady=50)
sign_image.pack(side=BOTTOM, expand=True)
label.pack(side=BOTTOM, expand=True)
heading = Label(top, text="check traffic sign", pady=20, font=('arial', 20, 'bold'))
heading.configure(background='#CDCDCD', foreground='#364156')
heading.pack()
top.mainloop()
``` |
{
"source": "jiz148/wujiang",
"score": 2
} |
#### File: wujiang/wujiang/app.py
```python
import os
from flask import abort, Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from sqlalchemy import exc
# init app
app = Flask(__name__)
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
# Database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(DATA_DIR, 'wujiang_index.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Init db
db = SQLAlchemy(app)
# Init ma
ma = Marshmallow(app)
# wujiang Class/Model
class Wujiang(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True)
level = db.Column(db.Integer)
profession = db.Column(db.String(100))
type = db.Column(db.String(100))
race = db.Column(db.String(100))
attack = db.Column(db.Float)
defense = db.Column(db.Float)
speed = db.Column(db.Float)
ranging = db.Column(db.Float)
mag = db.Column(db.Float)
spells = db.Column(db.String(500))
specs = db.Column(db.String(500))
def __init__(self, name, level, profession, type, race, attack, defense, speed, ranging, mag, spells, specs):
self.name = name
self.level = level
self.profession = profession
self.type = type
self.race = race
self.attack = attack
self.defense = defense
self.speed = speed
self.ranging = ranging
self.mag = mag
self.spells = spells
self.specs = specs
# wujiang Schema
class WujiangSchema(ma.Schema):
class Meta:
fields = (
'id',
'name',
'level',
'profession',
'type',
'race',
'attack',
'defense',
'speed',
'ranging',
'mag',
'spells',
'specs',
)
# init schema
wujiang_schema = WujiangSchema()
wujiangs_schema = WujiangSchema(many=True)
# create a wujiang
@app.route('/wujiang', methods=['POST'])
def add_wujiang():
attrs = _get_all_attrs()
new_wujiang = Wujiang(
attrs[0],
attrs[1],
attrs[2],
attrs[3],
attrs[4],
attrs[5],
attrs[6],
attrs[7],
attrs[8],
attrs[9],
attrs[10],
attrs[11])
db.session.add(new_wujiang)
db.session.commit()
return wujiang_schema.jsonify(new_wujiang)
# Get Single Wujiang
@app.route('/wujiang/<id>', methods=['GET'])
def get_wujiang(id):
wujiang = Wujiang.query.get_or_404(id)
return wujiang_schema.jsonify(wujiang)
# get all Wujiangs
@app.route('/wujiang/', methods=['GET'])
def get_wujiangs():
result = Wujiang.query
arg_keys, arg_values = request.args.keys(), request.args.values()
for key, value in zip(arg_keys, arg_values):
try:
if key == 'spells':
search = '%{}%'.format(value)
result = result.filter(Wujiang.spells.like(search))
elif key == 'specs':
search = '%{}%'.format(value)
result = result.filter(Wujiang.specs.like(search))
elif key == 'name':
search = '%{}%'.format(value)
result = result.filter(Wujiang.name.like(search))
else:
result = result.filter_by(**{key: value})
except exc.InvalidRequestError:
abort(400)
# print('result!!!!!!!!! is !!!!', result.all())
return wujiangs_schema.jsonify(result) if result.all() else abort(404)
# update a wujiang
@app.route('/wujiang/<id>', methods=['PUT'])
def update_wujiang(id):
updating_wujiang = Wujiang.query.get_or_404(id)
attrs = _get_all_attrs()
# Update if not None
updated_wujiang = _update_all_attrs(updating_wujiang, attrs)
db.session.commit()
return wujiang_schema.jsonify(updated_wujiang)
# Delete Wujiang
@app.route('/wujiang/<id>', methods=['DELETE'])
def delete_wujiang(id):
deleting_wujiang = Wujiang.query.get_or_404(id)
db.session.delete(deleting_wujiang)
db.session.commit()
return wujiang_schema.jsonify(deleting_wujiang)
def _get_all_attrs():
"""
    Gets all the attributes of wujiang from the request JSON body
@return: <Tuple> Tuple of attributes
"""
name = request.json.get('name')
level = request.json.get('level')
profession = request.json.get('profession')
type = request.json.get('type')
race = request.json.get('race')
attack = request.json.get('attack')
defense = request.json.get('defense')
speed = request.json.get('speed')
ranging = request.json.get('ranging')
mag = request.json.get('mag')
spells = request.json.get('spells')
specs = request.json.get('specs')
return name, level, profession, type, race, attack, defense, speed, ranging, mag, spells, specs
def _update_all_attrs(wujiang, attrs):
"""
Updates wujiang with tuple of new attributes only if the attribute is not None
@param wujiang: updating wujiang
@param attrs: new attrs
@return: updated wujiang
"""
if attrs[0]:
wujiang.name = attrs[0]
if attrs[1]:
wujiang.level = attrs[1]
if attrs[2]:
wujiang.profession = attrs[2]
if attrs[3]:
wujiang.type = attrs[3]
if attrs[4]:
wujiang.race = attrs[4]
if attrs[5]:
wujiang.attack = attrs[5]
if attrs[6]:
wujiang.defense = attrs[6]
if attrs[7]:
wujiang.speed = attrs[7]
if attrs[8]:
wujiang.ranging = attrs[8]
if attrs[9]:
wujiang.mag = attrs[9]
if attrs[10]:
wujiang.spells = attrs[10]
if attrs[11]:
wujiang.specs = attrs[11]
return wujiang
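# Illustrative requests against the routes above (field values are hypothetical):
#   POST /wujiang                      with a JSON body holding the fields read in _get_all_attrs()
#   GET  /wujiang/?name=guan&level=5   partial match on name, exact match on level
#   GET  /wujiang/3                    fetch by id (404 if missing); PUT/DELETE on the same URL update or remove it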
# Run Server
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "jiz148/Yuan",
"score": 3
} |
#### File: yuan/common/game.py
```python
from .battle_field import BattleField
from .enemy import Enemy
from .game_panel import GamePanel
import random
from .yuan_li import YuanLi
class Game:
def __init__(self):
self.yuan = YuanLi()
pass
def battle(self, enemy):
battle_field = BattleField(self.yuan, enemy)
battle_field.start()
victory = battle_field.fight()
if victory:
print('Yuan won the battle! level up...')
self.yuan.level_up()
print('Yuan is now level {}'.format(self.yuan._level))
def encounter(self):
pass
def main(self):
"""
Main program
"""
self.start()
flag = True
game_panel = GamePanel(self.yuan)
while flag:
choice = game_panel.show_and_take_input()
choice_chart = {
"a": self.battle,
"b": self.yuan.rest,
"c": self.yuan.talk,
"d": self.yuan.print_status,
"q": "quit",
}
item = choice_chart.get(choice)
if item is None:
print('?')
elif item == 'quit':
flag = False
elif '.battle' in str(item):
item(self.generate_enemy())
else:
item()
print("Game Over...Please play again")
def start(self):
print('Starting game...')
pass
@staticmethod
def generate_enemy(name=None, level=None):
"""
generate enemy by name and level
@return: enemy object
"""
        if name is None:
random.seed()
name_code = random.randint(0, 99999)
name = 'Enemy ' + str(name_code)
if level is None:
level = random.randint(1, 20)
return Enemy(name, level)
``` |
{
"source": "jiz148/z-test",
"score": 3
} |
#### File: z-test/common/z_table.py
```python
from scipy.integrate import quad
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def print_normal_distribution():
# print standard normal distribution
x = np.linspace(-4, 4, num=100)
constant = 1.0 / np.sqrt(2*np.pi)
pdf_normal_distribution = constant * np.exp((-x**2) / 2.0)
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(x, pdf_normal_distribution)
ax.set_ylim(0)
ax.set_title('Standardized Normal Distribution', size=20)
ax.set_ylabel('Probability Density', size=20)
plt.show()
def normal_p_density(x):
c = 1.0 / np.sqrt(2*np.pi)
return c * np.exp((-x**2) / 2.0)
def calculate_p_from_z(z):
"""
two tails
"""
p1, _ = quad(normal_p_density, np.NINF, -z)
p2, _ = quad(normal_p_density, z, np.Inf)
return p1 + p2
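# Quick sanity check: the two-tailed p-value for z = 1.96 is about 0.05,
# so calculate_p_from_z(1.96) should return roughly 0.05.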
def get_z_table():
std_normal_table = pd.DataFrame(data=[],
index=np.round(np.arange(0, 3.5, .1), 2),
columns=np.round(np.arange(0.00, .1, 0.01), 2)
)
for i in std_normal_table.index:
for c in std_normal_table.columns:
z = np.round(i + c, 2)
value, _ = quad(normal_p_density, np.NINF, z)
std_normal_table.loc[i, c] = value
std_normal_table.index = std_normal_table.index.astype(str)
std_normal_table.columns = [str(column).ljust(4, '0') for column in std_normal_table.columns]
return std_normal_table
if __name__ == "__main__":
# print z distribution
# print_normal_distribution()
# calculate cumulative distribution
# print(calculate_p_from_z(3), '\n')
# print z-table for approximating p-value by hand
print(get_z_table().to_markdown())
``` |
{
"source": "jizai945/luck_draw",
"score": 2
} |
#### File: luck_draw/ui/luck_draw.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(707, 458)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setObjectName("gridLayout")
self.btn_setrule = QtWidgets.QPushButton(Form)
self.btn_setrule.setObjectName("btn_setrule")
self.gridLayout.addWidget(self.btn_setrule, 0, 0, 1, 1)
self.btn_start = QtWidgets.QPushButton(Form)
self.btn_start.setObjectName("btn_start")
self.gridLayout.addWidget(self.btn_start, 0, 1, 1, 1)
self.scrollArea = QtWidgets.QScrollArea(Form)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 687, 408))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout.addWidget(self.scrollArea, 1, 0, 1, 2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.btn_setrule.setText(_translate("Form", "设置规则"))
self.btn_start.setText(_translate("Form", "开始抽奖"))
``` |
{
"source": "jizai945/py-shell-tool",
"score": 2
} |
#### File: jizai945/py-shell-tool/run_shell.py
```python
import subprocess, datetime, os, inspect, ctypes, signal, sys
import psutil
from threading import Timer
# Raise an exception inside a thread to force it to exit
def async_raise(tid, exctype):
"""Raises an exception in the threads with id tid"""
try:
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
except Exception as e:
print(e)
pass
def kill_command(p):
"""终止命令的函数"""
print('kill command')
# kill所有子进程
proc_pid = p.pid
parent_proc = psutil.Process(proc_pid)
for child_proc in parent_proc.children(recursive=True):
child_proc.kill()
parent_proc.kill()
def execute(command, timeout, cwd=''):
    '''Execute a command, yielding one line of output at a time'''
if (cwd == ''):
app = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, \
stdout = subprocess.PIPE, stderr=subprocess.STDOUT)
else:
app = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, \
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd = cwd)
    # set a timer to kill the command on timeout
if timeout != 0:
timer = Timer(timeout, kill_command, args=(app,))
timer.start()
    for i in iter(app.stdout.readline, b''):
if not i:
break
yield(i.decode('gbk', 'replace'))
# yield (i.decode('utf-8', 'replace'))
if timeout != 0:
        # check whether the timeout timer has fired
        if not timer.is_alive():
            yield f"Timed out after {timeout}s"
yield(408)
else:
timer.cancel()
    # return the final exit code
stdout, stderr = app.communicate()
yield(app.returncode)
return 0
def execute_retapp(command, timeout):
'''
    Execute a command, yielding one line of output at a time.
    The first yielded value is the subprocess object, so the caller can kill the command externally.
'''
app = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, \
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
yield(app)
    # set a timer to kill the command on timeout
    if timeout != 0:
        timer = Timer(timeout, kill_command, args=(app,))
timer.start()
    for i in iter(app.stdout.readline, b''):
if not i:
break
yield(i.decode('gbk'))
if timeout != 0:
timer.cancel()
    # return the final exit code
stdout, stderr = app.communicate()
yield(app.returncode)
return 0
def execute_char(command, timeout, cwd=''):
'''
    Execute a command, yielding one character at a time, decoded as gbk
'''
if (cwd == ''):
app = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, \
stdout = subprocess.PIPE, stderr=subprocess.STDOUT, encoding = "gbk")
else:
app = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE, \
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd = cwd, encoding = "gbk")
    # set a timer to kill the command on timeout
if timeout != 0:
timer = Timer(timeout, kill_command, args=(app,))
timer.start()
for i in iter(lambda: app.stdout.read(1), ''):
if not i:
break
yield(i)
if timeout != 0:
        # check whether the timeout timer has fired
        if not timer.is_alive():
            yield f"Timed out after {timeout}s"
yield(408)
else:
timer.cancel()
    # return the final exit code
stdout, stderr = app.communicate()
yield(app.returncode)
return 0
if __name__ == '__main__':
# command = 'ping www.baidu.com'
command = 'python progress_bar.py'
f = execute_char(command, 0)
while True:
read = next(f)
if type(read) == type(''):
print(read, end="")
# sys.stdout.write(read)
sys.stdout.flush()
else:
# 执行结果
            print(f'Exit code: {read}')
break
    # one line at a time
f = execute(command, 0)
while True:
read = next(f)
if type(read) == type(''):
# print(read, end="")
sys.stdout.write(read)
sys.stdout.flush()
else:
# 执行结果
            print(f'Exit code: {read}')
break
``` |
{
"source": "jizai945/serial-canopen",
"score": 2
} |
#### File: jizai945/serial-canopen/example.py
```python
import types
import canopen
import time
import serial
import struct
import traceback
from can import Message
def listen_cb(msg):
print(f'lcb: {msg}')
def my_serial_send(self, msg, timeout=None):
    '''Overridden send method that frames the CAN message for the serial adapter'''
print('my_serial_send')
try:
a_id = struct.pack('<I', msg.arbitration_id)
except struct.error:
raise ValueError('Arbitration Id is out of range')
    send_array = bytearray([0x57, 0x58])  # USB frame header
    send_array += bytearray(a_id[2:4])  # CAN id
    send_array += bytearray(msg.data[:msg.dlc])  # CAN data
    send_array += bytearray([0 for _ in range(8 - msg.dlc)])  # zero padding
    send_array += bytearray([msg.dlc])  # actual data length
    send_array += bytearray([0xA8, 0XA7])  # USB frame tail
    self.ser.write(bytearray(send_array))  # send
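    # Resulting frame layout (15 bytes), which my_recv_internal below parses back:
    #   [0x57 0x58][CAN id: 2 bytes][data: 8 bytes][dlc: 1 byte][0xA8 0xA7]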
def my_recv_internal(self, timeout):
    '''Overridden receive method that parses one frame from the serial adapter'''
try:
# ser.read can return an empty string
# or raise a SerialException
rx_byte = self.ser.read()
except serial.SerialException:
return None, False
try:
if rx_byte and ord(rx_byte) == 0x57:
rx_byte = self.ser.read()
if not (rx_byte and ord(rx_byte) == 0x58):
                print('unexpected frame header')
return None, False
s = bytearray([0, 0, 0, 0])
t = bytearray(self.ser.read(2))
s[1], s[0] = t[0], t[1]
arb_id = (struct.unpack('<I', s))[0]
data = self.ser.read(8)
dlc = ord(self.ser.read())
rxd_byte = self.ser.read(2)
timestamp = time.time()
if rxd_byte and rxd_byte[0] == 0xA8 and rxd_byte[1] == 0xA7:
# received message data okay
msg = Message(timestamp=timestamp,
arbitration_id=arb_id,
dlc=8,
data=data)
return msg, False
else:
return None, False
except:
return None, False
if __name__ == '__main__':
COM_PORT = 'COM6'
try:
# Start with creating a network representing one CAN bus
network = canopen.Network()
# Add some nodes with corresponding Object Dictionaries
node = canopen.RemoteNode(6, './CANopenSocket.eds')
network.add_node(node)
node2 = canopen.RemoteNode(7, './e35.eds')
network.add_node(node2)
# Add some nodes with corresponding Object Dictionaries
network.connect(bustype="serial", channel=COM_PORT)
        network.bus.send = types.MethodType(my_serial_send, network.bus)  # override the send method
        network.bus._recv_internal = types.MethodType(my_recv_internal, network.bus)  # override the receive method
        network.listeners.append(listen_cb)  # add a listener callback
# send test message
network.send_message(0x06, bytes([0x11, 0x22]))
print('-'*30)
time.sleep(3)
network.sync.stop()
network.disconnect()
except Exception as e:
print(traceback.format_exc())
print('can err')
``` |
{
"source": "Jizanator/botty",
"score": 3
} |
#### File: botty/src/config.py
```python
import configparser
import numpy as np
import os
class Config:
def _select_val(self, section: str, key: str = None):
if section in self._custom and key in self._custom[section]:
return self._custom[section][key]
elif section in self._config:
return self._config[section][key]
elif section in self._pickit_config:
return self._pickit_config[section][key]
elif section in self._shop_config:
return self._shop_config[section][key]
else:
return self._game_config[section][key]
def __init__(self, print_warnings: bool = False):
# print_warnings, what a hack... here it is, not making the effort
# passing a single config instance through bites me in the ass
self._print_warnings = print_warnings
self._config = configparser.ConfigParser()
self._config.read('config/params.ini')
self._game_config = configparser.ConfigParser()
self._game_config.read('config/game.ini')
self._pickit_config = configparser.ConfigParser()
self._pickit_config.read('config/pickit.ini')
self._shop_config = configparser.ConfigParser()
self._shop_config.read('config/shop.ini')
self._custom = configparser.ConfigParser()
if os.environ.get('RUN_ENV') != "test" and os.path.exists('config/custom.ini'):
self._custom.read('config/custom.ini')
self.general = {
"saved_games_folder": self._select_val("general", "saved_games_folder"),
"name": self._select_val("general", "name"),
"monitor": int(self._select_val("general", "monitor")),
"max_game_length_s": float(self._select_val("general", "max_game_length_s")),
"exit_key": self._select_val("general", "exit_key"),
"resume_key": self._select_val("general", "resume_key"),
"auto_settings_key": self._select_val("general", "auto_settings_key"),
"graphic_debugger_key": self._select_val("general", "graphic_debugger_key"),
"logg_lvl": self._select_val("general", "logg_lvl"),
"randomize_runs": bool(int(self._select_val("general", "randomize_runs"))),
"difficulty": self._select_val("general", "difficulty"),
"custom_message_hook": self._select_val("general", "custom_message_hook"),
"discord_status_count": False if not self._select_val("general", "discord_status_count") else int(self._select_val("general", "discord_status_count")),
"info_screenshots": bool(int(self._select_val("general", "info_screenshots"))),
"loot_screenshots": bool(int(self._select_val("general", "loot_screenshots"))),
}
# Added for dclone ip hunting
self.dclone = {
"region_ips": self._select_val("dclone", "region_ips"),
"dclone_hotip": self._select_val("dclone", "dclone_hotip"),
}
self.routes = {}
for key in self._config["routes"]:
self.routes[key] = bool(int(self._select_val("routes", key)))
self.char = {
"type": self._select_val("char", "type"),
"show_items": self._select_val("char", "show_items"),
"inventory_screen": self._select_val("char", "inventory_screen"),
"stand_still": self._select_val("char", "stand_still"),
"force_move": self._select_val("char", "force_move"),
"num_loot_columns": int(self._select_val("char", "num_loot_columns")),
"take_health_potion": float(self._select_val("char", "take_health_potion")),
"take_mana_potion": float(self._select_val("char", "take_mana_potion")),
"take_rejuv_potion_health": float(self._select_val("char", "take_rejuv_potion_health")),
"take_rejuv_potion_mana": float(self._select_val("char", "take_rejuv_potion_mana")),
"heal_merc": float(self._select_val("char", "heal_merc")),
"heal_rejuv_merc": float(self._select_val("char", "heal_rejuv_merc")),
"chicken": float(self._select_val("char", "chicken")),
"merc_chicken": float(self._select_val("char", "merc_chicken")),
"tp": self._select_val("char", "tp"),
"belt_rows": int(self._select_val("char", "belt_rows")),
"show_belt": self._select_val("char", "show_belt"),
"potion1": self._select_val("char", "potion1"),
"potion2": self._select_val("char", "potion2"),
"potion3": self._select_val("char", "potion3"),
"potion4": self._select_val("char", "potion4"),
"belt_rejuv_columns": int(self._select_val("char", "belt_rejuv_columns")),
"belt_hp_columns": int(self._select_val("char", "belt_hp_columns")),
"belt_mp_columns": int(self._select_val("char", "belt_mp_columns")),
"stash_gold": bool(int(self._select_val("char", "stash_gold"))),
"gold_trav_only": bool(int(self._select_val("char", "gold_trav_only"))),
"use_merc": bool(int(self._select_val("char", "use_merc"))),
"pre_buff_every_run": bool(int(self._select_val("char", "pre_buff_every_run"))),
"cta_available": bool(int(self._select_val("char", "cta_available"))),
"weapon_switch": self._select_val("char", "weapon_switch"),
"battle_orders": self._select_val("char", "battle_orders"),
"battle_command": self._select_val("char", "battle_command"),
"casting_frames": int(self._select_val("char", "casting_frames")),
"atk_len_trav": float(self._select_val("char", "atk_len_trav")),
"atk_len_pindle": float(self._select_val("char", "atk_len_pindle")),
"atk_len_eldritch": float(self._select_val("char", "atk_len_eldritch")),
"atk_len_shenk": float(self._select_val("char", "atk_len_shenk")),
"atk_len_nihlatak": float(self._select_val("char", "atk_len_nihlatak")),
"hork_time_pindle": float(self._select_val("char", "hork_time_pindle")),
"hork_time_eldritch": float(self._select_val("char", "hork_time_eldritch")),
"hork_time_shenk": float(self._select_val("char", "hork_time_shenk")),
"hork_time_council": float(self._select_val("char", "hork_time_council")),
"hork_time_nihlatak": float(self._select_val("char", "hork_time_nihlatak")),
}
self.sorceress = dict(self._config["sorceress"])
if "sorceress" in self._custom:
self.sorceress.update(dict(self._custom["sorceress"]))
self.hammerdin = self._config["hammerdin"]
if "hammerdin" in self._custom:
self.hammerdin.update(self._custom["hammerdin"])
self.trapsin = self._config["trapsin"]
if "trapsin" in self._custom:
self.trapsin.update(self._custom["trapsin"])
self.barbarian = self._config["barbarian"]
if "barbarian" in self._custom:
self.barbarian.update(self._custom["barbarian"])
self.advanced_options = {
"pathing_delay_factor": min(max(int(self._select_val("advanced_options", "pathing_delay_factor")), 1), 10),
"message_headers": self._select_val("advanced_options", "message_headers"),
"message_body_template": self._select_val("advanced_options", "message_body_template"),
"message_highlight": bool(int(self._select_val("advanced_options", "message_highlight"))),
}
self.items = {}
for key in self._pickit_config["items"]:
self.items[key] = int(self._select_val("items", key))
if self.items[key] and not os.path.exists(f"./assets/items/{key}.png") and self._print_warnings:
print(f"Warning: You activated {key} in pickit, but there is no img available in assets/items")
self.colors = {}
for key in self._game_config["colors"]:
self.colors[key] = np.split(np.array([int(x) for x in self._select_val("colors", key).split(",")]), 2)
self.ui_pos = {}
for key in self._game_config["ui_pos"]:
self.ui_pos[key] = int(self._select_val("ui_pos", key))
self.ui_roi = {}
for key in self._game_config["ui_roi"]:
self.ui_roi[key] = np.array([int(x) for x in self._select_val("ui_roi", key).split(",")])
self.path = {}
for key in self._game_config["path"]:
self.path[key] = np.reshape(np.array([int(x) for x in self._select_val("path", key).split(",")]), (-1, 2))
self.shop = {
"shop_trap_claws": bool(int(self._select_val("claws", "shop_trap_claws"))),
"shop_melee_claws": bool(int(self._select_val("claws", "shop_melee_claws"))),
"shop_3_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_3_skills_ias_gloves"))),
"shop_2_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_2_skills_ias_gloves"))),
"trap_min_score": int(self._select_val("claws", "trap_min_score")),
"melee_min_score": int(self._select_val("claws", "melee_min_score")),
}
if __name__ == "__main__":
config = Config(print_warnings=True)
# Check if any added items miss templates
for k in config.items:
if not os.path.exists(f"./assets/items/{k}.png"):
print(f"Template not found: {k}")
# Check if any item templates miss a config
for filename in os.listdir(f'assets/items'):
filename = filename.lower()
if filename.endswith('.png'):
item_name = filename[:-4]
blacklist_item = item_name.startswith("bl__")
if item_name not in config.items and not blacklist_item:
print(f"Config not found for: " + filename)
``` |
{
"source": "Jizanthapus/curse-scraper",
"score": 3
} |
#### File: Jizanthapus/curse-scraper/curse_scraper.py
```python
print('curse-scraper beginning\n')
# Plenty of imports
try:
import json
import time
import sys
import urllib.parse
from lxml import html
import os.path
from multiprocessing.dummy import Pool
import requests
import datetime
# Imports for Sheets API
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
except ImportError:
print('ERROR: Imports missing')
sys.exit()
# Try to open variable file to fire up some variables
try:
PROGRAM_VARS_FILE = 'programVars.json'
print('Loading variable file:', PROGRAM_VARS_FILE)
with open(PROGRAM_VARS_FILE) as FILE:
PROGRAM_VARS = json.load(FILE)
USER_VARS_FILE = PROGRAM_VARS.get('userVarsName')
print('Loading variable file:', USER_VARS_FILE, '\n')
with open(USER_VARS_FILE) as FILE:
USER_VARS = json.load(FILE)
SPREADSHEET_ID = USER_VARS.get('spreadsheetId')
RANGE_1 = PROGRAM_VARS.get('range1')
RANGE_2_PRE = PROGRAM_VARS.get('range2pre')
RANGE_3_PRE = PROGRAM_VARS.get('range3pre')
RANGE_4 = PROGRAM_VARS.get('range4')
MOD_URL_PRE = PROGRAM_VARS.get('modURLpre')
MOD_URL_POST = PROGRAM_VARS.get('modURLpost')
LOCAL_PATH = USER_VARS.get('localPath')
UPDATE_LIST_NAME = PROGRAM_VARS.get('updateListName')
NUM_OF_PROCESSES = int(USER_VARS.get('processes'))
FILTERS = PROGRAM_VARS.get('filters')
except FileNotFoundError:
print('ERROR: One or more of the variable files were not found')
sys.exit()
print('*** Running with the following settings ***')
print('Number of processes for downloads and HTTP requests:', NUM_OF_PROCESSES)
print('Files will be downloaded to:', LOCAL_PATH, '\n')
# Fire up some more variables
FILES_TO_DOWNLOAD = {}
ALL_MODS_INFO = {}
MODS_NEEDING_UPDATES = []
INFO_TO_WRITE = []
UPDATE_LIST = []
POOL = Pool(NUM_OF_PROCESSES)
def download_entry(ENTRY):
'''
Function for downloading files
'''
ENTRY_JAR = FILES_TO_DOWNLOAD[ENTRY].get('jar')
ENTRY_PATH = LOCAL_PATH + ENTRY_JAR
ENTRY_URL = FILES_TO_DOWNLOAD[ENTRY].get('downloadURL')
if os.path.isfile(ENTRY_PATH):
print('Already exists:', ENTRY_JAR)
else:
r = requests.get(ENTRY_URL, allow_redirects=True)
        with open(ENTRY_PATH, 'wb') as jar_file:
            jar_file.write(r.content)
print('Downloaded:', ENTRY, 'as', ENTRY_JAR)
def get_info_from_curse(line):
'''
Retrieve the mod info from curse
'''
MOD_NAME = line[0]
MOD_MC_VER = line[1]
PROJECT_ID = line[2].split('/')[4]
if len(line) == 5:
if line[3] == '':
OLD_FILE_ID = 0
else:
OLD_FILE_ID = int(line[3])
else:
        while len(line) < 5:
line.append(0)
OLD_FILE_ID = int(line[3])
MOD_URL = MOD_URL_PRE + PROJECT_ID + MOD_URL_POST + FILTERS.get(MOD_MC_VER)
print('Checking on', MOD_NAME)
PAGE = requests.get(MOD_URL)
PAGE_DATA = html.fromstring(PAGE.content)
for TABLE in PAGE_DATA.xpath('//table[@class="listing listing-project-file project-file-listing b-table b-table-a"]'):
DOWNLOAD_PATH = TABLE.xpath('//a[@class="button tip fa-icon-download icon-only"]/@href')
if not DOWNLOAD_PATH:
print('Something went wrong retrieving the download path for', MOD_NAME)
sys.exit()
DOWNLOAD_URL = 'https://minecraft.curseforge.com' + DOWNLOAD_PATH[0]
NEW_FILE_ID = int(DOWNLOAD_PATH[0].split('/')[4])
if not NEW_FILE_ID:
print('Something went wrong retrieving the new file ID for', MOD_NAME)
print(DOWNLOAD_PATH)
sys.exit()
try:
r = requests.get(DOWNLOAD_URL, allow_redirects=True)
REAL_URL = r.url
except ConnectionError as e:
print(e.fp.read())
sys.exit()
FILENAME = REAL_URL.split('/')[-1]
FINAL_FILENAME = urllib.parse.unquote(FILENAME)
if FINAL_FILENAME[-4:] != '.jar':
print('Error: Something changed with the download URL. Report this.')
sys.exit()
ALL_MODS_INFO[MOD_NAME] = {'currentFileID':NEW_FILE_ID,
'jar':FINAL_FILENAME,
'downloadURL':DOWNLOAD_URL}
if NEW_FILE_ID > OLD_FILE_ID:
MODS_NEEDING_UPDATES.append(MOD_NAME)
FILES_TO_DOWNLOAD[MOD_NAME] = {'currentFileID':NEW_FILE_ID,
'jar':FINAL_FILENAME,
'downloadURL':DOWNLOAD_URL}
line[3] = NEW_FILE_ID
line[4] = DOWNLOAD_URL
time.sleep(1)
# Setup the Sheets API
print('Attempting to contact Sheets\n')
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
STORE = file.Storage('credentials.json')
CREDS = STORE.get()
if not CREDS or CREDS.invalid:
FLOW = client.flow_from_clientsecrets('client_secret.json', SCOPES)
CREDS = tools.run_flow(FLOW, STORE)
SERVICE = build('sheets', 'v4', http=CREDS.authorize(Http()))
# Call the Sheets API
# RESULT_1 is a range of values that will contain how many mods are in the list and when this program was last run
RESULT_1 = SERVICE.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID, majorDimension='COLUMNS', range=RANGE_1).execute()
# Use RESULT_1 to determine how many cells to request for RESULT_2 and RESULT_3
NUM_MODS = RESULT_1.get('values')[0][0]
print('Sheet indicates there are', NUM_MODS, 'mods to check\n')
RANGE_2_BEGIN = RANGE_2_PRE[-1:]
RANGE_2_END = int(NUM_MODS) + int(RANGE_2_BEGIN) - 1
RANGE_2 = RANGE_2_PRE[:-1] + str(RANGE_2_END)
RANGE_3_BEGIN = RANGE_3_PRE[-1:]
RANGE_3_END = int(NUM_MODS) + int(RANGE_3_BEGIN) - 1
RANGE_3 = RANGE_3_PRE[:-1] + str(RANGE_3_END)
# RESULT_2 contains: mod names, link, old file id, and a download link
RESULT_2 = SERVICE.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID, range=RANGE_2).execute()
# Use the project id from RESULTS_2 to build the Curse URL and get the files page
# then find the latest jar and add it to a list to download
print('Attempting to contact Curse for mod info')
MODS_ONLY = RESULT_2.get('values')
print('Looking for these mods:')
POOL.map(get_info_from_curse, MODS_ONLY)
print()
# Setup time for reasons
TIME = datetime.datetime.now()
TIME_STRING = [[TIME.strftime("%Y-%m-%d %H:%M")]]
TIME_STRING_2 = TIME.strftime("%y-%m-%d_%H-%M-")
TIME_STRING_3 = TIME.strftime("%Y-%m-%d %H:%M")
# See if any mods need updating
if len(MODS_NEEDING_UPDATES) > 0:
# List mods we need to update
print('Update required for the following', len(MODS_NEEDING_UPDATES), 'mods:')
for MOD in MODS_NEEDING_UPDATES:
print(MOD)
print()
# Write out a list of the updated mods
UPDATE_LIST_NAME_TIME = LOCAL_PATH + str(TIME_STRING_2) + str(UPDATE_LIST_NAME)
UPDATE_LIST_DATA = {'date':TIME_STRING_3,
'numModsUpdated': len(MODS_NEEDING_UPDATES),
'modsUpdated':FILES_TO_DOWNLOAD,
'allMods': ALL_MODS_INFO}
with open(UPDATE_LIST_NAME_TIME, 'w') as FILE:
json.dump(UPDATE_LIST_DATA, FILE, indent=2, sort_keys=True)
# Write the updated info back to the sheet
for line in MODS_ONLY:
        INFO_TO_WRITE.append(line[-2:])
MOD_DATA_FOR_SHEET = {'values': INFO_TO_WRITE}
print('Writing updated mod info back to Sheets\n')
RESULT_3 = SERVICE.spreadsheets().values().update(spreadsheetId=SPREADSHEET_ID, range=RANGE_3, valueInputOption='USER_ENTERED', body=MOD_DATA_FOR_SHEET).execute()
# Download the updated mods
print('Starting downloads')
POOL.map(download_entry, FILES_TO_DOWNLOAD)
print()
else:
print('Looks like all the mods are currently up to date\n')
# Update the sheet to show this run
print('Writing the current time back to Sheets\n')
TIME_TO_WRITE = {'values': TIME_STRING}
RESULT_4 = SERVICE.spreadsheets().values().update(spreadsheetId=SPREADSHEET_ID, range=RANGE_4, valueInputOption='USER_ENTERED', body=TIME_TO_WRITE).execute()
print('Program complete\n')
``` |
{
"source": "jizhang02/HC-reg-seg",
"score": 2
} |
#### File: HC-reg-seg/code/data_seg.py
```python
from __future__ import division
import warnings
warnings.filterwarnings('ignore') # ignore warnings
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # only show error
import numpy as np
import pandas as pd
import cv2 as cv
from PIL import Image
import skimage.io as io
from skimage import img_as_ubyte
import os
from medpy.metric.binary import dc, hd, assd
from keras import backend as K
from keras.optimizers import Adam
#from tensorflow.keras.optimizers import Adam # only for doubleunet
from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping
import segmentation_models as sm
from model_seg import *
from doubleu_net import *
def load_data(img_path_aug, img_path_ori, gt_path_aug, gt_path_ori, csv_aug, csv_ori, H, W):
df_ori = pd.read_csv(csv_ori)
df_aug = pd.read_csv(csv_aug)
filename_list_ori = df_ori['filename'].values
filename_list_aug = df_aug['filename'].values
pixel_size_ori = df_ori['pixel size(mm)'].values
hcpx_ori = df_ori['head circumference(px)'].values
img_ori = []
label_ori = []
img_aug = []
label_aug = []
pixel_ori = []
label_hc = []
for (i, f) in enumerate(filename_list_ori):
img = Image.open(img_path_ori + f).convert('RGB') # 3 channels
img = img.resize((H,W))
img = np.array(img)
img_norm = (img - np.mean(img)) / np.std(img) # normalize
img_ori.append(img_norm)
pixel_ori.append(pixel_size_ori[i])
label_hc.append(hcpx_ori[i])
gt = Image.open(gt_path_ori + f).convert('L')
gt = gt.resize((H,W))
gt = np.array(gt)
        gt[gt > 0.5] = 1  # binarize
gt[gt <= 0.5] = 0
gt = gt[:, :, np.newaxis]
label_ori.append(gt)
for (i, f) in enumerate(filename_list_aug):
img = Image.open(img_path_aug + f).convert('RGB')
img = img.resize((H,W))
img = np.array(img)
img_norm = (img - np.mean(img)) / np.std(img) # normalize
# img = img_norm[:, :, np.newaxis]
img_aug.append(img_norm)
gt = Image.open(gt_path_aug + f).convert('L')
gt = gt.resize((H,W))
gt = np.array(gt)
        gt[gt > 0.5] = 1  # binarize
gt[gt <= 0.5] = 0
gt = gt[:, :, np.newaxis]
label_aug.append(gt)
print("load data successfully!")
return np.asarray(img_aug, dtype=np.float64), np.asarray(label_aug), np.asarray(label_hc), \
np.asarray(img_ori, dtype=np.float64), np.asarray(label_ori), np.asarray(pixel_ori)
def save_data(save_path, segment_results, label, shape=(800, 540)):
image_resize = []
label_resize = []
for i, item in enumerate(segment_results):
img = item[:, :, 0]
        if np.isnan(np.sum(img)):
            img = np.nan_to_num(img)  # replace NaNs with 0 so the image keeps its 2D shape for resizing
img[img > 0.5] = 1
img[img <= 0.5] = 0
img_resize = cv.resize(img, shape, interpolation=cv.INTER_AREA)
image_resize.append(img_resize)
io.imsave(os.path.join(save_path, "%d_predict.png" % i), img_as_ubyte(img_resize))
for i, item in enumerate(label):
gt_resize = cv.resize(item, shape, interpolation=cv.INTER_AREA)
label_resize.append(gt_resize)
print("save data successfully!")
return np.asarray(image_resize), np.asarray(label_resize)
def Dice(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection) / (K.sum(y_true_f) + K.sum(y_pred_f))
def Dice_score(gt, seg):
dice = []
for item in range(len(gt)):
dice.append(dc(gt[item], seg[item]))
print("The mean and std dice score is: ", '%.2f' % np.mean(dice), '%.2f' % np.std(dice))
return np.mean(dice), np.std(dice)
def HausdorffDistance_score(gt, seg, pixelsize):
hausdorff = []
for item in range(len(gt)):
if np.sum(seg[item]) > 0: # If the structure is predicted on at least one pixel
hausdorff.append(hd(seg[item], gt[item], voxelspacing=[pixelsize[item], pixelsize[item]]))
print("The mean and std Hausdorff Distance is: ", '%.2f' % np.mean(hausdorff), '%.2f' % np.std(hausdorff))
return np.mean(hausdorff), np.std(hausdorff)
def ASSD_score(gt, seg, pixelsize):
ASSD = []
for item in range(len(gt)):
if np.sum(seg[item]) > 0:
ASSD.append(assd(seg[item], gt[item], voxelspacing=[pixelsize[item], pixelsize[item]]))
print("The mean and std ASSD is: ", '%.2f' % np.mean(ASSD), '%.2f' % np.std(ASSD))
return np.mean(ASSD), np.std(ASSD)
def EllipseCircumference(a,b):
    # Ramanujan approximation II (a and b are the full axis lengths from cv.fitEllipse, so a/2 and b/2 are the semi-axes)
    # HC = pi*(a/2 + b/2)*(1 + 3h/(10 + sqrt(4 - 3*h))), h = ((a/2 - b/2)**2) / ((a/2 + b/2)**2)
h = ((a / 2 - b / 2) ** 2) / ((a / 2 + b / 2) ** 2)
perimeter_ellipse = np.pi * (a / 2 + b / 2) * (1 + 3 * h / (10 + np.sqrt(4 - 3 * h)))
return perimeter_ellipse
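# Sanity check of the approximation: for a circle (a == b) h = 0 and the formula
# reduces to pi * a, e.g. EllipseCircumference(10, 10) ~= 31.4159.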
def HC_calculate(pred):
'''
3 ways to calculate HC:
1. the number of contour points;
2. the length of contour;
3. the length of fitted ellipse
'''
num_points_pp = []
len_contour_pp = []
len_ellipse_pp = []
num_points = []
len_contour = []
len_ellipse = []
for item in range(len(pred)):
image = np.uint8(pred[item])
image[image > 0.5] = 255
image[image <= 0.5] = 0
contour = cv.Canny(image, 80, 160)
contours, hierarchy = cv.findContours(contour, mode=cv.RETR_EXTERNAL, method=cv.CHAIN_APPROX_NONE)
#print("performing with post processing")
max_contour = []
for i in range(len(contours)):
if len(contours[i]) > len(max_contour):
max_contour = contours[i]
if len(max_contour) != 0:
perimeter_contour = cv.arcLength(max_contour, True) # para2:indicating whether the curve is closed or not
# fitting ellipse, return center points, axis
(cx, cy), (a, b), angle = cv.fitEllipse(max_contour)
perimeter_ellipse = EllipseCircumference(a,b)
else:
perimeter_contour = 0
perimeter_ellipse = 0
num_points_pp.append(len(max_contour))
len_contour_pp.append(perimeter_contour)
len_ellipse_pp.append(perimeter_ellipse)
#print("performing without post processing")
if len(contours) !=0:
num_points_unit=0
len_contour_unit=0
len_ellipse_unit=0
for i in range(len(contours)):
num_points_unit +=len(contours[i])
len_contour_unit +=cv.arcLength(contours[i], True)
if len(contours[i])>5:#There should be at least 5 points to fit the ellipse in function 'cv::fitEllipse'
(cx, cy), (a, b), angle = cv.fitEllipse(contours[i])
len_ellipse_unit +=EllipseCircumference(a,b)
else:
num_points_unit = 0
len_contour_unit = 0
len_ellipse_unit = 0
num_points.append(num_points_unit)
len_contour.append(len_contour_unit)
len_ellipse.append(len_ellipse_unit)
return np.asarray(num_points), np.asarray(len_contour), np.asarray(len_ellipse),\
np.asarray(num_points_pp), np.asarray(len_contour_pp), np.asarray(len_ellipse_pp)
def predictions(x_text, y_test, label_hc_px, pixelsize, model, save_path):
score = model.evaluate(x_text, y_test, verbose=2)
print("Loss,iou sore:", '%.2f' % score[0], '%.2f' % score[1])
results = model.predict(x_text) # return probability
pred, y_test = save_data(save_path, results, y_test)
# segmentation analysis
mean_dice, std_dice = Dice_score(y_test, pred)
mean_hd, std_hd = HausdorffDistance_score(y_test, pred, pixelsize)
mean_assd, std_assd = ASSD_score(y_test, pred, pixelsize)
# HC analysis
HC_pred_points, HC_pred_contour, HC_pred_ellipse,\
HC_pred_points_pp, HC_pred_contour_pp, HC_pred_ellipse_pp= HC_calculate(pred)
print("predicted value in mm:", HC_pred_ellipse_pp * pixelsize)
print("predicted value in mm wo pp:", HC_pred_ellipse * pixelsize)
absDiff_points = np.abs((HC_pred_points - label_hc_px) * pixelsize)
absDiff_contour = np.abs((HC_pred_contour - label_hc_px) * pixelsize)
absDiff_ellipse = np.abs((HC_pred_ellipse - label_hc_px) * pixelsize)
absDiff_points_pp = np.abs((HC_pred_points_pp - label_hc_px) * pixelsize)
absDiff_contour_pp = np.abs((HC_pred_contour_pp - label_hc_px) * pixelsize)
absDiff_ellipse_pp = np.abs((HC_pred_ellipse_pp - label_hc_px) * pixelsize)
mean_mae_points = round(np.mean(absDiff_points), 2) # compute mae in mm
mean_mae_contour = round(np.mean(absDiff_contour), 2) # compute mae in mm
mean_mae_ellipse = round(np.mean(absDiff_ellipse), 2) # compute mae in mm
mean_mae_points_pp = round(np.mean(absDiff_points_pp), 2) # compute mae in mm
mean_mae_contour_pp = round(np.mean(absDiff_contour_pp), 2) # compute mae in mm
mean_mae_ellipse_pp = round(np.mean(absDiff_ellipse_pp), 2) # compute mae in mm
std_mae_points = round(np.std(absDiff_points), 2)
std_mae_contour = round(np.std(absDiff_contour), 2)
std_mae_ellipse = round(np.std(absDiff_ellipse), 2)
std_mae_points_pp = round(np.std(absDiff_points_pp), 2)
std_mae_contour_pp = round(np.std(absDiff_contour_pp), 2)
std_mae_ellipse_pp = round(np.std(absDiff_ellipse_pp), 2)
mean_mae_px_points = round(np.mean(absDiff_points / pixelsize), 2) # compute mae in pixel
mean_mae_px_contour = round(np.mean(absDiff_contour / pixelsize), 2) # compute mae in pixel
mean_mae_px_ellipse = round(np.mean(absDiff_ellipse / pixelsize), 2) # compute mae in pixel
mean_mae_px_points_pp = round(np.mean(absDiff_points_pp / pixelsize), 2) # compute mae in pixel
mean_mae_px_contour_pp = round(np.mean(absDiff_contour_pp / pixelsize), 2) # compute mae in pixel
mean_mae_px_ellipse_pp = round(np.mean(absDiff_ellipse_pp / pixelsize), 2) # compute mae in pixel
std_mae_px_points = round(np.std(absDiff_points / pixelsize), 2)
std_mae_px_contour = round(np.std(absDiff_contour / pixelsize), 2)
std_mae_px_ellipse = round(np.std(absDiff_ellipse / pixelsize), 2)
std_mae_px_points_pp = round(np.std(absDiff_points_pp / pixelsize), 2)
std_mae_px_contour_pp = round(np.std(absDiff_contour_pp / pixelsize), 2)
std_mae_px_ellipse_pp = round(np.std(absDiff_ellipse_pp / pixelsize), 2)
mean_pmae_points = np.mean(np.abs((label_hc_px - HC_pred_points) / label_hc_px)) * 100 # compute percentage mae
mean_pmae_contour = np.mean(np.abs((label_hc_px - HC_pred_contour) / label_hc_px)) * 100 # compute percentage mae
mean_pmae_ellipse = np.mean(np.abs((label_hc_px - HC_pred_ellipse) / label_hc_px)) * 100 # compute percentage mae
mean_pmae_points_pp = np.mean(np.abs((label_hc_px - HC_pred_points_pp) / label_hc_px)) * 100 # compute percentage mae
mean_pmae_contour_pp = np.mean(np.abs((label_hc_px - HC_pred_contour_pp) / label_hc_px)) * 100 # compute percentage mae
mean_pmae_ellipse_pp = np.mean(np.abs((label_hc_px - HC_pred_ellipse_pp) / label_hc_px)) * 100 # compute percentage mae
std_pmae_points = np.std(np.abs((label_hc_px - HC_pred_points) / label_hc_px)) * 100
std_pmae_contour = np.std(np.abs((label_hc_px - HC_pred_contour) / label_hc_px)) * 100
std_pmae_ellipse = np.std(np.abs((label_hc_px - HC_pred_ellipse) / label_hc_px)) * 100
std_pmae_points_pp = np.std(np.abs((label_hc_px - HC_pred_points_pp) / label_hc_px)) * 100
std_pmae_contour_pp = np.std(np.abs((label_hc_px - HC_pred_contour_pp) / label_hc_px)) * 100
std_pmae_ellipse_pp = np.std(np.abs((label_hc_px - HC_pred_ellipse_pp) / label_hc_px)) * 100
print('\t HC_mae in mm(points) w/o pp:', round(mean_mae_points, 2), 'mm (+-)', round(std_mae_points, 2), 'mm')
print('\t HC_mae in mm(contour)w/o pp:', round(mean_mae_contour, 2), 'mm (+-)', round(std_mae_contour, 2), 'mm')
print('\t HC_mae in mm(ellipse)w/o pp:', round(mean_mae_ellipse, 2), 'mm (+-)', round(std_mae_ellipse, 2), 'mm')
print('\t HC_mae in mm(points) w pp:', round(mean_mae_points_pp, 2), 'mm (+-)', round(std_mae_points_pp, 2), 'mm')
print('\t HC_mae in mm(contour) w pp:', round(mean_mae_contour_pp, 2), 'mm (+-)', round(std_mae_contour_pp, 2), 'mm')
print('\t HC_mae in mm(ellipse) w pp:', round(mean_mae_ellipse_pp, 2), 'mm (+-)', round(std_mae_ellipse_pp, 2), 'mm')
print('\t HC_mae in px(points) w/o pp:', round(mean_mae_px_points, 2), 'pixels (+-)', round(std_mae_px_points, 2))
print('\t HC_mae in px(contour)w/o pp:', round(mean_mae_px_contour, 2), 'pixels (+-)', round(std_mae_px_contour, 2))
print('\t HC_mae in px(ellipse)w/o pp:', round(mean_mae_px_ellipse, 2), 'pixels (+-)', round(std_mae_px_ellipse, 2))
print('\t HC_mae in px(points) w pp:', round(mean_mae_px_points_pp, 2), 'pixels (+-)', round(std_mae_px_points_pp, 2))
print('\t HC_mae in px(contour) w pp:', round(mean_mae_px_contour_pp, 2), 'pixels (+-)', round(std_mae_px_contour_pp, 2))
print('\t HC_mae in px(ellipse) w pp:', round(mean_mae_px_ellipse_pp, 2), 'pixels (+-)', round(std_mae_px_ellipse_pp, 2))
print('\t pmae(points) w/o pp:', round(mean_pmae_points, 2), '% (+-)', round(std_pmae_points, 2))
print('\t pmae(contour)w/o pp:', round(mean_pmae_contour, 2), '% (+-)', round(std_pmae_contour, 2))
print('\t pmae(ellipse)w/o pp:', round(mean_pmae_ellipse, 2), '% (+-)', round(std_pmae_ellipse, 2))
print('\t pmae(points) w pp:', round(mean_pmae_points_pp, 2), '% (+-)', round(std_pmae_points_pp, 2))
print('\t pmae(contour) w pp:', round(mean_pmae_contour_pp, 2), '% (+-)', round(std_pmae_contour_pp, 2))
print('\t pmae(ellipse) w pp:', round(mean_pmae_ellipse_pp, 2), '% (+-)', round(std_pmae_ellipse_pp, 2))
return mean_dice, std_dice, mean_hd, std_hd, mean_assd, std_assd, \
mean_mae_points, mean_mae_contour, mean_mae_ellipse, \
std_mae_points, std_mae_contour, std_mae_ellipse, \
mean_mae_px_points, mean_mae_px_contour, mean_mae_px_ellipse, \
std_mae_px_points, std_mae_px_contour, std_mae_px_ellipse, \
mean_pmae_points, mean_pmae_contour, mean_pmae_ellipse, \
std_pmae_points, std_pmae_contour, std_pmae_ellipse,\
mean_mae_points_pp, mean_mae_contour_pp, mean_mae_ellipse_pp, \
std_mae_points_pp, std_mae_contour_pp, std_mae_ellipse_pp, \
mean_mae_px_points_pp, mean_mae_px_contour_pp, mean_mae_px_ellipse_pp, \
std_mae_px_points_pp, std_mae_px_contour_pp, std_mae_px_ellipse_pp, \
mean_pmae_points_pp, mean_pmae_contour_pp, mean_pmae_ellipse_pp, \
std_pmae_points_pp, std_pmae_contour_pp, std_pmae_ellipse_pp
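# Illustrative unit conversion used above (not part of the pipeline): with a
# pixelsize of 0.2 mm/px, a predicted HC of 350 px against a label of 345 px gives
# an absolute error of 5 px, i.e. 5 * 0.2 = 1.0 mm.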
def fold_cross_valid(root,x_aug, y_aug, x_ori, y_ori, label_hc, ps_ori, inputshape2D, loss, save_path,
nb_epoch=50, batch_size=8, learning_rate=1e-3, best_filename='best.h5'):
test_dice = []
test_hd = []
test_assd = []
test_mae_HC_px_point = []
test_mae_HC_px_contour = []
test_mae_HC_px_ellipse = []
test_mae_HC_px_point_pp = []
test_mae_HC_px_contour_pp = []
test_mae_HC_px_ellipse_pp = []
test_mae_HC_mm_point = []
test_mae_HC_mm_contour = []
test_mae_HC_mm_ellipse = []
test_mae_HC_mm_point_pp = []
test_mae_HC_mm_contour_pp = []
test_mae_HC_mm_ellipse_pp = []
test_pmae_HC_point = []
test_pmae_HC_contour = []
test_pmae_HC_ellipse = []
test_pmae_HC_point_pp = []
test_pmae_HC_contour_pp = []
test_pmae_HC_ellipse_pp = []
early_stopping = EarlyStopping(monitor='val_loss', patience=90, verbose=1)
model_checkpoint = ModelCheckpoint(best_filename, verbose=0, save_best_only=True)
log_path = [
'logs/log-1.csv',
'logs/log-2.csv',
'logs/log-3.csv',
'logs/log-4.csv',
'logs/log-5.csv'
]
for i in range(0, 5):
idx_train = np.load('cv_array/train' + str(i) + '.npy', allow_pickle=True)
idx_test = np.load('cv_array/test' + str(i) + '.npy', allow_pickle=True)
idx_valid = np.load('cv_array/valid' + str(i) + '.npy', allow_pickle=True)
# idx_train2 = [i + 999 for i in idx_train]
# x_train = np.concatenate((x_ori[idx_train], x_aug[idx_train]), axis=0) # first 600 data augmentation
# y_train = np.concatenate((y_ori[idx_train], y_aug[idx_train]), axis=0)
#
# x_train = np.concatenate((x_train, x_aug[idx_train2]), axis=0) # second 600 data augmentation
# y_train = np.concatenate((y_train, y_aug[idx_train2]), axis=0)
x_train = x_ori[idx_train]
y_train = y_ori[idx_train]
x_valid = x_ori[idx_valid]
y_valid = y_ori[idx_valid]
x_test = x_ori[idx_test]
y_test = y_ori[idx_test]
ps_test = ps_ori[idx_test]
hc_test = label_hc[idx_test]
metric = sm.metrics.iou_score
#model = unet(inputshape2D)
model = sm.Unet(backbone,input_shape=inputshape2D,encoder_weights = None)
#model = smx.Xnet(backbone,input_shape=inputshape2D)
#model = sm.FPN(backbone,input_shape=inputshape2D) #
#model = sm.Linknet(backbone,input_shape=inputshape2D)
#model = sm.PSPNet(backbone,input_shape=inputshape2D) # require 480,480!!!
#model = doubleUNet(inputshape2D) # require two keras frames!!!
#model = sm.PSPNet(backbone, encoder_weights = 'imagenet', classes = 1,
#encoder_freeze=False, activation='sigmoid', downsample_factor=16, input_shape=(480,480,3),
#psp_conv_filters=1024, psp_pooling_type='avg')
#model.summary()
model.compile(loss=loss, optimizer=Adam(lr=learning_rate), metrics=[metric])
model.fit(x_train, y_train, validation_data=(x_valid, y_valid),
callbacks=[early_stopping, model_checkpoint, CSVLogger(log_path[i], append=True)],
batch_size=batch_size, epochs=nb_epoch, shuffle=True, verbose=1)
model.load_weights(best_filename)
# -------------- Test Predictions ----------------------------
mean_dice, std_dice, mean_hd, std_hd, mean_assd, std_assd, \
mean_mae_points, mean_mae_contour, mean_mae_ellipse, \
std_mae_points, std_mae_contour, std_mae_ellipse, \
mean_mae_px_points, mean_mae_px_contour, mean_mae_px_ellipse, \
std_mae_px_points, std_mae_px_contour, std_mae_px_ellipse, \
mean_pmae_points, mean_pmae_contour, mean_pmae_ellipse, \
std_pmae_points, std_pmae_contour, std_pmae_ellipse, \
mean_mae_points_pp, mean_mae_contour_pp, mean_mae_ellipse_pp, \
std_mae_points_pp, std_mae_contour_pp, std_mae_ellipse_pp, \
mean_mae_px_points_pp, mean_mae_px_contour_pp, mean_mae_px_ellipse_pp, \
std_mae_px_points_pp, std_mae_px_contour_pp, std_mae_px_ellipse_pp, \
mean_pmae_points_pp, mean_pmae_contour_pp, mean_pmae_ellipse_pp, \
std_pmae_points_pp, std_pmae_contour_pp, std_pmae_ellipse_pp = \
predictions(x_test, y_test, hc_test, ps_test, model, save_path[i])
test_dice.append([mean_dice, std_dice])
test_hd.append([mean_hd, std_hd])
test_assd.append([mean_assd, std_assd])
test_mae_HC_px_point.append([mean_mae_px_points, std_mae_px_points])
test_mae_HC_px_contour.append([mean_mae_px_contour, std_mae_px_contour])
test_mae_HC_px_ellipse.append([mean_mae_px_ellipse, std_mae_px_ellipse])
test_mae_HC_px_point_pp.append([mean_mae_px_points_pp, std_mae_px_points_pp])
test_mae_HC_px_contour_pp.append([mean_mae_px_contour_pp, std_mae_px_contour_pp])
test_mae_HC_px_ellipse_pp.append([mean_mae_px_ellipse_pp, std_mae_px_ellipse_pp])
test_mae_HC_mm_point.append([mean_mae_points, std_mae_points])
test_mae_HC_mm_contour.append([mean_mae_contour, std_mae_contour])
test_mae_HC_mm_ellipse.append([mean_mae_ellipse, std_mae_ellipse])
test_mae_HC_mm_point_pp.append([mean_mae_points_pp, std_mae_points_pp])
test_mae_HC_mm_contour_pp.append([mean_mae_contour_pp, std_mae_contour_pp])
test_mae_HC_mm_ellipse_pp.append([mean_mae_ellipse_pp, std_mae_ellipse_pp])
test_pmae_HC_point.append([mean_pmae_points, std_pmae_points])
test_pmae_HC_contour.append([mean_pmae_contour, std_pmae_contour])
test_pmae_HC_ellipse.append([mean_pmae_ellipse, std_pmae_ellipse])
test_pmae_HC_point_pp.append([mean_pmae_points_pp, std_pmae_points_pp])
test_pmae_HC_contour_pp.append([mean_pmae_contour_pp, std_pmae_contour_pp])
test_pmae_HC_ellipse_pp.append([mean_pmae_ellipse_pp, std_pmae_ellipse_pp])
# end of for loop
CV_mean_dice, CV_std_dice = np.mean(test_dice, axis=0)
CV_mean_hd, CV_std_hd = np.mean(test_hd, axis=0)
CV_mean_assd, CV_std_assd = np.mean(test_assd, axis=0)
CV_mean_mae_HC_px_point, CV_std_mae_HC_px_point = np.mean(test_mae_HC_px_point, axis=0)
CV_mean_mae_HC_px_contour, CV_std_mae_HC_px_contour = np.mean(test_mae_HC_px_contour, axis=0)
CV_mean_mae_HC_px_ellipse, CV_std_mae_HC_px_ellipse = np.mean(test_mae_HC_px_ellipse, axis=0)
CV_mean_mae_HC_px_point_pp, CV_std_mae_HC_px_point_pp = np.mean(test_mae_HC_px_point_pp, axis=0)
CV_mean_mae_HC_px_contour_pp, CV_std_mae_HC_px_contour_pp = np.mean(test_mae_HC_px_contour_pp, axis=0)
CV_mean_mae_HC_px_ellipse_pp, CV_std_mae_HC_px_ellipse_pp = np.mean(test_mae_HC_px_ellipse_pp, axis=0)
CV_mean_mae_HC_mm_point, CV_std_mae_HC_mm_point = np.mean(test_mae_HC_mm_point, axis=0)
CV_mean_mae_HC_mm_contour, CV_std_mae_HC_mm_contour = np.mean(test_mae_HC_mm_contour, axis=0)
CV_mean_mae_HC_mm_ellipse, CV_std_mae_HC_mm_ellipse = np.mean(test_mae_HC_mm_ellipse, axis=0)
CV_mean_mae_HC_mm_point_pp, CV_std_mae_HC_mm_point_pp = np.mean(test_mae_HC_mm_point_pp, axis=0)
CV_mean_mae_HC_mm_contour_pp, CV_std_mae_HC_mm_contour_pp = np.mean(test_mae_HC_mm_contour_pp, axis=0)
CV_mean_mae_HC_mm_ellipse_pp, CV_std_mae_HC_mm_ellipse_pp = np.mean(test_mae_HC_mm_ellipse_pp, axis=0)
CV_pmae_mean_HC_point, CV_pmae_std_HC_point = np.mean(test_pmae_HC_point, axis=0)
CV_pmae_mean_HC_contour, CV_pmae_std_HC_contour = np.mean(test_pmae_HC_contour, axis=0)
CV_pmae_mean_HC_ellipse, CV_pmae_std_HC_ellipse = np.mean(test_pmae_HC_ellipse, axis=0)
CV_pmae_mean_HC_point_pp, CV_pmae_std_HC_point_pp = np.mean(test_pmae_HC_point_pp, axis=0)
CV_pmae_mean_HC_contour_pp, CV_pmae_std_HC_contour_pp = np.mean(test_pmae_HC_contour_pp, axis=0)
CV_pmae_mean_HC_ellipse_pp, CV_pmae_std_HC_ellipse_pp = np.mean(test_pmae_HC_ellipse_pp, axis=0)
print('-' * 60)
print('5CV Mean dice score :', round(CV_mean_dice, 3), '(+-)', round(CV_std_dice, 3))
print('5CV Mean hd score :', round(CV_mean_hd, 3), 'mm (+-)', round(CV_std_hd, 3), 'mm')
print('5CV Mean assd :', round(CV_mean_assd, 3), 'mm (+-)', round(CV_std_assd, 3), 'mm')
print('-' * 60)
print('5CV mae HC(px) in points w/o pp:', round(CV_mean_mae_HC_px_point, 3), 'px (+-)', round(CV_std_mae_HC_px_point, 3), 'px')
print('5CV mae HC(px) in contour w/o pp:', round(CV_mean_mae_HC_px_contour, 3), 'px (+-)', round(CV_std_mae_HC_px_contour, 3), 'px')
print('5CV mae HC(px) in ellipse w/o pp:', round(CV_mean_mae_HC_px_ellipse, 3), 'px (+-)', round(CV_std_mae_HC_px_ellipse, 3), 'px')
print('5CV mae HC(px) in points w pp:', round(CV_mean_mae_HC_px_point_pp, 3), 'px (+-)',
round(CV_std_mae_HC_px_point_pp, 3), 'px')
print('5CV mae HC(px) in contour w pp:', round(CV_mean_mae_HC_px_contour_pp, 3), 'px (+-)',
round(CV_std_mae_HC_px_contour_pp, 3), 'px')
print('5CV mae HC(px) in ellipse w pp:', round(CV_mean_mae_HC_px_ellipse_pp, 3), 'px (+-)',
round(CV_std_mae_HC_px_ellipse_pp, 3), 'px')
print('5CV mae HC(mm) in points w/o pp:', round(CV_mean_mae_HC_mm_point, 3), 'mm (+-)', round(CV_std_mae_HC_mm_point, 3), 'mm')
print('5CV mae HC(mm) in contour w/o pp:', round(CV_mean_mae_HC_mm_contour, 3), 'mm (+-)', round(CV_std_mae_HC_mm_contour, 3), 'mm')
print('5CV mae HC(mm) in ellipse w/o pp:', round(CV_mean_mae_HC_mm_ellipse, 3), 'mm (+-)', round(CV_std_mae_HC_mm_ellipse, 3), 'mm')
print('5CV mae HC(mm) in points w pp:', round(CV_mean_mae_HC_mm_point_pp, 3), 'mm (+-)',
round(CV_std_mae_HC_mm_point_pp, 3), 'mm')
print('5CV mae HC(mm) in contour w pp:', round(CV_mean_mae_HC_mm_contour_pp, 3), 'mm (+-)',
round(CV_std_mae_HC_mm_contour_pp, 3), 'mm')
print('5CV mae HC(mm) in ellipse w pp:', round(CV_mean_mae_HC_mm_ellipse_pp, 3), 'mm (+-)',
round(CV_std_mae_HC_mm_ellipse_pp, 3), 'mm')
print('5CV pmae HC in points w/o pp:', round(CV_pmae_mean_HC_point, 3), '% (+-)', round(CV_pmae_std_HC_point, 3))
print('5CV pmae HC in contour w/o pp:', round(CV_pmae_mean_HC_contour, 3), '% (+-)', round(CV_pmae_std_HC_contour, 3))
print('5CV pmae HC in ellipse w/o pp:', round(CV_pmae_mean_HC_ellipse, 3), '% (+-)', round(CV_pmae_std_HC_ellipse, 3))
print('5CV pmae HC in points w pp:', round(CV_pmae_mean_HC_point_pp, 3), '% (+-)', round(CV_pmae_std_HC_point_pp, 3))
print('5CV pmae HC in contour w pp:', round(CV_pmae_mean_HC_contour_pp, 3), '% (+-)', round(CV_pmae_std_HC_contour_pp, 3))
print('5CV pmae HC in ellipse w pp:', round(CV_pmae_mean_HC_ellipse_pp, 3), '% (+-)', round(CV_pmae_std_HC_ellipse_pp, 3))
def Dice_loss(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return 1 - (2. * intersection) / (K.sum(y_true_f) + K.sum(y_pred_f))
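# Dice loss above: 1 - 2*sum(P*G) / (sum(P) + sum(G)) on the flattened soft masks (no smoothing term).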
def Kappa_loss(y_true, y_pred, N=224 * 224):
Gi = K.flatten(y_true)
Pi = K.flatten(y_pred)
numerator = 2 * K.sum(Pi * Gi) - K.sum(Pi) * K.sum(Gi) / N
denominator = K.sum(Pi * Pi) + K.sum(Gi * Gi) - 2 * K.sum(Pi * Gi) / N
Kappa = 1 - numerator / denominator
return Kappa
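# Minimal usage sketch (illustrative; assumes the arrays are prepared elsewhere and
# save_paths is a list of five output directories, one per fold):
# fold_cross_valid(root, x_aug, y_aug, x_ori, y_ori, label_hc, ps_ori,
#                  inputshape2D=(224, 224, 3), loss=Dice_loss, save_path=save_paths,
#                  nb_epoch=50, batch_size=8, learning_rate=1e-3)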
```
#### File: HC-reg-seg/code/predict-reg.py
```python
import time
from keras.models import load_model
from data_reg import *
from model_reg import *
from memory_profiler import profile
H = 224
W = 224
slice = 3
test_path = 'HCdata/test/image/'
test_gt = 'HCdata/test/HC_ori_test.csv'
X_aug, Y_aug, ps_aug, \
X_ori, Y_ori, ps_ori = load_data(test_path, test_path, test_gt, test_gt, H, W, slice)
absdff_mm = []
absdff_px = []
abspmae = []
inputshape = (W, H, slice)
y_test = Y_ori / ps_ori # length in pixels
#HC_max = np.max(y_test)
HC_max = 1786.50024241547 # the largest value in training set.
y_test = y_test / HC_max # Normalized HC in pixels
custom_objects = {'huber_loss': huber_loss}
# Test stage
model = load_model('regmodels/best_efnMAE.h5', custom_objects)
print('predicting')
@profile #Decorator
def pred():
time_start=time.time()
preds = model.predict(X_ori,batch_size=16) # The output value is between (0,1) due to normalization.
preds = preds.flatten()
time_end=time.time()
print('time cost',time_end-time_start,'s')
def predict():
H = 224
W = 224
slice = 3
test_path = 'HCdata/test/image/'
test_gt = 'HCdata/test/HC_ori_test.csv'
X_aug, Y_aug, ps_aug, \
X_ori, Y_ori, ps_ori = load_data(test_path, test_path, test_gt, test_gt, H, W, slice)
absdff_mm = []
absdff_px = []
abspmae = []
inputshape = (W, H, slice)
y_test = Y_ori / ps_ori # length in pixels
#HC_max = np.max(y_test)
HC_max = 1786.50024241547 # the largest value in training set.
y_test = y_test / HC_max # Normalized HC in pixels
custom_objects = {'huber_loss': huber_loss}
# Test stage
model = load_model('regmodels/best_efnMAE.h5', custom_objects)
print('predicting')
time_start=time.time()
preds = model.predict(X_ori,batch_size=16) # The output value is between (0,1) due to normalization.
preds = preds.flatten()
time_end=time.time()
print('time cost',time_end-time_start,'s')
#predict_results = preds * HC_max * ps_ori
#print(predict_results.shape)
#print("The predicted HC in mm:")
#for i in (predict_results):
# print(i)
if __name__ == "__main__":
pred()
```
#### File: HC-reg-seg/code/predict-seg.py
```python
import cv2 as cv
import numpy as np
import math
import os
from keras.models import load_model
#from tensorflow.keras.models import load_model # only for doubleunet
from data_seg import *
#from model_seg import *
from doubleu_net import *
import time
def Ellipse_Circumference(a,b):
h = ((a / 2 - b / 2) ** 2) / ((a / 2 + b / 2) ** 2)
len_ellipse = np.pi * (a / 2 + b / 2) * (1 + 3 * h / (10 + np.sqrt(4 - 3 * h)))
return len_ellipse
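# Sanity check (illustrative): for a circle both axes are equal, h becomes 0 and the
# Ramanujan approximation reduces to pi * d, e.g. Ellipse_Circumference(100, 100) ~= 314.16.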
def HC_calculate(img): # with post processing
print(img)
image = cv.imread(img)
contour = cv.Canny(image, 80, 160)
#cv.imshow("canny_output", contour)
#cv.waitKey(0)
contours, hierarchy = cv.findContours(contour, mode=cv.RETR_EXTERNAL, method=cv.CHAIN_APPROX_NONE)
max_contour = [1]
for i in range(len(contours)):
if len(contours[i])>len(max_contour):
max_contour = contours[i]
# draw detected contour # -1:all the contours are drawn; Color; Thickness of lines
cv.drawContours(image,max_contour, -1, (0, 255, 0), 4)
# fitting ellipse, return center points, axis
(cx, cy), (a, b), angle = cv.fitEllipse(max_contour)
# generate ellipse # 0:start_angle; 360:end_angle; color; -1 filled ellipse; thickness, linetype
    newimg = np.zeros((540, 800, 3), np.uint8)  # create an empty (black) canvas image
cv.ellipse(newimg, (np.int32(cx), np.int32(cy)), (np.int32(a / 2), np.int32(b / 2)),
angle, 0, 360, (0, 0, 255), 0, 3, 0)
save_path = 'fitted_results/'+img[-15:]
len_ellipse = Ellipse_Circumference(a, b)
cv.imwrite(save_path,newimg)
cv.imshow("canny_output", newimg)
#cv.waitKey(0)
#print(len_ellipse)
#print(b)
def HC_calculate_multi(img): # without post processing
image = cv.imread(img)
contour = cv.Canny(image, 80, 160)
contours, hierarchy = cv.findContours(contour, mode=cv.RETR_EXTERNAL, method=cv.CHAIN_APPROX_NONE)
points_contours = []
lenth_ellipse = []
peri_contours = []
for i in range(len(contours)):
points_contours.append(len(contours[i]))
cv.drawContours(image,contours[i], -1, (0, 255, 0), 4)
(cx, cy), (a, b), angle = cv.fitEllipse(contours[i])
cv.ellipse(image, (np.int32(cx), np.int32(cy)), (np.int32(a / 2), np.int32(b / 2)),
angle, 0, 360, (0, 0, 255), -1, 4, 0)
len_ellipse = Ellipse_Circumference(a,b)
peri_contours.append(cv.arcLength(contours[i], True))
lenth_ellipse.append(len_ellipse)
print(sum(lenth_ellipse))
#HC_calculate_multi()
# batch compute HC, compute time of post processing
path = 'HCdata/test/cv5-unet-original/'
img_list = os.listdir(path)
img_list.sort(key=lambda x: int(x[:-12]))  # sort filenames numerically, ignoring the non-digit suffix
print(img_list)
time_start = time.time()
for i in range(len(img_list)):
img_name = path + img_list[i]
HC_calculate_multi(img_name)# without post processing
#HC_calculate(img_name) # with post processing
time_end = time.time()
print('time cost', time_end - time_start, 's')
from memory_profiler import profile
# H = 224 # 480 for psp net
# W = 224
# test_path = 'HCdata/test/image/'
# test_gt = 'HCdata/test/HC_ori_test.csv'
# X_aug, Y_aug, label_hc, \
# X_ori, Y_ori, ps_ori = load_data(test_path, test_path, test_path, test_path, test_gt, test_gt, H, W)
#
# custom_objects = {'dice_loss': sm.losses.dice_loss,
# 'iou_score': sm.metrics.iou_score
# }
# save_path = 'HCdata/Challenge/test_result_version2/'
# # Test stage
# model = load_model('segmodels/best_unet.h5', custom_objects)
# print('predicting')
# @profile #Decorator
# def pred():
# time_start = time.time()
# preds = model.predict(X_ori, batch_size=16) # The output value is between (0,1) due to normalization.
# time_end = time.time()
# print('time cost', time_end - time_start, 's')
def predict():
H = 224 # 480 for psp net
W = 224
test_path = 'HCdata/test/image/'
test_gt = 'HCdata/test/HC_ori_test.csv'
X_aug, Y_aug, label_hc, \
X_ori, Y_ori, ps_ori = load_data(test_path, test_path, test_path, test_path, test_gt, test_gt, H, W)
custom_objects = {'dice_loss': sm.losses.dice_loss,
'iou_score': sm.metrics.iou_score
}
save_path = 'HCdata/test/cv5-unet-resnet-none/'
# Test stage
model = load_model('segmodels/u-net-resnet-none.h5', custom_objects)
print('predicting')
time_start = time.time()
preds = model.predict(X_ori, batch_size=16) # The output value is between (0,1) due to normalization.
time_end = time.time()
print('time cost', time_end - time_start, 's')
save_data(save_path, preds, Y_ori)
#predict()
#if __name__ == "__main__":
# pred()
``` |
{
"source": "jizhang02/XAI-reg",
"score": 2
} |
#### File: XAI-reg/code/perturbation_analysis.py
```python
import warnings
warnings.simplefilter('ignore')
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
import keras.backend as K
import cv2
import keras
import keras.models
import pandas as pd
from PIL import Image,ImageChops
import innvestigate
import innvestigate.utils as iutils
from innvestigate.tools import Perturbation, PerturbationAnalysis
from keras.models import load_model
from model_rebuilt import *
from utils import plot_image_grid
def huber_loss(y_true, y_pred, clip_delta=0.5):
error = y_true - y_pred
cond = tf.keras.backend.abs(error) < clip_delta
squared_loss = 0.5 * tf.keras.backend.square(error)
linear_loss = clip_delta * (tf.keras.backend.abs(error) - 0.5 * clip_delta)
return tf.where(cond, squared_loss, linear_loss)
def huber_loss_mean(y_true, y_pred, clip_delta=0.005):
return tf.keras.backend.mean(huber_loss(y_true, y_pred, clip_delta))
# percent_mean_absolute_error
def pmae(y_true, y_pred):
if not K.is_tensor(y_pred):
y_pred = K.constant(y_pred)
y_true = K.cast(y_true, y_pred.dtype)
diff = K.mean(K.abs((y_pred - y_true))/K.mean(K.clip(K.abs(y_true),K.epsilon(),None)))
return 100. * K.mean(diff)
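# Note: the huber_loss below redefines (and shadows) the elementwise version above;
# custom_objects therefore resolves 'huber_loss' to this mean-reduced variant with clip_delta=1.0.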
def huber_loss(y_true, y_pred, clip_delta=1.0):
error = y_true - y_pred
cond = tf.keras.backend.abs(error) < clip_delta
squared_loss = 0.5 * tf.keras.backend.square(error)
linear_loss = clip_delta * (tf.keras.backend.abs(error) - 0.5 * clip_delta)
return tf.keras.backend.mean(tf.where(cond, squared_loss, linear_loss))
custom_objects={
'huber_loss':huber_loss,
'pmae':pmae
}
from tqdm import tqdm # progress bar
import glob
def dataloader(images_path, type=3):
imagelist = sorted(glob.glob(os.path.join(images_path, '*.png')))
if(type==3):X = np.zeros((int(len(imagelist)), 128,128,3))# for reading rgb images
else: X = np.zeros((int(len(imagelist)), 128,128,1))
for i in tqdm(range(len(imagelist))):
if(type==3):img = plt.imread(imagelist[i]) # for reading rgb images
else: img = Image.open(imagelist[i]).convert('L')
img = np.array(img)
# normalize
img_mean = np.mean(img)
img_std = np.std(img)
img_norm = (1/img_std)*(img-img_mean)
if(type==3):X[i] = img_norm # for reading rgb images
else: X[i] = img_norm[:, :, np.newaxis]
return X
path = "X_test/"
x_test = dataloader(path,type=3)
y_test_csv = pd.read_csv('X_test/test_annotationcv01_128.csv')
y_test_px = y_test_csv['head circumference (mm)']/y_test_csv['pixel size(mm)']
HC_max = np.max(y_test_px)
y_test = y_test_px/HC_max# normalization
generator = iutils.BatchSequence([x_test, y_test], batch_size=16)
# Load a model
input_size = (128,128,3)
#model = resnet50(input_shape=input_size)#vgg16(input_shape=input_size)
#model.load_weights('models/model1.h5')
# note: resnet50HL has problems with deconvnet; in that case add it twice, e.g. selected_methods_indices = [0,1,2,2,3,4,5,6,7]
model = load_model('models/resnet50MSE.h5',custom_objects)#vgg16MAE.h5 resnet50MAE
perturbation_function = "gaussian"  # Gaussian(0, 0.3); alternatives: "zeros", "mean", "invert"
region_shape = (32, 32)  # fits the input image size (128, 128)
steps = 15 # Number of perturbation steps.
regions_per_step = 1 # Perturbate 1 region per step
# Scale to [0, 1] range for plotting.
input_range = [-1, 1]
noise_scale = (input_range[1]-input_range[0]) * 0.1
ri = input_range[0] # reference input
# Configure analysis methods and properties
methods = [
# NAME OPT.PARAMS TITLE
# Show input, Returns the input as analysis.
#("input", {}, "Input"),
# Returns the Gaussian noise as analysis.
# ("random", {}, "Random"),
# Function
("gradient", {"postprocess": "abs"}, "Gradient"),
("smoothgrad", {"noise_scale": noise_scale, "postprocess": "square"}, "SmoothGrad"),
# Signal
("deconvnet", {}, "Deconvnet"),
("guided_backprop", {}, "Guided Backprop",),
#("pattern.net", {"pattern_type": "relu"}, "PatternNet"),
# Interaction
# ("pattern.attribution", {"pattern_type": "relu"}, "PatternAttribution"),
("deep_taylor.bounded", {"low": input_range[0], "high": input_range[1]}, "DeepTaylor"),
("input_t_gradient", {}, "Input * Gradient"),
("integrated_gradients", {"reference_inputs": ri}, "Integrated Gradients"),
# ("deep_lift.wrapper", {"reference_inputs": ri}, "DeepLIFT Wrapper - Rescale"),
#("deep_lift.wrapper", {"reference_inputs": ri, "nonlinear_mode": "reveal_cancel"}, "DeepLIFT Wrapper - RevealCancel"),
("lrp.z", {}, "LRP-Z"),
# ("lrp.epsilon", {"epsilon": 1}, "LRP-Epsilon"),
]
# Select methods of your choice
selected_methods_indices = [0,1,2,3,4,5,6,7]
#selected_methods_indices = [13,10,9,8,5,4,3,2]
#selected_methods_indices = [0]
selected_methods = [methods[i] for i in selected_methods_indices]
print('Using method(s) "{}".'.format([method[0] for method in selected_methods]))
# instantiate the analyzer objects
analyzers = [innvestigate.create_analyzer(method[0], model, **method[1]) for method in selected_methods]
scores_selected_methods = dict()
perturbation_analyses = list()
for method, analyzer in zip(selected_methods, analyzers):
print("Method: {}".format(method[0]))
try:
# Set up the perturbation analysis
# This is the method with which the pixels in the most important regions are perturbated
        # Gaussian(mean=0.0, standard deviation=0.3)
perturbation = Perturbation(perturbation_function, region_shape=region_shape, in_place=False)
# Comment out to invert the perturbation order
# perturbation.aggregation_function = lambda x, axis: -np.mean(x, axis=axis)
perturbation_analysis = PerturbationAnalysis(analyzer, model, generator, perturbation, recompute_analysis=False,
steps=steps, regions_per_step=regions_per_step, verbose=True)
        test_loss = perturbation_analysis.compute_perturbation_analysis()  # per-step evaluation results
        #print(len(test_loss)) # number of perturbation steps, 16 (incl. the unperturbed baseline)
print(test_loss)
# Store the scores and perturbation analyses for later use
        scores = np.array(test_loss) * HC_max  # rescale from normalized units back to pixels (multiply by the max of y_test)
Error = scores[:,1]
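        # AOPC (area over the perturbation curve): error at step 0 minus the mean error over all steps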
AOPC = Error[0]-np.mean(Error)
print("ERROR:",Error)
print("AOPC:",AOPC)
scores_selected_methods[method[0]] = np.array(scores)
perturbation_analyses.append(perturbation_analysis)
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print('\n' + message)
continue
#Plot the perturbation curves and compute area over the perturbation curve (AOPC) with baseline
# fig = plt.figure(figsize=(15, 5))
# aopc = list() # Area over the perturbation curve
# baseline_accuracy = scores_selected_methods["random"][:, 1]
# for method_name in scores_selected_methods.keys():
# scores = scores_selected_methods[method_name] # the shape is (16,3),(number of perturbations,, number of channels)
# accuracy = scores[:, 1]
# aopc.append(accuracy[0] - np.mean(accuracy))# AOPC of each analyser
#
# label = "{} (AOPC: {:.3f})".format(method_name, aopc[-1])
# plt.plot(accuracy - baseline_accuracy, label=label)
#Plot the perturbation curves and compute area over the perturbation curve (AOPC) without baseline
fig = plt.figure(figsize=(7.5, 5))
aopc = list() # Area over the perturbation curve
for method_name in scores_selected_methods.keys():
    scores = scores_selected_methods[method_name]  # shape (16, 3): (perturbation steps, returned values)
    accuracy = scores[:, 1]  # the second of the three returned columns
aopc.append(accuracy[0] - np.mean(accuracy))# AOPC of each analyser
label = "{} (AOPC: {:.3f})".format(method_name, aopc[-1])
plt.plot(accuracy, label=label)
plt.xlabel("Perturbation steps")
plt.ylabel("Predicted ERROR of analyzers (pixels)")
#plt.xticks(np.array(range(scores.shape[0])))
plt.xticks(np.array(range(16)))
plt.legend()
plt.savefig('analysis_results/' + 'aopc' + ".pdf", bbox_inches='tight', pad_inches=0.0)
plt.show()
# # Now plot the perturbation step by step.
# for perturbation_analysis, method in zip(perturbation_analyses, selected_methods):
# samples = list()
#
# # Reset the perturbation_analysis
# perturbation_analysis.perturbation.num_perturbed_regions = 1
#
# sample = np.copy(x_test[0:1])# (1, 128, 128, 3)
# analysis = perturbation_analysis.analyzer.analyze(sample)
# a = analysis
# a = a.sum(axis=np.argmax(np.asarray(a.shape) == 3))
# a /= np.max(np.abs(a))
# b = np.squeeze(a)
# plt.imshow(b, cmap="seismic", clim=(-1, 1))
# plt.axis('off')
# plt.savefig('analysis_results/' + 'analysis_' + str(method[0]) + ".png", bbox_inches='tight', pad_inches=0.0)
# #aggregated_regions = perturbation_analysis.perturbation.reduce_function(np.moveaxis(analysis, 3, 1), axis=1,keepdims=True)
# #aggregated_regions = perturbation_analysis.perturbation.aggregate_regions(aggregated_regions)
# #ranks = perturbation_analysis.perturbation.compute_region_ordering(aggregated_regions)
# #print(np.shape(ranks)) # (1, 1, 4, 4)
# #print(np.shape(analysis))# (1, 128, 128, 3)
# # Perturbate for some steps
# for i in range(steps + 1):
# # Plot the original image and analysis without any perturbation
# if i > 0:
# perturbation_analysis.perturbation.num_perturbed_regions += perturbation_analysis.regions_per_step
# # Perturbate
# sample = perturbation_analysis.compute_on_batch(sample, analysis)
#
# a = sample
# #a = a.sum(axis=np.argmax(np.asarray(a.shape) == 3))
# a /= np.max(np.abs(a))
# b = np.squeeze(a)
# #plt.imshow(b, cmap="binary", clim=(-1, 1)) # seismic binary
# plt.imshow(b, cmap="binary", clim=(-1, 1)) # seismic binary
# plt.axis('off')
# plt.savefig('analysis_results/' + 'step_' + str(i) + str(method[0]) + ".png",bbox_inches='tight', pad_inches=0.0)
``` |
{
"source": "jizhilong/docker-make",
"score": 2
} |
#### File: docker-make/tests/test_builds.py
```python
import os
import shutil
import unittest
import uuid
from unittest import mock
from unittest.mock import MagicMock
from dmake import utils, build, template_args
def yaml_file(filename):
return os.path.join(os.path.dirname(__file__), filename)
class BuildOrderTestCase(unittest.TestCase):
def test_only_one_build(self):
order, builds = utils.get_sorted_build_dicts_from_yaml(yaml_file('simple.yaml'))
self.assertEqual(1, len(builds))
self.assertEqual(1, len(order))
self.assertEqual('base', order[0])
def test_orders_with_depends_on(self):
builds = {'base.test': {'depends_on': ['base']}, 'base': {}}
# dict.keys is guaranteed to be ordered in Python 3.7+
self.assertEqual(['base.test', 'base'], list(builds.keys()))
order = utils.sort_builds_dict(builds)
self.assertEqual(['base', 'base.test'], order)
def test_orders_with_rewrite_from(self):
builds = {'base.test': {'rewrite_from': 'base'}, 'base': {}}
self.assertEqual(['base.test', 'base'], list(builds.keys()))
order = utils.sort_builds_dict(builds)
self.assertEqual(['base', 'base.test'], order)
class BuildTestCase(unittest.TestCase):
def setUp(self) -> None:
self.context = uuid.uuid4().hex
os.mkdir(self.context)
def tearDown(self) -> None:
shutil.rmtree(self.context)
# cleanup side effects on global state
template_args._tag_template_args = None
template_args._label_template_args = None
@mock.patch('dmake.template_args.label_template_args', return_value=None)
def test_depends_on_implicitly_determined_by_rewrite_from(self, mocked):
b = build.Build('base.test', '/', 'Dockerfile', rewrite_from='base')
self.assertEqual(['base'], b.depends_on)
@mock.patch('dmake.build.Build._do_build', return_value='1234')
def test_build_dockerfile_path(self, mocked_do_build: MagicMock):
with open(os.path.join(self.context, 'Dockerfile'), 'w') as f:
f.write('FROM python:3.8.5')
b = build.Build('test', context=self.context, dockerfile='Dockerfile')
b.build({})
self.assertEqual('1234', b.final_image)
build_params = mocked_do_build.call_args.args[0]
self.assertIn('dockerfile', build_params)
@mock.patch('dmake.build.Build._do_build', return_value='1234')
def test_build_dockerfile_content(self, mocked_do_build: MagicMock):
b = build.Build('test', context=self.context, dockerfile='FROM python:3.8.5')
b.build({})
self.assertEqual('1234', b.final_image)
build_params = mocked_do_build.call_args.args[0]
self.assertIn('dockerfile', build_params)
with open(os.path.join(build_params['path'], build_params['dockerfile'])) as f:
self.assertEqual('FROM python:3.8.5', f.read().strip())
if __name__ == '__main__':
unittest.main()
```
#### File: docker-make/tests/test_template_args.py
```python
import subprocess
from datetime import datetime
import unittest
from unittest import mock
from dmake import template_args
class TemplateArgsGeneratorTests(unittest.TestCase):
@mock.patch('datetime.datetime')
def test_date_generator(self, mocked_datetime):
mocked_datetime.now.return_value = datetime(2016, 7, 21)
args_date = next(template_args.DateGenerator().gen_args(), None)
self.assertIsInstance(args_date, tuple)
k, v = args_date
self.assertEqual(k, 'date')
self.assertEqual(v, '20160721')
mocked_datetime.now.assert_called_once()
@mock.patch('datetime.datetime')
def test_datetime_generator(self, mocked_datetime):
mocked_datetime.now.return_value = datetime(2016, 7, 21, 12, 23)
args_date = next(template_args.DateTimeGenerator(
'datetime', '%Y%m%d%H%M').gen_args(), None)
self.assertIsInstance(args_date, tuple)
k, v = args_date
self.assertEqual(k, 'datetime')
self.assertEqual(v, '201607211223')
mocked_datetime.now.assert_called_once()
def test_validate_tag_name(self):
self.assertTrue(template_args.validate_tag_name('v1.0.0'))
self.assertTrue(template_args.validate_tag_name('latest'))
self.assertFalse(template_args.validate_tag_name('feature/123'))
self.assertFalse(template_args.validate_tag_name('-master'))
self.assertFalse(template_args.validate_tag_name('.test'))
def test_correct_tag_name(self):
self.assertEqual(template_args.correct_tag_name('feature/123'),
'feature_123')
self.assertEqual(template_args.correct_tag_name('-master'),
'_master')
self.assertEqual(template_args.correct_tag_name('.test'),
'_test')
long_tag_name = ''.join(str(i) for i in range(128))
self.assertEqual(len(template_args.correct_tag_name(long_tag_name)),
128)
class ExternalCmdGeneratorTests(unittest.TestCase):
@mock.patch('subprocess.check_output', return_value=b' dummy ')
def test_key_cmd_in_cls_attr(self, mocked_check_output):
class DummyGenerator(template_args.ExternalCmdGenerator):
key = 'dummy'
cmd = 'echo dummy'
args = next(DummyGenerator().gen_args(), None)
self.assertIsInstance(args, tuple)
k, v = args
self.assertEqual(k, 'dummy')
self.assertEqual(v, 'dummy')
mocked_check_output.assert_called_once_with('echo dummy', shell=True, stderr=-2)
@mock.patch('subprocess.check_output', return_value=b' dummy ')
def test_key_cmd_in_init(self, mocked_check_output):
key, cmd = 'dummy', 'echo dummy'
args = next(template_args.ExternalCmdGenerator(key, cmd).gen_args(), None)
self.assertIsInstance(args, tuple)
k, v = args
self.assertEqual(k, 'dummy')
self.assertEqual(v, 'dummy')
mocked_check_output.assert_called_once_with('echo dummy', shell=True, stderr=-2)
@mock.patch('subprocess.check_output', side_effect=subprocess.CalledProcessError(-1, 'echo dummy'))
def test_raise_call_error(self, mocked_check_output):
key, cmd = 'dummy', 'echo dummy'
args = next(template_args.ExternalCmdGenerator(key, cmd).gen_args(), None)
self.assertIsNone(args)
mocked_check_output.assert_called_once_with('echo dummy', shell=True, stderr=-2)
@mock.patch('subprocess.check_output', return_value=b' ')
def test_blank_output(self, mocked_check_output):
key, cmd = 'dummy', 'echo dummy'
args = next(template_args.ExternalCmdGenerator(key, cmd).gen_args(), None)
self.assertIsNone(args)
mocked_check_output.assert_called_once_with('echo dummy', shell=True, stderr=-2)
class GitGeneratorsTests(unittest.TestCase):
@mock.patch('subprocess.check_output', return_value=b'56903369fd200ea021dbb75f357f94b7fb5e829e')
def test_git_commit(self, mocked_check_output):
pairs = template_args.GitCommitGenerator().gen_args()
k1, v1 = next(pairs)
self.assertEqual(k1, 'fcommitid')
self.assertEqual(v1, '56903369fd200ea021dbb75f357f94b7fb5e829e')
mocked_check_output.assert_called_once_with('git rev-parse HEAD', shell=True, stderr=-2)
k2, v2 = next(pairs)
self.assertEqual(k2, 'scommitid')
self.assertEqual(v2, '5690336')
@mock.patch('subprocess.check_output', return_value=b'5690336 refactor and add unit tests.')
def test_git_commitmsg(self, mocked_check_output):
k, v = next(template_args.GitCommitMsgGenerator().gen_args())
self.assertEqual(k, 'commitmsg')
self.assertEqual(v, '5690336 refactor and add unit tests.')
mocked_check_output.assert_called_once_with('git log --oneline|head -1', shell=True,
stderr=-2)
@mock.patch('subprocess.check_output', return_value=b'master')
def test_git_branch(self, mocked_check_output):
k, v = next(template_args.GitBranchGenerator().gen_args())
self.assertEqual(k, 'git_branch')
self.assertEqual(v, 'master')
mocked_check_output.assert_called_once_with('git rev-parse --abbrev-ref HEAD', shell=True,
stderr=-2)
@mock.patch('subprocess.check_output', return_value=b'1.11.3')
def test_git_tag(self, mocked_check_output):
k, v = next(template_args.GitTagGenerator().gen_args())
self.assertEqual(k, 'git_tag')
self.assertEqual(v, '1.11.3')
mocked_check_output.assert_called_once_with('git tag --contains HEAD|head -1', shell=True,
stderr=-2)
@mock.patch('subprocess.check_output', return_value=b'1.1.2-5-g5690336')
def test_git_describe(self, mocked_check_output):
k, v = next(template_args.GitDescribeGenerator().gen_args())
self.assertEqual(k, 'git_describe')
self.assertEqual(v, '1.1.2-5-g5690336')
mocked_check_output.assert_called_once_with('git describe --tags', shell=True, stderr=-2)
class ArgsExportingFunctionTests(unittest.TestCase):
@mock.patch('datetime.datetime')
def test__template_args(self, mocked_datetime):
mocked_datetime.now.return_value = datetime(2016, 7, 21)
generators = [template_args.DateGenerator()]
ret = template_args._template_args(generators)
self.assertEqual(ret, {'date': '20160721'})
mocked_datetime.now.assert_called_once()
@mock.patch('dmake.template_args._template_args', return_value={})
def test_tag_template_args(self, mocked__template_args):
self.assertEqual({}, template_args._template_args())
self.assertIsNone(template_args._tag_template_args)
ret = template_args.tag_template_args()
self.assertEqual(ret, {})
self.assertEqual(template_args._tag_template_args, {})
ta = template_args
generator_classes = [ta.GitCommitGenerator, ta.GitCommitMsgGenerator,
ta.GitBranchGenerator, ta.GitTagGenerator,
ta.GitDescribeGenerator, ta.DateGenerator]
for obj, cls in zip(mocked__template_args.call_args[0][0],
generator_classes):
self.assertIsInstance(obj, cls)
@mock.patch('dmake.template_args._template_args', return_value={})
def test_label_template_args(self, mocked__template_args):
self.assertIsNone(template_args._label_template_args)
ret = template_args.label_template_args()
self.assertEqual(ret, {})
self.assertEqual(template_args._label_template_args, {})
ta = template_args
generator_classes = [ta.GitCommitGenerator, ta.GitCommitMsgGenerator,
ta.GitBranchGenerator, ta.GitTagGenerator,
ta.GitDescribeGenerator]
for obj, cls in zip(mocked__template_args.call_args[0][0],
generator_classes):
self.assertIsInstance(obj, cls)
def test_validate_tag_name_config(self):
func = template_args.validate_tag_name_config
self.assertTrue(func({
'type': 'cmd',
'name': 'dummy',
'value': 'echo dummt',
}))
self.assertFalse(func({
'name': 'dummy',
'value': 'echo dummt',
}))
self.assertFalse(func({
'type': 'cmd',
'value': 'echo dummt',
}))
self.assertFalse(func({
'type': 'cmd',
'name': 'dummy',
}))
self.assertFalse(func({
}))
def test_create_extra_generators(self):
configurations = [
{'type': 'cmd',
'name': 'dummy',
'value': 'echo dummt'},
]
result = template_args.create_extra_generators(configurations)
self.assertEqual(1, len(result))
self.assertIsInstance(result[0],
template_args.ExternalCmdGenerator)
configurations = [
{'type': 'datetime',
'name': 'time',
'value': '%H%M'},
{'type': 'notexist',
'name': 'dummy',
'value': 'dummy'}
]
result = template_args.create_extra_generators(configurations)
self.assertEqual(1, len(result))
self.assertIsInstance(result[0],
template_args.DateTimeGenerator)
@mock.patch('dmake.utils.load_yaml')
@mock.patch('dmake.template_args.label_template_args')
@mock.patch('dmake.template_args.tag_template_args')
def test_init_tag_names(self, patched_tag_template_args,
patched_label_template_args,
patched_load_yaml):
patched_load_yaml.return_value = {'tag-names': []}
template_args.init_tag_names('.docker-make.yml')
patched_load_yaml.assert_called_once_with('.docker-make.yml')
patched_label_template_args.assert_called_once_with([])
patched_tag_template_args.assert_called_once_with([])
``` |
{
"source": "jizhilong/docker-wait",
"score": 2
} |
#### File: dresponse/dresponse/app.py
```python
import json
import logging
import hashlib
import errno
import os
import flask
import docker
from dresponse import handler
LOG = logging.getLogger('dresponse')
def is_running_in_container():
return os.environ.get('INCONTAINER', '0') == '1'
def get_app():
app = flask.Flask('dresponse', instance_path='/etc/docker-wait',
instance_relative_config=True)
app.config.from_pyfile('dresponse.cfg', silent=True)
app.secret_key = app.secret_key or ''
app.docker = docker.from_env(version='auto')
app.handlers = handler.get_handlers(app)
app.in_container = is_running_in_container()
if app.in_container:
app.hostroot = '/hostroot'
else:
app.hostroot = '/'
return app
app = get_app()
def calculate_auth_token(container_id):
hasher = hashlib.sha256(container_id+app.secret_key)
return hasher.hexdigest()
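# The token is a deterministic digest of the container id plus the server-side secret,
# so /reboot can verify callers without storing any per-container state.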
def verify_auth_token(container_id, token):
if calculate_auth_token(container_id) != token:
flask.abort(401)
def make_netns_symlink(pid, name):
netns_dir = '/var/run/netns'
if not os.path.exists(netns_dir):
        os.makedirs(netns_dir, mode=0o755)
source = os.path.join(app.hostroot, 'proc', str(pid), 'ns', 'net')
dest = os.path.join(netns_dir, name)
os.symlink(source, dest)
def destroy_netns_symlink(name):
try:
os.remove(os.path.join('/var/run/netns', name))
except OSError as err:
if err.errno == errno.ENOENT:
pass
else:
raise err
@app.route("/init", methods=['POST'])
def init():
json_data = flask.request.get_json(force=True)
container_id = json_data['container_id']
container = app.docker.containers.get(container_id)
image = app.docker.images.get(container.attrs['Config']['Image'])
entrypoint = image.attrs['Config']['Entrypoint'] or []
command = container.attrs['Config']['Cmd'] or image.attrs['Config']['Cmd']
pid = container.attrs['State']['Pid']
root = os.path.join(app.hostroot, 'proc', str(pid), 'root')
make_netns_symlink(pid, container.id)
try:
for handler in app.handlers:
handler.handle(root, container.id,
container.attrs,
image.attrs)
except Exception as e:
LOG.exception('failed to run handler')
raise e
finally:
destroy_netns_symlink(container.id)
return flask.jsonify({'entrypoint': entrypoint, 'command': command,
'token': calculate_auth_token(container_id)})
@app.route("/reboot", methods=['POST'])
def reboot():
json_data = flask.request.get_json(force=True)
container_id = json_data['container_id']
verify_auth_token(container_id, flask.request.headers.get('Auth-Token'))
container = app.docker.containers.get(container_id)
container.restart()
return flask.jsonify({'rebooted': container_id})
``` |
{
"source": "JizhiziLi/GFM",
"score": 2
} |
#### File: GFM/core/gfm.py
```python
import torch
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
from config import *
from util import *
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv_up_psp(in_channels, out_channels, up_sample):
return nn.Sequential(
nn.Conv2d(in_channels,out_channels,3,padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Upsample(scale_factor=up_sample, mode='bilinear',align_corners = False))
def build_bb(in_channels, mid_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels,mid_channels,3,dilation=2, padding=2),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels,out_channels,3,dilation=2, padding=2),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels,out_channels,3,dilation=2, padding=2),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True))
def build_decoder(in_channels, mid_channels_1, mid_channels_2, out_channels, last_bnrelu, upsample_flag):
layers = []
layers += [nn.Conv2d(in_channels,mid_channels_1,3,padding=1),
nn.BatchNorm2d(mid_channels_1),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels_1,mid_channels_2,3,padding=1),
nn.BatchNorm2d(mid_channels_2),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels_2,out_channels,3,padding=1)]
if last_bnrelu:
layers += [nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),]
if upsample_flag:
layers += [nn.Upsample(scale_factor=2, mode='bilinear')]
sequential = nn.Sequential(*layers)
return sequential
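# Illustrative shapes (assuming an input of (N, in_channels, H, W)): the 3x3 convolutions
# keep the spatial size, so e.g. build_decoder(1024, 512, 512, 256, True, True) maps
# (N, 1024, H, W) -> (N, 256, 2H, 2W), while build_decoder(128, 64, 64, 2, False, False)
# maps (N, 128, H, W) -> (N, 2, H, W).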
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class PSPModule(nn.Module):
def __init__(self, features, out_features=1024, sizes=(1, 2, 3, 6)):
super().__init__()
self.stages = []
self.stages = nn.ModuleList([self._make_stage(features, size) for size in sizes])
self.bottleneck = nn.Conv2d(features * (len(sizes) + 1), out_features, kernel_size=1)
self.relu = nn.ReLU()
def _make_stage(self, features, size):
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(features, features, kernel_size=1, bias=False)
return nn.Sequential(prior, conv)
def forward(self, feats):
h, w = feats.size(2), feats.size(3)
priors = [F.upsample(input=stage(feats), size=(h, w), mode='bilinear',align_corners = True) for stage in self.stages] + [feats]
bottle = self.bottleneck(torch.cat(priors, 1))
return self.relu(bottle)
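# Example: PSPModule(512, 512, (1, 3, 5)) pools the 512-channel feature map to 1x1, 3x3
# and 5x5 grids, upsamples each prior back to (h, w), concatenates them with the input
# (512 * 4 = 2048 channels) and projects back to 512 channels with the 1x1 bottleneck,
# preserving the spatial size.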
class SELayer(nn.Module):
def __init__(self, channel, reduction=4):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
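# Squeeze-and-excitation: SELayer(channel) pools a (N, channel, H, W) tensor to per-channel
# statistics, learns channel weights in (0, 1) with the two-layer bottleneck MLP, and
# rescales each input channel accordingly.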
class GFM(nn.Module):
def __init__(self, args):
super().__init__()
self.backbone = args.backbone
self.rosta = args.rosta
if self.rosta=='TT':
self.gd_channel = 3
else:
self.gd_channel = 2
if self.backbone=='r34_2b':
##################################
### Backbone - Resnet34 + 2 blocks
##################################
self.resnet = models.resnet34(pretrained=True)
self.encoder0 = nn.Sequential(
nn.Conv2d(3,64,3,padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True))
self.encoder1 = self.resnet.layer1
self.encoder2 = self.resnet.layer2
self.encoder3 = self.resnet.layer3
self.encoder4 = self.resnet.layer4
self.encoder5 = nn.Sequential(
nn.MaxPool2d(2, 2, ceil_mode=True),
BasicBlock(512,512),
BasicBlock(512,512),
BasicBlock(512,512))
self.encoder6 = nn.Sequential(
nn.MaxPool2d(2, 2, ceil_mode=True),
BasicBlock(512,512),
BasicBlock(512,512),
BasicBlock(512,512))
self.psp_module = PSPModule(512, 512, (1, 3, 5))
self.psp6 = conv_up_psp(512, 512, 2)
self.psp5 = conv_up_psp(512, 512, 4)
self.psp4 = conv_up_psp(512, 256, 8)
self.psp3 = conv_up_psp(512, 128, 16)
self.psp2 = conv_up_psp(512, 64, 32)
self.psp1 = conv_up_psp(512, 64, 32)
self.decoder6_g = build_decoder(1024, 512, 512, 512, True, True)
self.decoder5_g = build_decoder(1024, 512, 512, 512, True, True)
self.decoder4_g = build_decoder(1024, 512, 512, 256, True, True)
self.decoder3_g = build_decoder(512, 256, 256, 128, True, True)
self.decoder2_g = build_decoder(256, 128, 128, 64, True, True)
self.decoder1_g = build_decoder(128, 64, 64, 64, True, False)
self.bridge_block = build_bb(512, 512, 512)
self.decoder6_f = build_decoder(1024, 512, 512, 512, True, True)
self.decoder5_f = build_decoder(1024, 512, 512, 512, True, True)
self.decoder4_f = build_decoder(1024, 512, 512, 256, True, True)
self.decoder3_f = build_decoder(512, 256, 256, 128, True, True)
self.decoder2_f = build_decoder(256, 128, 128, 64, True, True)
self.decoder1_f = build_decoder(128, 64, 64, 64, True, False)
if self.rosta == 'RIM':
self.decoder0_g_tt = nn.Sequential(nn.Conv2d(64,3,3,padding=1))
self.decoder0_g_ft = nn.Sequential(nn.Conv2d(64,2,3,padding=1))
self.decoder0_g_bt = nn.Sequential(nn.Conv2d(64,2,3,padding=1))
self.decoder0_f_tt = nn.Sequential(nn.Conv2d(64,1,3,padding=1))
self.decoder0_f_ft = nn.Sequential(nn.Conv2d(64,1,3,padding=1))
self.decoder0_f_bt = nn.Sequential(nn.Conv2d(64,1,3,padding=1))
else:
self.decoder0_g = nn.Sequential(nn.Conv2d(64,self.gd_channel,3,padding=1))
self.decoder0_f = nn.Sequential(nn.Conv2d(64,1,3,padding=1))
if self.backbone=='r34':
##########################
### Backbone - Resnet34
##########################
self.resnet = models.resnet34(pretrained=True)
self.encoder0 = nn.Sequential(
self.resnet.conv1,
self.resnet.bn1,
self.resnet.relu)
self.encoder1 = nn.Sequential(
self.resnet.maxpool,
self.resnet.layer1)
self.encoder2 = self.resnet.layer2
self.encoder3 = self.resnet.layer3
self.encoder4 = self.resnet.layer4
self.psp_module = PSPModule(512, 512, (1, 3, 5))
self.psp4 = conv_up_psp(512, 256, 2)
self.psp3 = conv_up_psp(512, 128, 4)
self.psp2 = conv_up_psp(512, 64, 8)
self.psp1 = conv_up_psp(512, 64, 16)
self.decoder4_g = build_decoder(1024, 512, 512, 256, True, True)
self.decoder3_g = build_decoder(512, 256, 256, 128, True, True)
self.decoder2_g = build_decoder(256, 128, 128, 64, True, True)
self.decoder1_g = build_decoder(128, 64, 64, 64, True, True)
self.bridge_block = build_bb(512, 512, 512)
self.decoder4_f = build_decoder(1024, 512, 512, 256, True, True)
self.decoder3_f = build_decoder(512, 256, 256, 128, True, True)
self.decoder2_f = build_decoder(256, 128, 128, 64, True, True)
self.decoder1_f = build_decoder(128, 64, 64, 64, True, True)
if self.rosta == 'RIM':
self.decoder0_g_tt = build_decoder(128, 64, 64, 3, False, True)
self.decoder0_g_ft = build_decoder(128, 64, 64, 2, False, True)
self.decoder0_g_bt = build_decoder(128, 64, 64, 2, False, True)
self.decoder0_f_tt = build_decoder(128, 64, 64, 1, False, True)
self.decoder0_f_ft = build_decoder(128, 64, 64, 1, False, True)
self.decoder0_f_bt = build_decoder(128, 64, 64, 1, False, True)
else:
self.decoder0_g = build_decoder(128, 64, 64, self.gd_channel, False, True)
self.decoder0_f = build_decoder(128, 64, 64, 1, False, True)
elif self.backbone=='r101':
##########################
### Backbone - Resnet101
##########################
self.resnet = models.resnet101(pretrained=True)
self.encoder0 = nn.Sequential(
self.resnet.conv1,
self.resnet.bn1,
self.resnet.relu)
self.encoder1 = nn.Sequential(
self.resnet.maxpool,
self.resnet.layer1)
self.encoder2 = self.resnet.layer2
self.encoder3 = self.resnet.layer3
self.encoder4 = self.resnet.layer4
self.psp_module = PSPModule(2048, 2048, (1, 3, 5))
self.bridge_block = build_bb(2048, 2048, 2048)
self.psp4 = conv_up_psp(2048, 1024, 2)
self.psp3 = conv_up_psp(2048, 512, 4)
self.psp2 = conv_up_psp(2048, 256, 8)
self.psp1 = conv_up_psp(2048, 64, 16)
self.decoder4_g = build_decoder(4096, 2048, 1024, 1024, True, True)
self.decoder3_g = build_decoder(2048, 1024, 512, 512, True, True)
self.decoder2_g = build_decoder(1024, 512, 256, 256, True, True)
self.decoder1_g = build_decoder(512, 256, 128, 64, True, True)
self.decoder4_f = build_decoder(4096, 2048, 1024, 1024, True, True)
self.decoder3_f = build_decoder(2048, 1024, 512, 512, True, True)
self.decoder2_f = build_decoder(1024, 512, 256, 256, True, True)
self.decoder1_f = build_decoder(512, 256, 128, 64, True, True)
if self.rosta == 'RIM':
self.decoder0_g_tt = build_decoder(128, 64, 64, 3, False, True)
self.decoder0_g_ft = build_decoder(128, 64, 64, 2, False, True)
self.decoder0_g_bt = build_decoder(128, 64, 64, 2, False, True)
self.decoder0_f_tt = build_decoder(128, 64, 64, 1, False, True)
self.decoder0_f_ft = build_decoder(128, 64, 64, 1, False, True)
self.decoder0_f_bt = build_decoder(128, 64, 64, 1, False, True)
else:
self.decoder0_g = build_decoder(128, 64, 64, self.gd_channel, False, True)
self.decoder0_f = build_decoder(128, 64, 64, 1, False, True)
elif self.backbone=='d121':
#############################
### Encoder part - DESNET121
#############################
self.densenet = models.densenet121(pretrained=True)
self.encoder0 = nn.Sequential(
self.densenet.features.conv0,
self.densenet.features.norm0,
self.densenet.features.relu0)
self.encoder1 = nn.Sequential(
self.densenet.features.denseblock1,
self.densenet.features.transition1)
self.encoder2 = nn.Sequential(
self.densenet.features.denseblock2,
self.densenet.features.transition2)
self.encoder3 = nn.Sequential(
self.densenet.features.denseblock3,
self.densenet.features.transition3)
self.encoder4 = nn.Sequential(
self.densenet.features.denseblock4,
nn.Conv2d(1024,512,3,padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2, ceil_mode=True))
self.psp_module = PSPModule(512, 512, (1, 3, 5))
self.psp4 = conv_up_psp(512, 256, 2)
self.psp3 = conv_up_psp(512, 128, 4)
self.psp2 = conv_up_psp(512, 64, 8)
self.psp1 = conv_up_psp(512, 64, 16)
self.decoder4_g = build_decoder(1024, 512, 512, 256, True, True)
self.decoder3_g = build_decoder(512, 256, 256, 128, True, True)
self.decoder2_g = build_decoder(256, 128, 128, 64, True, True)
self.decoder1_g = build_decoder(128, 64, 64, 64, True, True)
self.bridge_block = build_bb(512, 512, 512)
self.decoder4_f = build_decoder(1024, 512, 512, 256, True, True)
self.decoder3_f = build_decoder(768, 256, 256, 128, True, True)
self.decoder2_f = build_decoder(384, 128, 128, 64, True, True)
self.decoder1_f = build_decoder(192, 64, 64, 64, True, True)
if self.rosta == 'RIM':
self.decoder0_g_tt = build_decoder(128, 64, 64, 3, False, True)
self.decoder0_g_ft = build_decoder(128, 64, 64, 2, False, True)
self.decoder0_g_bt = build_decoder(128, 64, 64, 2, False, True)
self.decoder0_f_tt = build_decoder(128, 64, 64, 1, False, True)
self.decoder0_f_ft = build_decoder(128, 64, 64, 1, False, True)
self.decoder0_f_bt = build_decoder(128, 64, 64, 1, False, True)
else:
self.decoder0_g = build_decoder(128, 64, 64, self.gd_channel, False, True)
self.decoder0_f = build_decoder(128, 64, 64, 1, False, True)
if self.rosta=='RIM':
self.rim = nn.Sequential(
nn.Conv2d(3,16,1),
SELayer(16),
nn.Conv2d(16,1,1))
def forward(self, input):
glance_sigmoid = torch.zeros(input.shape)
focus_sigmoid = torch.zeros(input.shape)
fusion_sigmoid = torch.zeros(input.shape)
#################
### Encoder part
#################
e0 = self.encoder0(input)
e1 = self.encoder1(e0)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
##########################
### Decoder part - GLANCE
##########################
if self.backbone=='r34_2b':
e5 = self.encoder5(e4)
e6 = self.encoder6(e5)
psp = self.psp_module(e6)
d6_g = self.decoder6_g(torch.cat((psp, e6),1))
d5_g = self.decoder5_g(torch.cat((self.psp6(psp),d6_g),1))
d4_g = self.decoder4_g(torch.cat((self.psp5(psp),d5_g),1))
else:
psp = self.psp_module(e4)
d4_g = self.decoder4_g(torch.cat((psp,e4),1))
d3_g = self.decoder3_g(torch.cat((self.psp4(psp),d4_g),1))
d2_g = self.decoder2_g(torch.cat((self.psp3(psp),d3_g),1))
d1_g = self.decoder1_g(torch.cat((self.psp2(psp),d2_g),1))
if self.backbone=='r34_2b':
if self.rosta=='RIM':
d0_g_tt = self.decoder0_g_tt(d1_g)
d0_g_ft = self.decoder0_g_ft(d1_g)
d0_g_bt = self.decoder0_g_bt(d1_g)
else:
d0_g = self.decoder0_g(d1_g)
else:
if self.rosta=='RIM':
d0_g_tt = self.decoder0_g_tt(torch.cat((self.psp1(psp),d1_g),1))
d0_g_ft = self.decoder0_g_ft(torch.cat((self.psp1(psp),d1_g),1))
d0_g_bt = self.decoder0_g_bt(torch.cat((self.psp1(psp),d1_g),1))
else:
d0_g = self.decoder0_g(torch.cat((self.psp1(psp),d1_g),1))
if self.rosta=='RIM':
glance_sigmoid_tt = F.sigmoid(d0_g_tt)
glance_sigmoid_ft = F.sigmoid(d0_g_ft)
glance_sigmoid_bt = F.sigmoid(d0_g_bt)
else:
glance_sigmoid = F.sigmoid(d0_g)
##########################
### Decoder part - FOCUS
##########################
if self.backbone == 'r34_2b':
bb = self.bridge_block(e6)
d6_f = self.decoder6_f(torch.cat((bb, e6),1))
d5_f = self.decoder5_f(torch.cat((d6_f, e5),1))
d4_f = self.decoder4_f(torch.cat((d5_f, e4),1))
else:
bb = self.bridge_block(e4)
d4_f = self.decoder4_f(torch.cat((bb, e4),1))
d3_f = self.decoder3_f(torch.cat((d4_f, e3),1))
d2_f = self.decoder2_f(torch.cat((d3_f, e2),1))
d1_f = self.decoder1_f(torch.cat((d2_f, e1),1))
if self.backbone=='r34_2b':
if self.rosta=='RIM':
d0_f_tt = self.decoder0_f_tt(d1_f)
d0_f_ft = self.decoder0_f_ft(d1_f)
d0_f_bt = self.decoder0_f_bt(d1_f)
else:
d0_f = self.decoder0_f(d1_f)
else:
if self.rosta=='RIM':
d0_f_tt = self.decoder0_f_tt(torch.cat((d1_f, e0),1))
d0_f_ft = self.decoder0_f_ft(torch.cat((d1_f, e0),1))
d0_f_bt = self.decoder0_f_bt(torch.cat((d1_f, e0),1))
else:
d0_f = self.decoder0_f(torch.cat((d1_f, e0),1))
if self.rosta=='RIM':
focus_sigmoid_tt = F.sigmoid(d0_f_tt)
focus_sigmoid_ft = F.sigmoid(d0_f_ft)
focus_sigmoid_bt = F.sigmoid(d0_f_bt)
else:
focus_sigmoid = F.sigmoid(d0_f)
##########################
### Collaborative Matting
##########################
if self.rosta=='RIM':
fusion_sigmoid_tt = collaborative_matting('TT', glance_sigmoid_tt, focus_sigmoid_tt)
fusion_sigmoid_ft = collaborative_matting('FT', glance_sigmoid_ft, focus_sigmoid_ft)
fusion_sigmoid_bt = collaborative_matting('BT', glance_sigmoid_bt, focus_sigmoid_bt)
fusion_sigmoid = torch.cat((fusion_sigmoid_tt,fusion_sigmoid_ft,fusion_sigmoid_bt),1)
fusion_sigmoid = self.rim(fusion_sigmoid)
return [[glance_sigmoid_tt, focus_sigmoid_tt, fusion_sigmoid_tt],[glance_sigmoid_ft, focus_sigmoid_ft, fusion_sigmoid_ft],[glance_sigmoid_bt, focus_sigmoid_bt, fusion_sigmoid_bt], fusion_sigmoid]
else:
fusion_sigmoid = collaborative_matting(self.rosta, glance_sigmoid, focus_sigmoid)
return glance_sigmoid, focus_sigmoid, fusion_sigmoid
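# A minimal inference sketch (illustrative only): the class and constructor
# names below are assumptions, since the network definition begins earlier in
# this file; the return shapes follow directly from forward() above.
# net = P3mNet(backbone='r34', rosta='TT')         # hypothetical constructor
# x = torch.randn(1, 3, 512, 512)
# glance, focus, fusion = net(x)                   # non-RIM path
# With rosta == 'RIM', forward() instead returns three [glance, focus, fusion]
# triples (TT/FT/BT) plus the SE-fused fusion map produced by self.rim.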
``` |
{
"source": "JizhiziLi/P3M",
"score": 2
} |
#### File: P3M/core/data.py
```python
from config import *
from util import *
import torch
import cv2
import os
import random
import numpy as np
from PIL import Image
from torch.utils.data import Dataset, DataLoader
import logging
from torchvision import transforms
from torch.autograd import Variable
from skimage.transform import resize
#########################
## Data transformer
#########################
class MattingTransform(object):
def __init__(self):
super(MattingTransform, self).__init__()
def __call__(self, *argv):
ori = argv[0]
h, w, c = ori.shape
rand_ind = random.randint(0, len(CROP_SIZE) - 1)
crop_size = CROP_SIZE[rand_ind] if CROP_SIZE[rand_ind]<min(h, w) else 512
resize_size = RESIZE_SIZE
### generate crop centered in transition area randomly
trimap = argv[4]
trimap_crop = trimap[:h-crop_size, :w-crop_size]
target = np.where(trimap_crop == 128) if random.random() < 0.5 else np.where(trimap_crop > -100)
if len(target[0])==0:
target = np.where(trimap_crop > -100)
rand_ind = np.random.randint(len(target[0]), size = 1)[0]
cropx, cropy = target[1][rand_ind], target[0][rand_ind]
        # flip the samples randomly
flip_flag=True if random.random()<0.5 else False
# generate samples (crop, flip, resize)
argv_transform = []
for item in argv:
item = item[cropy:cropy+crop_size, cropx:cropx+crop_size]
if flip_flag:
item = cv2.flip(item, 1)
item = cv2.resize(item, (resize_size, resize_size), interpolation=cv2.INTER_LINEAR)
argv_transform.append(item)
return argv_transform
#########################
## Data Loader
#########################
class MattingDataset(torch.utils.data.Dataset):
def __init__(self, args, transform):
self.samples=[]
self.transform = transform
self.logging = args.logging
self.logging.info('===> Loading training set')
self.samples += generate_paths_for_dataset(args)
self.logging.info(f"\t--crop_size: {CROP_SIZE} | resize: {RESIZE_SIZE}")
self.logging.info("\t--Valid Samples: {}".format(len(self.samples)))
def __getitem__(self,index):
# Prepare training sample paths
ori_path, mask_path, fg_path, bg_path = self.samples[index]
ori = np.array(Image.open(ori_path))
mask = trim_img(np.array(Image.open(mask_path)))
fg = np.array(Image.open(fg_path))
bg = np.array(Image.open(bg_path))
# Generate trimap/dilation/erosion online
kernel_size = random.randint(15, 30)
trimap = gen_trimap_with_dilate(mask, kernel_size)
# Data transformation to generate samples (crop/flip/resize)
argv = self.transform(ori, mask, fg, bg, trimap)
argv_transform = []
for item in argv:
if item.ndim<3:
item = torch.from_numpy(item.astype(np.float32)[np.newaxis, :, :])
else:
item = torch.from_numpy(item.astype(np.float32)).permute(2, 0, 1)
argv_transform.append(item)
[ori, mask, fg, bg, trimap] = argv_transform
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
ori = ori/255.0
ori = normalize(ori)
fg = fg/255.0
fg = normalize(fg)
bg = bg/255.0
bg = normalize(bg)
return ori, mask, fg, bg, trimap
def __len__(self):
return len(self.samples)
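# A minimal usage sketch (illustrative; assumes `args` carries a configured
# `logging` object and that the dataset paths referenced in config.py exist):
# transform = MattingTransform()
# train_set = MattingDataset(args, transform)
# train_loader = DataLoader(train_set, batch_size=4, shuffle=True, num_workers=4)
# for ori, mask, fg, bg, trimap in train_loader:
#     pass  # each item is a float32 tensor resized to RESIZE_SIZE x RESIZE_SIZE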
```
#### File: P3M/core/util.py
```python
import os
import shutil
import cv2
import numpy as np
import torch
from config import *
##########################
### Pure functions
##########################
def extract_pure_name(original_name):
    pure_name, extension = os.path.splitext(original_name)
return pure_name
def listdir_nohidden(path):
new_list = []
for f in os.listdir(path):
if not f.startswith('.'):
new_list.append(f)
new_list.sort()
return new_list
def create_folder_if_not_exists(folder_path):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
def refresh_folder(folder_path):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
else:
shutil.rmtree(folder_path)
os.makedirs(folder_path)
def save_test_result(save_dir, predict):
predict = (predict * 255).astype(np.uint8)
cv2.imwrite(save_dir, predict)
def generate_composite_img(img, alpha_channel):
b_channel, g_channel, r_channel = cv2.split(img)
b_channel = b_channel * alpha_channel
g_channel = g_channel * alpha_channel
r_channel = r_channel * alpha_channel
alpha_channel = (alpha_channel*255).astype(b_channel.dtype)
img_BGRA = cv2.merge((r_channel,g_channel,b_channel,alpha_channel))
return img_BGRA
##########################
### for dataset processing
##########################
def trim_img(img):
if img.ndim>2:
img = img[:,:,0]
return img
def gen_trimap_with_dilate(alpha, kernel_size):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size,kernel_size))
fg_and_unknown = np.array(np.not_equal(alpha, 0).astype(np.float32))
fg = np.array(np.equal(alpha, 255).astype(np.float32))
dilate = cv2.dilate(fg_and_unknown, kernel, iterations=1)
erode = cv2.erode(fg, kernel, iterations=1)
trimap = erode *255 + (dilate-erode)*128
return trimap.astype(np.uint8)
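# Worked example (illustrative): for a binary alpha mask, the trimap returned
# above is 255 inside the eroded foreground, 128 in the dilate-minus-erode band
# around the boundary, and 0 elsewhere, e.g.
#   trimap = gen_trimap_with_dilate(mask, kernel_size=20)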
##########################
### Functions for fusion
##########################
def gen_trimap_from_segmap_e2e(segmap):
trimap = np.argmax(segmap, axis=1)[0]
trimap = trimap.astype(np.int64)
trimap[trimap==1]=128
trimap[trimap==2]=255
return trimap.astype(np.uint8)
def get_masked_local_from_global(global_sigmoid, local_sigmoid):
values, index = torch.max(global_sigmoid,1)
index = index[:,None,:,:].float()
### index <===> [0, 1, 2]
### bg_mask <===> [1, 0, 0]
bg_mask = index.clone()
bg_mask[bg_mask==2]=1
bg_mask = 1- bg_mask
### trimap_mask <===> [0, 1, 0]
trimap_mask = index.clone()
trimap_mask[trimap_mask==2]=0
### fg_mask <===> [0, 0, 1]
fg_mask = index.clone()
fg_mask[fg_mask==1]=0
fg_mask[fg_mask==2]=1
fusion_sigmoid = local_sigmoid*trimap_mask+fg_mask
return fusion_sigmoid
def get_masked_local_from_global_test(global_result, local_result):
weighted_global = np.ones(global_result.shape)
weighted_global[global_result==255] = 0
weighted_global[global_result==0] = 0
fusion_result = global_result*(1.-weighted_global)/255+local_result*weighted_global
return fusion_result
#######################################
### Function to generate training data
#######################################
def generate_paths_for_dataset(args):
ORI_PATH = DATASET_PATHS_DICT['P3M10K']['TRAIN']['ORIGINAL_PATH']
MASK_PATH = DATASET_PATHS_DICT['P3M10K']['TRAIN']['MASK_PATH']
FG_PATH = DATASET_PATHS_DICT['P3M10K']['TRAIN']['FG_PATH']
BG_PATH = DATASET_PATHS_DICT['P3M10K']['TRAIN']['BG_PATH']
mask_list = listdir_nohidden(MASK_PATH)
total_number = len(mask_list)
paths_list = []
for mask_name in mask_list:
path_list = []
ori_path = ORI_PATH+extract_pure_name(mask_name)+'.jpg'
mask_path = MASK_PATH+mask_name
fg_path = FG_PATH+mask_name
bg_path = BG_PATH+extract_pure_name(mask_name)+'.jpg'
path_list.append(ori_path)
path_list.append(mask_path)
path_list.append(fg_path)
path_list.append(bg_path)
paths_list.append(path_list)
return paths_list
``` |
{
"source": "jizhouh/deepcell-tf",
"score": 2
} |
#### File: deepcell-tf/deepcell/callbacks.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import timeit
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
class InferenceTimer(tf.keras.callbacks.Callback):
"""Callback to log inference speed per epoch."""
def __init__(self, samples=100):
super(InferenceTimer, self).__init__()
self._samples = int(samples)
self._batch_times = []
self._samples_seen = []
self._timer = None
def on_predict_begin(self, epoch, logs=None):
self._batch_times = []
self._samples_seen = []
def on_predict_batch_begin(self, batch, logs=None):
self._timer = timeit.default_timer()
def on_predict_batch_end(self, batch, logs=None):
t = timeit.default_timer() - self._timer
self._batch_times.append(t)
outputs = logs.get('outputs', np.empty((1,)))
if isinstance(self.model.output_shape, list):
outputs = outputs[0]
self._samples_seen.append(outputs.shape[0])
def on_predict_end(self, logs=None):
total_samples = np.sum(self._samples_seen)
per_sample = [t / float(s) for t, s in
zip(self._batch_times, self._samples_seen)]
avg = np.mean(per_sample)
std = np.std(per_sample)
print('Average inference speed per sample for %s total samples: '
'%0.5fs ± %0.5fs.' % (total_samples, avg, std))
def on_epoch_end(self, epoch, logs=None):
shape = tuple([self._samples] + list(self.model.input_shape[1:]))
test_batch = np.random.random(shape)
self.model.predict(test_batch, callbacks=self)
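# Usage sketch (illustrative): attaching the callback to training makes the
# epoch-end hook benchmark inference on `samples` random inputs and print the
# per-sample timing summary.
# model.compile('sgd', loss='mse')
# model.fit(dataset, epochs=2, callbacks=[InferenceTimer(samples=100)])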
```
#### File: deepcell-tf/deepcell/callbacks_test.py
```python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.framework import test_util as tf_test_util
from deepcell import callbacks
class TestInferenceTimer(keras_parameterized.TestCase):
"""Callback to log inference speed per epoch."""
@keras_parameterized.run_all_keras_modes
def test_inference_time_logging(self):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(1))
model.compile(
'sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
x = tf.ones((200, 3))
y = tf.zeros((200, 2))
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*Average inference.*)+'
cbks = [callbacks.InferenceTimer()]
with self.captureWritesToStream(sys.stdout) as printed:
y = model.call(x)
model.fit(dataset, epochs=2, steps_per_epoch=10, callbacks=cbks)
self.assertRegex(printed.contents(), expected_log)
``` |
{
"source": "jizhouli/ladder-tournament",
"score": 2
} |
#### File: zaih-core/zaih_core/errors.py
```python
from __future__ import unicode_literals
'''Helper utilities and decorators.'''
from flask import render_template
from werkzeug.exceptions import (HTTPException as _HTTPException,
BadRequest as _BadRequest,
Unauthorized as _Unauthorized,
Forbidden as _Forbidden,
NotFound as _NotFound,
InternalServerError as _InternalServerError,
MethodNotAllowed as _MethodNotAllowed)
class ZaihException(Exception):
pass
class HTTPException(ZaihException, _HTTPException):
"""封装原有方法, 实现自定义模板"""
def get_body(self, environ):
"""Get the HTML body."""
return render_template('errors.html', error=self)
class BadRequest(HTTPException, _BadRequest):
pass
class Unauthorized(HTTPException, _Unauthorized):
pass
class Forbidden(HTTPException, _Forbidden):
pass
class NotFound(HTTPException, _NotFound):
pass
class InternalServerError(HTTPException, _InternalServerError):
pass
class MethodNotAllowed(HTTPException, _MethodNotAllowed):
pass
```
#### File: zaih-core/zaih_core/image.py
```python
import base64
import random
from urlparse import urlparse
from StringIO import StringIO
from flask import current_app as app
from qiniu import Auth, PersistentFop, op_save, put_file, BucketManager
from PIL import Image, ImageDraw, ImageFont
def image_for(image_hash, style=None):
if not image_hash:
return None
url = image_hash.split('!')[0]
up = urlparse(url)
image_hash = up.path
if len(image_hash) != 0 and image_hash[0] == '/':
image_hash = image_hash[1:]
image_domain = app.config['QINIU_DOMAIN']
url = '//%s%s' % (image_domain, image_hash)
if style:
url = '%s!%s' % (url, style)
if app.name == 'tutor.apis':
return 'http:%s' % url
return url
def init():
ACCESS_KEY = str(app.config['QINIU_ACCESS_TOKEN'])
SECRET_KEY = str(app.config['QINIU_SECRET_TOKEN'])
q = Auth(ACCESS_KEY, SECRET_KEY)
return q
def qiniu_token():
q = init()
uptoken = q.upload_token('hangjia', None, 30000)
return uptoken
def qiniu_key():
key = []
seed = 'abcdefghijklmnopqrstuvwxyz0123456789'
for i in range(32):
key.append(random.choice(seed))
return ''.join(key)
def qiniu_form():
return {
'token': qiniu_token(),
'domain': '%s://%s' % (str(app.config['APP_TRANSPORT']),
str(app.config['QINIU_DOMAIN'])),
'upload_url': str(app.config['QINIU_UPLOAD_URL'])}
def qiniu_upload(key, fpath):
uptoken = qiniu_token()
return put_file(uptoken, key, fpath)
def qiniu_saveas(url):
q = init()
up = urlparse(url)
src_path = up.path[1:]
src_query = up.query
saved_key = src_path + str(random.randint(20, 100))
pfop = PersistentFop(q, 'hangjia')
op = op_save(src_query, 'hangjia', saved_key)
ops = []
ops.append(op)
ret, info = pfop.execute(src_path, ops, 1)
image_url = '%s://%s/%s' % (up.scheme, up.netloc, saved_key)
if ret is not None:
return {'url': image_url}
else:
return
def qiniu_delete(key):
q = init()
bucket = BucketManager(q)
return bucket.delete('hangjia', key)
def qiniu_fetch(url):
q = init()
bucket = BucketManager(q)
key = qiniu_key()
ret, info = bucket.fetch(url, 'hangjia', key)
if info.status_code == 200:
return key
from_top = 4
def noise_arcs(draw, image):
fg_color = app.config['CAPTCHA_FOREGROUND_COLOR']
size = image.size
draw.arc([-20, -20, size[0], 20], 0, 295, fill=fg_color)
draw.line([-20, 20, size[0] + 20, size[1] - 20], fill=fg_color)
draw.line([-20, 0, size[0] + 20, size[1]], fill=fg_color)
return draw
def noise_dots(draw, image):
fg_color = app.config['CAPTCHA_FOREGROUND_COLOR']
size = image.size
for p in range(int(size[0] * size[1] * 0.1)):
draw.point((random.randint(0, size[0]), random.randint(0, size[1])),
fill=fg_color)
return draw
def noise_functions():
noise_fs = [noise_arcs, noise_dots]
if noise_fs:
return noise_fs
return []
def post_smooth(image):
try:
import ImageFilter
except ImportError:
from PIL import ImageFilter
return image.filter(ImageFilter.SMOOTH)
def filter_functions():
filter_fs = [post_smooth]
if filter_fs:
return filter_fs
return []
def getsize(font, text):
if hasattr(font, 'getoffset'):
return [x + y for x, y in zip(font.getsize(text), font.getoffset(text))]
else:
return font.getsize(text)
def create_captcha(text):
font_path = app.config['CAPTCHA_FONT_PATH']
font_size = app.config['CAPTCHA_FONT_SIZE']
punctuation = app.config['CAPTCHA_PUNCTUATION']
foreground_color = app.config['CAPTCHA_FOREGROUND_COLOR']
letter_rotation = app.config['CAPTCHA_LETTER_ROTATION']
if font_path.lower().strip().endswith('ttf'):
font = ImageFont.truetype(font_path, font_size)
else:
font = ImageFont.load(font_path)
size = getsize(font, text)
size = (size[0] * 2, int(size[1] * 1.4))
image = Image.new('RGB', size,
app.config['CAPTCHA_BACKGROUND_COLOR'])
xpos = 2
charlist = []
for char in text:
if char in punctuation and len(charlist) >= 1:
charlist[-1] += char
else:
charlist.append(char)
for char in charlist:
fgimage = Image.new('RGB', size, foreground_color)
charimage = Image.new('L', getsize(font, ' %s ' % char), '#000000')
chardraw = ImageDraw.Draw(charimage)
chardraw.text((0, 0), ' %s ' % char, font=font, fill='#ffffff')
if letter_rotation:
charimage = charimage.rotate(random.randrange(*letter_rotation),
expand=0, resample=Image.BICUBIC)
charimage = charimage.crop(charimage.getbbox())
maskimage = Image.new('L', size)
maskimage.paste(charimage, (xpos, from_top, xpos + charimage.size[0],
from_top + charimage.size[1]))
size = maskimage.size
image = Image.composite(fgimage, image, maskimage)
xpos = xpos + 2 + charimage.size[0]
image = image.crop((0, 0, xpos + 1, size[1]))
draw = ImageDraw.Draw(image)
for f in noise_functions():
draw = f(draw, image)
for f in filter_functions():
image = f(image)
return image
def generate_base64_code(url):
import qrcode
qr_img = qrcode.make(url)
buf = StringIO()
qr_img.save(buf, 'PNG')
value = buf.getvalue()
return base64.b64encode(value)
```
#### File: zaih-core/zaih_core/pager.py
```python
from __future__ import unicode_literals
def get_offset_limit(args):
"""
return offset limit
"""
if 'offset' in args and 'limit' in args:
try:
offset = int(args.get('offset', 0))
except ValueError:
offset = 0
try:
limit = int(args.get('limit', 20))
except ValueError:
limit = 0
else:
try:
page = int(args.get('page', 1))
except ValueError:
page = 1
try:
limit = int(args.get('per_page', 20))
except ValueError:
limit = 20
offset = limit * (page - 1)
return offset, limit
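# Examples (illustrative):
#   get_offset_limit({'offset': '10', 'limit': '5'})    # -> (10, 5)
#   get_offset_limit({'page': '3', 'per_page': '20'})   # -> (40, 20)
#   get_offset_limit({})                                 # -> (0, 20)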
```
#### File: zaih-core/zaih_core/test_redis_cache_model.py
```python
from __future__ import unicode_literals
import random
from datetime import datetime
import redis
from mockredis import mock_strict_redis_client
from zaih_core.redis_cache_fields import (
DateTimeField, CharField, IntegerField,
BooleanField, ListField, JsonField)
from zaih_core.ztime import now
from zaih_core.redis_cache_model import CacheModel
from zaih_core.ztime import date2timestamp
redis_client = mock_strict_redis_client()
# redis_client = redis.StrictRedis(host='localhost', port=6379, db=0)
class RCNotice(CacheModel):
__model_name__ = 'rcnotice'
__redis_client__ = redis_client
__expire__ = 86400 * 7 * 3
__created_at__ = 'date_created'
__index_meta__ = 'receiver_id'
__unique_index__ = (
('category', 'date_created'),
('receiver_id', 'is_read', 'notice_id'),
('receiver_id', 'is_read', 'date_created'),
('receiver_id', 'category', 'date_created'),
('receiver_id', 'category', 'is_read', 'date_created'),
)
id = CharField('id', required=True)
receiver_id = IntegerField('receiver_id', required=True, index_value=True)
target_id = CharField('target_id', required=True)
target_type = CharField('target_type', required=True)
action = CharField('action', required=True)
category = CharField('category', required=True, index_value=True)
title = CharField('title', required=True)
content = CharField('content', required=False)
sender_ids = ListField('sender_ids', required=True, default=[])
target_info = JsonField('target_info', required=False, default={})
is_read = BooleanField('is_read', required=True, index_value=True,
default=False)
notice_id = IntegerField('notice_id', required=True, indexed=True)
date_created = DateTimeField('date_created', required=True, indexed=True)
@property
def _created_at(self):
return date2timestamp(self.date_created)
def test_query():
params = dict(receiver_id=2,
target_id='10',
target_type='question',
action='ask',
category='ask',
title='向你提了问题',
content='你说这个cache 好用么',
sender_ids=[2],
date_created=now())
for i in range(10):
params['id'] = str(i)
        params['receiver_id'] = i
params['notice_id'] = i
        params['action'] = random.choice(['ask', 'question'])
cn = RCNotice(**params)
cn.save()
query = (RCNotice.query()
.filter(RCNotice.receiver_id.eq(2))
# .filter(RCNotice.receiver_id.in_([2, 4]))
.filter(RCNotice.notice_id.eq(2))
.filter(RCNotice.is_read.eq(False))
.order_by(RCNotice.notice_id.desc()))
results = query.offset(0).limit(2).all()
assert len(results) == 1
assert results[0].id == '2'
query = (RCNotice.query()
.filter(RCNotice.receiver_id.in_([2, 4]))
.filter(RCNotice.notice_id.eq(2))
.filter(RCNotice.is_read.eq(False))
.order_by(RCNotice.notice_id.desc()))
results = query.offset(0).limit(2).all()
assert len(results) == 1
assert results[0].id == '2'
query = (
RCNotice.query()
.filter(RCNotice.category.eq('ask'))
.order_by(RCNotice.date_created.desc()))
results = query.limit(1).all()
assert len(results) == 1
assert results[0].id == '9'
def test_curd():
# test curd
cn = RCNotice(id='6',
receiver_id=2,
target_id='10',
target_type='question',
action='ask',
category='ask',
title='向你提了问题',
content='你说这个cache 好用么',
sender_ids=[2],
notice_id=100,
date_created=datetime(2017, 9, 11))
cn.save()
assert cn.id == '6'
cn = RCNotice.get_by_id(6)
assert cn.id == '6'
assert cn.receiver_id == 2
cn = cn.update(title='test update')
assert cn.title == 'test update'
cns = RCNotice.batch_get_by_ids([6])
assert len(cns) == 1
assert cns[0].id == '6'
cn = RCNotice.get_by_id(6)
cn.delete()
cn = RCNotice.get_by_id(6)
assert cn is None
def main():
test_curd()
test_query()
if __name__ == '__main__':
main()
```
#### File: zaih-core/zaih_core/verification.py
```python
from __future__ import unicode_literals
import json
import base64
import slumber
from flask import request, current_app as app
from datetime import datetime
from .caching import cache_for
from .helpers import get_backend_api
def get_authorization():
authorization = request.headers.get('Authorization')
if not authorization:
return False, None
try:
authorization_type, token = authorization.split(' ')
return authorization_type, token
except ValueError:
return False, None
def verify_token(access_token):
# verify token return scopes
api = slumber.API(app.config['AUTH_TOKEN_INFO_URL'],
auth=(app.config['APP_CLIENT_ID'],
app.config['APP_CLIENT_SECRET']),
append_slash=False)
token_info = api.post({'access_token': access_token})
if not isinstance(token_info, dict):
try:
token_info = json.loads(token_info)
except ValueError:
return False, None
if (token_info.get('access_token', None) and
datetime.utcnow() < datetime.fromtimestamp(token_info.get('expires', 0))):
return True, token_info
return False, None
@cache_for(3600*24)
def verify_client(token):
if app.config['TESTING']:
return True, ['backend']
ALLOW_CLIENTS = app.config.get('ALLOW_CLIENTS', [])
client = base64.b64decode(token)
client_id, secret = client.split(':')
if client_id not in ALLOW_CLIENTS:
return False, None
api = get_backend_api()
scopes = api.client.scopes.post({'client_id': client_id, 'secret': secret})
api = slumber.API(app.config['ZAIH_BACKEND_API'],
auth=(client_id, secret),
append_slash=False)
scopes = api.client.scopes.get()
if scopes:
return True, list(set(scopes) & set(['login', 'register']))
return False, None
def verify_request():
authorization_type, token = get_authorization()
if authorization_type == 'Basic':
return verify_client(token)
elif authorization_type == 'Bearer':
return verify_token(token)
return False, None
```
#### File: src/services/weixin.py
```python
import hashlib
import base64
from Crypto.Cipher import AES
import json
from src.services.utils import Request
class WXAPPError(Exception):
def __init__(self, code, description):
self.code = code
self.description = description
def __str__(self):
return '%s: %s' % (self.code, self.description)
class WXAPPAPI(object):
    # HTTPS is used by default
host = "api.weixin.qq.com"
def __init__(self, appid=None, app_secret=None):
self.appid = appid
self.app_secret = app_secret
def pre_params(self):
return dict(secret=self.app_secret,
appid=self.appid)
def jscode2session(self, js_code):
path = '/sns/jscode2session'
params = self.pre_params()
params.update(js_code=js_code,
grant_type='authorization_code')
response = Request.get(self.host, path, params)
content = json.loads(response.content.decode())
if content.get('errcode', 0):
raise WXAPPError(content.get('errcode', 0),
content.get("errmsg", ""))
return content
def client_credential_for_access_token(self):
path = '/cgi-bin/token'
params = self.pre_params()
params.update(grant_type='client_credential')
response = Request.get(self.host, path, params)
content = json.loads(response.content.decode())
if content.get('errcode', 0):
raise WXAPPError(content.get('errcode', 0),
content.get("errmsg", ""))
return content
def getwxacode(self, access_token, page_path):
        # Endpoint A: limited quota (A and C share 100,000 codes in total)
path = '/wxa/getwxacode?access_token=%s' % access_token
params = {
'path': page_path,
}
response = Request.post(self.host, path, params)
try:
content = json.loads(response.content.decode())
if content.get('errcode', 0):
raise WXAPPError(content.get('errcode', 0),
content.get("errmsg", ""))
return content, None
except:
return base64.standard_b64encode(response.content), len(response.content)
def getwxacodeunlimit(self, access_token, scene):
        # Endpoint B: unlimited quota; scene is a string(32)
path = '/wxa/getwxacodeunlimit?access_token=%s' % access_token
params = {
'scene': scene,
}
response = Request.post(self.host, path, params)
try:
content = json.loads(response.content.decode())
if content.get('errcode', 0):
raise WXAPPError(content.get('errcode', 0),
content.get("errmsg", ""))
return content, None
except:
return base64.standard_b64encode(response.content), len(response.content)
def createwxaqrcode(self, access_token, page_path):
        # Endpoint C: limited quota (A and C share 100,000 codes in total)
path = '/cgi-bin/wxaapp/createwxaqrcode?access_token=%s' % access_token
params = {
'path': page_path,
}
response = Request.post(self.host, path, params)
try:
content = json.loads(response.content.decode())
if content.get('errcode', 0):
raise WXAPPError(content.get('errcode', 0),
content.get("errmsg", ""))
return content, None
except:
return base64.standard_b64encode(response.content), len(response.content)
class WXBizDataCrypt:
def __init__(self, appid, session_key):
self.app_id = appid
self.session_key = session_key
def decrypt(self, encryptedData, iv):
# base64 decode
sessionKey = base64.b64decode(self.session_key)
encryptedData = base64.b64decode(encryptedData)
iv = base64.b64decode(iv)
cipher = AES.new(sessionKey, AES.MODE_CBC, iv)
decrypted = json.loads(self._unpad(cipher.decrypt(encryptedData)))
if decrypted['watermark']['appid'] != self.app_id:
raise Exception('Invalid Buffer')
return decrypted
def check_raw_data(self, raw_data, session_key, signature):
return hashlib.sha1(raw_data + session_key).hexdigest() == signature
def _unpad(self, s):
return s[:-ord(s[len(s) - 1:])]
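# Login-flow sketch (illustrative; appid/secret come from the mini-program
# configuration and js_code/encrypted_data/iv from the client):
# api = WXAPPAPI(appid=appid, app_secret=secret)
# session_key = api.jscode2session(js_code=code).get('session_key')
# crypt = WXBizDataCrypt(appid, session_key)
# user_info = crypt.decrypt(encrypted_data, iv)  # raises if the watermark appid mismatches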
```
#### File: v1/api/code_token.py
```python
from __future__ import absolute_import, print_function
from datetime import timedelta
from flask import request, g
from zaih_core.database import db
from zaih_core.api_errors import Unauthorized
from zaih_core.ztime import now
from src.settings import Config
from src.services.weixin import WXAPPAPI, WXBizDataCrypt
from src.models import WXAuthentication, Account, OAuth2Token
from . import Resource
class CodeToken(Resource):
def post(self):
token = None
code = g.json.get('code')
if not code:
raise Unauthorized('invalid_wxapp_token')
        # Call the WeChat API to exchange the code for a session_key
        # 1. Obtain the session_key
appid = Config.WXAPP_APPID
secret = Config.WXAPP_SECRET
api = WXAPPAPI(appid=appid,
app_secret=secret)
session_info = api.jscode2session(js_code=code)
session_key = session_info.get('session_key')
        # 2. Decrypt the user info with the session_key
crypt = WXBizDataCrypt(appid, session_key)
iv = g.json.get('iv')
encrypted_data = g.json.get('encrypted_data')
user_info = crypt.decrypt(encrypted_data, iv)
        # 3. Extract the user info fields
openid = user_info.get('openId', None)
unionid = user_info.get('unionId', '')
nickname = user_info.get('nickName', '')
gender = user_info.get('gender', '')
city = user_info.get('city', '')
province = user_info.get('province', '')
country = user_info.get('country', '')
avatar_url = user_info.get('avatarUrl', '')
watermark = user_info.get('watermark', {})
appid = watermark.get('appid', '')
timestamp = watermark.get('timestamp', None)
        # 4. Validate the user info fields
if not openid:
raise Unauthorized('invalid_wxapp_token:open')
        # 5. Create the Account
auth = WXAuthentication.query.filter_by(
openid=openid,
openid_type=WXAuthentication.OPENID_TYPE_XCX,
).first()
        # First login
if not auth:
account = Account.create(
nickname=nickname,
_avatar=avatar_url,
)
wxauth = WXAuthentication(
account_id = account.id,
unionid = unionid,
openid = openid,
openid_type = WXAuthentication.OPENID_TYPE_XCX,
)
db.session.add(wxauth)
        # Returning user: re-authorize
else:
account = Account.query.get(auth.account_id)
db.session.commit()
token = OAuth2Token.get_or_create(
WXAuthentication.OPENID_TYPE_XCX,
account_id=account.id,
session_key=session_key,
)
return token, 200, None
```
#### File: v1/api/hello.py
```python
from __future__ import absolute_import, print_function
from flask import request, g
from . import Resource
from .. import schemas
class Hello(Resource):
def get(self):
hello = "Hello, Ladder Tournament!"
return hello, 200, None
``` |
{
"source": "jizhouli/py-logger",
"score": 3
} |
#### File: jizhouli/py-logger/pylogger.py
```python
import os
import sys
import logging
#from logging import handlers
from logging.handlers import RotatingFileHandler
class PyLogger(object):
'''
    an easy-to-use logging wrapper
'''
def __init__(self):
self.logger = None
self.handler = None
self.log_file_path = ''
self.rotate_max_byte = 0
self.rotate_backup_count = 0
self.logger = logging.getLogger()
def __str__(self):
s = 'PyLogger config reloaded\n'
s += 'log file path: %s\n' % self.log_file_path
s += 'rotate config: maxbyte %s, backupcount %s\n' % (self.rotate_max_byte, self.rotate_backup_count)
return s
def get_file_path(self):
path = sys.path[0]
if os.path.isdir(path):
return path
elif os.path.isfile(path):
return os.path.dirname(path)
def config(self, log_dir='log', log_file='run.log', rotate_max_byte=1024*1024*256, rotate_backup_count=10):
self.rotate_max_byte = rotate_max_byte
self.rotate_backup_count = rotate_backup_count
# log directory path
log_dir_path = '/'.join([self.get_file_path(), log_dir])
if not os.path.exists(log_dir_path):
os.mkdir(log_dir_path)
# log file path
self.log_file_path = '/'.join([log_dir_path, log_file])
# IMPORTANT! Manually re-assign the handler according to stackoverflow solution
# http://stackoverflow.com/questions/5296130/restart-logging-to-a-new-file-python
if self.logger and self.handler:
self.logger.handlers[0].stream.close()
self.logger.removeHandler(self.logger.handlers[0])
# create file handler and not set level
self.handler = RotatingFileHandler(self.log_file_path,
maxBytes=self.rotate_max_byte,
backupCount=self.rotate_backup_count)
formatter = logging.Formatter("%(asctime)s - %(levelname)s : %(message)s")
self.handler.setFormatter(formatter)
# add handler to logger
self.logger.addHandler(self.handler)
# set output level
self.logger.setLevel(logging.INFO)
# output logger summary
self.logger.info(str(self).encode('utf8'))
def debug(self, output):
self.logger.debug(output.encode('utf8'))
def info(self, output):
self.logger.info(output.encode('utf8'))
def warn(self, output):
self.logger.warn(output.encode('utf8'))
def error(self, output):
self.logger.error(output.encode('utf8'))
def critical(self, output):
self.logger.critical(output.encode('utf8'))
# initialize as singleton instance
logger = PyLogger()
logger.config()
if __name__ == '__main__':
logger.info('hello py-logger')
logger.config(rotate_max_byte=1000*100, rotate_backup_count=10)
for i in range(500000):
        # the logger configuration can be changed at any time, as many times as you want
if i == 250000:
logger.config(log_file='new.log', rotate_max_byte=1000*1000, rotate_backup_count=5)
logger.info('%s this is a test log content so dont be aware' % i)
``` |
{
"source": "jizi19911101/HttpRunner",
"score": 3
} |
#### File: HttpRunner/httprunner/validator.py
```python
import sys
import traceback
from loguru import logger
from httprunner import exceptions, parser
class Validator(object):
"""Validate tests
Attributes:
validation_results (dict): store validation results,
including validate_extractor and validate_script.
"""
def __init__(self, session_context, resp_obj):
""" initialize a Validator for each teststep (API request)
Args:
session_context: HttpRunner session context
resp_obj: ResponseObject instance
"""
self.session_context = session_context
self.resp_obj = resp_obj
self.validation_results = {}
def __eval_validator_check(self, check_item):
""" evaluate check item in validator.
Args:
check_item: check_item should only be the following 5 formats:
1, variable reference, e.g. $token
2, function reference, e.g. ${is_status_code_200($status_code)}
3, dict or list, maybe containing variable/function reference, e.g. {"var": "$abc"}
4, string joined by delimiter. e.g. "status_code", "headers.content-type"
5, regex string, e.g. "LB[\d]*(.*)RB[\d]*"
"""
if isinstance(check_item, (dict, list)) \
or isinstance(check_item, parser.LazyString):
# format 1/2/3
check_value = self.session_context.eval_content(check_item)
else:
# format 4/5
check_value = self.resp_obj.extract_field(check_item)
return check_value
def __eval_validator_expect(self, expect_item):
""" evaluate expect item in validator.
Args:
expect_item: expect_item should only be in 2 types:
1, variable reference, e.g. $expect_status_code
2, actual value, e.g. 200
"""
expect_value = self.session_context.eval_content(expect_item)
return expect_value
def validate_script(self, script):
""" make validation with python script
"""
result = {
"validate_script": "<br/>".join(script),
"check_result": "pass",
"output": ""
}
script = "\n ".join(script)
code = f"""
# encoding: utf-8
def run_validate_script():
{script}
"""
variables = {
"status_code": self.resp_obj.status_code,
"response_json": self.resp_obj.json,
"response": self.resp_obj
}
variables.update(self.session_context.test_variables_mapping)
variables.update(globals())
try:
exec(code, variables)
except SyntaxError as ex:
logger.warning(f"SyntaxError in python validate script: {ex}")
result["check_result"] = "fail"
result["output"] = "<br/>".join([
f"ErrorMessage: {ex.msg}",
f"ErrorLine: {ex.lineno}",
f"ErrorText: {ex.text}"
])
return result
try:
# run python validate script
variables["run_validate_script"]()
except Exception as ex:
logger.warning(f"run python validate script failed: {ex}")
result["check_result"] = "fail"
_type, _value, _tb = sys.exc_info()
_lineno = -1
if _tb.tb_next:
_lineno = _tb.tb_next.tb_lineno
line_no = _lineno - 4
elif len(traceback.extract_tb(_tb)) > 0:
# filename, lineno, name, line
_, _lineno, _, _ = traceback.extract_tb(_tb)[-1]
line_no = _lineno - 4
else:
line_no = "N/A"
result["output"] = "<br/>".join([
f"ErrorType: {_type.__name__}",
f"ErrorLine: {line_no}"
])
return result
def validate(self, validators):
""" make validation with comparators
"""
self.validation_results = {}
if not validators:
return
logger.debug("start to validate.")
validate_pass = True
failures = []
for validator in validators:
if isinstance(validator, dict) and validator.get("type") == "python_script":
script = self.session_context.eval_content(validator["script"])
result = self.validate_script(script)
if result["check_result"] == "fail":
validate_pass = False
failures.append(result["output"])
self.validation_results["validate_script"] = result
continue
if "validate_extractor" not in self.validation_results:
self.validation_results["validate_extractor"] = []
# validator should be LazyFunction object
if not isinstance(validator, parser.LazyFunction):
raise exceptions.ValidationFailure(
f"validator should be parsed first: {validators}")
# evaluate validator args with context variable mapping.
validator_args = validator.get_args()
check_item, expect_item = validator_args
check_value = self.__eval_validator_check(check_item)
expect_value = self.__eval_validator_expect(expect_item)
validator.update_args([check_value, expect_value])
comparator = validator.func_name
validator_dict = {
"comparator": comparator,
"check": check_item,
"check_value": check_value,
"expect": expect_item,
"expect_value": expect_value
}
validate_msg = f"\nvalidate: {check_item} {comparator} {expect_value}({type(expect_value).__name__})"
try:
validator.to_value(self.session_context.test_variables_mapping)
validator_dict["check_result"] = "pass"
validate_msg += "\t==> pass"
logger.debug(validate_msg)
except (AssertionError, TypeError):
validate_pass = False
validator_dict["check_result"] = "fail"
validate_msg += "\t==> fail"
validate_msg += "\n{}({}) {} {}({})".format(
check_value,
type(check_value).__name__,
comparator,
expect_value,
type(expect_value).__name__
)
logger.error(validate_msg)
failures.append(validate_msg)
self.validation_results["validate_extractor"].append(validator_dict)
# restore validator args, in case of running multiple times
validator.update_args(validator_args)
if not validate_pass:
failures_string = "\n".join([failure for failure in failures])
raise exceptions.ValidationFailure(failures_string)
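# Illustrative validator shapes handled above (inferred from validate(), not
# from an official schema reference):
#   {"type": "python_script", "script": ["assert status_code == 200"]}  # script validator
#   a parser.LazyFunction such as eq(check_item, expect_item)           # comparator validator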
``` |
{
"source": "jizizr/PagerMaid_Plugins",
"score": 2
} |
#### File: jizizr/PagerMaid_Plugins/antichannelpin.py
```python
from telethon.errors import ChatAdminRequiredError
from telethon.tl.types import Channel
from asyncio import sleep
from pagermaid import redis, log, redis_status
from pagermaid.utils import lang, alias_command
from pagermaid.listener import listener
@listener(is_plugin=False, outgoing=True, command=alias_command('antichannelpin'),
description='开启对话的自动取消频道置顶功能,需要 Redis',
parameters="<true|false|status>")
async def antichannelpin(context):
if not redis_status():
await context.edit(f"{lang('error_prefix')}{lang('redis_dis')}")
return
if len(context.parameter) != 1:
await context.edit(f"{lang('error_prefix')}{lang('arg_error')}")
return
myself = await context.client.get_me()
self_user_id = myself.id
if context.parameter[0] == "true":
if context.chat_id == self_user_id:
await context.edit(lang('ghost_e_mark'))
return
redis.set("antichannelpin." + str(context.chat_id), "true")
await context.edit(f"已成功开启群组 {str(context.chat_id)} 的自动取消频道置顶功能。")
await log(f"已成功开启群组 {str(context.chat_id)} 的自动取消频道置顶功能。")
elif context.parameter[0] == "false":
if context.chat_id == self_user_id:
await context.edit(lang('ghost_e_mark'))
return
try:
redis.delete("antichannelpin." + str(context.chat_id))
except:
await context.edit('emm...当前对话不存在于自动取消频道置顶功能列表中。')
return
await context.edit(f"已成功关闭群组 {str(context.chat_id)} 的自动取消频道置顶功能。")
await log(f"已成功关闭群组 {str(context.chat_id)} 的自动取消频道置顶功能。")
elif context.parameter[0] == "status":
if redis.get("antichannelpin." + str(context.chat_id)):
await context.edit('当前对话存在于自动取消频道置顶功能列表中。')
else:
await context.edit('当前对话不存在于自动取消频道置顶功能列表中。')
else:
await context.edit(f"{lang('error_prefix')}{lang('arg_error')}")
@listener(is_plugin=False, incoming=True, ignore_edited=True)
async def unpin_link_channel_message(context):
""" Event handler to unpin linked channel messages. """
if not redis_status():
return
if not redis.get("antichannelpin." + str(context.chat_id)):
return
try:
if not isinstance(context.sender, Channel):
return
except:
return
await sleep(1)
try:
await context.unpin()
except ChatAdminRequiredError:
redis.delete("antichannelpin." + str(context.chat_id))
except:
pass
```
#### File: jizizr/PagerMaid_Plugins/da.py
```python
from asyncio import sleep
from pagermaid import log
from pagermaid.listener import listener
from pagermaid.utils import alias_command
from telethon.errors import PeerFloodError
@listener(is_plugin=True, outgoing=True, command=alias_command("da"),
description="以此命令删除所有消息。(非群组管理员只删除自己的消息)",
parameters="<text>")
async def da(context):
if len(context.parameter) > 2 or len(context.parameter) == 0:
await context.edit("\n呜呜呜,请执行 `-da true` 来删除所有消息。")
return
if context.parameter[0] != "true":
await context.edit("\n呜呜呜,请执行 `-da true` 来删除所有消息。")
return
await context.edit('正在删除所有消息 . . .')
input_chat = await context.get_input_chat()
messages = []
count = 0
async for message in context.client.iter_messages(input_chat, min_id=1):
messages.append(message)
count += 1
messages.append(1)
if len(messages) == 100:
await context.client.delete_messages(input_chat, messages)
messages = []
if messages:
await context.client.delete_messages(input_chat, messages)
await log(f"批量删除了 {str(count)} 条消息。")
try:
notification = await send_prune_notify(context, count)
except:
return
await sleep(.5)
await notification.delete()
async def send_prune_notify(context, count):
return await context.client.send_message(
context.chat_id,
"批量删除了 "
+ str(count)
+ " 条消息。"
)
```
#### File: jizizr/PagerMaid_Plugins/deletemsgsfromgrps.py
```python
from asyncio import sleep
from telethon.tl.custom.message import Message
from pagermaid.listener import listener
from pagermaid.utils import alias_command
@listener(is_plugin=True, outgoing=True, command=alias_command("dmfg"))
async def dmfg(context: Message) -> None:
if len(context.parameter) == 0:
await context.edit('您没有输入参数.\n`-dmfg group` 删除所有群内发言\n`-dmfg private` 删除所有与人的对话消息')
return
if context.parameter[0] == 'group':
await context.edit('准备中...')
count = 1000000
count_buffer = 0
await context.edit('执行中...')
async for dialog in context.client.iter_dialogs():
if dialog.is_channel and not dialog.is_group:
continue
if dialog.id > 0:
continue
async for message in context.client.iter_messages(dialog.id, from_user="me"):
if dialog.id == context.chat_id and message.id == context.id:
continue
if count_buffer == count:
break
await message.delete()
count_buffer += 1
await context.edit('成功!')
await sleep(5)
await context.delete()
elif context.parameter[0] == 'private':
await context.edit('准备中...')
count = 1000000
count_buffer = 0
await context.edit('执行中...')
async for dialog in context.client.iter_dialogs():
if dialog.id > 0:
async for message in context.client.iter_messages(dialog.id, from_user="me"):
if dialog.id == context.chat_id and message.id == context.id:
continue
if count_buffer == count:
break
await message.delete()
count_buffer += 1
await context.edit('成功!')
await sleep(5)
await context.delete()
else:
await context.edit('您输入的参数错误.\n`-dmfg group` 删除所有群内发言\n`-dmfg private` 删除所有与人的对话消息')
```
#### File: jizizr/PagerMaid_Plugins/eat.py
```python
from PIL import Image
from os.path import exists
from os import remove
from requests import get
from random import randint
from telethon.events import NewMessage
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.functions.channels import GetFullChannelRequest
from telethon.tl.patched import Message
from telethon.tl.types import Channel, MessageEntityMentionName, MessageEntityPhone, MessageEntityBotCommand
from telethon.errors.rpcerrorlist import ChatSendStickersForbiddenError
from struct import error as StructError
from pagermaid.listener import listener
from pagermaid.utils import alias_command
from pagermaid import redis, config, bot, user_id
from collections import defaultdict
import json
try:
git_source = config['git_source']
except:
git_source = "https://raw.githubusercontent.com/Xtao-Labs/PagerMaid_Plugins/master/"
positions = {
"1": [297, 288],
"2": [85, 368],
"3": [127, 105],
"4": [76, 325],
"5": [256, 160],
"6": [298, 22],
}
notifyStrArr = {
"6": "踢人",
}
extensionConfig = {}
max_number = len(positions)
configFilePath = 'plugins/eat/config.json'
configFileRemoteUrlKey = "eat.configFileRemoteUrl"
async def get_full_id(object_n):
if isinstance(object_n, Channel):
return (await bot(GetFullChannelRequest(object_n.id))).full_chat.id # noqa
elif not object_n:
return user_id
return (await bot(GetFullUserRequest(object_n.id))).user.id
async def eat_it(context, uid, base, mask, photo, number, layer=0):
mask_size = mask.size
photo_size = photo.size
if mask_size[0] < photo_size[0] and mask_size[1] < photo_size[1]:
scale = photo_size[1] / mask_size[1]
photo = photo.resize((int(photo_size[0] / scale), int(photo_size[1] / scale)), Image.LANCZOS)
photo = photo.crop((0, 0, mask_size[0], mask_size[1]))
mask1 = Image.new('RGBA', mask_size)
mask1.paste(photo, mask=mask)
numberPosition = positions[str(number)]
isSwap = False
    # Process the avatar and paste it at the template position on a canvas the same size as the background
try:
isSwap = extensionConfig[str(number)]["isSwap"]
except:
pass
if isSwap:
photoBg = Image.new('RGBA', base.size)
photoBg.paste(mask1, (numberPosition[0], numberPosition[1]), mask1)
photoBg.paste(base, (0, 0), base)
base = photoBg
else:
base.paste(mask1, (numberPosition[0], numberPosition[1]), mask1)
    # Check whether the template has a second avatar slot
isContinue = len(numberPosition) > 2 and layer == 0
if isContinue:
await context.client.download_profile_photo(
uid,
"plugins/eat/" + str(uid) + ".jpg",
download_big=True
)
try:
markImg = Image.open("plugins/eat/" + str(uid) + ".jpg")
maskImg = Image.open("plugins/eat/mask" + str(numberPosition[2]) + ".png")
except:
await context.edit(f"图片模版加载出错,请检查并更新配置:mask{str(numberPosition[2])}.png")
return base
base = await eat_it(context, uid, base, maskImg, markImg, numberPosition[2], layer + 1)
temp = base.size[0] if base.size[0] > base.size[1] else base.size[1]
if temp != 512:
scale = 512 / temp
base = base.resize((int(base.size[0] * scale), int(base.size[1] * scale)), Image.LANCZOS)
return base
async def updateConfig(context):
configFileRemoteUrl = redis.get(configFileRemoteUrlKey)
if configFileRemoteUrl:
if downloadFileFromUrl(configFileRemoteUrl, configFilePath) != 0:
redis.set(configFileRemoteUrlKey, configFileRemoteUrl)
return -1
else:
return await loadConfigFile(context, True)
return 0
def downloadFileFromUrl(url, filepath):
try:
re = get(url)
with open(filepath, 'wb') as ms:
ms.write(re.content)
except:
return -1
return 0
async def loadConfigFile(context, forceDownload=False):
global positions, notifyStrArr, extensionConfig
try:
with open(configFilePath, 'r', encoding='utf8') as cf:
            # Load the downloaded config file
remoteConfigJson = json.load(cf)
# positionsStr = json.dumps(positions)
# positions = json.loads(positionsStr)
            # Read positions from the config file
positionsStr = json.dumps(remoteConfigJson["positions"])
data = json.loads(positionsStr)
            # Merge with the preset positions
positions = mergeDict(positions, data)
            # Read notifies from the config file
data = json.loads(json.dumps(remoteConfigJson["notifies"]))
            # Merge with the preset notify strings
notifyStrArr = mergeDict(notifyStrArr, data)
            # Read extensionConfig from the config file
try:
data = json.loads(json.dumps(remoteConfigJson["extensionConfig"]))
                # Merge with the preset extensionConfig
extensionConfig = mergeDict(extensionConfig, data)
except:
                # extensionConfig was added later; ignore errors so older config files still load
pass
            # Read needDownloadFileList from the config file
data = json.loads(json.dumps(remoteConfigJson["needDownloadFileList"]))
            # Download every file in the list
for fileurl in data:
try:
fsplit = fileurl.split("/")
filePath = f"plugins/eat/{fsplit[len(fsplit) - 1]}"
if not exists(filePath) or forceDownload:
downloadFileFromUrl(fileurl, filePath)
except:
await context.edit(f"下载文件异常,url:{fileurl}")
return -1
except:
return -1
return 0
def mergeDict(d1, d2):
dd = defaultdict(list)
for d in (d1, d2):
for key, value in d.items():
dd[key] = value
return dict(dd)
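# Example (illustrative): keys from the second dict override the first, so
#   mergeDict({"1": [0, 0]}, {"1": [297, 288], "7": [10, 20]})
# returns {"1": [297, 288], "7": [10, 20]}.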
async def downloadFileByIds(ids, context):
idsStr = f',{",".join(ids)},'
try:
with open(configFilePath, 'r', encoding='utf8') as cf:
            # Load the downloaded config file
remoteConfigJson = json.load(cf)
data = json.loads(json.dumps(remoteConfigJson["needDownloadFileList"]))
            # Download the files in the list
sucSet = set()
failSet = set()
for fileurl in data:
try:
fsplit = fileurl.split("/")
fileFullName = fsplit[len(fsplit) - 1]
fileName = fileFullName.split(".")[0].replace("eat", "").replace("mask", "")
if f',{fileName},' in idsStr:
filePath = f"plugins/eat/{fileFullName}"
if downloadFileFromUrl(fileurl, filePath) == 0:
sucSet.add(fileName)
else:
failSet.add(fileName)
except:
failSet.add(fileName)
await context.edit(f"下载文件异常,url:{fileurl}")
notifyStr = "更新模版完成"
if len(sucSet) > 0:
notifyStr = f'{notifyStr}\n成功模版如下:{",".join(sucSet)}'
if len(failSet) > 0:
notifyStr = f'{notifyStr}\n失败模版如下:{",".join(failSet)}'
await context.edit(notifyStr)
except:
await context.edit("更新下载模版图片失败,请确认配置文件是否正确")
@listener(is_plugin=True, outgoing=True, command=alias_command("eat"),
description="生成一张 吃头像 图片\n"
"可选:当第二个参数是数字时,读取预存的配置;\n\n"
"当第二个参数是.开头时,头像旋转180°,并且判断r后面是数字则读取对应的配置生成\n\n"
"当第二个参数是/开头时,在/后面加url则从url下载配置文件保存到本地,如果就一个/,则直接更新配置文件,删除则是/delete;或者/后面加模版id可以手动更新指定模版配置\n\n"
"当第二个参数是-开头时,在-后面加上模版id,即可设置默认模版-eat直接使用该模版,删除默认模版是-eat -\n\n"
"当第二个参数是!或者!开头时,列出当前可用模版",
parameters="<username/uid> [随意内容]")
async def eat(context: NewMessage.Event):
assert isinstance(context.message, Message)
if len(context.parameter) > 2:
await context.edit("出错了呜呜呜 ~ 无效的参数。")
return
diu_round = False
from_user = user_object = context.sender
from_user_id = await get_full_id(from_user)
if context.reply_to_msg_id:
reply_message = await context.get_reply_message()
try:
user_id = reply_message.sender_id
except AttributeError:
await context.edit("出错了呜呜呜 ~ 无效的参数。")
return
if user_id > 0:
target_user = await context.client(GetFullUserRequest(user_id))
target_user_id = target_user.user.id
else:
target_user = await context.client(GetFullChannelRequest(user_id))
target_user_id = target_user.full_chat.id
else:
user_raw = ""
if len(context.parameter) == 1 or len(context.parameter) == 2:
user_raw = user = context.parameter[0]
if user.isnumeric():
user = int(user)
else:
user = from_user_id
if context.message.entities is not None:
if isinstance(context.message.entities[0], MessageEntityMentionName):
target_user = await context.client(GetFullUserRequest(context.message.entities[0].user_id))
target_user_id = target_user.user.id
elif isinstance(context.message.entities[0], MessageEntityPhone):
if user > 0:
target_user = await context.client(GetFullUserRequest(user))
target_user_id = target_user.user.id
else:
target_user = await context.client(GetFullChannelRequest(user))
target_user_id = target_user.full_chat.id
elif isinstance(context.message.entities[0], MessageEntityBotCommand):
target_user = await context.client(GetFullUserRequest(user_object.id))
target_user_id = target_user.user.id
else:
return await context.edit("出错了呜呜呜 ~ 参数错误。")
elif user_raw[:1] in [".", "/", "-", "!"]:
target_user_id = await get_full_id(from_user)
else:
try:
user_object = await context.client.get_entity(user)
target_user_id = await get_full_id(user_object)
except (TypeError, ValueError, OverflowError, StructError) as exception:
if str(exception).startswith("Cannot find any entity corresponding to"):
await context.edit("出错了呜呜呜 ~ 指定的用户不存在。")
return
if str(exception).startswith("No user has"):
await context.edit("出错了呜呜呜 ~ 指定的道纹不存在。")
return
if str(exception).startswith("Could not find the input entity for") or isinstance(exception,
StructError):
await context.edit("出错了呜呜呜 ~ 无法通过此 UserID 找到对应的用户。")
return
if isinstance(exception, OverflowError):
await context.edit("出错了呜呜呜 ~ 指定的 UserID 已超出长度限制,您确定输对了?")
return
raise exception
photo = await context.client.download_profile_photo(
target_user_id,
"plugins/eat/" + str(target_user_id) + ".jpg",
download_big=True
)
reply_to = context.message.reply_to_msg_id
if exists("plugins/eat/" + str(target_user_id) + ".jpg"):
for num in range(1, max_number + 1):
print(num)
if not exists('plugins/eat/eat' + str(num) + '.png'):
re = get(f'{git_source}eat/eat' + str(num) + '.png')
with open('plugins/eat/eat' + str(num) + '.png', 'wb') as bg:
bg.write(re.content)
if not exists('plugins/eat/mask' + str(num) + '.png'):
re = get(f'{git_source}eat/mask' + str(num) + '.png')
with open('plugins/eat/mask' + str(num) + '.png', 'wb') as ms:
ms.write(re.content)
number = randint(1, max_number)
try:
p1 = 0
p2 = 0
if len(context.parameter) == 1:
p1 = context.parameter[0]
if p1[0] == ".":
diu_round = True
if len(p1) > 1:
try:
p2 = int("".join(p1[1:]))
except:
                            # the parameter may also contain letters
p2 = "".join(p1[1:])
elif p1[0] == "-":
if len(p1) > 1:
try:
p2 = int("".join(p1[1:]))
except:
                            # the parameter may also contain letters
p2 = "".join(p1[1:])
if p2:
redis.set("eat.default-config", p2)
await context.edit(f"已经设置默认配置为:{p2}")
else:
redis.delete("eat.default-config")
await context.edit(f"已经清空默认配置")
return
elif p1[0] == "/":
await context.edit(f"正在更新远程配置文件")
if len(p1) > 1:
                        # Extract the url from the parameter
p2 = "".join(p1[1:])
if p2 == "delete":
redis.delete(configFileRemoteUrlKey)
await context.edit(f"已清空远程配置文件url")
return
if p2.startswith("http"):
                            # Download the config file
if downloadFileFromUrl(p2, configFilePath) != 0:
await context.edit(f"下载配置文件异常,请确认url是否正确")
return
else:
                                # Download succeeded; load the config file
redis.set(configFileRemoteUrlKey, p2)
if await loadConfigFile(context, True) != 0:
await context.edit(f"加载配置文件异常,请确认从远程下载的配置文件格式是否正确")
return
else:
await context.edit(f"下载并加载配置文件成功")
else:
                            # Update the template configs for the given template ids; multiple ids may be separated by "," or ","
                            # Check whether redis has stored a remote config url
splitStr = ","
if "," in p2:
splitStr = ","
ids = p2.split(splitStr)
if len(ids) > 0:
                                # Download the config file
configFileRemoteUrl = redis.get(configFileRemoteUrlKey)
if configFileRemoteUrl:
if downloadFileFromUrl(configFileRemoteUrl, configFilePath) != 0:
await context.edit(f"下载配置文件异常,请确认url是否正确")
return
else:
                                        # Download succeeded; update the corresponding templates
if await loadConfigFile(context) != 0:
await context.edit(f"加载配置文件异常,请确认从远程下载的配置文件格式是否正确")
return
else:
await downloadFileByIds(ids, context)
else:
await context.edit(f"你没有订阅远程配置文件,更新个🔨")
else:
                        # No url given; refresh directly from the saved remote config
if await updateConfig(context) != 0:
await context.edit(f"更新配置文件异常,请确认是否订阅远程配置文件,或从远程下载的配置文件格式是否正确")
return
else:
await context.edit(f"从远程更新配置文件成功")
return
elif p1[0] == "!" or p1[0] == "!":
                    # Load the config
if exists(configFilePath):
if await loadConfigFile(context) != 0:
await context.edit(f"加载配置文件异常,请确认从远程下载的配置文件格式是否正确")
return
txt = ""
if len(positions) > 0:
noShowList = []
for key in positions:
txt = f"{txt},{key}"
if len(positions[key]) > 2:
noShowList.append(positions[key][2])
for key in noShowList:
txt = txt.replace(f",{key}", "")
if txt != "":
txt = txt[1:]
await context.edit(f"目前已有的模版列表如下:\n{txt}")
return
defaultConfig = redis.get("eat.default-config")
if isinstance(p2, str):
number = p2
elif isinstance(p2, int) and p2 > 0:
number = int(p2)
elif not diu_round and ((isinstance(p1, int) and int(p1) > 0) or isinstance(p1, str)):
try:
number = int(p1)
except:
number = p1
elif defaultConfig:
try:
defaultConfig = defaultConfig.decode()
number = int(defaultConfig)
except:
number = str(defaultConfig)
                # The default config may also specify an upside-down avatar (leading '.')
if number.startswith("."):
diu_round = True
number = number[1:]
except:
number = randint(1, max_number)
    # Load the config
if exists(configFilePath):
if await loadConfigFile(context) != 0:
await context.edit(f"加载配置文件异常,请确认从远程下载的配置文件格式是否正确")
return
try:
notifyStr = notifyStrArr[str(number)]
except:
notifyStr = "吃头像"
await context.edit(f"正在生成 {notifyStr} 图片中 . . .")
markImg = Image.open("plugins/eat/" + str(target_user_id) + ".jpg")
try:
eatImg = Image.open("plugins/eat/eat" + str(number) + ".png")
maskImg = Image.open("plugins/eat/mask" + str(number) + ".png")
except:
await context.edit(f"图片模版加载出错,请检查并更新配置:{str(number)}")
return
if diu_round:
markImg = markImg.rotate(180) # 对图片进行旋转
try:
number = str(number)
except:
pass
result = await eat_it(context, from_user_id, eatImg, maskImg, markImg, number)
result.save('plugins/eat/eat.webp')
target_file = await context.client.upload_file("plugins/eat/eat.webp")
try:
remove("plugins/eat/" + str(target_user_id) + ".jpg")
remove("plugins/eat/" + str(target_user_id) + ".png")
remove("plugins/eat/" + str(from_user_id) + ".jpg")
remove("plugins/eat/" + str(from_user_id) + ".png")
remove("plugins/eat/eat.webp")
remove(photo)
except:
pass
else:
await context.edit("此用户未设置头像或头像对您不可见。")
return
if reply_to:
try:
await context.client.send_file(
context.chat_id,
target_file,
link_preview=False,
force_document=False,
reply_to=reply_to
)
await context.delete()
remove("plugins/eat/eat.webp")
try:
remove(photo)
except:
pass
return
except TypeError:
await context.edit("此用户未设置头像或头像对您不可见。")
except ChatSendStickersForbiddenError:
await context.edit("此群组无法发送贴纸。")
else:
try:
await context.client.send_file(
context.chat_id,
target_file,
link_preview=False,
force_document=False
)
await context.delete()
remove("plugins/eat/eat.webp")
try:
remove(photo)
except:
pass
return
except TypeError:
await context.edit("此用户未设置头像或头像对您不可见。")
except ChatSendStickersForbiddenError:
await context.edit("此群组无法发送贴纸。")
```
#### File: jizizr/PagerMaid_Plugins/epic.py
```python
import os
import sys
from requests import post, get
from pytz import timezone
from datetime import datetime
from pagermaid.listener import listener
from pagermaid.utils import alias_command
@listener(is_plugin=True, outgoing=True, command=alias_command("epic"),
description="获取 Epic 喜加一信息")
async def epic(context):
await context.edit("获取中 . . .")
epic_url = "https://www.epicgames.com/store/backend/graphql-proxy"
headers = {
"Referer": "https://www.epicgames.com/store/zh-CN/",
"Content-Type": "application/json; charset=utf-8",
}
data = {
"query":
"query searchStoreQuery($allowCountries: String, $category: String, $count: Int, $country: String!, "
"$keywords: String, $locale: String, $namespace: String, $sortBy: String, $sortDir: String, $start: Int, "
"$tag: String, $withPrice: Boolean = false, $withPromotions: Boolean = false) {\n Catalog {\n "
"searchStore(allowCountries: $allowCountries, category: $category, count: $count, country: $country, "
"keywords: $keywords, locale: $locale, namespace: $namespace, sortBy: $sortBy, sortDir: $sortDir, "
"start: $start, tag: $tag) {\n elements {\n title\n id\n namespace\n description\n effectiveDate\n "
"keyImages {\n type\n url\n }\n seller {\n id\n name\n }\n productSlug\n urlSlug\n url\n items {\n id\n "
"namespace\n }\n customAttributes {\n key\n value\n }\n categories {\n path\n }\n price(country: "
"$country) @include(if: $withPrice) {\n totalPrice {\n discountPrice\n originalPrice\n voucherDiscount\n "
"discount\n currencyCode\n currencyInfo {\n decimals\n }\n fmtPrice(locale: $locale) {\n originalPrice\n "
"discountPrice\n intermediatePrice\n }\n }\n lineOffers {\n appliedRules {\n id\n endDate\n "
"discountSetting {\n discountType\n }\n }\n }\n }\n promotions(category: $category) @include(if: "
"$withPromotions) {\n promotionalOffers {\n promotionalOffers {\n startDate\n endDate\n discountSetting {"
"\n discountType\n discountPercentage\n }\n }\n }\n upcomingPromotionalOffers {\n promotionalOffers {\n "
"startDate\n endDate\n discountSetting {\n discountType\n discountPercentage\n }\n }\n }\n }\n }\n paging "
"{\n count\n total\n }\n }\n }\n}\n",
"variables": {
"allowCountries": "CN",
"category": "freegames",
"count": 1000,
"country": "CN",
"locale": "zh-CN",
"sortBy": "effectiveDate",
"sortDir": "asc",
"withPrice": True,
"withPromotions": True
}
}
try:
res = post(epic_url, headers=headers, json=data, timeout=10.0)
resJson = res.json()
games = resJson["data"]["Catalog"]["searchStore"]["elements"]
except Exception as e:
return await context.edit("请求 Epic Store API 错误:" + str(sys.exc_info()[0]) + "\n" + str(e))
if not games:
return await context.edit("Epic 可能又抽风啦,请稍后再试(")
else:
for game in games:
try:
game_name = game["title"]
game_corp = game["seller"]["name"]
game_price = game["price"]["totalPrice"]["fmtPrice"]["originalPrice"]
game_promotions = game["promotions"]["promotionalOffers"]
upcoming_promotions = game["promotions"]["upcomingPromotionalOffers"]
if not game_promotions and upcoming_promotions:
                    continue  # promotion has not started yet, skip
elif game["price"]["totalPrice"]["discountPrice"] != 0:
                    continue  # not a free game, skip
else:
                    game_thumbnail, game_dev, game_pub = None, None, None
                    for image in game["keyImages"]:
                        if image["type"] == "Thumbnail":
                            game_thumbnail = image["url"]
                    for pair in game["customAttributes"]:
                        if pair["key"] == "developerName":
                            game_dev = pair["value"]
                        elif pair["key"] == "publisherName":
                            game_pub = pair["value"]
                    # fall back to the seller name when the attributes are missing
                    game_dev = game_dev or game_corp
                    game_pub = game_pub or game_corp
game_desp = game["description"]
end_date_iso = game["promotions"]["promotionalOffers"][0]["promotionalOffers"][0]["endDate"][:-1]
end_date = datetime.fromisoformat(end_date_iso).replace(
tzinfo=timezone('UTC')).astimezone(timezone('Asia/Chongqing')).strftime("%Y-%m-%d %H:%M:%S")
                    # the API response does not include the store page URL, so it is built manually; a few games may 404
game_url = f"https://www.epicgames.com/store/zh-CN/p/{game['productSlug'].replace('/home', '')}"
msg = f"**FREE now :: {game_name} ({game_price})**\n\n{game_desp}\n\n"
msg += f"游戏由 {game_pub} 发售," if game_dev == game_pub else f"游戏由 {game_dev} 开发、{game_pub} 出版,"
msg += f"将在 **{end_date}** 结束免费游玩,戳下面的链接领取吧~\n{game_url}"
if game_thumbnail:
r = get(game_thumbnail, timeout=10.0)
with open("epic.jpg", "wb") as code:
code.write(r.content)
await context.client.send_file(context.chat_id, "epic.jpg", caption=msg, force_document=False)
else:
await context.respond(msg)
except (TypeError, IndexError):
pass
except Exception as e:
await context.respond("获取 Epic 信息错误:" + str(sys.exc_info()[0]) + "\n" + str(e))
try:
os.remove("epic.jpg")
except FileNotFoundError:
pass
await context.delete()
```
#### File: jizizr/PagerMaid_Plugins/groupindex.py
```python
import datetime
from pytz import timezone
from telethon.tl.functions.users import GetFullUserRequest
from pagermaid.utils import alias_command
from pagermaid.listener import listener
@listener(is_plugin=True, outgoing=True, command=alias_command("groupindex"),
description="获取群组当日活跃数据")
async def group_index(context):
if not context.is_group:
await context.edit('请在群组中运行。')
return
    # fetch group info
try:
title = context.chat.title
except AttributeError:
await context.edit('读取群组信息失败。')
return
end_id = context.id
text = f'以下是群组 {title} 今日的活跃数据:\n'
await context.edit('正在分析群组数据中...(1/3)')
    # format the date
now = datetime.date.today()
yesterday = now - datetime.timedelta(days=1)
search = datetime.datetime(yesterday.year, yesterday.month, yesterday.day, 16, 0, 0)
    # initialize variables
all_members = []
member_count = {}
start_id = None
utc_tz = timezone('UTC')
cst_tz = timezone('Asia/Shanghai')
join_count = 0
leave_count = 0
    # read the admin action log
admin = True
try:
async for i in context.client.iter_admin_log(context.chat_id, join=True):
utc_time = i.date.replace(tzinfo=utc_tz)
cst_time = utc_time.astimezone(cst_tz)
date = datetime.date(cst_time.year, cst_time.month, cst_time.day)
if not date == now:
break
join_count += 1
async for i in context.client.iter_admin_log(context.chat_id, leave=True):
utc_time = i.date.replace(tzinfo=utc_tz)
cst_time = utc_time.astimezone(cst_tz)
date = datetime.date(cst_time.year, cst_time.month, cst_time.day)
if not date == now:
break
leave_count += 1
except:
admin = False
await context.edit('正在分析群组数据中...(2/3)')
async for i in context.client.iter_messages(context.chat_id, offset_date=search, reverse=True):
uid = i.sender_id
if not start_id:
start_id = i.id
if uid:
if uid > 0:
if uid not in all_members:
all_members.append(uid)
try:
count = member_count[uid]
except KeyError:
count = 0
count += 1
member_count[uid] = count
msg_counts = end_id - start_id
member_counts = len(all_members)
text += f'活跃人数:{member_counts} 人\n' \
f'总消息数:{msg_counts} 条\n'
if admin:
text += f'加群 {join_count} 人,退群 {leave_count} 人\n'
text += f'最活跃的小可爱们:\n'
    # sort the dict by message count in descending order
member_count = sorted(member_count.items(), key=lambda x: x[1], reverse=True)
    # iterate over the sorted list
if len(member_count) == 0:
text += "没有发言用户呢 ~"
else:
for i in range(min(len(member_count), 5)):
            # fetch user info
target_user = await context.client(GetFullUserRequest(member_count[i][0]))
first_name = target_user.user.first_name
if first_name:
first_name = first_name.replace("\u2060", "")
text += f'{first_name} `{member_count[i][1]}`\n'
await context.edit(text)
```
#### File: jizizr/PagerMaid_Plugins/groupword.py
```python
from asyncio import sleep
from wordcloud import WordCloud
from io import BytesIO
from os.path import exists
from os import makedirs
from sys import executable
from collections import defaultdict
from requests import get
from pagermaid.utils import execute, alias_command
from pagermaid.listener import listener
imported = True
imported_ = True
punctuation = {33: ' ', 34: ' ', 35: ' ', 36: ' ', 37: ' ', 38: ' ', 39: ' ', 40: ' ', 41: ' ', 42: ' ', 43: ' ',
44: ' ', 45: ' ', 46: ' ', 47: ' ', 58: ' ', 59: ' ', 60: ' ', 61: ' ', 62: ' ', 63: ' ', 64: ' ',
91: ' ', 92: ' ', 93: ' ', 94: ' ', 95: ' ', 96: ' ', 123: ' ', 124: ' ', 125: ' ', 126: ' ',
65311: ' ', 65292: ' ', 65281: ' ', 12304: ' ', 12305: ' ', 65288: ' ', 65289: ' ', 12289: ' ',
12290: ' ', 65306: ' ', 65307: ' ', 8217: ' ', 8216: ' ', 8230: ' ', 65509: ' ', 183: ' '}
try:
import jieba
except ImportError:
imported = False
try:
import paddle
except ImportError:
imported_ = False
@listener(is_plugin=True, outgoing=True, command=alias_command("groupword"),
description="拉取最新 300 条消息生成词云。",
parameters="[任意内容启用AI分词]")
async def group_word(context):
imported_1 = False
if len(context.parameter) >= 1:
imported_1 = True
if not imported:
try:
await context.edit("支持库 `jieba` 未安装...\n正在尝试自动安装...")
await execute(f'{executable} -m pip install jieba')
await sleep(10)
result = await execute(f'{executable} -m pip show jieba')
if len(result) > 0:
await context.edit('支持库 `jieba` 安装成功...\n正在尝试自动重启...')
await context.client.disconnect()
else:
await context.edit(f"自动安装失败..请尝试手动安装 `{executable} -m pip install jieba` 随后,请重启 PagerMaid-Modify 。")
return
except:
return
if not imported_ and imported_1:
try:
await context.edit("支持库 `paddlepaddle-tiny` 未安装...\n正在尝试自动安装...")
await execute(f'{executable} -m pip install paddlepaddle-tiny')
await sleep(10)
result = await execute(f'{executable} -m pip show paddlepaddle-tiny')
if len(result) > 0 and not 'WARNING' in result:
await context.edit('支持库 `paddlepaddle-tiny` 安装成功...\n正在尝试自动重启...')
await context.client.disconnect()
else:
await context.edit(f"自动安装失败,可能是系统不支持..\nAI 分词不可用,切换到基础分词。\n"
f"您可以尝试手动安装 `{executable} -m pip install paddlepaddle-tiny` 。")
await sleep(4)
except:
return
try:
await context.edit('正在生成中。。。')
except:
return
if not exists("plugins/groupword"):
makedirs("plugins/groupword")
if not exists("plugins/groupword/wqy-microhei.ttc"):
await context.edit('正在拉取中文字体文件。。。(等待时间请评估你的服务器)')
r = get('https://cdn.jsdelivr.net/gh/anthonyfok/fonts-wqy-microhei/wqy-microhei.ttc')
with open("plugins/groupword/wqy-microhei.ttc", "wb") as code:
code.write(r.content)
words = defaultdict(int)
count = 0
try:
if imported_ and imported_1:
try:
jieba.enable_paddle()
except:
imported_1 = False
async for msg in context.client.iter_messages(context.chat, limit=500):
if msg.id == context.id:
continue
if msg.text and not msg.text.startswith('/') and not msg.text.startswith('-') and not '//' in msg.text:
try:
if imported_ and imported_1:
for word in jieba.cut(msg.text.translate(punctuation), use_paddle=True):
word = word.lower()
words[word] += 1
else:
for word in jieba.cut(msg.text.translate(punctuation)):
word = word.lower()
words[word] += 1
count += 1
except:
pass
except:
if count == 0:
try:
await context.edit('您已被 TG 官方限制。')
return
except:
return
try:
image = WordCloud(font_path="plugins/groupword/wqy-microhei.ttc", width=800,
height=400).generate_from_frequencies(
words).to_image()
stream = BytesIO()
image.save(stream, 'PNG')
except:
await context.edit('词云生成失败。')
return
try:
await context.client.send_message(context.chat, f'对最近的 {count} 条消息进行了分析。', file=stream.getvalue())
await context.delete()
except:
return
```
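The plugin above only differs from a plain word-cloud script by the Telegram plumbing. Below is a minimal standalone sketch of the same jieba + WordCloud pipeline; the sample messages and the font path are placeholders, and `jieba`/`wordcloud` are assumed to be installed.

```python
# Standalone sketch of the word-cloud pipeline used by the plugin (no Telegram needed).
from collections import defaultdict

import jieba
from wordcloud import WordCloud

messages = ["今天天气不错", "明天一起去跑步吗", "天气好就去跑步"]  # made-up chat lines
words = defaultdict(int)
for msg in messages:
    for word in jieba.cut(msg):
        if word.strip():  # drop whitespace tokens
            words[word.lower()] += 1

image = WordCloud(font_path="plugins/groupword/wqy-microhei.ttc",  # placeholder CJK font path
                  width=800, height=400).generate_from_frequencies(words).to_image()
image.save("cloud.png")
```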
#### File: jizizr/PagerMaid_Plugins/transfer.py
```python
import asyncio, zipfile, os
from io import BytesIO
from os.path import exists, isfile
from pagermaid import bot
from pagermaid.listener import listener
from pagermaid.utils import alias_command
async def make_zip(source_dir, output_filename):
zipf = zipfile.ZipFile(output_filename, "w")
pre_len = len(os.path.dirname(source_dir))
for parent, dirnames, filenames in os.walk(source_dir):
for filename in filenames:
pathfile = os.path.join(parent, filename)
arcname = pathfile[pre_len:].strip(os.path.sep)
zipf.write(pathfile, arcname)
zipf.close()
async def del_msg(context, t_lim):
await asyncio.sleep(t_lim)
try:
await context.delete()
except:
pass
@listener(is_plugin=True, outgoing=True, command=alias_command("transfer"),
description="上传 / 下载文件",
parameters="upload <filepath>` 或 `download <filepath>")
async def transfer(context):
params = context.parameter
if len(params) < 2:
await context.edit("参数缺失")
await del_msg(context, 3)
return
params[1] = " ".join(params[1:])
file_list = params[1].split("\n")
chat_id = context.chat_id
if params[0] == "upload":
index = 1
for file_path in file_list:
await context.edit(f"正在上传第 {index} 个文件")
if exists(file_path):
if isfile(file_path):
await bot.send_file(chat_id, file_path, force_document=True)
else:
token = file_path.split("/")
token = token[len(token) - 1]
await make_zip(file_path, f"/tmp/{token}.zip")
await bot.send_file(chat_id, f"/tmp/{token}.zip", force_document=True)
os.remove(f"/tmp/{token}.zip")
index += 1
await context.edit("上传完毕")
await del_msg(context, 3)
elif params[0] == "download":
message = await context.get_reply_message()
if message and message.media:
_file = BytesIO()
try:
await bot.download_file(message.media.document, _file)
except AttributeError:
await context.edit('无法下载此类型的文件。')
return
if not exists(file_list[0]):
with open(file_list[0], "wb") as f:
f.write(_file.getvalue())
await context.edit(f"保存成功, 保存路径 {file_list[0]}")
await del_msg(context, 5)
else:
await context.edit("路径已存在文件")
await del_msg(context, 3)
else:
await context.edit("未回复消息或回复消息中不包含文件")
await del_msg(context, 3)
else:
await context.edit("未知命令")
await del_msg(context, 3)
``` |
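`make_zip` stores entries relative to the parent of `source_dir`, so the archive keeps the top-level folder name. A minimal sketch of calling it outside the plugin, with hypothetical paths:

```python
# Hypothetical paths; make_zip is the coroutine defined in the plugin above.
import asyncio
import zipfile

async def main():
    await make_zip("/tmp/example_dir", "/tmp/example_dir.zip")
    with zipfile.ZipFile("/tmp/example_dir.zip") as zf:
        print(zf.namelist())  # entries are prefixed with "example_dir/"

asyncio.run(main())
```

Note that `make_zip` is declared `async` but performs blocking file IO, so zipping a large folder will stall the event loop.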
{
"source": "jizongFox/adversarial-robustness-toolbox",
"score": 2
} |
#### File: art/attacks/newtonfool.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from art.attacks.attack import Attack
from art.utils import to_categorical
logger = logging.getLogger(__name__)
class NewtonFool(Attack):
"""
Implementation of the attack from Uyeong Jang et al. (2017). Paper link: http://doi.acm.org/10.1145/3134600.3134635
"""
attack_params = Attack.attack_params + ["max_iter", "eta", "batch_size"]
def __init__(self, classifier, max_iter=1000, eta=0.01, batch_size=128):
"""
Create a NewtonFool attack instance.
:param classifier: A trained model.
:type classifier: :class:`Classifier`
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param eta: The eta coefficient.
:type eta: `float`
:param batch_size: Batch size
:type batch_size: `int`
"""
super(NewtonFool, self).__init__(classifier)
params = {"max_iter": max_iter, "eta": eta, "batch_size": batch_size}
self.set_params(**params)
def generate(self, x, **kwargs):
"""
Generate adversarial samples and return them in a Numpy array.
:param x: An array with the original inputs to be attacked.
:type x: `np.ndarray`
:param kwargs: Attack-specific parameters used by child classes.
:type kwargs: `dict`
:return: An array holding the adversarial examples.
:rtype: `np.ndarray`
"""
self.set_params(**kwargs)
x_adv = x.copy()
# Initialize variables
clip_min, clip_max = self.classifier.clip_values
y_pred = self.classifier.predict(x, logits=False)
pred_class = np.argmax(y_pred, axis=1)
# Compute perturbation with implicit batching
for batch_id in range(int(np.ceil(x_adv.shape[0] / float(self.batch_size)))):
batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size
batch = x_adv[batch_index_1:batch_index_2]
# Main algorithm for each batch
norm_batch = np.linalg.norm(np.reshape(batch, (batch.shape[0], -1)), axis=1)
l = pred_class[batch_index_1:batch_index_2]
l_b = to_categorical(l, self.classifier.nb_classes).astype(bool)
# Main loop of the algorithm
for _ in range(self.max_iter):
# Compute score
score = self.classifier.predict(batch, logits=False)[l_b]
# Compute the gradients and norm
grads = self.classifier.class_gradient(batch, label=l, logits=False)
grads = np.squeeze(grads, axis=1)
norm_grad = np.linalg.norm(np.reshape(grads, (batch.shape[0], -1)), axis=1)
# Theta
theta = self._compute_theta(norm_batch, score, norm_grad)
                # Perturbation
di_batch = self._compute_pert(theta, grads, norm_grad)
                # Update xi and perturbation
batch += di_batch
# Apply clip
x_adv[batch_index_1:batch_index_2] = np.clip(batch, clip_min, clip_max)
preds = np.argmax(self.classifier.predict(x), axis=1)
preds_adv = np.argmax(self.classifier.predict(x_adv), axis=1)
        logger.info('Success rate of NewtonFool attack: %.2f%%', 100 * np.sum(preds != preds_adv) / x.shape[0])
return x_adv
def set_params(self, **kwargs):
"""Take in a dictionary of parameters and applies attack-specific checks before saving them as attributes.
:param max_iter: The maximum number of iterations.
:type max_iter: `int`
:param eta: The eta coefficient.
:type eta: `float`
"""
# Save attack-specific parameters
super(NewtonFool, self).set_params(**kwargs)
if not isinstance(self.max_iter, (int, np.int)) or self.max_iter <= 0:
raise ValueError("The number of iterations must be a positive integer.")
if not isinstance(self.eta, (float, int, np.int)) or self.eta <= 0:
raise ValueError("The eta coefficient must be a positive float.")
if self.batch_size <= 0:
raise ValueError('The batch size `batch_size` has to be positive.')
return True
def _compute_theta(self, norm_batch, score, norm_grad):
"""
Function to compute the theta at each step.
:param norm_batch: norm of a batch.
:type norm_batch: `np.ndarray`
:param score: softmax value at the attacked class.
:type score: `np.ndarray`
:param norm_grad: norm of gradient values at the attacked class.
:type norm_grad: `np.ndarray`
:return: theta value.
:rtype: `np.ndarray`
"""
equ1 = self.eta * norm_batch * norm_grad
equ2 = score - 1.0 / self.classifier.nb_classes
result = np.minimum.reduce([equ1, equ2])
return result
@staticmethod
def _compute_pert(theta, grads, norm_grad):
"""
        Function to compute the perturbation at each step.
:param theta: theta value at the current step.
:type theta: `np.ndarray`
:param grads: gradient values at the attacked class.
:type grads: `np.ndarray`
:param norm_grad: norm of gradient values at the attacked class.
:type norm_grad: `np.ndarray`
        :return: perturbation.
:rtype: `np.ndarray`
"""
# Pick a small scalar to avoid division by 0
tol = 10e-8
nom = -theta[:, None, None, None] * grads
denom = norm_grad**2
denom[denom < tol] = tol
result = nom / denom[:, None, None, None]
return result
```
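A usage sketch: `classifier` is assumed to be any trained ART `Classifier` wrapper exposing the `predict`, `class_gradient`, `nb_classes` and `clip_values` members used above, and the input shape is only an example.

```python
import numpy as np

# `classifier` is assumed to already exist (any ART Classifier wrapper).
attack = NewtonFool(classifier, max_iter=100, eta=0.01, batch_size=64)

x_test = np.random.rand(16, 28, 28, 1).astype(np.float32)  # placeholder inputs in [0, 1]
x_adv = attack.generate(x_test)

preds = np.argmax(classifier.predict(x_test), axis=1)
preds_adv = np.argmax(classifier.predict(x_adv), axis=1)
print("fraction of flipped predictions:", np.mean(preds != preds_adv))
```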
#### File: adversarial-robustness-toolbox/art/data_generators_unittest.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from art.data_generators import KerasDataGenerator, PyTorchDataGenerator, MXDataGenerator
from art.utils import master_seed
logger = logging.getLogger('testLogger')
class TestKerasDataGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls):
import keras
class DummySequence(keras.utils.Sequence):
def __init__(self):
self._size = 5
self._x = np.random.rand(self._size, 28, 28, 1)
self._y = np.random.randint(0, high=10, size=(self._size, 10))
def __len__(self):
return self._size
def __getitem__(self, idx):
return self._x[idx], self._y[idx]
sequence = DummySequence()
cls.data_gen = KerasDataGenerator(sequence, size=5, batch_size=1)
def setUp(self):
# Set master seed
master_seed(42)
def test_gen_interface(self):
gen = self._dummy_gen()
data_gen = KerasDataGenerator(gen, size=None, batch_size=5)
x, y = data_gen.get_batch()
# Check return types
self.assertTrue(isinstance(x, np.ndarray))
self.assertTrue(isinstance(y, np.ndarray))
# Check shapes
self.assertTrue(x.shape == (5, 28, 28, 1))
self.assertTrue(y.shape == (5, 10))
def test_gen_keras_specific(self):
gen = self._dummy_gen()
data_gen = KerasDataGenerator(gen, size=None, batch_size=5)
iter_ = iter(data_gen.generator)
x, y = next(iter_)
# Check return types
self.assertTrue(isinstance(x, np.ndarray))
self.assertTrue(isinstance(y, np.ndarray))
# Check shapes
self.assertTrue(x.shape == (5, 28, 28, 1))
self.assertTrue(y.shape == (5, 10))
def test_sequence_keras_specific(self):
iter_ = iter(self.data_gen.generator)
x, y = next(iter_)
# Check return types
self.assertTrue(isinstance(x, np.ndarray))
self.assertTrue(isinstance(y, np.ndarray))
# Check shapes
self.assertTrue(x.shape == (28, 28, 1))
self.assertTrue(y.shape == (10,))
def test_sequence_interface(self):
x, y = self.data_gen.get_batch()
# Check return types
self.assertTrue(isinstance(x, np.ndarray))
self.assertTrue(isinstance(y, np.ndarray))
# Check shapes
self.assertTrue(x.shape == (28, 28, 1))
self.assertTrue(y.shape == (10,))
def test_imagedatagen_interface(self):
train_size, batch_size = 20, 5
x_train, y_train = np.random.rand(train_size, 28, 28, 1), np.random.randint(0, 2, size=(train_size, 10))
datagen = ImageDataGenerator(width_shift_range=0.075, height_shift_range=0.075, rotation_range=12,
shear_range=0.075, zoom_range=0.05, fill_mode='constant', cval=0)
datagen.fit(x_train)
# Create wrapper and get batch
data_gen = KerasDataGenerator(datagen.flow(x_train, y_train, batch_size=batch_size), size=None,
batch_size=batch_size)
x, y = data_gen.get_batch()
# Check return types
self.assertTrue(isinstance(x, np.ndarray))
self.assertTrue(isinstance(y, np.ndarray))
# Check shapes
self.assertTrue(x.shape == (batch_size, 28, 28, 1))
self.assertTrue(y.shape == (batch_size, 10))
def test_imagedatagen_keras_specific(self):
train_size, batch_size = 20, 5
x_train, y_train = np.random.rand(train_size, 28, 28, 1), np.random.randint(0, 2, size=(train_size, 10))
datagen = ImageDataGenerator(width_shift_range=0.075, height_shift_range=0.075, rotation_range=12,
shear_range=0.075, zoom_range=0.05, fill_mode='constant', cval=0)
datagen.fit(x_train)
# Create wrapper and get batch
data_gen = KerasDataGenerator(datagen.flow(x_train, y_train, batch_size=batch_size), size=None,
batch_size=batch_size)
x, y = next(data_gen.generator)
# Check return types
self.assertTrue(isinstance(x, np.ndarray))
self.assertTrue(isinstance(y, np.ndarray))
# Check shapes
self.assertTrue(x.shape == (batch_size, 28, 28, 1))
self.assertTrue(y.shape == (batch_size, 10))
@staticmethod
def _dummy_gen(size=5):
yield np.random.rand(size, 28, 28, 1), np.random.randint(low=0, high=10, size=(size, 10))
class TestPyTorchGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls):
import torch
from torch.utils.data import DataLoader
class DummyDataset(torch.utils.data.Dataset):
def __init__(self):
self._size = 10
self._x = np.random.rand(self._size, 1, 5, 5)
self._y = np.random.randint(0, high=10, size=self._size)
def __len__(self):
return self._size
def __getitem__(self, idx):
return self._x[idx], self._y[idx]
dataset = DummyDataset()
data_loader = DataLoader(dataset=dataset, batch_size=5, shuffle=True)
cls.data_gen = PyTorchDataGenerator(data_loader, size=10, batch_size=5)
def test_gen_interface(self):
x, y = self.data_gen.get_batch()
# Check return types
self.assertTrue(isinstance(x, np.ndarray))
self.assertTrue(isinstance(y, np.ndarray))
# Check shapes
self.assertTrue(x.shape == (5, 1, 5, 5))
self.assertTrue(y.shape == (5,))
def test_pytorch_specific(self):
import torch
iter_ = iter(self.data_gen.data_loader)
x, y = next(iter_)
# Check return types
self.assertTrue(isinstance(x, torch.Tensor))
self.assertTrue(isinstance(y, torch.Tensor))
# Check shapes
self.assertTrue(x.shape == (5, 1, 5, 5))
self.assertTrue(y.shape == (5,))
class TestMXGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls):
import mxnet as mx
x = mx.random.uniform(shape=(10, 1, 5, 5))
y = mx.random.uniform(shape=10)
dataset = mx.gluon.data.dataset.ArrayDataset(x, y)
data_loader = mx.gluon.data.DataLoader(dataset, batch_size=5, shuffle=True)
cls.data_gen = MXDataGenerator(data_loader, size=10, batch_size=5)
def test_gen_interface(self):
x, y = self.data_gen.get_batch()
# Check return types
self.assertTrue(isinstance(x, np.ndarray))
self.assertTrue(isinstance(y, np.ndarray))
# Check shapes
self.assertTrue(x.shape == (5, 1, 5, 5))
self.assertTrue(y.shape == (5,))
def test_mxnet_specific(self):
import mxnet as mx
iter_ = iter(self.data_gen.data_loader)
x, y = next(iter_)
# Check return types
self.assertTrue(isinstance(x, mx.ndarray.NDArray))
self.assertTrue(isinstance(y, mx.ndarray.NDArray))
# Check shapes
self.assertTrue(x.shape == (5, 1, 5, 5))
self.assertTrue(y.shape == (5,))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jizongFox/Continual-Learning-Benchmark",
"score": 2
} |
#### File: Continual-Learning-Benchmark/dataloaders/haxio_dataset.py
```python
import bisect
import os
import warnings
from pathlib import Path
import torch
from loguru import logger
from torch.utils import data
from torch.utils.data.dataset import Dataset, IterableDataset
from torchvision.datasets.folder import (
accimage_loader,
pil_loader,
make_dataset,
IMG_EXTENSIONS,
)
from torchvision.datasets.vision import StandardTransform
__all__ = ["ImageFolder", "ConcatDataset"]
from utils.printable import item2str
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == "accimage":
return accimage_loader(path)
else:
return pil_loader(path)
class VisionDataset(data.Dataset):
_repr_indent = 4
def __init__(self, root, transforms=None, transform=None, target_transform=None):
if isinstance(root, torch._six.string_classes):
root = os.path.expanduser(root)
self.root = root
assert Path(root).exists() and Path(root).is_dir(), root
has_transforms = transforms is not None
has_separate_transform = transform is not None or target_transform is not None
if has_transforms and has_separate_transform:
raise ValueError(
"Only transforms or transform/target_transform can "
"be passed as argument"
)
# for backwards-compatibility
self.transform = transform
self.target_transform = target_transform
if has_separate_transform:
transforms = StandardTransform(transform, target_transform)
self.transforms = transforms
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __repr__(self):
head = "Dataset " + self.__class__.__name__
body = ["Number of datapoints: {}".format(self.__len__())]
if self.root is not None:
body.append("Root location: {}".format(self.root))
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
def _format_transform_repr(self, transform, head):
lines = transform.__repr__().splitlines()
return ["{}{}".format(head, lines[0])] + [
"{}{}".format(" " * len(head), line) for line in lines[1:]
]
def extra_repr(self):
return ""
def _find_classes(dir):
"""
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
class DatasetFolder(VisionDataset):
"""A generic data loader where the samples are arranged in this way: ::
root/class_x/xxx.ext
root/class_x/xxy.ext
root/class_x/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
root/class_y/asd932_.ext
Args:
root (string): Root directory path.
loader (callable): A function to load a sample given its path.
extensions (tuple[string]): A list of allowed extensions.
both extensions and is_valid_file should not be passed.
transform (callable, optional): A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
target_transform (callable, optional): A function/transform that takes
in the target and transforms it.
        is_valid_file (callable, optional): A function that takes the path of a file
            and checks if the file is a valid file (used to check for corrupt files)
both extensions and is_valid_file should not be passed.
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
samples (list): List of (sample path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(self, root, loader, extensions=None, transform=None, target_transform=None, customized_classes=None,
is_valid_file=None, ):
super(DatasetFolder, self).__init__(root, transform=transform, target_transform=target_transform)
root_message = f"Building root from {str(self.root)}"
if customized_classes is None:
classes, class_to_idx = _find_classes(self.root)
logger.debug(f"{root_message}, Using automatic class mapping {item2str(class_to_idx)}")
else:
classes = customized_classes
class_to_idx = {customized_classes[i]: i for i in range(len(customized_classes))}
logger.debug(f"{root_message}, Using customized class mapping {item2str(class_to_idx)}", )
samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
if len(samples) == 0:
raise RuntimeError("Found 0 files in sub_folders of: " + str(self.root) +
"\nSupported extensions are: " + ",".join(extensions))
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
self._is_valid_file = is_valid_file
def switch_mapping(self, class_list):
self.classes = class_list
self.class_to_idx = {c: i for i, c in enumerate(class_list)}
logger.debug(f"switch customized class mapping {self.class_to_idx}", )
self.samples = make_dataset(self.root, self.class_to_idx, self.extensions, self._is_valid_file)
self.targets = [s[1] for s in self.samples]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
class ImageFolder(DatasetFolder):
"""A generic data loader where the images are arranged in this way: ::
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
Args:
root (string): Root directory path.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
        is_valid_file (callable, optional): A function that takes the path of an Image file
            and checks if the file is a valid file (used to check for corrupt files)
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(
self, root, transform=None, target_transform=None, customized_classes=None
):
super(ImageFolder, self).__init__(
root,
default_loader,
IMG_EXTENSIONS,
transform=transform,
target_transform=target_transform,
is_valid_file=None,
customized_classes=customized_classes,
)
self.imgs = self.samples
@property
def num_classes(self):
return len(self.classes)
class ConcatDataset(Dataset):
r"""Dataset as a concatenation of multiple datasets.
This class is useful to assemble different existing datasets.
Arguments:
datasets (sequence): List of datasets to be concatenated
"""
@staticmethod
def cumsum(sequence):
r, s = [], 0
for e in sequence:
l = len(e)
r.append(l + s)
s += l
return r
def __init__(self, datasets):
super(ConcatDataset, self).__init__()
assert len(datasets) > 0, "datasets should not be an empty iterable"
self.datasets = list(datasets)
for d in self.datasets:
assert not isinstance(
d, IterableDataset
), "ConcatDataset does not support IterableDataset"
self.cumulative_sizes = self.cumsum(self.datasets)
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError(
"absolute value of index should not exceed dataset length"
)
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx][sample_idx]
@property
def cummulative_sizes(self):
warnings.warn(
"cummulative_sizes attribute is renamed to " "cumulative_sizes",
DeprecationWarning,
stacklevel=2,
)
return self.cumulative_sizes
@property
def num_classes(self):
return max([x.num_classes for x in self.datasets])
@property
def classes(self):
return self.datasets[0].classes
```
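A short usage sketch. The directory layout and class names below are placeholders; the point is that `customized_classes` pins the same label mapping across folders, and `ConcatDataset` then exposes the merged samples.

```python
from torchvision import transforms

trans = transforms.Compose([transforms.Resize((64, 64)), transforms.ToTensor()])

# identical, fixed label mapping for both folders instead of the automatic scan
task_a = ImageFolder("data/task_a", transform=trans, customized_classes=["ok", "defect"])
task_b = ImageFolder("data/task_b", transform=trans, customized_classes=["ok", "defect"])

merged = ConcatDataset([task_a, task_b])
print(len(merged), merged.num_classes, merged.classes)
img, label = merged[0]  # indexing dispatches to the correct sub-dataset via bisect
```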
#### File: Continual-Learning-Benchmark/utils/printable.py
```python
from typing import Iterable
def is_float(v):
"""if v is a scalar"""
try:
float(v)
return True
except (ValueError, TypeError):
return False
def is_iterable(v):
"""if v is an iterable, except str"""
if isinstance(v, str):
return False
return isinstance(v, (list, tuple, dict, set))
def _float2str(v):
"""convert a scalar to float, in order to display"""
v = float(v)
if v == 0:
return "0"
if abs(float(v)) < 0.01 or abs(float(v)) >= 99:
return f"{v:.2e}"
return f"{v:.3f}"
def _leafitem2str(v):
if is_float(v):
return _float2str(v)
return f"{v}"
def _generate_pair(k, v):
"""generate str for non iterable k v"""
return f"{k}:{_leafitem2str(v)}"
def _dict2str(dictionary: dict):
def create_substring(k, v):
if not is_iterable(v):
return _generate_pair(k, v)
else:
return f"{k}:[" + item2str(v) + "]"
strings = [create_substring(k, v) for k, v in dictionary.items()]
return ", ".join(strings)
def _iter2str(item: Iterable):
"""A list or a tuple"""
return ", ".join(
[_leafitem2str(x) if not is_iterable(x) else item2str(x) for x in item]
)
def item2str(item):
"""convert item to string in a pretty way.
@param item: list, dictionary, set and tuple
@return: pretty string
"""
if isinstance(item, dict):
return _dict2str(item)
return _iter2str(item)
``` |
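`item2str` flattens nested dicts and lists into a single line, rendering scalars through `_float2str` (scientific notation outside the range [0.01, 99)). For example, with made-up values:

```python
config = {"lr": 0.001234, "widths": [64, 128], "optim": {"name": "adam", "wd": 0.0005}}
print(item2str(config))
# -> lr:1.23e-03, widths:[64.000, 1.28e+02], optim:[name:adam, wd:5.00e-04]
```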
{
"source": "jizongFox/convmixer-cifar10",
"score": 2
} |
#### File: jizongFox/convmixer-cifar10/train.py
```python
import argparse
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, default="ConvMixer")
parser.add_argument('--batch-size', default=512, type=int)
parser.add_argument('--scale', default=0.75, type=float)
parser.add_argument('--reprob', default=0.25, type=float)
parser.add_argument('--ra-m', default=8, type=int)
parser.add_argument('--ra-n', default=1, type=int)
parser.add_argument('--jitter', default=0.1, type=float)
parser.add_argument('--hdim', default=256, type=int)
parser.add_argument('--depth', default=8, type=int)
parser.add_argument('--psize', default=2, type=int)
parser.add_argument('--conv-ks', default=5, type=int)
parser.add_argument('--wd', default=0.01, type=float)
parser.add_argument('--clip-norm', action='store_true')
parser.add_argument('--epochs', default=25, type=int)
parser.add_argument('--lr-max', default=0.01, type=float)
parser.add_argument('--workers', default=2, type=int)
parser.add_argument('--model-name', default="convmixer", choices=["convmixer", "baseline"], type=str)
args = parser.parse_args()
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
def ConvMixer(dim, depth, kernel_size=5, patch_size=2, n_classes=10):
return nn.Sequential(
nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size),
nn.GELU(),
nn.BatchNorm2d(dim),
*[nn.Sequential(
Residual(nn.Sequential(
nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"),
nn.GELU(),
nn.BatchNorm2d(dim)
)),
nn.Conv2d(dim, dim, kernel_size=1),
nn.GELU(),
nn.BatchNorm2d(dim)
) for _ in range(depth)],
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(),
nn.Linear(dim, n_classes)
)
def ComparableNet(dim, depth, kernel_size=5, patch_size=2, n_classes=10):
return nn.Sequential(
nn.Conv2d(3, dim, kernel_size=3, stride=1),
nn.GELU(),
nn.BatchNorm2d(dim),
*[nn.Sequential(
Residual(nn.Sequential(
nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"),
nn.GELU(),
nn.BatchNorm2d(dim)
)),
nn.Conv2d(dim, dim, kernel_size=1),
nn.GELU(),
nn.BatchNorm2d(dim)
) for _ in range(depth)],
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(),
nn.Linear(dim, n_classes)
)
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
train_transform = transforms.Compose([
transforms.RandomResizedCrop(32, scale=(args.scale, 1.0), ratio=(1.0, 1.0)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandAugment(num_ops=args.ra_n, magnitude=args.ra_m),
transforms.ColorJitter(args.jitter, args.jitter, args.jitter),
transforms.ToTensor(),
transforms.Normalize(cifar10_mean, cifar10_std),
transforms.RandomErasing(p=args.reprob)
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(cifar10_mean, cifar10_std)
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=train_transform)
trainloader = DataLoader(trainset, batch_size=args.batch_size,
shuffle=True, num_workers=args.workers)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=test_transform)
testloader = DataLoader(testset, batch_size=args.batch_size,
shuffle=False, num_workers=args.workers)
if args.model_name == "convmixer":
model = ConvMixer(args.hdim, args.depth, patch_size=args.psize, kernel_size=args.conv_ks, n_classes=10)
else:
model = ComparableNet(args.hdim, args.depth, patch_size=args.psize, kernel_size=args.conv_ks, n_classes=10)
model = nn.DataParallel(model).cuda()
lr_schedule = lambda t: np.interp([t], [0, args.epochs * 2 // 5, args.epochs * 4 // 5, args.epochs],
[0, args.lr_max, args.lr_max / 20.0, 0])[0]
opt = optim.AdamW(model.parameters(), lr=args.lr_max, weight_decay=args.wd)
criterion = nn.CrossEntropyLoss()
scaler = torch.cuda.amp.GradScaler()
writer = SummaryWriter(log_dir=os.path.join("./output", args.model_name))
with writer:
for epoch in range(args.epochs):
start = time.time()
train_loss, train_acc, n = 0, 0, 0
for i, (X, y) in enumerate(trainloader):
model.train()
X, y = X.cuda(), y.cuda()
lr = lr_schedule(epoch + (i + 1) / len(trainloader))
opt.param_groups[0].update(lr=lr)
opt.zero_grad()
with torch.cuda.amp.autocast():
output = model(X)
loss = criterion(output, y)
scaler.scale(loss).backward()
if args.clip_norm:
scaler.unscale_(opt)
nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(opt)
scaler.update()
train_loss += loss.item() * y.size(0)
train_acc += (output.max(1)[1] == y).sum().item()
n += y.size(0)
model.eval()
test_acc, m = 0, 0
with torch.no_grad():
for i, (X, y) in enumerate(testloader):
X, y = X.cuda(), y.cuda()
with torch.cuda.amp.autocast():
output = model(X)
test_acc += (output.max(1)[1] == y).sum().item()
m += y.size(0)
print(
f'[{args.name}] Epoch: {epoch} | Train Acc: {train_acc / n:.4f}, Test Acc: {test_acc / m:.4f}, '
f'Time: {time.time() - start:.1f}, lr: {lr:.6f}')
writer.add_scalar(tag="tra/loss", scalar_value=train_loss / n, global_step=epoch)
writer.add_scalar(tag="tra/acc", scalar_value=train_acc / n, global_step=epoch)
writer.add_scalar(tag="test/acc", scalar_value=test_acc / m, global_step=epoch)
``` |
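The learning rate follows the piecewise-linear schedule defined by `lr_schedule` above: linear warm-up over the first 2/5 of training, decay to `lr_max / 20` at 4/5, then down to zero. A quick check with the default values:

```python
import numpy as np

epochs, lr_max = 25, 0.01
lr_schedule = lambda t: np.interp([t], [0, epochs * 2 // 5, epochs * 4 // 5, epochs],
                                  [0, lr_max, lr_max / 20.0, 0])[0]

for t in (0, 5, 10, 20, 25):
    print(t, lr_schedule(t))
# -> 0.0, 0.005, 0.01, 0.0005, 0.0
```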
{
"source": "jizongFox/deep-clustering-toolbox",
"score": 2
} |
#### File: classification/IIC/net6c.py
```python
import torch.nn as nn
from .vgg import VGGTrunk, VGGNet
from deepclustering.decorator.decorator import export
# 4h but for cifar, 24x24
__all__ = ["ClusterNet6c", "ClusterNet6c_Param"]
class ClusterNet6cTrunk(VGGTrunk):
def __init__(self, num_channel: int = 3, batchnorm_track: bool = True):
r"""
Initialize
:param num_channel: input image channel, default 3
:param batchnorm_track:
"""
super(ClusterNet6cTrunk, self).__init__()
self.batchnorm_track = batchnorm_track
self.conv_size = 5
self.pad = 2
self.cfg = ClusterNet6c.cfg
self.in_channels = num_channel
self.features = self._make_layers()
def forward(self, x):
x = self.features(x)
bn, nf, h, w = x.size()
x = x.view(bn, nf * h * w)
return x
class ClusterNet6cHead(nn.Module):
def __init__(
self,
input_size: int = 64,
num_sub_heads: int = 5,
output_k: int = 10,
batchnorm_track: bool = True,
):
super(ClusterNet6cHead, self).__init__()
self.batchnorm_track = batchnorm_track
self.num_sub_heads = num_sub_heads
self.cfg = ClusterNet6c.cfg
num_features = self.cfg[-1][0]
        if input_size == 24:
            features_sp_size = 3
        elif input_size == 64:
            features_sp_size = 8
        else:
            raise ValueError(f"Unsupported input_size {input_size}; expected 24 or 64.")
self.heads = nn.ModuleList(
[
nn.Sequential(
nn.Linear(
num_features * features_sp_size * features_sp_size, output_k
),
nn.Softmax(dim=1),
)
for _ in range(self.num_sub_heads)
]
)
def forward(self, x, kmeans_use_features=False):
results = []
for i in range(self.num_sub_heads):
if kmeans_use_features:
results.append(x) # duplicates
else:
results.append(self.heads[i](x))
return results
@export
class ClusterNet6c(VGGNet):
r"""
VGG based clustering method with single head
"""
cfg = [(64, 1), ("M", None), (128, 1), ("M", None), (256, 1), ("M", None), (512, 1)]
def __init__(
self,
num_channel: int = 3,
input_size: int = 64,
num_sub_heads: int = 5,
output_k: int = 10,
batchnorm_track: bool = True,
):
r"""
:param num_channel: input image channel
:param input_size: input image size
:param num_sub_heads: num of sub heads for one head
:param output_k: clustering numbers
:param batchnorm_track: whether to track the batchnorm states
"""
super(ClusterNet6c, self).__init__()
self.batchnorm_track = batchnorm_track
self.trunk = ClusterNet6cTrunk(
num_channel=num_channel, batchnorm_track=self.batchnorm_track
)
self.head = ClusterNet6cHead(
input_size=input_size,
num_sub_heads=num_sub_heads,
output_k=output_k,
batchnorm_track=self.batchnorm_track,
)
self._initialize_weights()
def forward(
self,
x,
kmeans_use_features=False,
trunk_features=False,
penultimate_features=False,
):
if penultimate_features:
print("Not needed/implemented for this arch")
exit(1)
x = self.trunk(x)
if trunk_features: # for semisup
return x
x = self.head(x, kmeans_use_features=kmeans_use_features) # returns list
return x
ClusterNet6c_Param = {
"num_channel": 3,
"input_size": 64,
"num_sub_heads": 5,
"output_k": 10,
}
```
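A shape sanity check using the default parameter dict from this file; it assumes the `VGGTrunk`/`VGGNet` bases from the same package are importable.

```python
import torch

net = ClusterNet6c(**ClusterNet6c_Param)  # 3-channel 64x64 input, 5 sub-heads, k=10
x = torch.randn(2, 3, 64, 64)
outputs = net(x)                          # list with one softmax tensor per sub-head
print(len(outputs), outputs[0].shape)     # -> 5 torch.Size([2, 10])
```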
#### File: deepclustering/arch/__init__.py
```python
from functools import partial
from typing import *
from .classification import *
from .segmentation import *
from ..utils.general import _register
__all__ = [
"weights_init",
"get_arch",
"ARCH_CALLABLES",
"_register_arch",
]
"""
Package
"""
# A Map from string to arch callables
ARCH_CALLABLES: Dict[str, Callable] = {}
ARCH_PARAM_DICT: Dict[str, Dict[str, Union[int, float, str]]] = {}
_register_arch = partial(_register, CALLABLE_DICT=ARCH_CALLABLES)
_register_param = partial(_register, CALLABLE_DICT=ARCH_PARAM_DICT)
# Adding architecture (new architecture goes here...)
_register_arch("clusternet5g", ClusterNet5g)
_register_arch("clusternet5gtwohead", ClusterNet5gTwoHead)
_register_arch("clusternet5gmultihead", ClusterNet5gMultiHead)
_register_arch("clusternet6c", ClusterNet6c)
_register_arch("clusternet6cTwoHead", ClusterNet6cTwoHead)
_register_arch("clusternetimsat", IMSATNet)
_register_arch("dummy", Dummy)
_register_arch("vatnet", VATNetwork)
_register_arch("enet", Enet)
_register_arch("unet", UNet)
_register_arch("unet_bn", UNet_bn)
_register_arch("cnet", CorstemNet)
_register_arch("preresnet", PreResNet)
_register_arch("epsnetv2", ESPNetv2)
_register_arch("attention_unet", UNet_Attention)
# Adding default keys here to enable automatic testing
_register_param("clusternet5g", ClusterNet5g_Param)
_register_param("clusternet5gtwohead", ClusterNet5gTwoHead_Param)
_register_param("clusternet5gmultihead", ClusterNet5gMultiHead_Param)
_register_param("clusternet6c", ClusterNet6c_Param)
_register_param("clusternet6cTwoHead", ClusterNet6cTwoHead_Param)
_register_param("clusternetimsat", IMSATNet_Param)
_register_param("dummy", Dummy_Param)
_register_param("vatnet", VatNet_Param)
_register_param("enet", Enet_Param)
_register_param("unet", Unet_Param)
_register_param("unet_bn", Unetbn_Param)
_register_param("cnet", CorstemNet_Param)
_register_param("preresnet", PreResNet110_params)
"""
Public interface
"""
def weights_init(m):
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
nn.init.xavier_normal_(m.weight.data)
elif type(m) == nn.BatchNorm2d:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif type(m) == nn.Linear:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
def get_arch(arch: str, kwargs) -> nn.Module:
""" Get the architecture. Return a torch.nn.Module """
arch_callable = ARCH_CALLABLES.get(arch.lower())
kwargs.pop("arch", None)
assert arch_callable, "Architecture {} is not found!".format(arch)
net = arch_callable(**kwargs)
# try:
# net.apply(weights_init)
# except AttributeError as e:
# print(f'Using pretrained models with the error:{e}')
return net
```
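A sketch of the registry in use: names are looked up case-insensitively in `ARCH_CALLABLES`, and the matching default parameters live in `ARCH_PARAM_DICT` (both assumed to be imported from this module).

```python
arch_name = "clusternet6c"
kwargs = dict(ARCH_PARAM_DICT[arch_name])  # defensive copy; get_arch pops an "arch" key if present
net = get_arch(arch_name, kwargs)
print(type(net).__name__)  # -> ClusterNet6c
```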
#### File: arch/segmentation/attention_unet.py
```python
import torch
import torch.nn as nn
class conv_block(nn.Module):
def __init__(self, in_ch, out_ch):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.conv(x)
return x
class up_conv(nn.Module):
def __init__(self, in_ch, out_ch):
super(up_conv, self).__init__()
self.up = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.up(x)
return x
class Attention_block(nn.Module):
def __init__(self, F_g, F_l, F_int):
super(Attention_block, self).__init__()
self._F_g = F_g
self._F_l = F_l
self._F_int = F_int
self.W_g = nn.Sequential(
nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(F_int),
)
self.W_x = nn.Sequential(
nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(F_int),
)
self.psi = nn.Sequential(
nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(1),
nn.Sigmoid(),
)
self.relu = nn.ReLU(inplace=True)
def forward(self, g, x):
g1 = self.W_g(g)
x1 = self.W_x(x)
psi = self.relu(g1 + x1)
psi = self.psi(psi)
out = x * psi
return out
class UNet_Attention(nn.Module):
def __init__(self, input_dim=3, num_classes=1):
super(UNet_Attention, self).__init__()
self.input_dim = input_dim
self.num_classes = num_classes
n1 = 64
filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]
self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.Conv1 = conv_block(input_dim, filters[0])
self.Conv2 = conv_block(filters[0], filters[1])
self.Conv3 = conv_block(filters[1], filters[2])
self.Conv4 = conv_block(filters[2], filters[3])
self.Conv5 = conv_block(filters[3], filters[4])
self.Up5 = up_conv(filters[4], filters[3])
self.Att5 = Attention_block(F_g=filters[3], F_l=filters[3], F_int=filters[2])
self.Up_conv5 = conv_block(filters[4], filters[3])
self.Up4 = up_conv(filters[3], filters[2])
self.Att4 = Attention_block(F_g=filters[2], F_l=filters[2], F_int=filters[1])
self.Up_conv4 = conv_block(filters[3], filters[2])
self.Up3 = up_conv(filters[2], filters[1])
self.Att3 = Attention_block(F_g=filters[1], F_l=filters[1], F_int=filters[0])
self.Up_conv3 = conv_block(filters[2], filters[1])
self.Up2 = up_conv(filters[1], filters[0])
self.Att2 = Attention_block(F_g=filters[0], F_l=filters[0], F_int=32)
self.Up_conv2 = conv_block(filters[1], filters[0])
self.Conv = nn.Conv2d(
filters[0], num_classes, kernel_size=1, stride=1, padding=0
)
def forward(self, x):
e1 = self.Conv1(x)
e2 = self.Maxpool1(e1)
e2 = self.Conv2(e2)
e3 = self.Maxpool2(e2)
e3 = self.Conv3(e3)
e4 = self.Maxpool3(e3)
e4 = self.Conv4(e4)
e5 = self.Maxpool4(e4)
e5 = self.Conv5(e5)
d5 = self.Up5(e5)
x4 = self.Att5(g=d5, x=e4)
d5 = torch.cat((x4, d5), dim=1)
d5 = self.Up_conv5(d5)
d4 = self.Up4(d5)
x3 = self.Att4(g=d4, x=e3)
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_conv4(d4)
d3 = self.Up3(d4)
x2 = self.Att3(g=d3, x=e2)
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_conv3(d3)
d2 = self.Up2(d3)
x1 = self.Att2(g=d2, x=e1)
d2 = torch.cat((x1, d2), dim=1)
d2 = self.Up_conv2(d2)
out = self.Conv(d2)
return out
```
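All 3x3 convolutions are padded, so the spatial size is changed only by the four max-poolings and the four 2x upsamplings; inputs whose height and width are divisible by 16 therefore come back at full resolution. A quick shape check:

```python
import torch

model = UNet_Attention(input_dim=3, num_classes=2)
x = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # -> torch.Size([1, 2, 256, 256])
```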
#### File: dataset/segmentation/iSeg2017_dataset.py
```python
import os
from pathlib import Path
from typing import List, Tuple
from sklearn.model_selection import train_test_split
from termcolor import colored
from deepclustering import DATA_PATH
from deepclustering.augment import SequentialWrapper
from deepclustering.dataset.segmentation import (
MedicalImageSegmentationDataset,
SubMedicalDatasetBasedOnIndex,
)
from deepclustering.dataset.semi_helper import MedicalDatasetSemiInterface
from deepclustering.utils.download_unzip_helper import download_and_extract_archive
from copy import deepcopy as dcopy
class ISeg2017Dataset(MedicalImageSegmentationDataset):
download_link = "https://drive.google.com/uc?id=11qUAufhgtzz3pg-dpE9fFRYTEDyTl9oV"
zip_name = "iSeg-2017.zip"
folder_name = "iSeg-2017"
def __init__(
self,
root_dir: str,
mode: str,
subfolders: List[str],
transforms: SequentialWrapper = None,
verbose=True,
) -> None:
if (
Path(root_dir, self.folder_name).exists()
and Path(root_dir, self.folder_name).is_dir()
):
print(f"Found {self.folder_name}.")
else:
download_and_extract_archive(
url=self.download_link,
download_root=root_dir,
extract_root=root_dir,
filename=self.zip_name,
remove_finished=False,
)
super().__init__(
os.path.join(root_dir, self.folder_name),
mode,
subfolders,
            transforms,
            r"^\d\d",
verbose,
)
        print(colored(f"{self.__class__.__name__} initialized.", "green"))
class ISeg2017SemiInterface(MedicalDatasetSemiInterface):
def __init__(
self,
root_dir=DATA_PATH,
labeled_data_ratio: float = 0.2,
unlabeled_data_ratio: float = 0.8,
seed: int = 0,
verbose: bool = True,
) -> None:
super().__init__(
ISeg2017Dataset,
root_dir,
labeled_data_ratio,
unlabeled_data_ratio,
seed,
verbose,
)
def _create_semi_supervised_datasets(
self,
labeled_transform: SequentialWrapper = None,
unlabeled_transform: SequentialWrapper = None,
val_transform: SequentialWrapper = None,
) -> Tuple[
MedicalImageSegmentationDataset,
MedicalImageSegmentationDataset,
MedicalImageSegmentationDataset,
]:
train_set = self.DataClass(
root_dir=self.root_dir,
mode="train",
subfolders=["T1", "T2", "Labels"],
transforms=None,
verbose=self.verbose,
)
val_set = self.DataClass(
root_dir=self.root_dir,
mode="val",
subfolders=["T1", "T2", "Labels"],
transforms=None,
verbose=self.verbose,
)
if self.labeled_ratio == 1:
labeled_set = dcopy(train_set)
unlabeled_set = dcopy(train_set)
print(
"labeled_ratio==1, return train_set as both the labeled and unlabeled datasets."
)
else:
labeled_patients, unlabeled_patients = train_test_split(
train_set.get_group_list(),
test_size=self.unlabeled_ratio,
train_size=self.labeled_ratio,
random_state=self.seed,
)
labeled_set = SubMedicalDatasetBasedOnIndex(train_set, labeled_patients)
unlabeled_set = SubMedicalDatasetBasedOnIndex(train_set, unlabeled_patients)
assert len(labeled_set) + len(unlabeled_set) == len(
train_set
), "wrong on labeled/unlabeled split."
del train_set
if self.verbose:
print(f"labeled_dataset:{labeled_set.get_group_list().__len__()} Patients")
print(
f"unlabeled_dataset:{unlabeled_set.get_group_list().__len__()} Patients"
)
if labeled_transform:
labeled_set.set_transform(labeled_transform)
if unlabeled_transform:
unlabeled_set.set_transform(unlabeled_transform)
if val_transform:
val_set.set_transform(val_transform)
return labeled_set, unlabeled_set, val_set
```
#### File: deepclustering/dataset/semi_helper.py
```python
__all__ = ["SemiDataSetInterface", "MedicalDatasetSemiInterface"]
from abc import abstractmethod
from copy import deepcopy as dcp
from itertools import repeat
from typing import Tuple, Callable, List, Type, Dict, Union
import numpy as np
from PIL import Image
from deepclustering.augment import SequentialWrapper
from deepclustering.dataloader.dataset import CombineDataset
from deepclustering.dataloader.sampler import InfiniteRandomSampler
from deepclustering.dataset.segmentation import (
MedicalImageSegmentationDataset,
PatientSampler,
)
from deepclustering.decorator import FixRandomSeed
from torch import Tensor
from torch.utils.data import Dataset, DataLoader, Subset
# this function splits the training set; note that we want to preserve the unlabeled dataset
def _draw_indices(
targets: np.ndarray,
labeled_sample_num: int,
class_nums: int = 10,
validation_num: int = 5000,
verbose: bool = True,
seed: int = 1,
) -> Tuple[List[int], List[int], List[int]]:
"""
draw indices for labeled and unlabeled dataset separations.
:param targets: `torch.utils.data.Dataset.targets`-like numpy ndarray with all labels, used to split into labeled, unlabeled and validation dataset.
:param labeled_sample_num: labeled sample number
:param class_nums: num of classes in the target.
:param validation_num: num of validation set, usually we split the big training set into `labeled`, `unlabeled`, `validation` sets, the `test` set is taken directly from the big test set.
:param verbose: whether to print information while running.
:param seed: random seed to draw indices
:return: labeled indices and unlabeled indices
"""
labeled_sample_per_class = int(labeled_sample_num / class_nums)
validation_sample_per_class = int(validation_num / class_nums) if class_nums else 0
targets = np.array(targets)
train_labeled_idxs: List[int] = []
train_unlabeled_idxs: List[int] = []
val_idxs: List[int] = []
with FixRandomSeed(seed):
for i in range(class_nums):
idxs = np.where(targets == i)[0]
np.random.shuffle(idxs)
train_labeled_idxs.extend(idxs[:labeled_sample_per_class])
train_unlabeled_idxs.extend(
idxs[labeled_sample_per_class:-validation_sample_per_class]
)
val_idxs.extend(idxs[-validation_sample_per_class:])
np.random.shuffle(train_labeled_idxs)
np.random.shuffle(val_idxs)
# highlight: this is to meet the UDA paper: unlabeled data is the true unlabeled_data + labeled_data, and there is no val_data
# train_unlabeled_idxs = train_labeled_idxs + train_unlabeled_idxs + val_idxs
# highlight: this leads to bad performance, using unlabeled = unlabeled + val
train_unlabeled_idxs = train_unlabeled_idxs + val_idxs
np.random.shuffle(train_unlabeled_idxs)
# assert train_unlabeled_idxs.__len__() == len(targets)
assert len(train_labeled_idxs) == labeled_sample_num
if verbose:
print(
f">>>Generating {len(train_labeled_idxs)} labeled data, {len(train_unlabeled_idxs)} unlabeled data, and {len(val_idxs)} validation data."
)
return train_labeled_idxs, train_unlabeled_idxs, val_idxs
class SemiDataSetInterface:
"""
Semi supervised dataloader creator interface
"""
def __init__(
self,
DataClass: Type[Dataset],
data_root: str,
labeled_sample_num: int,
validation_num=5000,
seed: int = 0,
batch_size: int = 1,
labeled_batch_size: int = None,
unlabeled_batch_size: int = None,
val_batch_size: int = None,
shuffle: bool = False,
num_workers: int = 1,
pin_memory: bool = True,
drop_last=False,
verbose: bool = True,
) -> None:
"""
when batch_size is not `None`, we do not consider `labeled_batch_size`, `unlabeled_batch_size`, and `val_batch_size`
when batch_size is `None`, `labeled_batch_size`,`unlabeled_batch_size` and `val_batch_size` should be all int and >=1
:param validation_num:
"""
super().__init__()
self.data_root = data_root
self.DataClass = DataClass
self.seed = seed
self.labeled_sample_num = labeled_sample_num
self.validation_num = validation_num
self.verbose = verbose
self._if_use_indiv_bz: bool = self._use_individual_batch_size(
batch_size,
labeled_batch_size,
unlabeled_batch_size,
val_batch_size,
verbose,
)
self.batch_params = {
"labeled_batch_size": labeled_batch_size,
"unlabeled_batch_size": unlabeled_batch_size,
"val_batch_size": val_batch_size,
}
self.dataloader_params = {
"batch_size": batch_size,
"shuffle": shuffle,
"num_workers": num_workers,
"pin_memory": pin_memory,
"drop_last": drop_last,
}
def _init_labeled_unlabled_val_and_test_sets(
self,
) -> Tuple[Subset, Subset, Subset, Dataset]: # type: ignore
"""
:param args: unknown args
:param kwargs: unknown kwargs
:return: Tuple of dataset, Labeled Dataset, Unlabeled Dataset, Val Dataset
"""
train_set, test_set = self._init_train_test_sets()
labeled_index, unlabeled_index, val_index = _draw_indices(
train_set.targets,
self.labeled_sample_num,
class_nums=10,
validation_num=self.validation_num,
seed=self.seed,
verbose=self.verbose,
)
# todo: to verify if here the dcp is necessary
labeled_set = Subset(dcp(train_set), labeled_index)
unlabeled_set = Subset(dcp(train_set), unlabeled_index)
val_set = Subset(dcp(train_set), val_index)
del train_set
return labeled_set, unlabeled_set, val_set, test_set
@staticmethod
def _use_individual_batch_size(
batch_size, l_batch_size, un_batch_size, val_batch_size, verbose
):
if (
isinstance(l_batch_size, int)
and isinstance(un_batch_size, int)
and isinstance(val_batch_size, int)
):
assert (
l_batch_size >= 1 and un_batch_size >= 1 and val_batch_size >= 1
), "batch_size should be greater than 1."
if verbose:
print(
f"Using labeled_batch_size={l_batch_size}, unlabeled_batch_size={un_batch_size}, val_batch_size={val_batch_size}"
)
return True
elif isinstance(batch_size, int) and batch_size >= 1:
if verbose:
print(f"Using all same batch size of {batch_size}")
return False
else:
raise ValueError(
f"batch_size setting error, given batch_size={batch_size}, labeled_batch_size={l_batch_size}, "
f"unlabeled_batch_size={un_batch_size}, val_batch_size={val_batch_size}."
)
@abstractmethod
def _init_train_test_sets(self) -> Tuple[Dataset, Dataset]:
raise NotImplementedError("train and test set initialization must be override")
def _create_semi_supervised_datasets(
self,
labeled_transform: Callable[[Image.Image], Tensor],
unlabeled_transform: Callable[[Image.Image], Tensor],
val_transform: Callable[[Image.Image], Tensor],
test_transform: Callable[[Image.Image], Tensor],
target_transform: Callable[[Tensor], Tensor] = None,
) -> Tuple[Subset, Subset, Subset, Dataset]:
(
labeled_set,
unlabeled_set,
val_set,
test_set,
) = self._init_labeled_unlabled_val_and_test_sets()
labeled_set = self.override_transforms(
labeled_set, labeled_transform, target_transform
)
unlabeled_set = self.override_transforms(
unlabeled_set, unlabeled_transform, target_transform
)
val_set = self.override_transforms(val_set, val_transform, target_transform)
test_set = self.override_transforms(test_set, test_transform, target_transform)
return labeled_set, unlabeled_set, val_set, test_set
@staticmethod
def override_transforms(dataset, img_transform, target_transform):
assert isinstance(dataset, (Dataset, Subset))
if isinstance(dataset, Subset):
dataset.dataset.transform = img_transform
dataset.dataset.target_transform = target_transform
else:
dataset.transform = img_transform
dataset.target_transform = target_transform
return dataset
def SemiSupervisedDataLoaders(
self,
labeled_transform: Callable[[Image.Image], Tensor],
unlabeled_transform: Callable[[Image.Image], Tensor],
val_transform: Callable[[Image.Image], Tensor],
test_transform: Callable[[Image.Image], Tensor],
target_transform: Callable[[Tensor], Tensor] = None,
) -> Tuple[DataLoader, DataLoader, DataLoader, DataLoader]:
_dataloader_params = dcp(self.dataloader_params)
(
labeled_set,
unlabeled_set,
val_set,
test_set,
) = self._create_semi_supervised_datasets(
labeled_transform=labeled_transform,
unlabeled_transform=unlabeled_transform,
val_transform=val_transform,
test_transform=test_transform,
target_transform=target_transform,
)
if self._if_use_indiv_bz:
_dataloader_params.update(
{"batch_size": self.batch_params.get("labeled_batch_size")}
)
labeled_loader = DataLoader(labeled_set, **_dataloader_params)
if self._if_use_indiv_bz:
_dataloader_params.update(
{"batch_size": self.batch_params.get("unlabeled_batch_size")}
)
unlabeled_loader = DataLoader(unlabeled_set, **_dataloader_params)
_dataloader_params.update({"shuffle": False, "drop_last": False})
if self._if_use_indiv_bz:
_dataloader_params.update(
{"batch_size": self.batch_params.get("val_batch_size")}
)
val_loader = DataLoader(val_set, **_dataloader_params)
test_loader = DataLoader(test_set, **_dataloader_params)
del _dataloader_params
return labeled_loader, unlabeled_loader, val_loader, test_loader
def SemiSupervisedParallelDataLoaders(
self,
labeled_transforms: List[Callable[[Image.Image], Tensor]],
unlabeled_transforms: List[Callable[[Image.Image], Tensor]],
val_transforms: List[Callable[[Image.Image], Tensor]],
test_transforms: List[Callable[[Image.Image], Tensor]],
target_transform: Callable[[Tensor], Tensor] = None,
use_infinite_sampler: bool = False,
) -> Tuple[DataLoader, DataLoader, DataLoader, DataLoader]:
_dataloader_params = dcp(self.dataloader_params)
def _override_transforms(dataset, img_transform_list, target_transform_list):
            # here deep copying the datasets is needed.
return [
self.override_transforms(dcp(dataset), img_trans, target_trans)
for img_trans, target_trans in zip(
img_transform_list, target_transform_list
)
]
(
labeled_set,
unlabeled_set,
val_set,
test_set,
) = self._init_labeled_unlabled_val_and_test_sets()
target_transform_list = repeat(target_transform)
labeled_sets = _override_transforms(
labeled_set, labeled_transforms, target_transform_list
)
unlabeled_sets = _override_transforms(
unlabeled_set, unlabeled_transforms, target_transform_list
)
val_sets = _override_transforms(val_set, val_transforms, target_transform_list)
test_sets = _override_transforms(
test_set, test_transforms, target_transform_list
)
labeled_set = CombineDataset(*labeled_sets)
unlabeled_set = CombineDataset(*unlabeled_sets)
val_set = CombineDataset(*val_sets)
test_set = CombineDataset(*test_sets)
if self._if_use_indiv_bz:
_dataloader_params.update(
{"batch_size": self.batch_params.get("labeled_batch_size")}
)
if use_infinite_sampler:
_shuffle = _dataloader_params.get("shuffle", False)
_dataloader_params.update({"shuffle": False})
labeled_loader = DataLoader(
labeled_set,
**_dataloader_params,
sampler=InfiniteRandomSampler(labeled_set, shuffle=_shuffle),
)
else:
labeled_loader = DataLoader(labeled_set, **_dataloader_params)
if self._if_use_indiv_bz:
_dataloader_params.update(
{"batch_size": self.batch_params.get("unlabeled_batch_size")}
)
if use_infinite_sampler:
_shuffle = _dataloader_params.get("shuffle", False)
_dataloader_params.update({"shuffle": False})
unlabeled_loader = DataLoader(
unlabeled_set,
**_dataloader_params,
sampler=InfiniteRandomSampler(unlabeled_set, shuffle=_shuffle),
)
else:
unlabeled_loader = DataLoader(unlabeled_set, **_dataloader_params)
_dataloader_params.update({"shuffle": False, "drop_last": False})
if self._if_use_indiv_bz:
_dataloader_params.update(
{"batch_size": self.batch_params.get("val_batch_size")}
)
val_loader = DataLoader(val_set, **_dataloader_params)
test_loader = DataLoader(test_set, **_dataloader_params)
return labeled_loader, unlabeled_loader, val_loader, test_loader
class MedicalDatasetSemiInterface:
"""
Semi-supervised interface for datasets using `MedicalImageSegmentationDataset`
"""
def __init__(
self,
DataClass: Type[MedicalImageSegmentationDataset],
root_dir: str,
labeled_data_ratio: float,
unlabeled_data_ratio: float,
seed: int = 0,
verbose: bool = True,
) -> None:
super().__init__()
self.DataClass = DataClass
self.root_dir = root_dir
assert (
labeled_data_ratio + unlabeled_data_ratio
) <= 1, f"`labeled_data_ratio` + `unlabeled_data_ratio` should be less than 1.0, given {labeled_data_ratio + unlabeled_data_ratio}"
self.labeled_ratio = labeled_data_ratio
self.unlabeled_ratio = unlabeled_data_ratio
self.val_ratio = 1 - (labeled_data_ratio + unlabeled_data_ratio)
self.seed = seed
self.verbose = verbose
def compile_dataloader_params(
self,
batch_size: int = 1,
labeled_batch_size: int = None,
unlabeled_batch_size: int = None,
val_batch_size: int = None,
shuffle: bool = False,
num_workers: int = 1,
pin_memory: bool = True,
drop_last=False,
):
self._if_use_indiv_bz: bool = self._use_individual_batch_size(
batch_size,
labeled_batch_size,
unlabeled_batch_size,
val_batch_size,
self.verbose,
)
if self._if_use_indiv_bz:
self.batch_params = {
"labeled_batch_size": labeled_batch_size,
"unlabeled_batch_size": unlabeled_batch_size,
"val_batch_size": val_batch_size,
}
self.dataloader_params = {
"batch_size": batch_size,
"shuffle": shuffle,
"num_workers": num_workers,
"pin_memory": pin_memory,
"drop_last": drop_last,
}
def SemiSupervisedDataLoaders(
self,
labeled_transform: SequentialWrapper = None,
unlabeled_transform: SequentialWrapper = None,
val_transform: SequentialWrapper = None,
group_labeled=False,
group_unlabeled=False,
group_val=True,
use_infinite_sampler: bool = False,
) -> Tuple[DataLoader, DataLoader, DataLoader]:
_dataloader_params = dcp(self.dataloader_params)
labeled_set, unlabeled_set, val_set = self._create_semi_supervised_datasets(
labeled_transform=labeled_transform,
unlabeled_transform=unlabeled_transform,
val_transform=val_transform,
)
# labeled_dataloader
if self._if_use_indiv_bz:
_dataloader_params.update(
{"batch_size": self.batch_params.get("labeled_batch_size")}
)
if use_infinite_sampler:
labeled_loader = (
DataLoader(
labeled_set,
sampler=InfiniteRandomSampler(
labeled_set, shuffle=_dataloader_params.get("shuffle", False)
),
**{k: v for k, v in _dataloader_params.items() if k != "shuffle"},
)
if not group_labeled
else self._grouped_dataloader(
labeled_set, use_infinite_sampler=True, **_dataloader_params
)
)
else:
labeled_loader = (
DataLoader(labeled_set, **_dataloader_params)
if not group_labeled
else self._grouped_dataloader(
labeled_set, use_infinite_sampler=False, **_dataloader_params
)
)
# unlabeled_dataloader
if self._if_use_indiv_bz:
_dataloader_params.update(
{"batch_size": self.batch_params.get("unlabeled_batch_size")}
)
if use_infinite_sampler:
unlabeled_loader = (
DataLoader(
unlabeled_set,
sampler=InfiniteRandomSampler(
unlabeled_set, shuffle=_dataloader_params.get("shuffle", False)
),
**{k: v for k, v in _dataloader_params.items() if k != "shuffle"},
)
if not group_unlabeled
else self._grouped_dataloader(
unlabeled_set, use_infinite_sampler=True, **_dataloader_params
)
)
else:
unlabeled_loader = (
DataLoader(unlabeled_set, **_dataloader_params)
if not group_unlabeled
else self._grouped_dataloader(
unlabeled_set, use_infinite_sampler=True, **_dataloader_params
)
)
# val_dataloader
_dataloader_params.update({"shuffle": False, "drop_last": False})
if self._if_use_indiv_bz:
_dataloader_params.update(
{"batch_size": self.batch_params.get("val_batch_size")}
)
val_loader = (
DataLoader(val_set, **_dataloader_params)
if not group_val
else self._grouped_dataloader(val_set, **_dataloader_params)
)
del _dataloader_params
return labeled_loader, unlabeled_loader, val_loader
@staticmethod
def _use_individual_batch_size(
batch_size, l_batch_size, un_batch_size, val_batch_size, verbose
) -> bool:
if (
isinstance(l_batch_size, int)
and isinstance(un_batch_size, int)
and isinstance(val_batch_size, int)
):
assert (
l_batch_size >= 1 and un_batch_size >= 1 and val_batch_size >= 1
), "batch_size should be greater than 1."
if verbose:
print(
f"Using labeled_batch_size={l_batch_size}, unlabeled_batch_size={un_batch_size}, val_batch_size={val_batch_size}"
)
return True
elif isinstance(batch_size, int) and batch_size >= 1:
if verbose:
print(f"Using all same batch size of {batch_size}")
return False
else:
raise ValueError(
f"batch_size setting error, given batch_size={batch_size}, labeled_batch_size={l_batch_size}, "
f"unlabeled_batch_size={un_batch_size}, val_batch_size={val_batch_size}."
)
def _create_semi_supervised_datasets(
self,
labeled_transform: SequentialWrapper = None,
unlabeled_transform: SequentialWrapper = None,
val_transform: SequentialWrapper = None,
) -> Tuple[
MedicalImageSegmentationDataset,
MedicalImageSegmentationDataset,
MedicalImageSegmentationDataset,
]:
raise NotImplementedError
def _grouped_dataloader(
self,
dataset: MedicalImageSegmentationDataset,
use_infinite_sampler: bool = False,
**dataloader_params: Dict[str, Union[int, float, bool]],
) -> DataLoader:
"""
        Return a dataloader whose batches are grouped by patient, based on the dataset's filename regex pattern.
        :param dataset: dataset to wrap
        :param use_infinite_sampler: whether the underlying PatientSampler should loop indefinitely
        :return: DataLoader built with a patient-wise batch_sampler
"""
dataloader_params = dcp(dataloader_params)
batch_sampler = PatientSampler(
dataset=dataset,
grp_regex=dataset._re_pattern,
shuffle=dataloader_params.get("shuffle", False),
verbose=self.verbose,
            infinite_sampler=use_infinite_sampler,
)
        # a DataLoader constructed with a batch_sampler cannot also take batch_size > 1
dataloader_params["batch_size"] = 1
dataloader_params["shuffle"] = False
dataloader_params["drop_last"] = False
return DataLoader(dataset, batch_sampler=batch_sampler, **dataloader_params)
@staticmethod
def override_transforms(
dataset: MedicalImageSegmentationDataset, transform: SequentialWrapper
):
assert isinstance(dataset, MedicalImageSegmentationDataset), dataset
assert isinstance(transform, SequentialWrapper), transform
dataset.set_transform(transform)
return dataset
```
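Both interfaces in the file above follow the same calling pattern: fix the batch sizes, then request labeled/unlabeled/validation(/test) loaders with one transform per split. Below is a minimal usage sketch; `SemiDatasetInterface` is a stand-in for the actual name of the first interface class (defined earlier in the file), and the MNIST subclass, paths and parameter values are illustrative assumptions only.
```python
from torchvision import transforms
from torchvision.datasets import MNIST
# `SemiDatasetInterface` stands in for the first interface class above; only the
# abstract `_init_train_test_sets` needs to be overridden by a concrete subclass.
class MNISTSemiInterface(SemiDatasetInterface):
    def _init_train_test_sets(self):
        train_set = MNIST(self.data_root, train=True, download=True)
        test_set = MNIST(self.data_root, train=False, download=True)
        return train_set, test_set
to_tensor = transforms.ToTensor()
interface = MNISTSemiInterface(
    data_root="./data", DataClass=MNIST, labeled_sample_num=100, validation_num=5000,
    seed=0, batch_size=64, shuffle=True, num_workers=2,
)
labeled_loader, unlabeled_loader, val_loader, test_loader = interface.SemiSupervisedDataLoaders(
    labeled_transform=to_tensor, unlabeled_transform=to_tensor,
    val_transform=to_tensor, test_transform=to_tensor,
)
```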
#### File: deepclustering/decorator/cache_decorator.py
```python
import inspect
from functools import update_wrapper
from multiprocessing import Manager
__all__ = ["SingleProcessCache", "MultiProcessCache"]
class SingleProcessCache:
"""
>>> class A:
>>> @SingleProcessCache(key="index")
>>> def method(self,index):
"""
def __init__(self, key=None) -> None:
self._key = key
self._is_class_method = False
self._cache = self._initialize_cache()
def _initialize_cache(self):
return {}
def _get_variable_from_keys(self, args, kwargs):
assert self._key is not None
if isinstance(self._key, (list, tuple)):
result = []
for k in self._key:
r = self._get_variable_from_key(k, args, kwargs)
result.append(r)
return tuple(result)
else:
return self._get_variable_from_key(self._key, args, kwargs)
def _get_variable_from_key(self, key, args, kwargs):
# get the arguments and default values of the func
assert (
key in self.arg_list
), "key should be in the args list {}, given {}.".format(
            self.arg_list, key
)
# check if there is the key in the kwargs
if key in kwargs:
return kwargs[key]
# check if there is the key in the args
pos = self.arg_list.index(key)
if pos < len(args):
return args[pos]
# the value is in the default setting.
return self.default_dict[key]
def get_key_value(self, args, kwargs):
if self._key is None:
if not self._is_class_method:
_args = args + tuple(kwargs.items())
else:
_args = tuple(list(args)[1:]) + tuple(kwargs.items())
else:
_args = self._get_variable_from_keys(args, kwargs)
return _args
def __call__(self, func):
func_args = inspect.getfullargspec(func)
        update_wrapper(self, func)  # copy the wrapped function's metadata onto the decorator instance
        self.func = func
self.default_dict = {}
if func_args.defaults:
self.default_dict = dict(
zip(func_args.args[::-1], func_args.defaults[::-1])
)
self.arg_list = func_args.args
if "self" in self.arg_list:
self._is_class_method = True
if self._key is not None:
if isinstance(self._key, (list, tuple)):
for k in self._key:
assert k in self.arg_list
else:
assert self._key in self.arg_list
def wrapper(*args, **kwargs):
_args = self.get_key_value(args, kwargs)
if _args in self._cache:
return self._cache[_args]
else:
val = self.func(*args, **kwargs)
self._cache[_args] = val
return val
return wrapper
class MultiProcessCache(SingleProcessCache):
"""
>>> class A:
>>> @MultiProcessCache(key="index")
>>> def method(self,index):
"""
def _initialize_cache(self):
return Manager().dict()
```
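The docstring above only hints at usage; here is a small, self-contained check of `SingleProcessCache` keyed on a method argument. The `SlowSquare` class and the timing are purely illustrative.
```python
import time
# SingleProcessCache is defined in the file above
# (assumed importable as deepclustering.decorator.cache_decorator per the file header).
class SlowSquare:
    @SingleProcessCache(key="index")
    def compute(self, index, verbose=False):
        time.sleep(0.1)  # stand-in for an expensive computation
        return index * index
s = SlowSquare()
start = time.time()
assert s.compute(3) == 9                  # computed once and stored under key 3
assert s.compute(3, verbose=True) == 9    # cache hit: only `index` is part of the key
print(f"two calls took {time.time() - start:.2f}s")  # about 0.1 s, not 0.2 s
```
`MultiProcessCache` behaves the same way but backs the cache with a `multiprocessing.Manager().dict()`, so worker processes can share entries.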
#### File: meters2/historicalContainer/historical_container.py
```python
from abc import ABCMeta
from collections import OrderedDict
from typing import Dict, OrderedDict as OrderedDict_Type, Any, Union
import pandas as pd
_Record_Type = Dict[str, float]
_Save_Type = OrderedDict_Type[int, _Record_Type]
__all__ = ["HistoricalContainer"]
class HistoricalContainer(metaclass=ABCMeta):
"""
    Aggregate historical information in an ordered dict.
"""
def __init__(self) -> None:
self._record_dict: _Save_Type = OrderedDict()
self._current_epoch: int = 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@property
def trainer(self):
return self._trainer
@property
def record_dict(self) -> _Save_Type:
return self._record_dict
def get_record_dict(self, epoch=None) -> Union[_Record_Type, _Save_Type]:
if epoch is None:
return self.record_dict
assert epoch in self._record_dict.keys(), "epoch {} not saved in {}".format(
epoch, ", ".join(list(self._record_dict.keys()))
)
return self.record_dict[epoch]
@property
def current_epoch(self) -> int:
""" return current epoch
"""
return self._current_epoch
def summary(self) -> pd.DataFrame:
        # todo: deal with the case where some epochs are absent
validated_table = pd.DataFrame(self.record_dict).T
# check if having missing values
if len(self.record_dict) < self.current_epoch:
missing_table = pd.DataFrame(
index=set(range(self.current_epoch)) - set(self.record_dict.keys())
)
validated_table = validated_table.append(missing_table, sort=True)
return validated_table
def add(self, input_dict: _Record_Type, epoch=None) -> None:
        if epoch is not None:
self._current_epoch = epoch
self._record_dict[self._current_epoch] = input_dict
self._current_epoch += 1
def reset(self) -> None:
self._record_dict: _Save_Type = OrderedDict()
self._current_epoch = 0
def state_dict(self) -> Dict[str, Any]:
"""Returns the state of the class.
"""
return self.__dict__
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""Loads the schedulers state.
Arguments:
state_dict (dict): weight_scheduler state. Should be an object returned
from a call to :math:`state_dict`.
"""
self.__dict__.update(state_dict)
def __repr__(self):
return str(pd.DataFrame(self.record_dict).T)
```
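A brief sketch of how `HistoricalContainer` accumulates per-epoch records; the metric names and values are arbitrary.
```python
# HistoricalContainer is defined in the file above
container = HistoricalContainer()
container.add({"loss": 1.20, "acc": 0.41})   # stored under epoch 0
container.add({"loss": 0.95, "acc": 0.52})   # stored under epoch 1
container.add({"loss": 0.80, "acc": 0.60})   # stored under epoch 2
print(container.current_epoch)               # 3: the counter advances after every add
print(container.get_record_dict(1))          # {'loss': 0.95, 'acc': 0.52}
print(container.summary())                   # pandas DataFrame, one row per epoch
```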
#### File: meters2/utils/__init__.py
```python
import pandas as pd
def rename_df_columns(dataframe: pd.DataFrame, name: str):
dataframe.columns = list(map(lambda x: name + "_" + x, dataframe.columns))
return dataframe
```
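A one-line demonstration of `rename_df_columns`, which prefixes every column with a split name; note that it mutates the passed DataFrame in place and returns it.
```python
import pandas as pd
# rename_df_columns is defined in the file above
df = pd.DataFrame({"loss": [0.9, 0.7], "acc": [0.50, 0.62]})
print(rename_df_columns(df, "train").columns.tolist())  # ['train_loss', 'train_acc']
```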
#### File: deepclustering/postprocessing/plot.py
```python
from functools import partial
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
import argparse
from pathlib import Path
from deepclustering.postprocessing.utils import identical, butter_lowpass_filter
c = ["r", "g", "b", "c", "m", "y", "k", "r", "g", "b", "c", "m", "y", "k"]
s = ["-", "--", "-.", ":", "-", "--", "-.", ":", "-"]
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Plot curves given folders, files, and column names"
)
parser.add_argument(
"--folders", type=str, nargs="+", help="input folders", required=True
)
parser.add_argument("--file", type=str, required=True, help="csv name")
parser.add_argument(
"--classes",
type=str,
nargs="+",
default=None,
help="classes to plot, default plot all.",
)
parser.add_argument(
"--yrange", type=float, nargs=2, default=None, help="Y range for plot"
)
parser.add_argument(
"--xrange",
type=float,
nargs=2,
metavar="N",
default=None,
help="X range for plot.",
)
parser.add_argument("--out_dir", type=str, default=None, help="output_dir")
parser.add_argument(
"--smooth_factor", type=float, default=None, help="smooth factor, default None"
)
return parser.parse_args()
def main(args: argparse.Namespace) -> None:
assert isinstance(args.folders, list)
assert isinstance(args.file, str)
if args.classes is not None:
assert isinstance(args.classes, (list))
file_paths = [Path(p) / args.file for p in args.folders]
    smooth_fn = identical
    if args.smooth_factor:
        smooth_fn = partial(
            butter_lowpass_filter, cutoff=5000 * args.smooth_factor, fs=10000
        )
if args.out_dir is not None:
parent_path = Path(args.out_dir)
else:
parent_path = file_paths[0].parents[1]
for p in file_paths:
assert p.exists(), p
# in the case args.classes is None:
if args.classes is None:
classes = []
for file_path in file_paths:
classes.extend(pd.read_csv(file_path, index_col=0).columns.to_list())
args.classes = list(set(classes))
for _class in args.classes:
for file_path in file_paths:
try:
                file = smooth_fn(pd.read_csv(file_path, index_col=0)[_class])
except KeyError:
continue
plt.plot(file, label=file_path.parents[0])
# file.plot(label=file_path.parents[0])
plt.legend()
plt.title(_class)
plt.grid()
if args.xrange is not None:
plt.xlim(args.xrange)
if args.yrange:
plt.ylim(args.yrange)
plt.savefig(Path(parent_path) / (parent_path.stem + _class + ".png"))
plt.close("all")
for i, _class in enumerate(args.classes):
for j, file_path in enumerate(file_paths):
try:
file = pd.read_csv(file_path, index_col=0)[_class]
except KeyError:
continue
file.plot(
label=file_path.parent.stem + f"/{_class}", color=c[i], linestyle=s[j]
)
plt.legend()
plt.title("total")
plt.grid()
if args.xrange is not None:
plt.xlim(args.xrange)
if args.yrange:
plt.ylim(args.yrange)
plt.savefig(Path(parent_path) / (parent_path.stem + "total.png"))
plt.close("all")
if __name__ == "__main__":
main(get_args())
```
#### File: deepclustering/writer/SummaryWriter.py
```python
from typing import Dict, Any
import matplotlib
from tensorboardX import SummaryWriter as _SummaryWriter
matplotlib.use("agg")
class SummaryWriter(_SummaryWriter):
def __init__(self, log_dir=None, comment="", **kwargs):
assert log_dir is not None, f"log_dir should be provided, given {log_dir}."
log_dir = str(log_dir) + "/tensorboard"
super().__init__(log_dir, comment, **kwargs)
def add_scalar_with_tag(
self, tag, tag_scalar_dict, global_step=None, walltime=None
):
"""
Add one-level dictionary {A:1,B:2} with tag
:param tag: main tag like `train` or `val`
:param tag_scalar_dict: dictionary like {A:1,B:2}
:param global_step: epoch
:param walltime: None
:return:
"""
assert global_step is not None
for k, v in tag_scalar_dict.items():
# self.add_scalars(main_tag=tag, tag_scalar_dict={k: v})
self.add_scalar(tag=f"{tag}/{k}", scalar_value=v, global_step=global_step)
def write_config(self, config: Dict[str, Any]):
pass
```
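A short sketch of `add_scalar_with_tag`, which fans a flat dict out into individual `tag/key` scalars; the log directory and metric values are placeholders.
```python
# SummaryWriter is defined in the file above (tensorboardX must be installed)
writer = SummaryWriter(log_dir="./runs/demo")   # events land in ./runs/demo/tensorboard
for epoch in range(3):
    writer.add_scalar_with_tag(
        tag="train",
        tag_scalar_dict={"loss": 1.0 / (epoch + 1), "acc": 0.5 + 0.1 * epoch},
        global_step=epoch,
    )                                           # written as train/loss and train/acc
writer.close()
```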
#### File: playground/IIC_VAT/VATIICTrainer.py
```python
from typing import List
from torch.utils.data import DataLoader
from deepclustering.loss.IMSAT_loss import Perturbation_Loss, MultualInformaton_IMSAT
from deepclustering.meters import AverageValueMeter, MeterInterface
from deepclustering.model import Model
from deepclustering.trainer import IICMultiHeadTrainer
from deepclustering.utils import dict_filter
from deepclustering.utils.VAT import VATLoss_Multihead
class IMSATIICTrainer(IICMultiHeadTrainer):
def __init__(
self,
model: Model,
train_loader_A: DataLoader,
train_loader_B: DataLoader,
val_loader: DataLoader,
max_epoch: int = 100,
save_dir: str = "./runs/IICMultiHead",
checkpoint_path: str = None,
device="cpu",
head_control_params: dict = {},
use_sobel: bool = True,
config: dict = None,
adv_weight=0.1,
) -> None:
super().__init__(
model,
train_loader_A,
train_loader_B,
val_loader,
max_epoch,
save_dir,
checkpoint_path,
device,
head_control_params,
use_sobel,
config,
)
self.p_criterion = Perturbation_Loss() # kl_div
self.MI = MultualInformaton_IMSAT() # mutual information
self.adv_weight = float(adv_weight)
def __init_meters__(self) -> List[str]:
METER_CONFIG = {
"train_head_A": AverageValueMeter(),
"train_head_B": AverageValueMeter(),
"train_adv_A": AverageValueMeter(),
"train_adv_B": AverageValueMeter(),
"val_average_acc": AverageValueMeter(),
"val_best_acc": AverageValueMeter(),
}
self.METERINTERFACE = MeterInterface(METER_CONFIG)
return [
"train_head_A_mean",
"train_head_B_mean",
"train_adv_A_mean",
"train_adv_B_mean",
"val_average_acc_mean",
"val_best_acc_mean",
]
@property
def _training_report_dict(self):
report_dict = {
"train_MI_A": self.METERINTERFACE["train_head_A"].summary()["mean"],
"train_MI_B": self.METERINTERFACE["train_head_B"].summary()["mean"],
"train_adv_A": self.METERINTERFACE["train_adv_A"].summary()["mean"],
"train_adv_B": self.METERINTERFACE["train_adv_B"].summary()["mean"],
}
report_dict = dict_filter(report_dict, lambda k, v: v != 0.0)
return report_dict
@property
def _eval_report_dict(self):
report_dict = {
"average_acc": self.METERINTERFACE.val_average_acc.summary()["mean"],
"best_acc": self.METERINTERFACE.val_best_acc.summary()["mean"],
}
report_dict = dict_filter(report_dict, lambda k, v: v != 0.0)
return report_dict
def _trainer_specific_loss(self, tf1_images, tf2_images, head_name):
iic_loss = super()._trainer_specific_loss(tf1_images, tf2_images, head_name)
adv_loss = 0
if self.adv_weight > 0:
adv_loss, *_ = VATLoss_Multihead(xi=0.25, eps=1, prop_eps=0.1)(
self.model.torchnet, tf1_images
)
self.METERINTERFACE[f"train_adv_{head_name}"].add(adv_loss.item())
total_loss = (
iic_loss + self.adv_weight * adv_loss
        )  # here the mi_batch_loss should be negative
return total_loss
```
#### File: PaNN/toy_example/dataset.py
```python
__all__ = ["get_mnist_dataloaders"]
from copy import deepcopy as dcp
from typing import *
import numpy as np
import pandas as pd
from PIL import Image
from torch import Tensor
from torch.utils.data import Dataset, Subset, DataLoader, RandomSampler
from torchvision import transforms
from torchvision.datasets import MNIST
from deepclustering import DATA_PATH
def _override_transformation(
dataset: Union[Dataset, Subset], transform: Callable[[Image.Image], Tensor]
):
"""
Iterative way to assign transform
:param dataset:
:param transform:
:return:
"""
assert isinstance(dataset, (MNIST, Subset))
if isinstance(dataset, MNIST):
dataset.transform = transform
else:
_override_transformation(dataset.dataset, transform)
def _draw_equal_dataset(
target: np.ndarray, num_samples: int = 1000, allowed_classes: List[int] = None
) -> np.ndarray:
"""
given the `target` and `num_samples`, return the labeled_index`
:param target: target
:param num_samples: 4000
:param allowed_classes: None or list of targets like [0, 1, 2]
:return: labeled_index
"""
if allowed_classes is None:
allowed_classes = list(range(len(np.unique(target))))
total_classes = len(allowed_classes)
num_per_class: int = int(num_samples / total_classes)
labeled_index: List[int] = []
for _target in allowed_classes:
labeled_index.extend(
np.random.permutation(np.where(target == _target)[0])[
:num_per_class
].tolist()
)
labeled_index.sort()
assert len(labeled_index) == num_samples
return np.array(labeled_index)
def _draw_inequal_dataset(
target: np.ndarray, class_sample_nums: Dict[int, int], excluded_index: List[int]
) -> np.ndarray:
available_index = list(set(list(range(target.__len__()))) - set(excluded_index))
return_list: List[int] = []
for _target, sample_num in class_sample_nums.items():
_target_index = np.where(target == _target)[0].tolist()
_available_index = list(set(available_index) & set(_target_index))
return_list.extend(
np.random.permutation(_available_index)[:sample_num].tolist()
)
assert set(excluded_index) & set(return_list) == set()
return np.array(return_list)
def show_dataset(dataset: Union[Subset, MNIST]):
if isinstance(dataset, MNIST):
print(dataset)
else:
print(dataset.dataset.__repr__())
indice = dataset.indices
try:
targets = dataset.dataset.targets[indice]
        except Exception:
            targets = dataset.dataset.targets[np.array(indice)]
print("label partition:")
print(pd.Series(targets).value_counts())
def get_mnist_dataloaders(
labeled_sample_num=10,
unlabeled_class_sample_nums=None,
train_transform=None,
val_transform=None,
dataloader_params={},
):
train_set = MNIST(root=DATA_PATH, train=True, download=True)
val_set = MNIST(root=DATA_PATH, train=False, download=True, transform=val_transform)
val_set_index = _draw_equal_dataset(
val_set.targets, num_samples=4000, allowed_classes=[0, 1, 2, 3, 4]
)
val_set = Subset(val_set, val_set_index)
labeled_index = _draw_equal_dataset(
train_set.targets, labeled_sample_num, allowed_classes=[0, 1, 2, 3, 4]
)
labeled_set = Subset(dcp(train_set), labeled_index)
_override_transformation(labeled_set, train_transform)
unlabeled_index = _draw_inequal_dataset(
train_set.targets,
class_sample_nums=unlabeled_class_sample_nums,
excluded_index=labeled_index.tolist(),
)
unlabeled_set = Subset(dcp(train_set), unlabeled_index)
_override_transformation(unlabeled_set, train_transform)
assert set(labeled_index.tolist()) & set(unlabeled_index.tolist()) == set()
del train_set
show_dataset(labeled_set)
show_dataset(unlabeled_set)
show_dataset(val_set)
labeled_loader = DataLoader(
labeled_set,
sampler=RandomSampler(
data_source=labeled_set, replacement=True, num_samples=int(1e5)
),
**dataloader_params
)
unlabeled_loader = DataLoader(
unlabeled_set,
sampler=RandomSampler(
data_source=unlabeled_set, replacement=True, num_samples=int(1e5)
),
**dataloader_params
)
val_loader = DataLoader(val_set, num_workers=1, batch_size=16)
return labeled_loader, unlabeled_loader, val_loader
```
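`get_mnist_dataloaders` draws a class-balanced labeled split (classes 0-4 only) and a user-chosen, possibly imbalanced unlabeled split, both resampled with replacement over 1e5 draws. A hedged call sketch; the sample counts and transforms are illustrative, and MNIST is downloaded to the library's `DATA_PATH`.
```python
from torchvision import transforms
# get_mnist_dataloaders is defined in the file above
labeled_loader, unlabeled_loader, val_loader = get_mnist_dataloaders(
    labeled_sample_num=10,                                  # 2 labeled images per class 0-4
    unlabeled_class_sample_nums={0: 1000, 1: 1000, 2: 1000, 3: 1000, 4: 1000},
    train_transform=transforms.ToTensor(),
    val_transform=transforms.ToTensor(),
    dataloader_params={"batch_size": 16, "num_workers": 2},
)
imgs, targets = next(iter(labeled_loader))
print(imgs.shape, targets.shape)                            # [16, 1, 28, 28] and [16]
```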
#### File: PaNN/toy_example/trainer.py
```python
from typing import *
import torch
from torch import Tensor, nn
from torch.utils.data import DataLoader
from deepclustering import ModelMode
from deepclustering.decorator import lazy_load_checkpoint
from deepclustering.loss import KL_div, simplex, Entropy
from deepclustering.meters import AverageValueMeter, MeterInterface, ConfusionMatrix
from deepclustering.model import Model, ZeroGradientBackwardStep
from deepclustering.optim import RAdam
from deepclustering.trainer import _Trainer
from deepclustering.utils import (
class2one_hot,
tqdm_,
flatten_dict,
nice_dict,
filter_dict,
)
from .augment import AffineTensorTransform
class SemiTrainer(_Trainer):
"""
    This trainer performs plain supervised training (the unlabeled regularizer is zero).
"""
@lazy_load_checkpoint
def __init__(
self,
model: Model,
labeled_loader: DataLoader,
unlabeled_loader: DataLoader,
val_loader: DataLoader,
max_epoch: int = 100,
save_dir: str = "base",
checkpoint_path: str = None,
device="cpu",
config: dict = None,
max_iter: int = 100,
**kwargs,
) -> None:
super().__init__(
model,
None,
val_loader,
max_epoch,
save_dir,
checkpoint_path,
device,
config,
**kwargs,
)
assert self._train_loader is None
self.labeled_loader = labeled_loader
self.unlabeled_loader = unlabeled_loader
self.kl_criterion = KL_div()
self.max_iter = max_iter
def __init_meters__(self) -> List[Union[str, List[str]]]:
meter_config = {
"lr": AverageValueMeter(),
"traloss": AverageValueMeter(),
"traconf": ConfusionMatrix(self._model.torchnet.num_classes),
"valloss": AverageValueMeter(),
"valconf": ConfusionMatrix(self._model.torchnet.num_classes),
}
self.METERINTERFACE = MeterInterface(meter_config)
return ["traloss_mean", "traconf_acc", "valloss_mean", "valconf_acc", "lr_mean"]
def start_training(self):
for epoch in range(self._start_epoch, self._max_epoch):
self._train_loop(
labeled_loader=self.labeled_loader,
unlabeled_loader=self.unlabeled_loader,
epoch=epoch,
)
with torch.no_grad():
current_score = self._eval_loop(self._val_loader, epoch)
self.METERINTERFACE.step()
self._model.schedulerStep()
# save meters and checkpoints
SUMMARY = self.METERINTERFACE.summary()
SUMMARY.to_csv(self._save_dir / self.wholemeter_filename)
self.drawer._draw(SUMMARY)
self.save_checkpoint(self.state_dict(), epoch, current_score)
self.writer.close()
def _train_loop(
self,
labeled_loader: DataLoader = None,
unlabeled_loader: DataLoader = None,
epoch: int = 0,
mode=ModelMode.TRAIN,
*args,
**kwargs,
):
self._model.set_mode(mode)
_max_iter = tqdm_(range(self.max_iter))
_max_iter.set_description(f"Training Epoch {epoch}")
self.METERINTERFACE["lr"].add(self._model.get_lr()[0])
for batch_num, (lab_img, lab_gt), (unlab_img, unlab_gt) in zip(
_max_iter, labeled_loader, unlabeled_loader
):
lab_img, lab_gt = lab_img.to(self._device), lab_gt.to(self._device)
lab_preds = self._model(lab_img)
sup_loss = self.kl_criterion(
lab_preds,
class2one_hot(lab_gt, C=self._model.torchnet.num_classes).float(),
)
reg_loss = self._trainer_specific_loss(unlab_img, unlab_gt)
self.METERINTERFACE["traloss"].add(sup_loss.item())
self.METERINTERFACE["traconf"].add(lab_preds.max(1)[1], lab_gt)
with ZeroGradientBackwardStep(
sup_loss + reg_loss, self._model
) as total_loss:
total_loss.backward()
report_dict = self._training_report_dict
_max_iter.set_postfix(report_dict)
print(f"Training Epoch {epoch}: {nice_dict(report_dict)}")
self.writer.add_scalar_with_tag("train", report_dict, global_step=epoch)
def _trainer_specific_loss(
self, unlab_img: Tensor, unlab_gt: Tensor, **kwargs
) -> Tensor:
return torch.tensor(0, dtype=torch.float32, device=self._device)
def _eval_loop(
self,
val_loader: DataLoader = None,
epoch: int = 0,
mode=ModelMode.EVAL,
*args,
**kwargs,
) -> float:
self._model.set_mode(mode)
_val_loader = tqdm_(val_loader)
_val_loader.set_description(f"Validating Epoch {epoch}")
for batch_num, (val_img, val_gt) in enumerate(_val_loader):
val_img, val_gt = val_img.to(self._device), val_gt.to(self._device)
val_preds = self._model(val_img)
val_loss = self.kl_criterion(
val_preds,
class2one_hot(val_gt, C=self._model.torchnet.num_classes).float(),
disable_assert=True,
)
self.METERINTERFACE["valloss"].add(val_loss.item())
self.METERINTERFACE["valconf"].add(val_preds.max(1)[1], val_gt)
report_dict = self._eval_report_dict
_val_loader.set_postfix(report_dict)
print(f"Validating Epoch {epoch}: {nice_dict(report_dict)}")
self.writer.add_scalar_with_tag(
tag="eval", tag_scalar_dict=report_dict, global_step=epoch
)
return self.METERINTERFACE["valconf"].summary()["acc"]
@property
def _training_report_dict(self):
return flatten_dict(
{
"tra_loss": self.METERINTERFACE["traloss"].summary()["mean"],
"tra_acc": self.METERINTERFACE["traconf"].summary()["acc"],
"lr": self.METERINTERFACE["lr"].summary()["mean"],
},
sep="_",
)
@property
def _eval_report_dict(self):
return flatten_dict(
{
"val_loss": self.METERINTERFACE["valloss"].summary()["mean"],
"val_acc": self.METERINTERFACE["valconf"].summary()["acc"],
},
sep="",
)
class SemiEntropyTrainer(SemiTrainer):
"""
    This trainer imposes a KL term between the average (marginal) prediction and the prior.
    By default, conditional entropy minimization is added with a small coefficient (0.1).
"""
@lazy_load_checkpoint
def __init__(
self,
model: Model,
labeled_loader: DataLoader,
unlabeled_loader: DataLoader,
val_loader: DataLoader,
max_epoch: int = 100,
save_dir: str = "base",
checkpoint_path: str = None,
device="cpu",
config: dict = None,
max_iter: int = 100,
prior: Tensor = None,
inverse_kl=False,
**kwargs,
) -> None:
"""
        :param prior: the predefined prior, must be provided as a Tensor lying on the simplex
        :param inverse_kl: if True, compute KL(prior || marginal) instead of KL(marginal || prior)
        :param kwargs: forwarded to the parent trainer
"""
super().__init__(
model,
labeled_loader,
unlabeled_loader,
val_loader,
max_epoch,
save_dir,
checkpoint_path,
device,
config,
max_iter,
**kwargs,
)
assert isinstance(prior, Tensor), prior
assert simplex(prior, 0), f"`prior` provided must be simplex."
self.prior = prior.to(self._device)
self.entropy = Entropy()
self.inverse_kl = inverse_kl
def __init_meters__(self) -> List[Union[str, List[str]]]:
columns = super().__init_meters__()
self.METERINTERFACE.register_new_meter("marginal", AverageValueMeter())
self.METERINTERFACE.register_new_meter("centropy", AverageValueMeter())
columns.extend(["marginal_mean", "centropy_mean"])
return columns
def _trainer_specific_loss(self, unlab_img: Tensor, *args, **kwargs) -> Tensor:
unlab_img = unlab_img.to(self._device)
unlabeled_preds = self._model(unlab_img)
assert simplex(unlabeled_preds, 1)
marginal = unlabeled_preds.mean(0)
if not self.inverse_kl:
marginal_loss = self.kl_criterion(
marginal.unsqueeze(0), self.prior.unsqueeze(0)
)
else:
marginal_loss = self.kl_criterion(
self.prior.unsqueeze(0), marginal.unsqueeze(0), disable_assert=True
)
self.METERINTERFACE["marginal"].add(marginal_loss.item())
centropy = self.entropy(unlabeled_preds)
marginal_loss += centropy * 0.1
self.METERINTERFACE["centropy"].add(centropy.item())
return marginal_loss
@property
def _training_report_dict(self):
report_dict = super()._training_report_dict
report_dict.update(
{
"marginal": self.METERINTERFACE["marginal"].summary()["mean"],
"centropy": self.METERINTERFACE["centropy"].summary()["mean"],
}
)
return filter_dict(report_dict)
class SemiPrimalDualTrainer(SemiEntropyTrainer):
"""
    This trainer implements the primal-dual method for matching the unlabeled marginal to the prior.
Conditional entropy minimization is included as in the previous case.
"""
def __init__(
self,
model: Model,
labeled_loader: DataLoader,
unlabeled_loader: DataLoader,
val_loader: DataLoader,
max_epoch: int = 100,
save_dir: str = "base",
checkpoint_path: str = None,
device="cpu",
config: dict = None,
max_iter: int = 100,
prior: Tensor = None,
inverse_kl=False,
**kwargs,
) -> None:
super().__init__(
model,
labeled_loader,
unlabeled_loader,
val_loader,
max_epoch,
save_dir,
checkpoint_path,
device,
config,
max_iter,
prior,
inverse_kl,
**kwargs,
)
self.mu = nn.Parameter(-1.0 / self.prior) # initialize mu = - 1 / prior
self.mu_optim = RAdam((self.mu,), lr=1e-4, betas=(0.5, 0.999))
def __init_meters__(self) -> List[Union[str, List[str]]]:
columns = super().__init_meters__()
self.METERINTERFACE.register_new_meter("residual", AverageValueMeter())
columns.append("residual_mean")
return columns
def _trainer_specific_loss(self, unlab_img: Tensor, **kwargs) -> Tensor:
unlab_img = unlab_img.to(self._device)
unlabeled_preds = self._model(unlab_img)
assert simplex(unlabeled_preds, 1)
marginal = unlabeled_preds.mean(0)
lagrangian = (
self.prior * (marginal * self.mu.detach() + 1 + (-self.mu.detach()).log())
).sum()
centropy = self.entropy(unlabeled_preds)
self.METERINTERFACE["centropy"].add(centropy.item())
lagrangian += centropy * 0.1
return lagrangian
def _update_mu(self, unlab_img: Tensor):
self.mu_optim.zero_grad()
unlab_img = unlab_img.to(self._device)
unlabeled_preds = self._model(unlab_img).detach()
assert simplex(unlabeled_preds, 1)
marginal = unlabeled_preds.mean(0)
        # gradient ascent on mu: maximize the Lagrangian with respect to the dual variable
lagrangian = (
-1 * (self.prior * (marginal * self.mu + 1 + (-self.mu).log())).sum()
)
lagrangian.backward()
self.mu_optim.step()
self.METERINTERFACE["residual"].add(self.mu.grad.abs().sum().item())
# to quantify:
marginal_loss = self.kl_criterion(
marginal.unsqueeze(0), self.prior.unsqueeze(0), disable_assert=True
)
self.METERINTERFACE["marginal"].add(marginal_loss.item())
def _train_loop(
self,
labeled_loader: DataLoader = None,
unlabeled_loader: DataLoader = None,
epoch: int = 0,
mode=ModelMode.TRAIN,
*args,
**kwargs,
):
self._model.set_mode(mode)
_max_iter = tqdm_(range(self.max_iter))
_max_iter.set_description(f"Training Epoch {epoch}")
self.METERINTERFACE["lr"].add(self._model.get_lr()[0])
for batch_num, (lab_img, lab_gt), (unlab_img, _) in zip(
_max_iter, labeled_loader, unlabeled_loader
):
lab_img, lab_gt = lab_img.to(self._device), lab_gt.to(self._device)
lab_preds = self._model(lab_img)
sup_loss = self.kl_criterion(
lab_preds,
class2one_hot(lab_gt, C=self._model.torchnet.num_classes).float(),
)
reg_loss = self._trainer_specific_loss(unlab_img)
self.METERINTERFACE["traloss"].add(sup_loss.item())
self.METERINTERFACE["traconf"].add(lab_preds.max(1)[1], lab_gt)
with ZeroGradientBackwardStep(
sup_loss + reg_loss, self._model
) as total_loss:
total_loss.backward()
self._update_mu(unlab_img)
report_dict = self._training_report_dict
_max_iter.set_postfix(report_dict)
print(f"Training Epoch {epoch}: {nice_dict(report_dict)}")
self.writer.add_scalar_with_tag("train", report_dict, global_step=epoch)
@property
def _training_report_dict(self):
report_dict = super()._training_report_dict
report_dict.update(
{"residual": self.METERINTERFACE["residual"].summary()["mean"]}
)
return report_dict
class SemiUDATrainer(SemiTrainer):
"""
    This trainer imposes UDA-style consistency: a KL term between predictions on an image and its affine-transformed version.
"""
@lazy_load_checkpoint
def __init__(
self,
model: Model,
labeled_loader: DataLoader,
unlabeled_loader: DataLoader,
val_loader: DataLoader,
max_epoch: int = 100,
save_dir: str = "base",
checkpoint_path: str = None,
device="cpu",
config: dict = None,
max_iter: int = 100,
prior=None,
**kwargs,
) -> None:
super().__init__(
model,
labeled_loader,
unlabeled_loader,
val_loader,
max_epoch,
save_dir,
checkpoint_path,
device,
config,
max_iter,
**kwargs,
)
self.prior = prior
self.affine_transform = AffineTensorTransform(
min_rot=0, max_rot=15, min_scale=0.8, max_scale=1.2
)
self.entropy_entropy = Entropy()
def __init_meters__(self) -> List[Union[str, List[str]]]:
columns = super().__init_meters__()
self.METERINTERFACE.register_new_meter("uda_reg", AverageValueMeter())
self.METERINTERFACE.register_new_meter("entropy", AverageValueMeter())
self.METERINTERFACE.register_new_meter("marginal", AverageValueMeter())
self.METERINTERFACE.register_new_meter("unl_acc", ConfusionMatrix(5))
columns.extend(["uda_reg_mean", "marginal_mean", "entropy_mean"])
return columns
def _trainer_specific_loss(
self, unlab_img: Tensor, unlab_gt: Tensor, **kwargs
) -> Tensor:
unlab_img = unlab_img.to(self._device)
unlab_img_tf, _ = self.affine_transform(unlab_img)
all_preds = self._model(torch.cat([unlab_img, unlab_img_tf], dim=0))
unlabel_pred, unlabel_pred_tf = torch.chunk(all_preds, 2)
assert simplex(unlabel_pred) and simplex(unlabel_pred_tf)
reg = self.kl_criterion(unlabel_pred_tf, unlabel_pred.detach())
entropy = self.entropy_entropy(unlabel_pred)
reg += entropy * 0.1
self.METERINTERFACE["uda_reg"].add(reg.item())
self.METERINTERFACE["unl_acc"].add(unlabel_pred.max(1)[1], unlab_gt)
self.METERINTERFACE["entropy"].add(entropy.item())
return reg
@property
def _training_report_dict(self):
report_dict = super()._training_report_dict
report_dict.update(
{
"unl_acc": self.METERINTERFACE["unl_acc"].summary()["acc"],
"uda_reg": self.METERINTERFACE["uda_reg"].summary()["mean"],
"marginal": self.METERINTERFACE["marginal"].summary()["mean"],
}
)
return filter_dict(report_dict)
```
#### File: playground/subspaceClustering/subclassClustering.py
```python
from typing import List
import torch
from deepclustering.method import _Method
from deepclustering.model import Model
from torch import Tensor
class SubSpaceClusteringMethod(_Method):
def __init__(
self,
model: Model,
lamda: float = 0.1,
lr: float = 0.0001,
num_samples: int = 100,
device: torch.device = torch.device("cuda"),
*args,
**kwargs,
):
super().__init__(model, *args, **kwargs)
assert isinstance(
device, torch.device
), f"device should be torch.device, given {device}."
self.lr = float(lr)
self.lamda = float(lamda)
self.device = device
self.adj_matrix = torch.randn(
(num_samples, num_samples), dtype=torch.float32
).to(self.device)
self._diagnoal_remove(self.adj_matrix)
# self.adj_matrix = torch.eye(num_samples).to(self.device) #+ 0.1*torch.randn((num_samples,num_samples)).to(device)*torch.eye(num_samples).to(self.device)
print()
def _diagnoal_remove(self, matrix):
assert (
matrix.shape.__len__() == 2 and matrix.shape[0] == matrix.shape[1]
), f"check the matrix dimension, given {matrix.shape}"
for i in range(len(matrix)):
matrix[i, i] = 0
assert self.check_diagnal_zero(matrix), f"matrix diag remove failed."
@staticmethod
def check_diagnal_zero(matrix: Tensor) -> bool:
return torch.allclose(matrix.diag(), torch.zeros_like(matrix.diag()))
def set_input(self, imgs: Tensor, index: List[int], *args, **kwargs):
super().set_input(*args, **kwargs)
assert imgs.shape[0] == len(index), (
f"imgs and index lengths should be the same, given len(imgs)="
f"{len(imgs)}, len(index)={len(index)}."
)
self.imgs = imgs
# self.pred, self._representation = self.model(self.imgs)
self._representation = self.imgs.view(self.imgs.shape[0], -1)
self.index = index
assert self._representation.shape[0] == self.index.shape[0]
self.current_adj_matrix: Tensor = self.adj_matrix[index][:, index]
assert self.current_adj_matrix.shape == torch.Size([len(index), len(index)])
# choose the minibatch of adj_matrix
def update(self, *args, **kwargs):
super().update(*args, **kwargs)
self._update_dictionary()
# self._gradient_descent()
def _gradient_descent(self):
current_adj_matrix = self.current_adj_matrix.clone()
self._diagnoal_remove(current_adj_matrix)
_reconstr_loss = (
(self._representation - torch.mm(current_adj_matrix, self._representation))
.norm(p=2, dim=1)
.mean()
)
self.model.zero_grad()
_reconstr_loss.backward()
self.model.step()
# print(_reconstr_loss)
def _update_dictionary(self):
assert self.check_diagnal_zero(self.current_adj_matrix)
X2 = self._representation.mm(self._representation.t()).detach()
I = torch.eye(len(self.current_adj_matrix)).to(self.device)
for _ in range(1000):
current_adj_matrix_hat = self.current_adj_matrix - self.lr * X2.mm(
self.current_adj_matrix - I
)
current_adj_sign = current_adj_matrix_hat.sign()
new_current_adj = (
torch.max(
current_adj_matrix_hat.__abs__() - self.lr * self.lamda,
torch.zeros_like(current_adj_matrix_hat),
)
* current_adj_sign
)
self._diagnoal_remove(new_current_adj)
self.current_adj_matrix = new_current_adj
# update the whole matrix
for i, c in enumerate(self.index):
self.adj_matrix[c, self.index] = new_current_adj[:, i] # new_current_adj
# self.adj_matrix.scatter((self.index, self.index), -1000)
class SubSpaceClusteringMethod2(SubSpaceClusteringMethod):
def __init__(
self,
model: Model,
lamda: float = 0.1,
lr: float = 0.005,
num_samples: int = 100,
device: torch.device = torch.device("cuda"),
*args,
**kwargs,
):
super().__init__(model, lamda, lr, num_samples, device, *args, **kwargs)
def _update_dictionary(self):
# reconstruction:
current_adj_matrix = self.current_adj_matrix.clone()
for _ in range(1000):
self._diagnoal_remove(current_adj_matrix)
current_adj_matrix.requires_grad = True
_reconstr_loss = (
(
self._representation
- torch.mm(current_adj_matrix, self._representation)
)
.norm(p=2, dim=1)
.mean()
)
_sparsity_loss = current_adj_matrix.norm(p=1, dim=0).mean()
_loss = _reconstr_loss + _sparsity_loss
_loss.backward()
# print(f"sparsity:{_sparsity_loss}, reconstruction:{_reconstr_loss}")
new_current_adj_matrix = (
current_adj_matrix - self.lamda * current_adj_matrix.grad
)
new_current_adj_matrix = new_current_adj_matrix.detach()
current_adj_matrix.grad.zero_()
# new_current_adj_matrix[new_current_adj_matrix.__abs__()<=0.0001]=0 #* torch.eye(len(self.index)).to(self.device)
self._diagnoal_remove(new_current_adj_matrix)
current_adj_matrix = new_current_adj_matrix
for i, c in enumerate(self.index):
self.adj_matrix[c, self.index] = new_current_adj_matrix[
i
] # new_current_adj
print(
f"reconstruction:{_reconstr_loss}, sparsity:{_sparsity_loss}, current_adj_max:{new_current_adj_matrix.diag().max()}, min:{new_current_adj_matrix.diag().min()}"
)
def update(self):
self._update_dictionary()
```
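`_update_dictionary` in the first class above is an ISTA-style update: a gradient step on the self-representation (reconstruction) term followed by soft-thresholding for the l1 penalty, with the diagonal zeroed to forbid trivial self-representation. The core step, isolated as a standalone sketch that mirrors the original update:
```python
import torch
def soft_threshold_step(C, X, lr=1e-4, lamda=0.1):
    """One ISTA-style step mirroring `_update_dictionary` (a sketch, not the class itself)."""
    X2 = X @ X.t()                               # Gram matrix of the flattened representations
    I = torch.eye(len(C))
    C_hat = C - lr * X2 @ (C - I)                # gradient step on the reconstruction term
    C_new = torch.clamp(C_hat.abs() - lr * lamda, min=0) * C_hat.sign()  # soft-thresholding
    C_new.fill_diagonal_(0)                      # no self-representation
    return C_new
X = torch.randn(8, 16)                           # 8 samples, 16-dim flattened representations
C = torch.zeros(8, 8)
for _ in range(100):
    C = soft_threshold_step(C, X)
```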
#### File: playground/swa_cifar_benchmark/my_scheduler.py
```python
import math
import warnings
from functools import wraps
from torch.optim.optimizer import Optimizer
class _LRScheduler(object):
def __init__(self, optimizer: Optimizer, last_epoch=-1):
self.optimizer = optimizer
if last_epoch == -1:
for group in optimizer.param_groups:
group.setdefault("initial_lr", group["lr"])
last_epoch = 0
else:
for i, group in enumerate(optimizer.param_groups):
if "initial_lr" not in group:
raise KeyError(
"param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i)
)
self.base_lrs = list(
map(lambda group: group["initial_lr"], optimizer.param_groups)
)
self.last_epoch = last_epoch
# Following https://github.com/pytorch/pytorch/issues/20124
# We would like to ensure that `lr_scheduler.step()` is called after
# `optimizer.step()`
def with_counter(func, opt):
@wraps(func)
def wrapper(*args, **kwargs):
opt._step_count += 1
return func(*args, **kwargs)
wrapper._with_counter = True
return wrapper
self.optimizer.step = with_counter(self.optimizer.step, self.optimizer)
self.optimizer._step_count = 0
self._step_count = 0
self.step(last_epoch)
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
return {
key: value for key, value in self.__dict__.items() if key != "optimizer"
}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_lr(self):
raise NotImplementedError
def step(self, epoch=None):
# Raise a warning if old pattern is detected
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.optimizer.step, "_with_counter"):
warnings.warn(
"Seems like `optimizer.step()` has been overridden after learning rate scheduler "
"initialization. Please, make sure to call `optimizer.step()` before "
"`lr_scheduler.step()`. See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate",
UserWarning,
)
# Just check if there were two first lr_scheduler.step() calls before optimizer.step()
elif self.optimizer._step_count < 1:
warnings.warn(
"Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
"In PyTorch 1.1.0 and later, you should call them in the opposite order: "
"`optimizer.step()` before `lr_scheduler.step()`. Failure to do this "
"will result in PyTorch skipping the first value of the learning rate schedule."
"See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate",
UserWarning,
)
self._step_count += 1
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group["lr"] = lr
class CosineAnnealingLR(_LRScheduler):
r"""Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr and
:math:`T_{cur}` is the number of epochs since the last restart in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
\cos(\frac{T_{cur}}{T_{max}}\pi))
When last_epoch=-1, sets initial lr as lr.
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
implements the cosine annealing part of SGDR, and not the restarts.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_max (int): Maximum number of iterations.
eta_min (float): Minimum learning rate. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
self.T_max = T_max
self.eta_min = eta_min
super(CosineAnnealingLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
return [
self.eta_min
+ (base_lr - self.eta_min)
* (1 + math.cos(math.pi * self.last_epoch / self.T_max))
/ 2
for base_lr in self.base_lrs
]
class CosineAnnealingLR_(_LRScheduler):
def __init__(self, optimizer, alpha_1, alpha_2, T, last_epoch=-1):
assert alpha_1 >= alpha_2
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.T = T
super().__init__(optimizer, last_epoch)
def get_lr(self):
_epoch = self.last_epoch % self.T
return [
(self.alpha_1 + (self.alpha_2 - self.alpha_1) * ((_epoch + 1) / self.T))
* base_lr
for base_lr in self.base_lrs
]
def if_cycle_ends(self):
_epoch = self.last_epoch % self.T + 1
return _epoch == self.T
if __name__ == "__main__":
import torch
import matplotlib.pyplot as plt
from torchvision.models import resnet18
net = resnet18(pretrained=False)
opt = torch.optim.Adam(net.parameters())
sched = CosineAnnealingLR_(opt, 1, 0.01, 20)
lrs = []
if_cycle_ends = []
for i in range(50):
lrs.append(sched.get_lr()[0])
if_cycle_ends.append(sched.if_cycle_ends())
sched.step()
plt.plot(range(len(lrs)), lrs)
plt.plot(range(len(lrs)), [x * 0.001 for x in if_cycle_ends])
plt.plot()
plt.show()
```
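Despite its name, `CosineAnnealingLR_` interpolates the learning-rate multiplier linearly from `alpha_1` toward `alpha_2` within each cycle of `T` epochs and then restarts: lr(e) = base_lr * (alpha_1 + (alpha_2 - alpha_1) * ((e mod T) + 1) / T). A quick numeric check complementing the plot in the `__main__` block above (the base learning rate of 1e-3 is an assumption):
```python
import torch
# CosineAnnealingLR_ is defined in the file above
opt = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
sched = CosineAnnealingLR_(opt, alpha_1=1.0, alpha_2=0.01, T=20)
print(sched.get_lr()[0])        # epoch 0 : 1e-3 * (1 - 0.99 * 1/20) ≈ 9.505e-4
for _ in range(19):
    sched.step()
print(sched.get_lr()[0])        # epoch 19: 1e-3 * 0.01 = 1e-5, bottom of the cycle
print(sched.if_cycle_ends())    # True -> the next step restarts near 9.505e-4
```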
#### File: deepclustering/augment/test_interface.py
```python
from unittest import TestCase
import numpy as np
import requests
import torch
from PIL import Image
from deepclustering.augment import TransformInterface
__doc__ = "this file tests functions in augment model"
URL = "https://cdn1.medicalnewstoday.com/content/images/articles/322/322868/golden-retriever-puppy.jpg"
class TestInterface(TestCase):
def setUp(self) -> None:
super().setUp()
self.color_img = Image.open(requests.get(URL, stream=True).raw)
assert np.array(self.color_img).shape[2] == 3
self.grey_img = Image.fromarray(np.array(self.color_img)[:, :, 0])
assert np.array(self.grey_img).shape.__len__() == 2
def test_config1(self):
config = {
"randomcrop": {"size": (20, 20)},
"resize": {"size": (32, 32)},
"Img2Tensor": {"include_rgb": False, "include_grey": True},
}
transform = TransformInterface(config)
output = transform(self.color_img)
assert output.shape[0] == 1
assert output.shape[1:] == torch.Size([32, 32])
output = transform(self.grey_img)
assert output.shape[0] == 1
assert output.shape[1:] == torch.Size([32, 32])
def test_config2(self):
config = {
"PILCutout": {"min_box": 100, "max_box": 200},
"resize": {"size": (321, 321)},
"Img2Tensor": {"include_rgb": True, "include_grey": False},
}
transform = TransformInterface(config)
output = transform(self.color_img)
assert output.shape[0] == 3
assert output.shape[1:] == torch.Size([321, 321])
with self.assertRaises(AssertionError):
output = transform(self.grey_img)
assert output.shape[0] == 1
assert output.shape[1:] == torch.Size([321, 321])
```
#### File: deepclustering/augment/test_sychronized_augmentation.py
```python
from unittest import TestCase
import numpy as np
import requests
from PIL import Image
from deepclustering.augment import TransformInterface
from deepclustering.augment.sychronized_augment import SequentialWrapper
URL = f"https://cdn1.medicalnewstoday.com/content/images/articles/322/322868/golden-retriever-puppy.jpg"
class Test_Sequential_Wrapper(TestCase):
def setUp(self) -> None:
super().setUp()
self.color_img = Image.open(requests.get(URL, stream=True).raw)
assert np.array(self.color_img).shape[2] == 3
self.mask = Image.fromarray(np.array(self.color_img)[:, :, 0])
assert np.array(self.mask).shape.__len__() == 2
def test_synchronized_transform(self):
config_1 = {
"randomcrop": {"size": (200, 200)},
"resize": {"size": (320, 320)},
"Img2Tensor": {"include_rgb": False, "include_grey": True},
}
transform1 = TransformInterface(config_1)
transform2 = TransformInterface(config_1)
synchronized_transform = SequentialWrapper(
img_transform=transform1,
target_transform=transform2,
if_is_target=[False, False],
)
result_imgs = synchronized_transform(self.color_img, self.color_img)
assert np.allclose(np.array(result_imgs[0]), np.array(result_imgs[1]))
```
#### File: deepclustering/augment/test_tensor_aug.py
```python
import requests
from PIL import Image
import numpy as np
from unittest import TestCase
import torch
from deepclustering.augment.tensor_augment import (
RandomCrop,
RandomHorizontalFlip,
RandomVerticalFlip,
)
URL = f"https://cdn1.medicalnewstoday.com/content/images/articles/322/322868/golden-retriever-puppy.jpg"
class TestTensorAugmentation(TestCase):
def setUp(self) -> None:
self.color_img = Image.open(requests.get(URL, stream=True).raw)
assert np.array(self.color_img).shape[2] == 3
self.grey_img = Image.fromarray(np.array(self.color_img)[:, :, 0])
assert np.array(self.grey_img).shape.__len__() == 2
self.cimg_np = np.array(self.color_img).transpose((2, 0, 1))[None]
assert self.cimg_np.shape.__len__() == 4
self.cimgs_np = np.concatenate((self.cimg_np, self.cimg_np), 0)
assert self.cimgs_np.shape.__len__() == 4
assert self.cimgs_np.shape[0] == 2
self.timg = torch.Tensor(self.cimg_np).float()
self.timgs = torch.Tensor(self.cimgs_np).float()
assert self.timg.shape.__len__() == 4
assert self.timgs.shape.__len__() == 4
def test_RandomCrop(self):
size = (500, 300)
transform = RandomCrop(size=size)
r_img = transform(self.timg)
r_imgs = transform(self.timgs)
assert r_imgs.shape[2:] == torch.Size(size)
assert r_img.shape[2:] == torch.Size(size)
def test_RandmHorizontalFlip(self):
transform = RandomHorizontalFlip(p=1)
r_img = transform(self.timg)
def test_RandmVertialFlip(self):
transform = RandomVerticalFlip(p=1)
r_img = transform(self.timg)
```
#### File: deepclustering/loss/test_imsat.py
```python
from unittest import TestCase
import torch
from deepclustering.loss.IMSAT_loss import MultualInformaton_IMSAT
class TestIMSATLoss(TestCase):
def setUp(self) -> None:
super().setUp()
self.pred_log = torch.randn(200, 10)
def test_multinformation_imsat(self):
criterion = MultualInformaton_IMSAT(mu=1.0)
MI, _ = criterion(self.pred_log)
assert MI > 0, f"MI should be aways positive, given {MI.item()}"
```
#### File: deepclustering/meters/test_Averagewith_std.py
```python
import time
from unittest import TestCase
import numpy as np
import torch
from deepclustering.meters import AveragewithStd, MeterInterface
from deepclustering.writer.draw_csv import DrawCSV2
class TestDrawAverageWithSTD(TestCase):
"""
This is to test the plotting of mean and std of a list of varying scalars
"""
def setUp(self) -> None:
config = {"avg": AveragewithStd()}
self.METER = MeterInterface(config)
columns_to_draw = [["avg_mean", "avg_lstd", "avg_hstd"]]
from pathlib import Path
self.drawer = DrawCSV2(
columns_to_draw=columns_to_draw, save_dir=Path(__file__).parent
)
def _train_loop(self, data, epoch):
for i in data:
self.METER["avg"].add(i)
time.sleep(0.1)
def test_torch(self):
for i in range(100):
data = torch.randn(10, 1) / (i + 1)
self._train_loop(data, i)
self.METER.step()
summary = self.METER.summary()
self.drawer.draw(summary)
def test_numpy(self):
for i in range(100):
data = np.random.randn(10, 1) / (i + 1)
self._train_loop(data, i)
self.METER.step()
summary = self.METER.summary()
self.drawer.draw(summary)
def test_list(self):
for i in range(100):
data = (np.random.randn(10, 1) / (i + 1)).squeeze().tolist()
self._train_loop(data, i)
self.METER.step()
summary = self.METER.summary()
self.drawer.draw(summary)
``` |
{
"source": "jizongFox/harmonic",
"score": 3
} |
#### File: harmonic/datasets/data.py
```python
import random
import numpy as np
import torch.utils.data as data
from PIL import Image
def default_loader(path):
return Image.open(path).convert('RGB')
class Reader(data.Dataset):
def __init__(self, image_list, labels_list=[], transform=None, target_transform=None, use_cache=True,
loader=default_loader):
self.images = image_list
self.loader = loader
        if len(labels_list) != 0:
assert len(image_list) == len(labels_list)
self.labels = labels_list
else:
self.labels = False
self.transform = transform
self.target_transform = target_transform
self.cache = {}
self.use_cache = use_cache
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
if idx not in self.cache:
img = self.loader(self.images[idx])
if self.labels:
target = Image.open(self.labels[idx])
else:
target = None
else:
img, target = self.cache[idx]
if self.use_cache:
self.cache[idx] = (img, target)
seed = np.random.randint(2147483647)
random.seed(seed)
if self.transform is not None:
img = self.transform(img)
random.seed(seed)
if self.labels:
if self.target_transform is not None:
target = self.target_transform(target)
return np.array(img), np.array(target)
``` |
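`Reader` caches decoded images in memory and re-seeds Python's `random` module before the image and target transforms, so paired random augmentations stay synchronized (this relies on older torchvision transforms drawing their randomness from `random`). A hedged usage sketch; the file paths are placeholders.
```python
from torch.utils.data import DataLoader
from torchvision import transforms
# Reader is defined in the file above; the paths below are placeholders
image_paths = ["images/case_000.png", "images/case_001.png"]
label_paths = ["labels/case_000.png", "labels/case_001.png"]
shared_aug = transforms.RandomHorizontalFlip(p=0.5)  # same seed -> image and mask flip together
dataset = Reader(image_paths, label_paths, transform=shared_aug, target_transform=shared_aug)
loader = DataLoader(dataset, batch_size=1, shuffle=True)
img, target = dataset[0]    # numpy arrays: augmented RGB image and its mask
```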
{
"source": "jizongFox/IIC",
"score": 2
} |
#### File: utils/cluster/IID_losses.py
```python
import math
from functools import lru_cache
import torch
from torch import nn, Tensor
def IID_loss(x_out, x_tf_out, lamb=1.0):
    # x_out and x_tf_out have already had softmax applied
_, k = x_out.size()
p_i_j = compute_joint(x_out, x_tf_out)
assert (p_i_j.size() == (k, k))
p_i = p_i_j.sum(dim=1).view(k, 1).expand(k, k)
p_j = p_i_j.sum(dim=0).view(1, k).expand(k,
k) # but should be same, symmetric
loss = - p_i_j * (torch.log(p_i_j)
- lamb * torch.log(p_j)
- lamb * torch.log(p_i))
loss = loss.sum()
loss_no_lamb = - p_i_j * (torch.log(p_i_j)
- torch.log(p_j)
- torch.log(p_i))
loss_no_lamb = loss_no_lamb.sum()
return loss, loss_no_lamb
def compute_joint(x_out, x_tf_out):
# produces variable that requires grad (since args require grad)
bn, k = x_out.size()
assert (x_tf_out.size(0) == bn and x_tf_out.size(1) == k)
p_i_j = x_out.unsqueeze(2) * x_tf_out.unsqueeze(1) # bn, k, k
p_i_j = p_i_j.sum(dim=0) # k, k
p_i_j = (p_i_j + p_i_j.t()) / 2. # symmetrise
p_i_j = p_i_j / p_i_j.sum() # normalise
return p_i_j
def compute_joint_2D_with_padding_zeros(x_out: Tensor, x_tf_out: Tensor, *, symmetric: bool = True):
k = x_out.shape[1]
x_out = x_out.swapaxes(0, 1).reshape(k, -1)
N = x_out.shape[1]
x_tf_out = x_tf_out.swapaxes(0, 1).reshape(k, -1)
p_i_j = (x_out / math.sqrt(N)) @ (x_tf_out.t() / math.sqrt(N))
# p_i_j = p_i_j - p_i_j.min().detach() + 1e-8
# T x T x k x k
# p_i_j /= p_i_j.sum()
# symmetrise, transpose the k x k part
if symmetric:
p_i_j = (p_i_j + p_i_j.t()) / 2.0
p_i_j = p_i_j.view(1, 1, k, k)
return p_i_j.contiguous()
class RedundancyCriterion(nn.Module):
def __init__(self, *, eps: float = 1e-5, symmetric: bool = True, lamda: float = 1, alpha: float) -> None:
super().__init__()
self._eps = eps
self.symmetric = symmetric
self.lamda = lamda
self.alpha = alpha
def forward(self, x_out: Tensor, x_tf_out: Tensor):
k = x_out.shape[1]
p_i_j = compute_joint_2D_with_padding_zeros(x_out=x_out, x_tf_out=x_tf_out, symmetric=self.symmetric)
p_i_j = p_i_j.view(k, k)
self._p_i_j = p_i_j
target = ((self.onehot_label(k=k, device=p_i_j.device) / k) * self.alpha + p_i_j * (1 - self.alpha))
p_i = p_i_j.sum(dim=1).view(k, 1).expand(k, k) # p_i should be the mean of the x_out
p_j = p_i_j.sum(dim=0).view(1, k).expand(k, k) # but should be same, symmetric
constrained = (-p_i_j * (- self.lamda * torch.log(p_j + self._eps)
- self.lamda * torch.log(p_i + self._eps))
).sum()
pseudo_loss = -(target * (p_i_j + self._eps).log()).sum()
return pseudo_loss + constrained
@lru_cache()
def onehot_label(self, k, device):
label = torch.eye(k, device=device, dtype=torch.bool)
return label
def kl_criterion(self, dist: Tensor, prior: Tensor):
return -(prior * torch.log(dist + self._eps) + (1 - prior) * torch.log(1 - dist + self._eps)).mean()
``` |
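A small, self-contained sketch of how `IID_loss` above is typically called, using random tensors in place of the two augmented-view softmax outputs:
```python
# Sketch: IID_loss expects row-wise softmax probabilities of shape (batch, k).
import torch

torch.manual_seed(0)
batch, k = 32, 10
logits = torch.randn(batch, k)
logits_tf = logits + 0.1 * torch.randn(batch, k)  # stand-in for an augmented view
x_out = torch.softmax(logits, dim=1)
x_tf_out = torch.softmax(logits_tf, dim=1)
loss, loss_no_lamb = IID_loss(x_out, x_tf_out, lamb=1.0)
print(float(loss), float(loss_no_lamb))
```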
{
"source": "jizongFox/kaggle-seizure-prediction",
"score": 2
} |
#### File: kaggle-seizure-prediction/linear_models/log_reg.py
```python
import numpy as np
import json, os
import preprocessors.fft as fft
from pandas import DataFrame
from utils.loader import load_test_data, load_train_data
from utils.config_name_creator import *
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from merger import merge_csv_files
from commons import reshape_data
def train(subject, data_path, reg_C=None):
d = load_train_data(data_path, subject)
x, y = d['x'], d['y']
x, y = reshape_data(x, y)
data_scaler = StandardScaler()
x = data_scaler.fit_transform(x)
lda = LogisticRegression(C=reg_C)
lda.fit(x, y)
return lda, data_scaler
def predict(subject, model, data_scaler, data_path, submission_path):
d = load_test_data(data_path, subject)
x_test, id = d['x'], d['id']
n_test_examples = x_test.shape[0]
n_timesteps = x_test.shape[3]
x_test = reshape_data(x_test)
x_test = data_scaler.transform(x_test)
pred_1m = model.predict_proba(x_test)[:, 1]
pred_10m = np.reshape(pred_1m, (n_test_examples, n_timesteps))
pred_10m = np.mean(pred_10m, axis=1)
ans = zip(id, pred_10m)
df = DataFrame(data=ans, columns=['clip', 'preictal'])
df.to_csv(submission_path + '/' + subject + '.csv', index=False, header=True)
return pred_10m
def run_trainer():
with open('SETTINGS.json') as f:
settings_dict = json.load(f)
reg_list = [10000000, 100, 10, 1.0, 0.1, 0.01]
for reg_C in reg_list:
print reg_C
data_path = settings_dict['path']['processed_data_path'] + '/' + create_fft_data_name(settings_dict)
submission_path = settings_dict['path']['submission_path'] + '/logreg_' + str(
reg_C) + '_' + create_fft_data_name(settings_dict)
if not os.path.exists(data_path):
fft.run_fft_preprocessor()
if not os.path.exists(submission_path):
os.makedirs(submission_path)
subjects = ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5', 'Patient_1', 'Patient_2']
for subject in subjects:
print subject
model, data_scaler, = train(subject, data_path, reg_C)
predict(subject, model, data_scaler, data_path, submission_path)
merge_csv_files(submission_path, subjects, 'submission')
merge_csv_files(submission_path, subjects, 'submission_softmax')
merge_csv_files(submission_path, subjects, 'submission_minmax')
merge_csv_files(submission_path, subjects, 'submission_median')
if __name__ == '__main__':
run_trainer()
```
#### File: kaggle-seizure-prediction/thesis_scripts/filters_plot.py
```python
import json
from utils.config_name_creator import create_cnn_model_name
import numpy as np
import matplotlib.pyplot as plt
import cPickle
import PIL.Image as Image
def paint_filter(patient_name, model_path):
with open(model_path + '/' + patient_name + '.pickle', 'rb') as f:
state = cPickle.load(f)
# first layer
W1 = state['weights'][0]
width, heights = W1.shape[3], W1.shape[2]
n_filters = W1.shape[0]
n_fbins = state['params']['n_fbins']
print W1.shape
x = np.zeros((heights, width * n_filters))
for i in range(n_filters):
        x[:, i * width:(i + 1) * width] = W1[i, 0, :, :]
ax = plt.gca()
ax.set_yticks(range(0, heights, n_fbins))
ax.yaxis.grid(True, which='major', linestyle='-')
plt.imshow(x, interpolation='none')
plt.show()
if __name__ == '__main__':
with open('SETTINGS.json') as f:
settings_dict = json.load(f)
s1 = '0.81448_nfreq6featumeanloghighc180lowcu0.1win_l60strid60globa0recep[1, 2]use_t0activ[urelu, urelu, urelu]dropo[0.2, 0.5]overl0strid[1, 1]train10weigh0.01scale0nkern[16, 32, 128]pool_[1, 1]l2_re0.0001valid10max_i150000rando1'
s2 = '0.80192_nfreq8featumeanlog_stdhighc180lowcu0.1win_l120strid120globa1recep[1, 1]use_t0activ[urelu, urelu, utanh]dropo[0.3, 0.6]overl4strid[1, 1]train10weigh0.01scale1nkern[16, 32, 512]pool_[1, 1]l2_re0.0001valid10max_i150000rando1'
model_path = settings_dict['path']['model_path'] + '/' + s2 # create_cnn_model_name(settings_dict)
names = ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5', 'Patient_1', 'Patient_2']
for patient_name in names:
paint_filter(patient_name, model_path)
```
#### File: kaggle-seizure-prediction/thesis_scripts/inputs_plot.py
```python
import numpy as np
import json
import os
import cPickle
import copy
import matplotlib.pyplot as plt
from theano import config
from test_labels_loader.loader import load_train_data
from test_labels_loader.config_name_creator import *
from test_labels_loader.data_scaler import scale_across_features, scale_across_time
config.floatX = 'float32'
def plot_examples(subject, data_path, scale):
d = load_train_data(data_path, subject)
x, y, filename_to_idx = d['x'], d['y'], d['filename_to_idx']
if scale:
x, _ = scale_across_time(x=x)
filename = 'Dog_1/Dog_1_interictal_segment_0001.mat'
idx = filename_to_idx[filename]
print filename_to_idx
fig = plt.figure()
fig.suptitle(filename)
print x[idx].shape
for i in range(x[idx].shape[0]):
        fig.add_subplot(4, 4, i + 1)
plt.imshow(x[idx, i, :, :], aspect='auto', origin='lower', interpolation='none')
plt.colorbar()
plt.show()
# for filename, idx in filename_to_idx.items():
# fig = plt.figure()
# fig.suptitle(filename)
# for i in range(x[idx].shape[0]):
# fig.add_subplot(4, 4, i)
# plt.imshow(x[idx, i, :, :], aspect='auto', origin='lower', interpolation='none')
# plt.colorbar()
# plt.show()
if __name__ == '__main__':
with open('SETTINGS.json') as f:
settings_dict = json.load(f)
# path
data_path = settings_dict['path']['processed_data_path'] + '/' + create_fft_data_name(settings_dict)
write_dir = data_path + '/img'
if not os.path.exists(write_dir):
os.mkdir(write_dir)
# params
model_params = settings_dict['model']
validation_params = settings_dict['validation']
names = ['Dog_1', 'Dog_3', 'Dog_2', 'Dog_5', 'Dog_4', 'Patient_1', 'Patient_2']
for subject in names:
print '***********************', subject, '***************************'
plot_examples(subject, data_path, scale=False)
```
#### File: kaggle-seizure-prediction/thesis_scripts/train_probs_plot.py
```python
import numpy as np
import json
import cPickle
import matplotlib.pyplot as plt
from theano import config
import matplotlib.cm as cmx
import matplotlib.colors as colors
from sklearn.metrics import roc_curve
from utils.loader import load_train_data
from utils.config_name_creator import *
from utils.data_scaler import scale_across_features, scale_across_time
from cnn.conv_net import ConvNet
config.floatX = 'float32'
def get_cmap(N):
color_norm = colors.Normalize(vmin=0, vmax=N - 1)
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv')
def map_index_to_rgb_color(index):
return scalar_map.to_rgba(index)
return map_index_to_rgb_color
def plot_train_probs(subject, data_path, model_path):
with open(model_path + '/' + subject + '.pickle', 'rb') as f:
state_dict = cPickle.load(f)
cnn = ConvNet(state_dict['params'])
cnn.set_weights(state_dict['weights'])
scalers = state_dict['scalers']
d = load_train_data(data_path, subject)
x, y = d['x'], d['y']
x, _ = scale_across_time(x, x_test=None, scalers=scalers) if state_dict['params']['scale_time'] \
else scale_across_features(x, x_test=None, scalers=scalers)
cnn.batch_size.set_value(x.shape[0])
probs = cnn.get_test_proba(x)
fpr, tpr, threshold = roc_curve(y, probs)
c = np.sqrt((1-tpr)**2+fpr**2)
opt_threshold = threshold[np.where(c==np.min(c))[0]]
print opt_threshold
x_coords = np.zeros(len(y), dtype='float64')
rng = np.random.RandomState(42)
x_coords += rng.normal(0.0, 0.08, size=len(x_coords))
plt.scatter(x_coords, probs, c=y, s=60)
plt.title(subject)
plt.show()
if __name__ == '__main__':
with open('SETTINGS.json') as f:
settings_dict = json.load(f)
data_path = settings_dict['path']['processed_data_path'] + '/' + create_fft_data_name(settings_dict)
model_path = settings_dict['path']['model_path'] + '/' + create_cnn_model_name(settings_dict)
subjects = ['Patient_1', 'Patient_2', 'Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5']
for subject in subjects:
print '***********************', subject, '***************************'
plot_train_probs(subject, data_path, model_path)
``` |
{
"source": "jizongFox/MCD_DA",
"score": 3
} |
#### File: classification/datasets/svhn.py
```python
from scipy.io import loadmat
import numpy as np
from ..utils.utils import dense_to_one_hot
import sys
sys.path.insert(0, 'MCD_DA/classification/data')
def load_svhn():
svhn_train = loadmat('MCD_DA/classification/data/train_32x32.mat')
svhn_test = loadmat('MCD_DA/classification/data/test_32x32.mat')
svhn_train_im = svhn_train['X']
svhn_train_im = svhn_train_im.transpose(3, 2, 0, 1).astype(np.float32)
svhn_label = dense_to_one_hot(svhn_train['y'])
svhn_test_im = svhn_test['X']
svhn_test_im = svhn_test_im.transpose(3, 2, 0, 1).astype(np.float32)
svhn_label_test = dense_to_one_hot(svhn_test['y'])
return svhn_train_im, svhn_label, svhn_test_im, svhn_label_test
```
#### File: MCD_DA/segmentation/transform.py
```python
import collections.abc
import math
import numbers
import random
import numpy as np
import torch
from PIL import Image
## highlight files containing transformation of PIL images
class Scale(object):
def __init__(self, size, interpolation=Image.BILINEAR):
        assert isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)
self.size = size
self.interpolation = interpolation
def __call__(self, img):
if isinstance(self.size, int):
w, h = img.size
if (w <= h and w == self.size) or (h <= w and h == self.size):
return img
if w < h:
ow = self.size
oh = int(self.size * h / w)
return img.resize((ow, oh), self.interpolation)
else:
oh = self.size
ow = int(self.size * w / h)
return img.resize((ow, oh), self.interpolation)
else:
return img.resize(self.size, self.interpolation)
## highlight: what does it do here.
class ToParallel(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
yield img
for t in self.transforms:
yield t(img)
class ToLabel(object):
def __call__(self, inputs):
# tensors = []
# for i in inputs:
# tensors.append(torch.from_numpy(np.array(i)).long())
tensors = torch.from_numpy(np.array(inputs)).long()
return tensors
class ToLabel_P(object):
def __call__(self, inputs):
tensors = []
for i in inputs:
tensors.append(torch.from_numpy(np.array(i)).long())
# tensors = torch.from_numpy(np.array(inputs)).long()
return tensors
class ReLabel(object):
def __init__(self, olabel, nlabel):
self.olabel = olabel
self.nlabel = nlabel
def __call__(self, inputs):
# assert isinstance(input, torch.LongTensor), 'tensor needs to be LongTensor'
for i in inputs:
i[i == self.olabel] = self.nlabel
return inputs
class ToSP(object):
def __init__(self, size):
        self.scale2 = Scale(size // 2, Image.NEAREST)
        self.scale4 = Scale(size // 4, Image.NEAREST)
        self.scale8 = Scale(size // 8, Image.NEAREST)
        self.scale16 = Scale(size // 16, Image.NEAREST)
        self.scale32 = Scale(size // 32, Image.NEAREST)
        self.scale64 = Scale(size // 64, Image.NEAREST)
def __call__(self, input):
# input2 = self.scale2(input)
# input4 = self.scale4(input)
# input8 = self.scale8(input)
# input16 = self.scale16(input)
# input32 = self.scale32(input)
input64 = self.scale64(input)
inputs = input # [input, input64]
# inputs =input
return inputs
class HorizontalFlip(object):
"""Horizontally flips the given PIL.Image with a probability of 0.5."""
def __call__(self, img: Image.Image) -> Image.Image:
return img.transpose(Image.FLIP_LEFT_RIGHT)
class VerticalFlip(object):
def __call__(self, img:Image.Image) -> Image.Image:
return img.transpose(Image.FLIP_TOP_BOTTOM)
def uint82bin(n, count=8):
"""returns the binary of integer n, count refers to amount of bits"""
return ''.join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])
def labelcolormap(N):
cmap = np.zeros((N, 3), dtype=np.uint8)
for i in range(N):
r = 0
g = 0
b = 0
id = i
for j in range(7):
str_id = uint82bin(id)
r = r ^ (np.uint8(str_id[-1]) << (7 - j))
g = g ^ (np.uint8(str_id[-2]) << (7 - j))
b = b ^ (np.uint8(str_id[-3]) << (7 - j))
id = id >> 3
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
return cmap
def pallet():
pallet = [[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
[0, 0, 0]]
pallet = np.array(pallet)
return pallet
def colormap(n):
cmap = np.zeros([n, 3]).astype(np.uint8)
for i in np.arange(n):
r, g, b = np.zeros(3)
for j in np.arange(8):
r = r + (1 << (7 - j)) * ((i & (1 << (3 * j))) >> (3 * j))
g = g + (1 << (7 - j)) * ((i & (1 << (3 * j + 1))) >> (3 * j + 1))
b = b + (1 << (7 - j)) * ((i & (1 << (3 * j + 2))) >> (3 * j + 2))
cmap[i, :] = np.array([r, g, b])
return cmap
class Colorize(object):
def __init__(self, n=20):
self.cmap = labelcolormap(n)
self.cmap = torch.from_numpy(self.cmap[:n])
def __call__(self, gray_image):
size = gray_image.size()
color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
for label in range(0, len(self.cmap)):
mask = (label == gray_image[0]).cpu()
color_image[0][mask] = self.cmap[label][0]
color_image[1][mask] = self.cmap[label][1]
color_image[2][mask] = self.cmap[label][2]
return color_image
class Colorize2(object):
def __init__(self, n=20):
self.cmap = pallet()
self.cmap = torch.from_numpy(self.cmap)
def __call__(self, gray_image):
size = gray_image.size()
color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
for label in range(0, len(self.cmap)):
mask = (label == gray_image[0]).cpu()
color_image[0][mask] = self.cmap[label][0]
color_image[1][mask] = self.cmap[label][1]
color_image[2][mask] = self.cmap[label][2]
return color_image
class RandomSizedCrop:
"""This is random sized cropping."""
def __init__(self, size=None, interpolation=Image.BILINEAR):
"""Set output size and type of interpolation."""
self.size = size
self.img_interpolation = interpolation
self.target_interpolation = Image.NEAREST
def __call__(self, img):
"""Random sized cropp -> resize into 'self.size'."""
# default size
if self.size is None:
self.size = img.size
# try 10times
for attempt in range(10):
area = img.size[0] * img.size[1]
# decide w, h
cropped_area = random.uniform(0.5, 1.0) * area
aspect_ratio = random.uniform(3. / 4, 4. / 3)
w = int(round(math.sqrt(cropped_area * aspect_ratio)))
h = int(round(math.sqrt(cropped_area / aspect_ratio)))
# which is larger (prob: 0.5)
if random.random() < 0.5:
w, h = h, w
# random crop, if possible
if w <= img.size[0] and h <= img.size[1]:
x1 = random.randint(0, img.size[0] - w)
y1 = random.randint(0, img.size[1] - h)
img = img.crop((x1, y1, x1 + w, y1 + h))
assert (img.size == (w, h))
return img.resize(self.size, self.img_interpolation)
# Fallback
scale = Scale(self.size, interpolation=self.img_interpolation)
crop = CenterCrop(self.size)
return crop(scale(img))
class CenterCrop(object):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
w, h = img.size
th, tw = self.size
x1 = int(round((w - tw) / 2.))
y1 = int(round((h - th) / 2.))
return img.crop((x1, y1, x1 + tw, y1 + th))
class RandomHorizontalFlip:
"""
Random horizontal flip.
prob = 0.5
"""
def __call__(self, img):
if random.random() < 0.5:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
class RandomVerticalFlip:
"""
Random vertical flip.
prob = 0.5
"""
def __call__(self, img):
if random.random() < 0.5:
return img.transpose(Image.FLIP_TOP_BOTTOM)
return img
class RandomRotation:
"""
Random roatation.
-max_deg ~ deg
"""
def __call__(self, img, max_deg=10):
deg = np.random.randint(-max_deg, max_deg, 1)[0]
return img.rotate(deg)
``` |
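A short sketch chaining a few of the transforms above on a synthetic PIL image and colorizing a fake label map (shapes and the 20-class count are arbitrary):
```python
# Sketch with synthetic data; sizes and the 20-class assumption are illustrative.
import numpy as np
import torch
from PIL import Image

img = Image.fromarray(np.random.randint(0, 255, (120, 160, 3), dtype=np.uint8))
img = Scale(96)(img)                          # shorter side -> 96, keeps aspect ratio
img = RandomHorizontalFlip()(img)
labels = torch.randint(0, 20, (1, 96, 128))   # fake segmentation map, 20 classes
color = Colorize(n=20)(labels)                # 3 x H x W uint8 colour image
print(img.size, tuple(color.shape))
```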
{
"source": "jizongFox/rising",
"score": 2
} |
#### File: notebooks/medical_seg/dataset.py
```python
import re
from collections.abc import Iterator
from pathlib import Path
import numpy as np
import SimpleITK as sitk
import torch
from torch.utils.data import Sampler
from torch.utils.data.dataset import Dataset, T_co
class ACDCDataset(Dataset):
def __init__(self, *, root: str, train: bool = True) -> None:
self.root = root
self.train = train
self._root = Path(root, "train" if train else "val")
self.images = filter(
self.image_filter, [str(x.relative_to(self._root)) for x in Path(self._root).rglob("*.nii.gz")]
)
self.images = sorted(self.images)
def __getitem__(self, index) -> T_co:
image_path = str(self._root / self.images[index])
gt_path = image_path.replace(".nii.gz", "_gt.nii.gz")
image = sitk.GetArrayFromImage(sitk.ReadImage(image_path)).astype(float, copy=False)[None, ...]
gt = sitk.GetArrayFromImage(sitk.ReadImage(gt_path)).astype(float, copy=False)[None, ...]
return {"image": torch.from_numpy(image), "label": torch.from_numpy(gt)}
def __len__(self):
return len(self.images)
@staticmethod
def image_filter(path: str):
        _match = re.compile(r"patient\d+_frame\d+\.nii\.gz").search(str(path))
if _match is None:
return False
return True
class _InfiniteRandomIterator(Iterator):
def __init__(self, data_source, shuffle=True):
self.data_source = data_source
self.shuffle = shuffle
if self.shuffle:
self.iterator = iter(torch.randperm(len(self.data_source)).tolist())
else:
self.iterator = iter(torch.arange(start=0, end=len(self.data_source)).tolist())
def __next__(self):
try:
idx = next(self.iterator)
except StopIteration:
if self.shuffle:
self.iterator = iter(torch.randperm(len(self.data_source)).tolist())
else:
self.iterator = iter(torch.arange(start=0, end=len(self.data_source)).tolist())
idx = next(self.iterator)
return idx
class InfiniteRandomSampler(Sampler):
def __init__(self, data_source, shuffle=True):
super().__init__(data_source)
self.data_source = data_source
self.shuffle = shuffle
def __iter__(self):
return _InfiniteRandomIterator(self.data_source, shuffle=self.shuffle)
def __len__(self):
return len(self.data_source)
```
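A minimal wiring sketch for `ACDCDataset` with `InfiniteRandomSampler`; the dataset root is an assumption and the ACDC NIfTI files must already exist there:
```python
# Sketch only: assumes ACDC *.nii.gz files live under ./data/ACDC/{train,val}.
from torch.utils.data import DataLoader

dataset = ACDCDataset(root="./data/ACDC", train=True)   # assumed path
sampler = InfiniteRandomSampler(dataset, shuffle=True)
loader = DataLoader(dataset, sampler=sampler, batch_size=1, num_workers=0)
batch = next(iter(loader))
print(batch["image"].shape, batch["label"].shape)
```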
#### File: tests/transforms/test_spatial_transforms.py
```python
import random
import unittest
import SimpleITK as sitk
import torch
from matplotlib import pyplot as plt
from rising.constants import FInterpolation
from rising.loading import DataLoader
from rising.random import DiscreteParameter, UniformParameter
from rising.transforms import Mirror, ProgressiveResize, ResizeNative, Rot90, SizeStepScheduler, Zoom, \
ResizeNativeCentreCrop
from tests.realtime_viewer import multi_slice_viewer_debug
class TestSpatialTransforms(unittest.TestCase):
def setUp(self) -> None:
torch.manual_seed(0)
random.seed(0)
self.batch_dict = {
"data": self.load_nii_data("../../tests/data/patient004_frame01.nii.gz"),
"label": self.load_nii_data("../../tests/data/patient004_frame01_gt.nii.gz"),
}
def load_nii_data(self, path):
return torch.from_numpy(
sitk.GetArrayFromImage(sitk.ReadImage(str(path))).astype(float, copy=False)
).unsqueeze(1)
def test_mirror_transform(self):
trafo = Mirror(dims=DiscreteParameter((0, 1, (0, 1))), p_sample=0.5, keys=("data", "label"))
outp = trafo(**self.batch_dict)
image1, target1 = self.batch_dict.values()
image2, target2 = outp.values()
multi_slice_viewer_debug(image1.squeeze(), target1.squeeze())
multi_slice_viewer_debug(image2.squeeze(), target2.squeeze(), block=True)
def test_rot90_transform(self):
trafo = Rot90(dims=[0, 1], num_rots=DiscreteParameter((2,)), p_sample=0.5, keys=("data", "label"))
outp = trafo(**self.batch_dict)
image1, target1 = self.batch_dict.values()
image2, target2 = outp.values()
multi_slice_viewer_debug(image1.squeeze(), target1.squeeze())
multi_slice_viewer_debug(image2.squeeze(), target2.squeeze(), block=True)
trafo = Rot90(dims=[0, 1], num_rots=DiscreteParameter((2,)), p_sample=1, keys=("data", "label"))
outp = trafo(**self.batch_dict)
image1, target1 = self.batch_dict.values()
image2, target2 = outp.values()
multi_slice_viewer_debug(image1.squeeze(), target1.squeeze())
multi_slice_viewer_debug(image2.squeeze(), target2.squeeze(), block=True)
def test_resize_transform(self):
trafo = ResizeNative(
(128, 256),
keys=(
"data",
"label",
),
mode=(FInterpolation.bilinear, FInterpolation.nearest),
align_corners=(False, None),
)
out = trafo(**self.batch_dict)
image1, target1 = self.batch_dict.values()
image2, target2 = out.values()
multi_slice_viewer_debug(image1.squeeze(), target1.squeeze())
multi_slice_viewer_debug(image2.squeeze(), target2.squeeze(), block=True, no_contour=True)
def test_zoom_transform(self):
_range = (1.5, 2.0)
# scale_factor = UniformParameter(*_range)()
trafo = Zoom(scale_factor=[UniformParameter(*_range), UniformParameter(*_range)], keys=("data", "label"))
out = trafo(**self.batch_dict)
image1, target1 = self.batch_dict.values()
image2, target2 = out.values()
multi_slice_viewer_debug(image1.squeeze(), target1.squeeze(), block=False, no_contour=True)
multi_slice_viewer_debug(image2.squeeze(), target2.squeeze(), block=True, no_contour=True)
def test_progressive_resize(self):
image1, target1 = self.batch_dict.values()
multi_slice_viewer_debug(image1.squeeze(), target1.squeeze(), no_contour=True)
sizes = [1, 3, 6]
scheduler = SizeStepScheduler([1, 2], [112, 224, 336])
trafo = ProgressiveResize(scheduler, keys=("data", "label"))
for i in range(3):
outp = trafo(**self.batch_dict)
image2, target2 = outp.values()
multi_slice_viewer_debug(image2.squeeze(), target2.squeeze(), block=False, no_contour=True)
plt.show()
def test_size_step_scheduler(self):
scheduler = SizeStepScheduler([10, 20], [16, 32, 64])
self.assertEqual(scheduler(-1), 16)
self.assertEqual(scheduler(0), 16)
self.assertEqual(scheduler(5), 16)
self.assertEqual(scheduler(11), 32)
self.assertEqual(scheduler(21), 64)
def test_size_step_scheduler_error(self):
with self.assertRaises(TypeError):
scheduler = SizeStepScheduler([10, 20], [32, 64])
def test_progressive_resize_integration(self):
sizes = [1, 3, 6]
scheduler = SizeStepScheduler([1, 2], [1, 3, 6])
trafo = ProgressiveResize(scheduler)
dset = [self.batch_dict] * 10
loader = DataLoader(dset, num_workers=4, batch_transforms=trafo)
data_shape = [tuple(i["data"].shape) for i in loader]
self.assertIn((1, 10, 1, 1, 1), data_shape)
self.assertIn((1, 10, 3, 3, 3), data_shape)
self.assertIn((1, 10, 6, 6, 6), data_shape)
def test_resize_native_center_crop(self):
trafo = ResizeNativeCentreCrop(size=(1000, 2000), margin=(10, 15), keys=("data", "label"),
mode=(FInterpolation.bilinear, FInterpolation.nearest))
outp = trafo(**self.batch_dict)
image1, target1 = self.batch_dict.values()
image2, target2 = outp.values()
multi_slice_viewer_debug(image1.squeeze(), target1.squeeze())
multi_slice_viewer_debug(image2.squeeze(), target2.squeeze(), block=True)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jizt-it/jizt-backend-microservice",
"score": 2
} |
#### File: dispatcher/data/summary_dao_factory.py
```python
__version__ = '0.1.3'
import logging
from summary_dao_postgresql import SummaryDAOPostgresql
class SummaryDAOFactory:
"""Summary DAO Factory."""
_instance = None
def __new__(cls,
host: str,
dbname: str,
user: str,
password: str,
log_level: int = logging.ERROR
) -> SummaryDAOPostgresql:
"""Singleton.
Args:
host (:obj:`str`):
The database host.
dbname (:obj:`str`):
The database name.
user (:obj:`str`):
The database user.
password (:obj:`str`):
The user's password.
log_level (:obj:`int`, `optional`, defaults to `logging.ERROR`):
The log level.
Returns:
:obj:`SummaryDAOFactory`: The single instance
of the DAO.
"""
if cls._instance is None:
cls._instance = SummaryDAOPostgresql(
host,
dbname,
user,
password,
log_level
)
return cls._instance
```
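A short sketch of the singleton behaviour: after the first call, the factory ignores new arguments and returns the cached `SummaryDAOPostgresql` instance (credentials below are placeholders):
```python
# Placeholder credentials; a live PostgreSQL server is only needed for real queries.
dao_a = SummaryDAOFactory("localhost", "jizt", "user", "secret")
dao_b = SummaryDAOFactory("other-host", "other-db", "user", "secret")
assert dao_a is dao_b  # same object: the first instance is cached in _instance
```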
#### File: dispatcher/data/summary_dao_postgresql.py
```python
__version__ = '0.1.8'
import logging
import psycopg2
import hashlib
from io import StringIO
from collections import OrderedDict
from psycopg2.extras import Json
from summary_dao_interface import SummaryDAOInterface
from schemas import Summary
from summary_status import SummaryStatus
from supported_models import SupportedModel
from supported_languages import SupportedLanguage
from datetime import datetime
class SummaryDAOPostgresql(SummaryDAOInterface): # TODO: manage errors in excepts
"""Summary DAO implementation for Postgresql.
For more information, see base class.
"""
def __init__(self, host, dbname, user, password, log_level):
logging.basicConfig(
format='%(asctime)s %(name)s %(levelname)-8s %(message)s',
level=log_level,
datefmt='%d/%m/%Y %I:%M:%S %p'
)
self.logger = logging.getLogger("SummaryDAOPostgresql")
self.host = host
self.dbname = dbname
self.user = user
self.password = password
def get_summary(self, id_: str):
"""See base class."""
SQL = """SELECT summary_id, content, summary, model_name, params,
status, started_at, ended_at, language_tag, warnings
FROM jizt.id_raw_id_preprocessed JOIN jizt.summary
ON id_preprocessed = summary_id JOIN jizt.source
USING (source_id)
WHERE id_raw = %s;"""
SQL_UPDATE_LAST_ACCESSED = """UPDATE jizt.id_raw_id_preprocessed
SET last_accessed = %s
WHERE id_raw = %s;"""
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
cur.execute(SQL_UPDATE_LAST_ACCESSED, (datetime.now(), id_))
cur.execute(SQL, (id_,))
summary_row = cur.fetchone()
conn.commit()
if summary_row is not None:
return Summary(
id_=summary_row[0],
source=summary_row[1],
output=summary_row[2],
model=SupportedModel(summary_row[3]),
params=summary_row[4],
status=SummaryStatus(summary_row[5]),
started_at=summary_row[6],
ended_at=summary_row[7],
language=SupportedLanguage(summary_row[8])
), summary_row[9] # warnings
return (None, None) # summary doesn't exist
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
def insert_summary(self, summary: Summary, cache: bool, warnings: dict):
"""See base class."""
SQL_GET_SOURCE = """SELECT source_id
FROM jizt.source
WHERE source_id = %s;"""
SQL_INSERT_SOURCE = """INSERT INTO jizt.source
VALUES (%s, %s, %s);"""
SQL_INSERT_SUMMARY = """INSERT INTO jizt.summary
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"""
SQL_INSERT_ID = """INSERT INTO jizt.id_raw_id_preprocessed
VALUES (%s, %s, %s, %s, %s);"""
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
source_id = self._get_unique_key(summary.source)
cur.execute(SQL_GET_SOURCE, (source_id,))
retrieved_source_id = cur.fetchone()
if retrieved_source_id is None:
cur.execute(
SQL_INSERT_SOURCE,
(source_id, summary.source, len(summary.source))
)
output_length = (len(summary.output) if summary.output is not None
else None)
cur.execute(
SQL_INSERT_SUMMARY,
(summary.id_, source_id,
summary.output, output_length,
summary.model, Json(summary.params),
summary.status, summary.started_at,
summary.ended_at, summary.language)
)
cur.execute(SQL_INSERT_ID, (summary.id_,
summary.id_,
cache,
datetime.now(),
Json(warnings)))
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
def delete_summary(self,
id_: str,
delete_source: bool = False):
"""See base class."""
# Because of ON DELETE CASCADE, this will delete both the
# source and the summary
SQL_DELETE_ALSO_SOURCE = """DELETE FROM jizt.source
WHERE source_id = (
SELECT source_id FROM jizt.summary
WHERE summary_id = %s
);"""
# This will not delete the source
SQL_DELETE_SUMMARY = """DELETE FROM jizt.summary
WHERE summary_id = %s;"""
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
sql = SQL_DELETE_ALSO_SOURCE if delete_source else SQL_DELETE_SUMMARY
cur.execute(sql, (id_,))
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
def update_summary(self,
id_: str,
summary: str = None, # output
params: dict = None,
status: str = None,
started_at: datetime = None,
ended_at: datetime = None,
warnings: dict = None):
"""See base class."""
args = OrderedDict({key: value for key, value in locals().items()
if value is not None and key not in ('self', 'id_')})
# Convert dicts to Json
dicts = [key for key in args if isinstance(args[key], dict)]
for key in dicts:
args[key] = Json(args[key])
if "warnings" in args:
warnings = args.pop("warnings")
keys = list(args.keys())
values = list(args.values()) + [id_]
concat = StringIO()
concat.write("UPDATE jizt.summary SET ")
for field in keys[:-1]:
concat.write(f"{field} = %s, ")
concat.write(f"{keys[-1]} = %s ")
concat.write("FROM jizt.id_raw_id_preprocessed ")
concat.write("WHERE id_raw = %s AND id_preprocessed = summary_id;")
SQL_UPDATE_SUMMARY = concat.getvalue()
SQL_UPDATE_WARNINGS = """UPDATE jizt.id_raw_id_preprocessed
SET warnings = %s
WHERE id_raw = %s;"""
if self.summary_exists(id_):
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
cur.execute(SQL_UPDATE_SUMMARY, values) # values is a list!
cur.execute(SQL_UPDATE_WARNINGS, (warnings, id_))
conn.commit()
return self.get_summary(id_)
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
else:
return (None, None)
def update_source(self,
old_source: str,
new_source: str,
old_summary_id: str,
new_summary_id: str):
"""See base class."""
# The source id is also modified in the summary table
# because of ON UPDATE CASCADE
SQL_UPDATE_SOURCE = """UPDATE jizt.source
SET source_id = %s,
content = %s,
content_length = %s
WHERE source_id = %s;"""
# The id in id_raw_id_preprocessed is also updated
# because of ON UPDATE CASCADE
SQL_UPDATE_SUMMARY_ID = """UPDATE jizt.summary
SET summary_id = %s
WHERE summary_id = %s;"""
# We insert the binding (new_summary_id -> new_summary_id) so that a summary
# can be also retrieved with its preprocessed id
SQL_INSERT_PREPROCESSED_ID = """INSERT INTO jizt.id_raw_id_preprocessed
(id_raw, id_preprocessed, cache, last_accessed)
SELECT %s, %s, cache, %s
FROM jizt.id_raw_id_preprocessed
WHERE id_raw = %s;"""
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
old_source_id = self._get_unique_key(old_source)
new_source_id = self._get_unique_key(new_source)
cur.execute(SQL_UPDATE_SOURCE, (new_source_id, new_source,
len(new_source), old_source_id))
cur.execute(SQL_UPDATE_SUMMARY_ID, (new_summary_id, old_summary_id))
cur.execute(SQL_INSERT_PREPROCESSED_ID, (new_summary_id,
new_summary_id,
datetime.now(),
old_summary_id))
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
def update_preprocessed_id(self,
raw_id: str,
new_preprocessed_id: str):
"""See base class."""
SQL_UPDATE_ID = """UPDATE jizt.id_raw_id_preprocessed
SET id_preprocessed = %s,
last_accessed = %s
WHERE id_raw = %s
RETURNING cache;"""
SQL_UPDATE_CACHE = """UPDATE jizt.id_raw_id_preprocessed
SET cache = %s,
last_accessed = %s
WHERE id_raw = %s AND cache = FALSE;"""
# Because of ON DELETE CASCADE, this will delete both the
# source and the summary
SQL_DELETE_SUMMARY_OLD = """DELETE FROM jizt.source
WHERE source_id = (
SELECT source_id FROM jizt.summary
WHERE summary_id = %s
);"""
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
cur.execute(SQL_UPDATE_ID, (new_preprocessed_id,
datetime.now(),
raw_id))
cache = cur.fetchone()
if cache is not None:
cur.execute(SQL_UPDATE_CACHE, (cache[0],
datetime.now(),
new_preprocessed_id))
cur.execute(SQL_DELETE_SUMMARY_OLD, (raw_id,))
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
def update_cache_true(self, id_: str):
"""See base class."""
SQL = """UPDATE jizt.id_raw_id_preprocessed
SET cache = TRUE,
last_accessed = %s
WHERE CACHE = FALSE AND (id_raw = %s OR id_raw IN (
SELECT id_preprocessed
FROM jizt.id_raw_id_preprocessed
WHERE id_raw = %s
));"""
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
cur.execute(SQL, (datetime.now(), id_, id_))
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
def summary_exists(self, id_: str):
"""See base class."""
SQL_SELECT = """SELECT id_raw FROM jizt.id_raw_id_preprocessed
WHERE id_raw = %s;"""
SQL_UPDATE_LAST_ACCESSED = """UPDATE jizt.id_raw_id_preprocessed
SET last_accessed = %s
WHERE id_raw = %s;"""
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
cur.execute(SQL_UPDATE_LAST_ACCESSED, (datetime.now(), id_))
cur.execute(SQL_SELECT, (id_,))
conn.commit()
return cur.fetchone() is not None
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
def source_exists(self, source: str):
"""See base class."""
SQL = """SELECT source_id FROM jizt.source
WHERE source_id = %s;"""
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
source_id = self._get_unique_key(source)
cur.execute(SQL, (source_id,))
return cur.fetchone() is not None
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
def increment_summary_count(self, id_: str):
"""See base class."""
SQL = """UPDATE jizt.summary
SET request_count = request_count + 1
FROM jizt.id_raw_id_preprocessed
WHERE id_raw = %s AND id_preprocessed = summary_id
RETURNING request_count;"""
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
cur.execute(SQL, (id_,))
conn.commit()
return cur.fetchone()[0]
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
def delete_if_not_cache(self, id_: str):
"""See base class."""
SQL_DELETE = """DELETE FROM jizt.id_raw_id_preprocessed
WHERE id_raw = %s AND cache = FALSE
RETURNING id_preprocessed;"""
# Check if the preprocessed id has to be cached
SQL_CACHE = """SELECT cache FROM jizt.id_raw_id_preprocessed
WHERE id_raw = %s;"""
# Because of ON DELETE CASCADE, this will delete both the
# source and the summary
SQL_DELETE_SUMMARY = """DELETE FROM jizt.source
WHERE source_id = (
SELECT source_id FROM jizt.summary
WHERE summary_id = %s
);"""
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
cur.execute(SQL_DELETE, (id_,))
preprocessed_id = cur.fetchone()
if preprocessed_id is not None:
if id_ == preprocessed_id[0]: # preprocessed_id is a tuple
# We have already checked that cache was False
cur.execute(SQL_DELETE_SUMMARY, preprocessed_id)
else:
cur.execute(SQL_CACHE, preprocessed_id)
cache = cur.fetchone()
if cache is not None and not cache[0]:
cur.execute(SQL_DELETE_SUMMARY, preprocessed_id)
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
def cleanup_cache(self, older_than_seconds: int):
"""See base class."""
SQL_DELETE_ID_RAW = """
DELETE FROM jizt.id_raw_id_preprocessed
USING jizt.summary
WHERE id_preprocessed = summary_id AND
cache = FALSE AND status = 'completed' AND
last_accessed < NOW() - (%s::TEXT || ' seconds')::INTERVAL;"""
# Delete summaries that do not correspond to any request
SQL_DELETE_SUMMARY = """
DELETE FROM jizt.summary
WHERE summary_id IN (
SELECT summary_id
FROM jizt.summary
WHERE NOT EXISTS (SELECT 1 FROM jizt.id_raw_id_preprocessed
WHERE id_preprocessed = summary_id)
)
RETURNING source_id;"""
# Delete sources that do not correspond to any summary
        SQL_DELETE_SOURCE = """DELETE FROM jizt.source
                               WHERE source_id IN %s;"""
conn = None
try:
conn = self._connect()
with conn.cursor() as cur:
cur.execute(SQL_DELETE_ID_RAW, (older_than_seconds,))
                cur.execute(SQL_DELETE_SUMMARY)
                summaries_id = cur.fetchall()
                if summaries_id:
                    # Transform from e.g. [(1,), (1,), (2,)] to (1, 2)
                    summaries_id = tuple(i for row in set(summaries_id) for i in row)
                    cur.execute(SQL_DELETE_SOURCE, (summaries_id,))
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
finally:
if conn is not None:
conn.close()
@classmethod
def _get_unique_key(cls, text: str) -> str:
"""Get a unique key for a text.
SHA-256 algorithm is used.
Args:
text (:obj:`str`):
The text to get the unique id from.
Returns:
            :obj:`str`: The unique SHA-256 hash of the text.
"""
return hashlib.sha256(text.encode()).hexdigest()
def _connect(self):
"""Connect to the PostgreSQL database.
Returns:
:obj:`psycopg2.extensions.connection`: The connection
to the PostgreSQL database.
"""
try:
return psycopg2.connect(
host=self.host,
dbname=self.dbname,
user=self.user,
password=self.password
)
except (Exception, psycopg2.DatabaseError) as error:
self.logger.error(error)
```
#### File: text_postprocessor/truecase/TrueCaser.py
```python
import math
import os
import pickle
import string
import re
class TrueCaser(object):
def __init__(self, dist_file_path=None):
""" Initialize module with default data/english.dist file """
if dist_file_path is None:
dist_file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"data/english.dist")
with open(dist_file_path, "rb") as distributions_file:
pickle_dict = pickle.load(distributions_file)
self.uni_dist = pickle_dict["uni_dist"]
self.backward_bi_dist = pickle_dict["backward_bi_dist"]
self.forward_bi_dist = pickle_dict["forward_bi_dist"]
self.trigram_dist = pickle_dict["trigram_dist"]
self.word_casing_lookup = pickle_dict["word_casing_lookup"]
def get_score(self, prev_token, possible_token, next_token):
pseudo_count = 5.0
# Get Unigram Score
nominator = self.uni_dist[possible_token] + pseudo_count
denominator = 0
for alternativeToken in self.word_casing_lookup[
possible_token.lower()]:
denominator += self.uni_dist[alternativeToken] + pseudo_count
unigram_score = nominator / denominator
# Get Backward Score
bigram_backward_score = 1
if prev_token is not None:
nominator = (
self.backward_bi_dist[prev_token + "_" + possible_token] +
pseudo_count)
denominator = 0
for alternativeToken in self.word_casing_lookup[
possible_token.lower()]:
denominator += (self.backward_bi_dist[prev_token + "_" +
alternativeToken] +
pseudo_count)
bigram_backward_score = nominator / denominator
# Get Forward Score
bigram_forward_score = 1
if next_token is not None:
next_token = next_token.lower() # Ensure it is lower case
nominator = (
self.forward_bi_dist[possible_token + "_" + next_token] +
pseudo_count)
denominator = 0
for alternativeToken in self.word_casing_lookup[
possible_token.lower()]:
denominator += (
self.forward_bi_dist[alternativeToken + "_" + next_token] +
pseudo_count)
bigram_forward_score = nominator / denominator
# Get Trigram Score
trigram_score = 1
if prev_token is not None and next_token is not None:
next_token = next_token.lower() # Ensure it is lower case
nominator = (self.trigram_dist[prev_token + "_" + possible_token +
"_" + next_token] + pseudo_count)
denominator = 0
for alternativeToken in self.word_casing_lookup[
possible_token.lower()]:
denominator += (
self.trigram_dist[prev_token + "_" + alternativeToken +
"_" + next_token] + pseudo_count)
trigram_score = nominator / denominator
result = (math.log(unigram_score) + math.log(bigram_backward_score) +
math.log(bigram_forward_score) + math.log(trigram_score))
return result
def first_token_case(self, raw):
return f'{raw[0].upper()}{raw[1:]}'
def get_true_case(self, sentence, out_of_vocabulary_token_option="title"):
""" Returns the true case for the passed tokens.
@param tokens: Tokens in a single sentence
@param outOfVocabulariyTokenOption:
title: Returns out of vocabulary (OOV) tokens in 'title' format
lower: Returns OOV tokens in lower case
as-is: Returns OOV tokens as is
"""
contractions = r"'[A-Za-z]+"
only_words = r"[A-Za-z]+"
pruned_sentence = re.sub(contractions, "", sentence)
tokens = re.findall(only_words, pruned_sentence)
tokens_true_case = []
for token_idx, token in enumerate(tokens):
if token in string.punctuation or token.isdigit():
tokens_true_case.append(token)
else:
token = token.lower()
if token in self.word_casing_lookup:
if len(self.word_casing_lookup[token]) == 1:
tokens_true_case.append(
list(self.word_casing_lookup[token])[0])
else:
prev_token = (tokens_true_case[token_idx - 1]
if token_idx > 0 else None)
next_token = (tokens[token_idx + 1]
if token_idx < len(tokens) - 1 else None)
best_token = None
highest_score = float("-inf")
for possible_token in self.word_casing_lookup[token]:
score = self.get_score(prev_token, possible_token,
next_token)
if score > highest_score:
best_token = possible_token
highest_score = score
tokens_true_case.append(best_token)
if token_idx == 0:
tokens_true_case[0] = self.first_token_case(tokens_true_case[0])
else: # Token out of vocabulary
if out_of_vocabulary_token_option == "title":
tokens_true_case.append(token.title())
elif out_of_vocabulary_token_option == "lower":
tokens_true_case.append(token.lower())
else:
tokens_true_case.append(token)
spans = []
offset = 0
sentence_lower = sentence.lower()
true_case_sentence = sentence_lower
for tk in tokens_true_case:
span = re.search(tk.lower(), sentence_lower[offset:]).span()
spans.append([s + offset for s in span])
offset += span[1]
for s, tk in zip(spans, tokens_true_case):
true_case_sentence = (f"{true_case_sentence[:s[0]]}{tk}"
f"{true_case_sentence[s[1]:]}")
return true_case_sentence
if __name__ == "__main__":
dist_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"data/english.dist")
caser = TrueCaser(dist_file_path)
while True:
ip = input("Enter a sentence: ")
print(caser.get_true_case(ip, "lower"))
``` |
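A brief usage sketch of the `TrueCaser` above; it needs the pickled `data/english.dist` distributions file shipped next to the module:
```python
# Sketch: requires data/english.dist relative to the TrueCaser module.
caser = TrueCaser()
print(caser.get_true_case("i moved from new york to paris last may"))
```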
{
"source": "JIZZ-in-my-pants-NTNU/NTNU-Automatic-Enrollment-2021",
"score": 2
} |
#### File: JIZZ-in-my-pants-NTNU/NTNU-Automatic-Enrollment-2021/datasets.py
```python
import numpy as np
import torch
from torchvision.transforms import ToTensor
import torchvision.transforms.functional as tvF
from torch.utils.data import Dataset, DataLoader
class MyDataset(Dataset):
def __init__(self, data_npy, label_npy, redux=0):
super(MyDataset, self).__init__()
self.data_list = np.load(data_npy)
if label_npy is None:
self.label_list = None
else:
self.label_list = torch.from_numpy(np.load(label_npy))
if redux:
self.data_list = self.data_list[:redux]
self.label_list = self.label_list[:redux]
def __len__(self):
return len(self.data_list)
def __getitem__(self, idx):
data = tvF.to_tensor(self.data_list[idx]).float()
if self.label_list is None:
return data
label = self.label_list[idx]
return data, label
def load_dataset(data_npy, label_npy, redux, params, shuffled=False, single=False):
dataset = MyDataset(data_npy, label_npy, redux)
if single:
return DataLoader(dataset, batch_size=1, shuffle=shuffled)
else:
return DataLoader(dataset, batch_size=params.batch_size, shuffle=shuffled)
```
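A tiny sketch of `MyDataset` fed with synthetic `.npy` files matching the 20x20 captcha crops used elsewhere in the project:
```python
# Sketch with fake data; shapes follow the 20x20 grayscale crops and 39 classes.
import numpy as np

np.save("demo_data.npy", np.random.randint(0, 255, (8, 20, 20), dtype=np.uint8))
np.save("demo_label.npy", np.random.randint(0, 39, (8,), dtype=np.int64))
dataset = MyDataset("demo_data.npy", "demo_label.npy")
img, label = dataset[0]
print(img.shape, label)   # torch.Size([1, 20, 20]) and a scalar label tensor
```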
#### File: JIZZ-in-my-pants-NTNU/NTNU-Automatic-Enrollment-2021/net.py
```python
import torch
import torch.nn as nn
from torchsummary import summary
class Net(nn.Module):
def __init__(self, in_channels=1, num_classes=39):
super(Net, self).__init__()
self._block1 = nn.Sequential(
nn.Conv2d(in_channels, 4, 3, stride=1, padding=1),
nn.BatchNorm2d(4),
nn.LeakyReLU(0.01),
nn.MaxPool2d(2))
self._block2 = nn.Sequential(
nn.Conv2d(4, 16, 3, stride=1, padding=1),
nn.BatchNorm2d(16),
nn.LeakyReLU(0.01),
nn.MaxPool2d(2))
self._block3 = nn.Sequential(
nn.Linear(16*5*5, 64),
nn.ReLU(),
nn.BatchNorm1d(64),
nn.Linear(64, num_classes))
self._init_weights()
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data)
m.bias.data.zero_()
def forward(self, x):
pool1 = self._block1(x)
pool2 = self._block2(pool1)
flatten = pool2.view(pool2.size(0), -1)
return self._block3(flatten)
if __name__ == '__main__':
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Net().to(device)
summary(model, (1, 20, 20))
```
#### File: JIZZ-in-my-pants-NTNU/NTNU-Automatic-Enrollment-2021/test.py
```python
import os
import cv2
import numpy as np
import torch
import torch.nn as nn
from argparse import ArgumentParser, Namespace
from datasets import load_dataset
from captha_recognition import CapthaRecognition
from utils import *
def parse_args():
parser = ArgumentParser(description='Captha Recognition')
parser.add_argument('--test-img', help='testing image path', default='./captha.jpg')
parser.add_argument('--load-ckpt', help='load model checkpoint')
parser.add_argument('--cuda', help='use cuda', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
params = parse_args()
img = cv2.imread(params.test_img, cv2.IMREAD_COLOR)
crop_imgs = captha_segmentation(img)
if crop_imgs is None:
print('None')
else:
test_data = np.array(crop_imgs, dtype=np.uint8).reshape(-1, 20, 20)
np.save('test_data.npy', test_data)
test_loader = load_dataset('test_data.npy', None, 0, params, shuffled=False, single=True)
os.remove('test_data.npy')
cr = CapthaRecognition(params, trainable=False)
cr.load_model(params.load_ckpt)
pred_idx = cr.test(test_loader)
pred_label = [idx_to_label(i) for i in pred_idx]
print(''.join(pred_label))
```
#### File: JIZZ-in-my-pants-NTNU/NTNU-Automatic-Enrollment-2021/train_valid_split.py
```python
import numpy as np
import pandas as pd
from utils import label_to_idx
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser()
parser.add_argument('--train-ratio', '-r', help='ratio of training data', default=0.8, type=float)
parser.add_argument('--load-dataset', '-l', help='captha dataset path', default='captha.csv')
return parser.parse_args()
if __name__ == '__main__':
params = parse_args()
# label to idx
captha_df = pd.read_csv(params.load_dataset)
captha_df['label'] = captha_df['label'].apply(label_to_idx)
# train-valid split
train_num = int(params.train_ratio * captha_df.shape[0])
train_df = captha_df[:train_num]
train_data = train_df.drop('label', axis=1).to_numpy(dtype=np.uint8).reshape((-1, 20, 20))
train_label = train_df['label'].to_numpy(dtype=np.int64).reshape((-1, ))
np.save('train_data.npy', train_data)
np.save('train_label.npy', train_label)
valid_df = captha_df[train_num:]
valid_data = valid_df.drop('label', axis=1).to_numpy(dtype=np.uint8).reshape((-1, 20, 20))
valid_label = valid_df['label'].to_numpy(dtype=np.int64).reshape((-1, ))
np.save('valid_data.npy', valid_data)
np.save('valid_label.npy', valid_label)
``` |
{
"source": "jj0hns0n/mednet",
"score": 3
} |
#### File: mednet/clickatell/__init__.py
```python
import urllib, urllib2
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__author__ = "<NAME> <<EMAIL>>, <NAME> <<EMAIL>>"
__version__ = "0.1.1-alpha"
__copyright__ = "Copyright (c) 2006 <NAME>, 2008 <NAME>. All rights reserved."
__license__ = "The MIT License"
def require_auth(func):
"""
decorator to ensure that the Clickatell object is authed before proceeding
"""
def inner(self, *args, **kwargs):
if not self.has_authed:
self.auth()
return func(self, *args, **kwargs)
return inner
class ClickatellError(Exception):
"""
Base class for Clickatell errors
"""
class ClickatellAuthenticationError(ClickatellError):
pass
class Clickatell(object):
"""
Provides a wrapper around the Clickatell HTTP/S API interface
"""
def __init__ (self, username, password, api_id):
"""
Initialise the Clickatell class
Expects:
- username - your Clickatell Central username
- password - your Clickatell Central password
- api_id - your Clickatell Central HTTP API identifier
"""
self.has_authed = False
self.username = username
self.password = password
self.api_id = api_id
self.session_id = None
def auth(self, url='https://api.clickatell.com/http/auth'):
"""
Authenticate against the Clickatell API server
"""
post = [
('user', self.username),
            ('password', self.password),
('api_id', self.api_id),
]
result = self.curl(url, post)
if result[0] == 'OK':
assert (32 == len(result[1]))
self.session_id = result[1]
self.has_authed = True
return True
else:
raise ClickatellAuthenticationError, ': '.join(result)
@require_auth
def getbalance(self, url='https://api.clickatell.com/http/getbalance'):
"""
Get the number of credits remaining at Clickatell
"""
post = [
('session_id', self.session_id),
]
result = self.curl(url, post)
if result[0] == 'Credit':
assert (0 <= result[1])
return result[1]
else:
return False
@require_auth
def getmsgcharge(self, apimsgid, url='https://api.clickatell.com/http/getmsgcharge'):
"""
Get the message charge for a previous sent message
"""
assert (32 == len(apimsgid))
post = [
('session_id', self.session_id),
('apimsgid', apimsgid),
]
result = self.curl(url, post)
result = ' '.join(result).split(' ')
if result[0] == 'apiMsgId':
assert (apimsgid == result[1])
assert (0 <= result[3])
return result[3]
else:
return False
@require_auth
def ping(self, url='https://api.clickatell.com/http/ping'):
"""
Ping the Clickatell API interface to keep the session open
"""
post = [
('session_id', self.session_id),
]
result = self.curl(url, post)
if result[0] == 'OK':
return True
else:
self.has_authed = False
return False
@require_auth
def sendmsg(self, message, url = 'https://api.clickatell.com/http/sendmsg'):
"""
        Send a message via the Clickatell API server
Takes a message in the following format:
message = {
'to': 'to_msisdn',
'text': 'This is a test message',
}
Return a tuple. The first entry is a boolean indicating if the message
        was sent successfully, the second entry is an optional message-id.
Example usage::
result, uid = clickatell.sendmsg(message)
if result == True:
print "Message was sent successfully"
print "Clickatell returned %s" % uid
else:
print "Message was not sent"
"""
if not (message.has_key('to') or message.has_key('text')):
raise ClickatellError, "A message must have a 'to' and a 'text' value"
post = [
('session_id', self.session_id),
('from', '15026456443'),
('to', message['to']),
('text', message['text']),
]
result = self.curl(url, post)
if result[0] == 'ID':
assert (result[1])
return (True, result[1])
else:
return (False, None)
@require_auth
def tokenpay(self, voucher, url='https://api.clickatell.com/http/token_pay'):
"""
Redeem a voucher via the Clickatell API interface
"""
assert (16 == len(voucher))
post = [
('session_id', self.session_id),
('token', voucher),
]
result = self.curl(url, post)
if result[0] == 'OK':
return True
else:
return False
def curl(self, url, post):
"""
        Interface for sending web requests to the Clickatell API Server
"""
try:
data = urllib2.urlopen(url, urllib.urlencode(post))
except urllib2.URLError, v:
raise ClickatellError, v
return data.read().split(": ")
```
#### File: mednet/messaging/models.py
```python
from django.contrib.gis.db import models
from django.utils.encoding import *
from datetime import *
import hashlib, random
MESSAGE_STATUS_CHOICE = (
('NW', 'New Message'),
('IP', 'In Process'),
('CM', 'Complete'),
('IG', 'Ignored')
)
def guid_gen():
return hashlib.sha1(str(random.random())).hexdigest()
class CharNullField(models.CharField):
description = "CharField that stores NULL but returns ''"
def to_python(self, value):
if isinstance(value, models.CharField):
return value
if value==None:
return ""
else:
return value
def get_db_prep_value(self, value):
if value=="":
return None
else:
return value
class OutgoingSmsMessage(models.Model):
guid = models.CharField(max_length=512, default=guid_gen)
recipient = models.CharField(max_length=25)
message = models.CharField(max_length=160)
date_queued = models.DateTimeField(default=datetime.now)
date_sent = models.DateTimeField(null=True, blank=True)
receipt = CharNullField(max_length=512, null=True, blank=True, default=None)
def __unicode__(self):
return str(self.recipient + ' ' + str(self.message))
class Meta:
verbose_name = "Outgoing SMS Message"
class IncomingSmsMessage(models.Model):
guid = models.CharField(max_length=512)
sender = models.CharField(max_length=25)
message = models.CharField(max_length=255, null=True, blank=True)
date_sent = models.DateTimeField(null=True, blank=True)
notes = models.TextField(null=True, blank=True)
status = models.CharField(max_length=2, choices=MESSAGE_STATUS_CHOICE)
status_changed_date = models.DateTimeField(default=datetime.now)
receipt = models.CharField(max_length=512, null=True, blank=True)
objects = models.GeoManager()
def __unicode__(self):
return str(self.sender + ' ' + str(self.date_sent))
class Meta:
verbose_name = "Incoming SMS Message"
class VoiceMessage(models.Model):
gvoice_id = models.CharField(max_length=256)
start_time = models.DateTimeField(null=True, blank=True)
phone_number = models.CharField(max_length=100, null=True, blank=True)
subject = models.CharField(max_length=255, null=True, blank=True)
notes = models.TextField(null=True, blank=True)
mp3_url = models.CharField(max_length=255, blank=True, null=True)
status = models.CharField(max_length=2, choices=MESSAGE_STATUS_CHOICE)
status_changed_date = models.DateTimeField()
objects = models.GeoManager()
def __unicode__(self):
return str(self.phone_number + ' ' + str(self.start_time))
class Meta:
verbose_name = "Incoming Voice Message"
class MailMessage(models.Model):
from_address = models.CharField(max_length=512, null=True, blank=True)
subject = models.CharField(max_length=512,null=True,blank=True)
date_sent = models.DateTimeField(null=True,blank=True)
message = models.TextField(null=True,blank=True)
return_path = models.CharField(max_length=512, null=True, blank=True)
message_id = models.CharField(max_length=512, null=True, blank=True)
status = models.CharField(max_length=2, choices=MESSAGE_STATUS_CHOICE)
status_changed_date = models.DateTimeField()
objects = models.GeoManager()
def __unicode__(self):
return str(self.return_path + ' ' + str(self.date_sent))
class Meta:
verbose_name = "Incoming Mail Message"
class OutgoingMailMessage(models.Model):
guid = models.CharField(max_length=512, default=guid_gen)
to_address = models.EmailField()
subject = models.CharField(max_length=512,null=True,blank=True)
date_queued = models.DateTimeField(null=True,blank=True, default=datetime.now)
date_sent = models.DateTimeField(null=True,blank=True)
message = models.TextField(null=True,blank=True)
message_id = models.CharField(max_length=512, null=True, blank=True)
objects = models.GeoManager()
def __unicode__(self):
return str(self.to_address + ' ' + str(self.date_queued))
class Meta:
verbose_name = "Outgoing Mail Message"
```
#### File: mednet/messaging/views.py
```python
import sys
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from mednet.messaging.models import *
from mednet.sahana.models import *
from datetime import *
from time import *
import urllib
@login_required
def index(request):
voice_count = VoiceMessage.objects.filter(status='NW').count()
sms_count = IncomingSmsMessage.objects.filter(status='NW').count()
mail_count = MailMessage.objects.filter(status='NW').count()
return render_to_response('messaging/index.html', {'voice_count': voice_count, 'sms_count': sms_count, 'mail_count': mail_count}, context_instance=RequestContext(request))
@login_required
def mark_message(request):
print "here"
msgtype = request.GET['msgtype']
msgid = int(request.GET['msgid'])
status = request.GET['status']
if 'custom_msg' in request.GET:
custom_message = urllib.unquote(request.GET['custom_msg'])
else:
custom_message = ''
print custom_message
if(msgtype == 'voice'):
vm = VoiceMessage.objects.get(pk=msgid)
vm.status = status
vm.status_changed_date = datetime.now()
vm.save()
return HttpResponseRedirect('/mednet/messaging/next_message/voice/')
elif(msgtype == 'sms'):
sms = IncomingSmsMessage.objects.get(pk=msgid)
sms.status = status
sms.status_changed_date = datetime.now()
sms.save()
#Enqueue Return Message
if status=='CM':
out = OutgoingSmsMessage()
out.recipient = sms.sender
if(len(custom_message) == 0):
                out.message = "Your message has been processed"
else:
#TODO Truncate to 160 or send multiple messages
out.message = custom_message[:160]
out.date_queued = datetime.now()
out.save()
return HttpResponseRedirect('/mednet/messaging/next_message/sms/')
elif(msgtype == 'mail'):
mail = MailMessage.objects.get(pk=msgid)
mail.status = status
mail.status_changed_date = datetime.now()
mail.save()
if status=='CM':
out = OutgoingMailMessage()
out.to_address = mail.return_path
out.subject = "RE: " + mail.subject
if(len(custom_message) == 0):
                out.message = "Your message has been processed"
else:
out.message = custom_message
out.date_queued = datetime.now()
out.save()
return HttpResponseRedirect('/mednet/messaging/next_message/mail/')
return HttpResponse('ok')
pass
@login_required
def next_message(request, message_type):
print message_type
if(message_type == 'voice'):
try:
next_voice_message = VoiceMessage.objects.filter(status='NW').order_by('start_time')[0:1].get()
#next_voice_message.status = 'IP'
next_voice_message.status_changed_date = strftime("%Y-%m-%d %H:%M:%S", localtime())
next_voice_message.save()
hospitals = HmsHospital.objects.all().order_by('name')
return render_to_response('messaging/next_voice_message.html', {'next_voice_message': next_voice_message, 'hospitals': hospitals}, context_instance=RequestContext(request) )
except:
#No Messages Matching Criteria
print sys.exc_info()
return render_to_response('messaging/no_messages.html', {'type': 'Voice'}, context_instance=RequestContext(request))
elif(message_type == 'mail'):
try:
next_mail_message = MailMessage.objects.filter(status='NW').order_by('date_sent')[0:1].get()
#next_mail_message.status = 'IP'
next_mail_message.status_changed_date = strftime("%Y-%m-%d %H:%M:%S", localtime())
next_mail_message.save()
hospitals = HmsHospital.objects.all().order_by('name')
return render_to_response('messaging/next_email_message.html', {'next_mail_message': next_mail_message, 'hospitals': hospitals}, context_instance=RequestContext(request))
except:
#No Messages Matching Criteria
print sys.exc_info()
return render_to_response('messaging/no_messages.html', {'type': 'Mail'}, context_instance=RequestContext(request))
elif(message_type == 'sms'):
try:
next_sms_message = IncomingSmsMessage.objects.filter(status='NW').order_by('-date_sent')[0:1].get()
#next_sms_message.status = 'IP'
next_sms_message.status_changed_date = strftime("%Y-%m-%d %H:%M:%S", localtime())
next_sms_message.save()
hospitals = HmsHospital.objects.all().order_by('name')
return render_to_response('messaging/next_sms_message.html', {'next_sms_message': next_sms_message, 'hospitals': hospitals}, context_instance=RequestContext(request))
except:
#No Messages Matching Criteria
return render_to_response('messaging/no_messages.html', {'type': 'SMS'}, context_instance=RequestContext(request))
else:
#message type not valid
pass
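# --- Hypothetical URLconf sketch (not part of this file) --------------------
# A rough guess at how these views are wired, based on the redirect targets
# used above ('/mednet/messaging/next_message/<type>/'); the project's real
# urls.py is not shown here, so the patterns and prefixes are assumptions.
#
# from django.conf.urls.defaults import patterns, url
#
# urlpatterns = patterns('mednet.messaging.views',
#     url(r'^$', 'index'),
#     url(r'^mark_message/$', 'mark_message'),
#     url(r'^next_message/(?P<message_type>\w+)/$', 'next_message'),
# )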
```
#### File: mednet/olwidget/admin.py
```python
import copy
# Get the parts necessary for the methods we override
from django.contrib.admin import ModelAdmin
from django.contrib.gis.db import models
from django.contrib.gis.geos import GeometryCollection
from django.shortcuts import render_to_response
from django import template
from django.contrib.admin.options import IncorrectLookupParameters
from django.http import HttpResponse, HttpResponseRedirect
from django.core.exceptions import PermissionDenied
from django.utils.encoding import force_unicode
from django.utils.translation import ungettext, ugettext as _  # _ is used in the invalid-setup branch below
from olwidget.widgets import EditableMap, InfoMap, DEFAULT_PROJ
class GeoModelAdmin(ModelAdmin):
options = {}
list_map = None
list_map_options = {}
change_list_template = "admin/olwidget_change_list.html"
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Overloaded from ModelAdmin to use our map widget.
"""
if isinstance(db_field, models.GeometryField):
request = kwargs.pop('request', None)
kwargs['widget'] = self.get_map_widget(db_field)
return db_field.formfield(**kwargs)
else:
return super(GeoModelAdmin, self).formfield_for_dbfield(
db_field, **kwargs)
def get_map_widget(self, db_field):
"""
Returns an EditableMap subclass with options appropriate for the given
field.
"""
is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING',
'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if db_field.geom_type == 'GEOMETRYCOLLECTION':
geometry = ['polygon', 'point', 'linestring']
else:
if db_field.geom_type in ('MULTIPOINT', 'POINT'):
geometry = 'point'
elif db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'):
geometry = 'polygon'
elif db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'):
geometry = 'linestring'
else:
# fallback: allow all types.
geometry = ['polygon', 'point', 'linestring']
options = copy.deepcopy(self.options)
options.update({
'geometry': geometry,
'isCollection': is_collection,
'name': db_field.name,
})
class Widget(EditableMap):
def __init__(self, *args, **kwargs):
kwargs['options'] = options
# OL rendering bug with floats requires this.
kwargs['template'] = "olwidget/admin_olwidget.html"
super(Widget, self).__init__(*args, **kwargs)
return Widget
def get_changelist_map(self, cl):
"""
Display a map in the admin changelist, with info popups
"""
if self.list_map:
info = []
for obj in cl.get_query_set():
# Transform the fields into one projection.
geoms = [getattr(obj, field) for field in self.list_map]
for geom in geoms:
geom.transform(int(DEFAULT_PROJ))
info.append((
GeometryCollection(geoms, srid=int(DEFAULT_PROJ)),
"<a href='%s'>%s</a>" % (
cl.url_for_result(obj),
force_unicode(obj)
)
))
return InfoMap(info, options=self.list_map_options)
return None
def changelist_view(self, request, extra_context=None):
# Copied from parent and modified where marked to add map based on
# change list and media.
"The 'change list' admin view for this model."
from django.contrib.admin.views.main import ChangeList, ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
# Remove action checkboxes if there aren't any actions available.
list_display = list(self.list_display)
if not actions:
try:
list_display.remove('action_checkbox')
except ValueError:
pass
try:
cl = ChangeList(request, self.model, list_display, self.list_display_links, self.list_filter,
self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_editable, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given and
# the 'invalid=1' parameter was already in the query string, something
# is screwed up with the database, so display an error page.
if ERROR_FLAG in request.GET.keys():
return render_to_response('admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk edit.
# Try to look up an action first, but if this isn't an action the POST
# will fall through to the bulk edit check, below.
if actions and request.method == 'POST':
response = self.response_action(request, queryset=cl.get_query_set())
if response:
return response
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if request.method == "POST" and self.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
form.save_m2m()
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
if changecount == 1:
name = force_unicode(opts.verbose_name)
else:
name = force_unicode(opts.verbose_name_plural)
msg = ungettext("%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount) % {'count': changecount,
'name': name,
'obj': force_unicode(obj)}
self.message_user(request, msg)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif self.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
context = {
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
'media': media,
'has_add_permission': self.has_add_permission(request),
'root_path': self.admin_site.root_path,
'app_label': app_label,
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
}
# MODIFICATION
map = self.get_changelist_map(cl)
if map:
context['media'] += map.media
context['map'] = map
# END MODIFICATION
context.update(extra_context or {})
return render_to_response(self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context, context_instance=template.RequestContext(request))
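# --- Hypothetical usage sketch (not part of this module) --------------------
# A project admin.py would subclass GeoModelAdmin roughly like this; the
# "Hospital" model and its "location" field are placeholders for illustration.
#
# from django.contrib import admin
# from myproject.models import Hospital
#
# class HospitalAdmin(GeoModelAdmin):
#     list_map = ['location']   # geometry field(s) drawn on the changelist map
#
# admin.site.register(Hospital, HospitalAdmin)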
```
#### File: mednet/piston_api/handlers.py
```python
import sys
from piston.handler import BaseHandler, AnonymousBaseHandler
from piston.emitters import Emitter, JSONEmitter
from piston_api.emitters import GeoJSONEmitter
from mednet.sahana.models import *
from mednet.messaging.models import *
from piston.utils import rc
from datetime import *
import hashlib, random
import urllib
import simplejson  # assumed standalone simplejson package; needed for the 'data' payload branch in create()
JSONEmitter.unregister('json')
Emitter.register('json', GeoJSONEmitter, 'application/javascript; charset=utf-8')
outgoing_fields = ('date_queued', 'receipt', 'date_sent', 'message', 'recipient', 'guid')
#Incoming SMS
class AnonymousIncomingSmsHandler(BaseHandler):
allowed_methods=('GET','POST',)
model = IncomingSmsMessage
def read(self, request, message_id=None):
if(message_id):
return IncomingSmsMessage.objects.get(guid=message_id)
else:
return IncomingSmsMessage.objects.all()
def create(self, request):
if not self.has_model():
return rc.NOT_IMPLEMENTED
attrs = self.flatten_dict(request.POST)
print attrs
if attrs.has_key('data'):
ext_posted_data = simplejson.loads(request.POST.get('data'))
attrs = self.flatten_dict(ext_posted_data)
try:
inst = self.model.objects.get(**attrs)
return rc.DUPLICATE_ENTRY
except self.model.DoesNotExist:
try:
attrs['message'] = urllib.unquote(attrs['message']).decode('utf8')
except:
attrs['message'] = urllib.unquote(attrs['message'])
inst = self.model(**attrs)
inst.receipt = hashlib.sha1(str(random.random())).hexdigest()
inst.status_changed_date = datetime.now()
inst.status = 'NW'
inst.save()
return inst
except self.model.MultipleObjectsReturned:
return rc.DUPLICATE_ENTRY
class IncomingSmsHandler(BaseHandler):
    allowed_methods = ('GET',)
model = IncomingSmsMessage
anonymous = AnonymousIncomingSmsHandler
#Outgoing SMS
class AnonymousOutgoingSmsHandler(BaseHandler):
allowed_methods = ('GET','PUT')
model = OutgoingSmsMessage
fields = outgoing_fields
def read(self, request, message_date=None):
if(message_date):
try:
objects = OutgoingSmsMessage.objects.filter(receipt=None)
print objects
return objects
except:
                return rc.BAD_REQUEST
else:
return OutgoingSmsMessage.objects.all()
def update(self, request, *args, **kwargs):
attrs = self.flatten_dict(request.POST)
print attrs
try:
guid=attrs['guid']
instance = OutgoingSmsMessage.objects.get(guid=guid)
print instance
except self.model.DoesNotExist:
print "model.DoesNotExist"
return rc.NOT_FOUND
except self.model.MultipleObjectsReturned:
print "bad request1"
return rc.BAD_REQUEST
except:
print "bad request2"
print sys.exc_info()
return rc.BAD_REQUEST
attrs = self.flatten_dict(request.data)
for k,v in attrs.iteritems():
setattr(instance, k, v)
instance.save()
return instance
class OutgoingSmsHandler(BaseHandler):
    allowed_methods = ('GET',)
model = OutgoingSmsMessage
fields = outgoing_fields
anonymous = AnonymousOutgoingSmsHandler
#Hospitals
class AnonymousHospitalHandler(BaseHandler):
allowed_methods = ('GET',)
model = HmsHospital
def read(self, request, hospital_id=None):
if(hospital_id):
return HmsHospital.objects.get(pk=hospital_id)
else:
return HmsHospital.objects.all()
class HospitalHandler(BaseHandler):
    allowed_methods = ('GET',)
model = HmsHospital
anonymous = AnonymousHospitalHandler
#Hospital Activities
class AnonymousHospitalActivityHandler(BaseHandler):
allowed_methods = ('GET',)
model = HmsActivity
def read(self, request, hospital_activity_id=None):
if(hospital_activity_id):
return HmsActivity.objects.get(pk=hospital_activity_id)
else:
return HmsActivity.objects.all()
class HospitalActivityHandler(BaseHandler):
    allowed_methods = ('GET',)
model = HmsActivity
anonymous = AnonymousHospitalActivityHandler
#Hospital Bed Capacity
class AnonymousHospitalBedCapacityHandler(BaseHandler):
allowed_methods = ('GET',)
model = HmsBedCapacity
def read(self, request, hospital_bed_capacity_id=None):
if(hospital_bed_capacity_id):
return HmsBedCapacity.objects.get(pk=hospital_bed_capacity_id)
else:
return HmsBedCapacity.objects.all()
class HospitalBedCapacityHandler(BaseHandler):
    allowed_methods = ('GET',)
model = HmsBedCapacity
anonymous = AnonymousHospitalBedCapacityHandler
#Hospital Contacts
class AnonymousHospitalContactHandler(BaseHandler):
allowed_methods = ('GET',)
model = HmsContact
def read(self, request, hospital_contact_id=None):
if(hospital_contact_id):
return HmsContact.objects.get(pk=hospital_contact_id)
else:
return HmsContact.objects.all()
class HospitalContactHandler(BaseHandler):
    allowed_methods = ('GET',)
model = HmsContact
anonymous = AnonymousHospitalContactHandler
#Hospital Images
class AnonymousHospitalImageHandler(BaseHandler):
allowed_methods = ('GET',)
model = HmsImage
def read(self, request, hospital_image_id=None):
if(hospital_image_id):
return HmsImage.objects.get(pk=hospital_image_id)
else:
return HmsImage.objects.all()
class HospitalImageHandler(BaseHandler):
    allowed_methods = ('GET',)
model = HmsImage
anonymous = AnonymousHospitalImageHandler
#Hospital Request
class AnonymousHospitalRequestHandler(BaseHandler):
allowed_methods = ('GET',)
model = HmsRequest
def read(self, request, hospital_request_id=None):
if(hospital_request_id):
return HmsRequest.objects.get(pk=hospital_request_id)
else:
return HmsRequest.objects.all()
class HospitalRequestHandler(BaseHandler):
    allowed_methods = ('GET',)
model = HmsRequest
anonymous = AnonymousHospitalRequestHandler
#Hospital Resource
class AnonymousHospitalResourceHandler(BaseHandler):
allowed_methods = ('GET',)
model = HmsResource
def read(self, request, hospital_resource_id=None):
if(hospital_resource_id):
return HmsResource.objects.get(pk=hospital_resource_id)
else:
return HmsResource.objects.all()
class HospitalResourceHandler(BaseHandler):
    allowed_methods = ('GET',)
model = HmsResource
anonymous = AnonymousHospitalResourceHandler
#Hospital Service
class AnonymousHospitalServiceHandler(BaseHandler):
allowed_methods = ('GET',)
model = HmsService
def read(self, request, hospital_service_id=None):
if(hospital_service_id):
return HmsService.objects.get(pk=hospital_service_id)
else:
return HmsService.objects.all()
class HospitalServiceHandler(BaseHandler):
    allowed_methods = ('GET',)
model = HmsService
anonymous = AnonymousHospitalServiceHandler
#Hospital Status
class AnonymousHospitalStatusHandler(BaseHandler):
allowed_methods = ('GET',)
model = HmsStatus
def read(self, request, hospital_id=None):
if(hospital_id):
return HmsStatus.objects.filter(hospital=HmsHospital.objects.get(pk=hospital_id))
else:
return HmsStatus.objects.all()
class HospitalStatusHandler(BaseHandler):
    allowed_methods = ('GET',)
model = HmsStatus
anonymous = AnonymousHospitalStatusHandler
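# --- Hypothetical wiring sketch (not part of this module) -------------------
# django-piston handlers such as the ones above are normally exposed through a
# Resource in the project's urls.py; the URL patterns and names here are only
# an illustration of that pattern, not the project's real configuration.
#
# from django.conf.urls.defaults import patterns, url
# from piston.resource import Resource
# from mednet.piston_api.handlers import IncomingSmsHandler, OutgoingSmsHandler
#
# incoming_sms = Resource(handler=IncomingSmsHandler)
# outgoing_sms = Resource(handler=OutgoingSmsHandler)
#
# urlpatterns = patterns('',
#     url(r'^sms/incoming/(?P<message_id>[^/]+)/$', incoming_sms),
#     url(r'^sms/outgoing/$', outgoing_sms),
# )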
``` |
{
"source": "jj105/mindlogger-app-backend",
"score": 2
} |
#### File: girderformindlogger/models/pushNotification.py
```python
import datetime
import six
import time
import bson
from girderformindlogger.models.model_base import Model
from girderformindlogger.models.profile import Profile as ProfileModel
from girderformindlogger.models.user import User as UserModel
class ProgressState(object):
"""
Enum of possible progress states for progress records.
"""
ACTIVE = 'active'
SUCCESS = 'success'
ERROR = 'error'
EMPTY = 'empty'
@classmethod
def isComplete(cls, state):
return state == cls.SUCCESS or state == cls.ERROR
class PushNotification(Model):
"""
    This model represents a push notification generated from an applet
    schedule event. Each document records the notification type (one-off,
    daily or weekly), the head and content to display, the schedule window
    and time-of-day range in which it fires, the users it targets, which
    users have already been notified, and book-keeping fields such as
    progress, attempts, creator and timestamps.
"""
current_time = datetime.datetime.utcnow().strftime('%Y/%m/%d %H:%M')
def initialize(self):
self.name = 'pushNotification'
self.ensureIndices(('assetId', 'notification_type', 'head', 'content',
'sendTime', 'creator_id', 'created', 'updated', 'progress', 'timezone', 'attempts'))
def validate(self, doc):
return doc
def replaceNotification(self, applet, event, user, original = None):
"""
Create a generic notification.
:param type: The notification type.
:type type: str
:param data: The notification payload.
:param user: User to send the notification to.
:type user: dict
:param expires: Expiration date (for transient notifications).
:type expires: datetime.datetime
:param token: Set this if the notification should correspond to a token
instead of a user.
:type token: dict
"""
current_date = datetime.datetime.utcnow()
current_user_date = current_date + datetime.timedelta(hours=int(user['timezone']))
current_time = time.time()
notification_type = 1
start_time = event['data']['notifications'][0]['start']
end_time = event['data']['notifications'][0]['end']
schedule = {
"start": (current_date - datetime.timedelta(days=1)).strftime('%Y/%m/%d'),
"end": (current_date + datetime.timedelta(days=365*40)).strftime('%Y/%m/%d')
}
users = []
if 'users' in event['data']:
users = [bson.ObjectId(oid=user) for user in event['data']['users'] if user]
if 'schedule' in event:
if 'dayOfMonth' in event['schedule']:
"""
Does not repeat configuration in case of single event with exact year, month, day
"""
if event['data'].get('notifications', None) and \
event['data']['notifications'][0]['random']:
end_time = event['data']['notifications'][0]['end']
if 'year' in event['schedule'] and 'month' in event['schedule'] \
and 'dayOfMonth' in event['schedule']:
current_date_schedule = str(str(event['schedule']['year'][0]) + '/' +
('0' + str(event['schedule']['month'][0] + 1))[-2:] + '/' +
('0' + str(event['schedule']['dayOfMonth'][0]))[-2:])
schedule['start'] = current_date_schedule
schedule['end'] = current_date_schedule
elif 'dayOfWeek' in event['schedule']:
"""
Weekly configuration in case of weekly event
"""
notification_type = 3
if 'start' in event['schedule'] and event['schedule']['start']:
schedule['start'] = datetime.datetime.fromtimestamp(
float(event['schedule']['start']) / 1000).strftime('%Y/%m/%d')
if 'end' in event['schedule'] and event['schedule']['end']:
schedule['end'] = datetime.datetime.fromtimestamp(
float(event['schedule']['end']) / 1000).strftime('%Y/%m/%d')
schedule['dayOfWeek'] = event['schedule']['dayOfWeek'][0]
else:
"""
Daily configuration in case of daily event
"""
notification_type = 2
if 'start' in event['schedule'] and event['schedule']['start']:
schedule['start'] = datetime.datetime.fromtimestamp(
float(event['schedule']['start']) / 1000).strftime('%Y/%m/%d')
if 'end' in event['schedule'] and event['schedule']['end']:
schedule['end'] = datetime.datetime.fromtimestamp(
float(event['schedule']['end']) / 1000).strftime('%Y/%m/%d')
push_notification = {
'_id': event.get('_id'),
'applet': applet,
'notification_type': notification_type,
'head': event['data']['title'],
'content': event['data']['description'],
'users': users,
'schedule': schedule,
'startTime': start_time,
'endTime': end_time,
'lastRandomTime': None,
'notifiedUsers': original.get('notifiedUsers') if original else [],
'dateSend': None,
'creator_id': user['_id'],
'created': current_time,
'updated': current_time,
'progress': ProgressState.ACTIVE,
'attempts': 0
}
if original:
self.current_time = datetime.datetime.utcnow().strftime('%Y/%m/%d %H:%M')
push_notification.update({
'_id': original.get('_id'),
'progress': original.get('progress'),
'attempts': original.get('attempts'),
'dateSend': original.get('dateSend'),
'notifiedUsers': self.update_notified_users(push_notification),
'lastRandomTime': original.get('lastRandomTime')
})
if start_time > current_user_date.strftime('%H:%M') \
and schedule['start'] >= current_user_date.strftime('%Y/%m/%d'):
push_notification.update({
'progress': ProgressState.ACTIVE,
'lastRandomTime': None
})
return self.save(push_notification)
return None
def delete_notification(self, event_id):
self.removeWithQuery(query={'_id': event_id})
def updateProgress(self, record, save=True, **kwargs):
"""
Update an existing progress record.
:param record: The existing progress record to update.
:type record: dict
:param total: Some numeric value representing the total task length. By
convention, setting this <= 0 means progress on this task is
indeterminate. Generally this shouldn't change except in cases where
progress on a task switches between indeterminate and determinate
state.
:type total: int, long, or float
:param state: Represents the state of the underlying task execution.
:type state: ProgressState enum value.
:param current: Some numeric value representing the current progress
of the task (relative to total).
:type current: int, long, or float
:param increment: Amount to increment the progress by. Don't pass both
current and increment together, as that behavior is undefined.
:type increment: int, long, or float
:param message: Message corresponding to the current state of the task.
:type message: str
:param expires: Set a custom (UTC) expiration time on the record.
Default is one hour from the current time.
:type expires: datetime
:param save: Whether to save the record to the database.
:type save: bool
"""
if 'increment' in kwargs:
record['data']['current'] += kwargs['increment']
for field, value in six.viewitems(kwargs):
if field in ('total', 'current', 'state', 'message'):
record['data'][field] = value
now = datetime.datetime.utcnow()
if 'expires' in kwargs:
expires = kwargs['expires']
else:
expires = now + datetime.timedelta(hours=1)
record['updated'] = now
record['expires'] = expires
record['updatedTime'] = time.time()
if save:
# Only update the time estimate if we are also saving
if (record['updatedTime'] > record['startTime']
and record['data']['estimateTime']):
if 'estimatedTotalTime' in record:
del record['estimatedTotalTime']
try:
total = float(record['data']['total'])
current = float(record['data']['current'])
if total >= current and total > 0 and current > 0:
record['estimatedTotalTime'] = \
total * (record['updatedTime'] - record['startTime']) / current
except ValueError:
pass
return self.save(record)
else:
return record
def get(self, user, since=None, token=None, sort=None):
"""
Get outstanding notifications for the given user.
:param user: The user requesting updates. None to use the token
instead.
:param since: Limit results to entities that have been updated
since a certain timestamp.
:type since: datetime
:param token: if the user is None, the token requesting updated.
:param sort: Sort field for the database query.
"""
q = {}
if user:
q['userId'] = user['_id']
else:
q['tokenId'] = token['_id']
if since is not None:
q['updated'] = {'$gt': since}
return self.find(q, sort=sort)
def update_notified_users(self, notification):
if len(notification['notifiedUsers']):
user_ids = [user['_id'] for user in notification['notifiedUsers']]
users = list(UserModel().get_users_by_ids(user_ids))
notification_start_date = notification['schedule']['start']
notification_end_date = notification['schedule']['end']
notification_h = int(
datetime.datetime.strptime(notification["startTime"], "%H:%M").hour)
notification_m = int(
datetime.datetime.strptime(notification["startTime"], "%H:%M").minute)
excluded_users = []
for user in users:
current_user_time = datetime.datetime.strptime(self.current_time, '%Y/%m/%d %H:%M') \
+ datetime.timedelta(hours=int(user['timezone']))
usr_h = int(current_user_time.strftime("%H"))
usr_m = int(current_user_time.strftime("%M"))
print(f'User m - {usr_m}')
print(f'Notification m - {notification_m}')
if notification_start_date <= current_user_time.strftime('%Y/%m/%d') \
<= notification_end_date and ((usr_h == notification_h
and notification_m > usr_m)
or usr_h != notification_h):
excluded_users.append(user['_id'])
user_ids = [user for user in notification['notifiedUsers'] if user['_id'] not in excluded_users]
return user_ids
return []
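# Illustrative shape of the arguments replaceNotification() consumes, inferred
# from the code above; concrete values and any field not read there are
# assumptions added for clarity:
#
#   user:  {'_id': ObjectId(...), 'timezone': -4, ...}
#   event: {
#       '_id': ObjectId(...),
#       'data': {
#           'title': 'Morning check-in',
#           'description': 'Please fill in the morning activity',
#           'users': [...],                                            # optional
#           'notifications': [{'start': '09:00', 'end': '10:00', 'random': True}],
#       },
#       'schedule': {
#           # one-off: 'year': [2020], 'month': [0], 'dayOfMonth': [15]
#           # weekly:  'dayOfWeek': [1], 'start': <epoch ms>, 'end': <epoch ms>
#           # daily:   'start': <epoch ms>, 'end': <epoch ms>
#       },
#   }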
```
#### File: mindlogger-app-backend/test/test_local.py
```python
import json
import os
import pytest
from .testLib import fullTest
from girderformindlogger.constants import REPROLIB_CANONICAL
protocolUrl = ''.join([
REPROLIB_CANONICAL,
'protocols/ema-hbn/ema-hbn_schema'
])
act1 = ''.join([
REPROLIB_CANONICAL,
'activities/EmaHBNEvening/ema_evening_schema'
])
act2 = ''.join([
REPROLIB_CANONICAL,
'activities/EmaHBNMorning/ema_morning_schema'
])
act1Item = ''.join([
REPROLIB_CANONICAL,
'activities/EmaHBNEvening/items/good_bad_day'
])
act2Item = ''.join([
REPROLIB_CANONICAL,
'activities/EmaHBNMorning/items/sleeping_aids'
])
with open(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'expected/test_1_HBN.jsonld'
)) as te:
expectedResults = json.loads(te.read())
@pytest.mark.parametrize(
"args",
[(protocolUrl, act1, act2, act1Item, act2Item, expectedResults)]
)
def test_1_HBN(args):
protocolUrl, act1, act2, act1Item, act2Item, expectedResults = args
try:
print('\n\n TEST 1: HBN')
# should be fixed in the future
# fullTest(protocolUrl, act1, act2, act1Item, act2Item, expectedResults)
except Exception as e:
print('\n\n ERROR:', e)
raise e
nestedProtocol = ''.join([
REPROLIB_CANONICAL,
'protocols/pediatric-screener/pediatric-screener_schema'
])
nact1 = ''.join([
REPROLIB_CANONICAL,
'activities/PediatricScreener-Parent/pediatric_screener_parent_schema'
])
nact2 = ''.join([
REPROLIB_CANONICAL,
'activities/PediatricScreener-SelfReport/'
'pediatric_screener_selfreport_schema'
])
nact1Item = ''.join([
REPROLIB_CANONICAL,
'activities/PediatricScreener-Parent/items/fidgety'
])
nact2Item = ''.join([
REPROLIB_CANONICAL,
'activities/PediatricScreener-SelfReport/items/having_less_fun'
])
# @pytest.mark.parametrize(
# "args",
# [(nestedProtocol, nact1, nact2, nact1Item, nact2Item)]
# )
# def test_2_pediatric_screener(args):
# nestedProtocol, nact1, nact2, nact1Item, nact2Item = args
#
# try:
# print('\n\n TEST 2: Pediatric Screener')
# fullTest(
# nestedProtocol,
# nact1,
# nact2,
# nact1Item,
# nact2Item
# )
# except Exception as e:
# print('\n\n ERROR:', e)
# raise e
``` |
{
"source": "jj11hh/schoolwork-utils",
"score": 3
} |
#### File: botemu_back/bin/app.py
```python
import thread
from sys import exit
from math import pi,sqrt
import pygame
from pygame.locals import *
import bot
import cli
from config import *
class App(object):
def __init__(self,fontfile=FONT_FILE,fontsize=FONT_SIZE,\
botcolor=BOTCOLOR,\
botpos=BOTPOS, bgimage=BGIMAGE, bgcolor=BGCOLOR,\
icon=ICON, winsize=WINSIZE):
self.caption = 'Botemu -- Created By Jiang'
pygame.init()
self.fpsclock=pygame.time.Clock()
self.font=pygame.font.Font(fontfile, fontsize)
self.screen = pygame.display.set_mode(winsize,0,32)
self.bgcolor = bgcolor
self.bgimage = pygame.image.load(bgimage).convert_alpha()
self.botmap = self.screen.subsurface(pygame.Rect(BOTMAP))
self.botmap.fill(self.bgcolor)
self.botmap.blit(self.bgimage,self.bgimage.get_rect())
self.bot=bot.Bot(self.botmap,botcolor,botpos)
self.bot.update()
self.botmap.blit(self.bot.surf,self.bot.rect)
_botrect = self.bot.rect
self.botarea = self.screen.subsurface(pygame.Rect(BOTAREA))
_botarea = self.bot.snapshot
self.botarea.blit(_botarea,_botarea.get_rect())
pygame.display.set_caption(self.caption)
if icon:
pygame.display.set_icon(pygame.image.load(icon))
self.paused = False
pygame.display.flip()
self.keyhandlers={
K_p : self.pause,
K_ESCAPE:lambda:pygame.event.post(pygame.event.Event(pygame.QUIT)),
K_s : self.bot.stop,
K_j : lambda:self.bot.speed_inc(0, 0.1),
K_k : lambda:self.bot.speed_inc(1, 0.1),
K_n : lambda:self.bot.speed_inc(0,-0.1),
K_m : lambda:self.bot.speed_inc(1,-0.1),
}
def pause(self):
self.bot.paused = not self.bot.paused
def run(self):
while True:
self.fpsclock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
return
if event.type == pygame.KEYDOWN:
handler = self.keyhandlers.get(event.key)
if callable(handler):handler()
self.botmap.fill(self.bgcolor)
self.botmap.blit(self.bgimage,self.bgimage.get_rect())
_botarea = self.bot.snapshot
self.botarea.blit(_botarea,_botarea.get_rect())
self.bot.update()
self.botmap.blit(self.bot.surf,self.bot.rect)
fontsurf=self.font.render(self.bot.info(),True,(0,0,0))
fontrect=fontsurf.get_rect()
self.screen.blit(fontsurf,fontrect)
pygame.display.update()
fps = self.fpsclock.get_fps()
with_fps = "{} - {:.2f} FPS".format(self.caption, fps)
pygame.display.set_caption(with_fps)
def start(self):
cmd = cli.Cli(self)
thread.start_new_thread(cmd.cmdloop,())
self.run()
if __name__ == "__main__":
app=App()
app.run()
``` |
{
"source": "JJ11teen/cloud-mappings",
"score": 3
} |
#### File: src/cloudmappings/errors.py
```python
class KeySyncError(ValueError):
def __init__(self, storage_provider_name: str, key: str, etag: str) -> None:
super().__init__(
f"Mapping is out of sync with cloud data.\n"
f"Cloud storage: '{storage_provider_name}'\n"
f"Key: '{key}', etag: '{etag}'"
)
class ValueSizeError(ValueError):
def __init__(self, storage_provider_name: str, key: str) -> None:
super().__init__(
f"Value is too big to fit in cloud.\n" f"Cloud storage: '{storage_provider_name}'\n" f"Key: '{key}'"
)
```
#### File: cloudmappings/storageproviders/azuretablestorage.py
```python
from typing import Dict
from urllib.parse import quote, unquote
from azure.core import MatchConditions
from azure.core.exceptions import (
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
)
from azure.data.tables import TableClient, UpdateMode
from azure.identity import DefaultAzureCredential
from .storageprovider import StorageProvider
def _chunk_bytes(data: bytes) -> Dict[str, bytes]:
# Max property size in azure tables is 64KiB
max_property_size = 64 * 1024
return {f"d_{k}": data[i : i + max_property_size] for k, i in enumerate(range(0, len(data), max_property_size))}
def _dechunk_entity(entity: Dict[str, bytes]) -> bytes:
return b"".join([v for k, v in entity.items() if k.startswith("d_")])
class AzureTableStorageProvider(StorageProvider):
def __init__(
self,
table_name: str,
endpoint: str = None,
credential=DefaultAzureCredential(),
connection_string: str = None,
) -> None:
if connection_string is not None:
self._table_client = TableClient.from_connection_string(conn_str=connection_string, table_name=table_name)
else:
self._table_client = TableClient(
endpoint=endpoint,
table_name=table_name,
credential=credential,
)
def encode_key(self, unsafe_key) -> str:
return quote(unsafe_key, safe="", errors="strict")
def decode_key(self, encoded_key) -> str:
return unquote(encoded_key, errors="strict")
def logical_name(self) -> str:
return (
"CloudStorageProvider=AzureTableStorage,"
f"StorageAccountName={self._table_client.account_name},"
f"TableName={self._table_client.table_name}"
)
def create_if_not_exists(self):
try:
self._table_client.create_table()
except ResourceExistsError:
return True
return False
def download_data(self, key: str, etag: str) -> bytes:
try:
entity = self._table_client.get_entity(
partition_key=key,
row_key="cm",
)
except ResourceNotFoundError as e:
if etag is None:
return None
self.raise_key_sync_error(key=key, etag=etag, inner_exception=e)
else:
if etag is not None and etag != entity.metadata["etag"]:
self.raise_key_sync_error(key=key, etag=etag)
return _dechunk_entity(entity)
def upload_data(self, key: str, etag: str, data: bytes) -> str:
if not isinstance(data, bytes):
raise ValueError("Data must be bytes like")
entity = {
"PartitionKey": key,
"RowKey": "cm",
**_chunk_bytes(data=data),
}
try:
if etag is None: # Not expecting existing data
response = self._table_client.create_entity(entity=entity)
else:
response = self._table_client.update_entity(
entity=entity,
mode=UpdateMode.REPLACE,
etag=etag,
match_condition=MatchConditions.IfNotModified,
)
except ResourceExistsError as e:
self.raise_key_sync_error(key=key, etag=etag, inner_exception=e)
except HttpResponseError as e:
if "update condition specified in the request was not satisfied" in e.exc_msg or (
"etag value" in e.exc_msg and "is not valid" in e.exc_msg
):
self.raise_key_sync_error(key=key, etag=etag, inner_exception=e)
elif (
e.model is not None
and e.model.additional_properties is not None
and "odata.error" in e.model.additional_properties
and "code" in e.model.additional_properties["odata.error"]
and e.model.additional_properties["odata.error"]["code"] == "EntityTooLarge"
):
self.raise_value_size_error(key=key, inner_exception=e)
else:
raise e
return response["etag"]
def delete_data(self, key: str, etag: str) -> None:
try:
self._table_client.delete_entity(
partition_key=key,
row_key="cm",
etag=etag,
match_condition=MatchConditions.IfNotModified,
)
except HttpResponseError as e:
if "update condition specified in the request was not satisfied" in e.exc_msg or (
"etag value" in e.exc_msg and "is not valid" in e.exc_msg
):
self.raise_key_sync_error(key=key, etag=etag, inner_exception=e)
else:
raise e
def list_keys_and_etags(self, key_prefix: str) -> Dict[str, str]:
if key_prefix is None:
query = self._table_client.list_entities()
else:
key_prefix_stop = key_prefix[:-1] + chr(ord(key_prefix[-1]) + 1)
query = self._table_client.query_entities(
f"PartitionKey ge '{key_prefix}' and PartitionKey lt '{key_prefix_stop}'"
)
return {e["PartitionKey"]: e.metadata["etag"] for e in query}
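if __name__ == "__main__":
    # Minimal sketch of the optimistic-concurrency flow this provider exposes.
    # The connection string and table name are placeholders, not real values;
    # running this as-is will fail until they are filled in.
    provider = AzureTableStorageProvider(
        table_name="cloudmappingsdemo",
        connection_string="<your-storage-connection-string>",
    )
    provider.create_if_not_exists()
    key = provider.encode_key("example/key")
    etag = provider.upload_data(key, etag=None, data=b"hello")  # etag=None: expect no existing entity
    assert provider.download_data(key, etag=etag) == b"hello"   # a stale etag would raise KeySyncError
    provider.delete_data(key, etag=etag)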
``` |
{
"source": "jj131204/AirBnB_clone_v2",
"score": 3
} |
#### File: AirBnB_clone_v2/web_flask/2-c_route.py
```python
from flask import Flask
app = Flask(__name__)
@app.route("/", strict_slashes=False)
def index():
"""index"""
return "Hello HBNB!"
@app.route('/hbnb', strict_slashes=False)
def hbnb():
"""hbnb"""
return 'HBNB'
@app.route('/c/<text>', strict_slashes=False)
def text(text):
"""display “C ” followed by the value of the text variable"""
return ('C ' + text.replace('_', ' '))
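# Illustrative behaviour of the route above (examples added for clarity, not
# part of the original file):
#   GET /c/is_fun -> "C is fun"
#   GET /c/cool   -> "C cool"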
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
``` |
{
"source": "jj1345/tk-auto-study",
"score": 3
} |
#### File: ocr_module/baidu_image/tokens.py
```python
import requests
def get_token(ak, sk) -> str:
    # client_id is the API Key (AK) obtained from the Baidu console, client_secret is the Secret Key (SK)
host = 'https://aip.baidubce.com/oauth/2.0/token' \
f'?grant_type=client_credentials&client_id={ak}&client_secret={sk}'
response = requests.get(host)
if response.ok:
return response.json().get('access_token')
else:
raise RuntimeError(response.json().get('error_description'))
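if __name__ == "__main__":
    # Manual smoke test; the AK/SK strings are placeholders for the API Key and
    # Secret Key issued by the Baidu AI console, not real credentials.
    print(get_token("<your-api-key>", "<your-secret-key>"))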
```
#### File: tk-auto-study/proxy_module/proxy_fetcher.py
```python
import logging
import random
from exception import KnownException
from .proxy import fetchers
class ProxyFecher(object):
cache_hosts: list[str]
fetched: int
def __init__(self):
self.cache_hosts: list[str] = []
self.fetched = 0
def random_pop(self) -> str:
if self.cache_hosts.__len__() == 0:
self.fetch_new_hosts()
        idx = random.randint(0, len(self.cache_hosts) - 1)
return self.cache_hosts.pop(idx)
def empty(self) -> bool:
return len(self.cache_hosts) == 0 and self.fetched == len(fetchers)
def fetch_new_hosts(self):
if self.fetched == len(fetchers):
            raise KnownException('Proxy IPs have already been refreshed')
        logging.info("Fetching fresh proxy IPs")
self.cache_hosts.clear()
while self.fetched < len(fetchers):
try:
fetcher = fetchers[self.fetched]
for host in fetcher():
self.cache_hosts.append(host)
break
except Exception:
logging.warning("正在查找可用IP代理...")
self.fetched += 1
if(self.cache_hosts.__len__() == 0):
raise KnownException("无可用代理IP")
else:
self.fetched += 1
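# --- Hypothetical usage sketch (not part of the original module) ------------
# Intended consumption pattern: keep popping random proxies until every
# configured fetcher has been exhausted.
#
# fetcher = ProxyFecher()
# while not fetcher.empty():
#     host = fetcher.random_pop()
#     ...  # try the request through `host`; on failure, loop to the next proxy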
``` |
{
"source": "jj1993/refugees-visualisation",
"score": 3
} |
#### File: data/csv_to_JSON(Python-scripts)/makeRefugeeJSON.py
```python
import json
FILES = ['2010.csv', '2011.csv', '2012.csv', '2013.csv' ,'2014.csv', '2015.csv']
OUTPUTFILE = '../refugees.json'
def makeDict(filename):
"""
Leest de csv-file met de migratie-stromen data
Zet de data om in een lijst van dictionaries
"""
data=open(filename).read()
l = []
for flow in data.split("\n")[1:]:
t = []
for e in flow.split(";"):
if len(e) > 0:
if len(t) < 2:
e = ''.join(i for i in e if not i.isdigit())
e = e.decode('ascii', errors='replace')
else:
if e == "*" or e == "-": e = 0
e = int(str(e).replace('.', ''))
t.append(e)
if len(t) > 0:
d = {}
d["origin"], d["asylum"], d["start"], d["repart"], d["end"] = t
l.append(d)
return l
def combine(data):
"""
    Combines the yearly variables into a single list
"""
newList = []
for n, year in enumerate(data):
key = FILES[n][:4]
nextKey = str(int(key) + 1)
rep = key+"rep"
for y in year:
for e in newList:
if (y["origin"] == e["origin"]
and y["asylum"] == e["asylum"]):
try:
e[key] = int((e[key] + y["start"])/2.0)
except:
e[key] = y["start"]
e[nextKey] = y["end"]
e[rep] = y["repart"]
break
else:
d = {}
d["origin"], d["asylum"], d[key], d[nextKey], d[rep] = (
y["origin"], y["asylum"], y["start"], y["end"], y["repart"]
)
newList.append(d)
return newList
def makeJSON(dataList, filename):
'''
    Saves a JSON file with a dictionary for every refugee flow
'''
with open(filename, 'wb') as f:
json.dump(dataList, f, indent=True)
if __name__ == '__main__':
print "Sorting courses data..."
data = []
for f in FILES:
year = makeDict('../cleaned_data/'+f)
data.append(year)
c = combine(data)
print "Compiling JSON..."
makeJSON(c, OUTPUTFILE)
```
#### File: data/csv_to_JSON(Python-scripts)/makeTotalJSON.py
```python
import json
FILESSPACE = ['2010total', '2011total']
FILESCSV = ['2012total.csv', '2013total.csv', '2014total.csv', '2015total.csv','2016total.csv']
OUTPUTFILE = '../total.json'
def makeDictSpace(filename, l):
"""
    Reads the CSV file with the data on the total number of refugees
    and turns the data into a list of dictionaries
"""
data=open(filename).read()
y = filename[13:17]
for line in data.split("\n"):
country = "*"
for n, c in enumerate(line):
if country[-1] == " " and (c.isdigit() or c == "-"):
if y == "2010":
country = "".join([i for i in country if not i.isdigit()])
if y == "2011":
for p, i in enumerate(country):
if i.isdigit():
country = country[:p]
country = country[1:-1]
country = country.decode('ascii', errors='ignore')
else:
country += c
continue
values = "".join(line[n:])
v = values.split(" ")[0]
v = v.replace(",","").replace(")","").replace("-","0")
v = float(v)
for i in l:
if i["asylum"] == country:
i[y] = v
break
else:
d = {}
d["asylum"] = country
d[y] = v
l.append(d)
break
return l
def makeDictCSV(filename, l):
"""
    Reads the CSV file with the coding data
    and turns the data into a list of dictionaries
"""
data=open(filename).read()
y = filename[13:17]
for line in data.split("\n"):
c, v = line.split(";")[:2]
c = "".join([i for i in c if not i.isdigit()])
if c[-1] == " ":
c = c[:-1]
c = c.decode('ascii', errors='ignore')
if v == "-":
v = "0"
v = float(v.replace(".",""))
for i in l:
if i["asylum"] == c:
i[y] = v
break
else:
d = {}
d["asylum"] = c
d[y] = v
l.append(d)
if c == "Zimbabwe":
break
return l
def makeJSON(dataList, filename):
'''
    Saves a JSON file with a dictionary for every country code
'''
with open(filename, 'wb') as f:
json.dump(dataList, f, indent=True)
if __name__ == '__main__':
print "Sorting ISO data..."
l = []
for f in FILESSPACE:
l = makeDictSpace('../cleaned_data/'+f, l)
for f in FILESCSV:
l = makeDictCSV('../cleaned_data/'+f, l)
print "Compiling JSON..."
makeJSON(l, OUTPUTFILE)
``` |
{
"source": "jj2883/order_of_relu_bn",
"score": 2
} |
#### File: jj2883/order_of_relu_bn/main.py
```python
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import shutil
import time
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from models import *
from utils import progress_bar
#######################################################################
path_current = os.path.dirname(os.path.realpath(__file__))
path_subdir = 'data'
data_filename = 'resnext.txt'
path_file = os.path.join(path_current,path_subdir,data_filename)
f=open(path_file,'w')
#########################################################
global args
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run (default: 100)')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
# net = VGG('VGG19')
# net = ResNet18()
# net = PreActResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = MobileNetV2()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()
# net = ShuffleNetV2(1)
#net = ResNeXt29_2x64d()
net = ResNeXt29_2x64d_rb()
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.t7')
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
##################################################################3
train_losses =np.zeros((args.epochs))
train_prec1s =np.zeros((args.epochs))
eval_losses =np.zeros((args.epochs))
eval_prec1s =np.zeros((args.epochs))
x_epoch = np.zeros((args.epochs))
best_prec1 = 0  # best top-1 precision seen so far; needed by the checkpoint logic below
#####################################################################
################################################################################
def init_weight(net):
import math
for m in net.modules():
if isinstance(m, nn.Conv2d):
c_out, _, kh, kw = m.weight.size()
n = kh * kw * c_out
m.weight.data.normal_(0, math.sqrt(2 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
init_weight(net)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
###########################################################################
# Training
#def train(epoch):
def train(train_loader, model, criterion, optimizer, epoch,f):
########################################
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
#######################################
print('\nEpoch: %d' % epoch)
net.train()
######################
end = time.time()
#######################
train_loss = 0
correct = 0
total = 0
##################################################3
for batch_idx, (inputs, targets) in enumerate(train_loader):
data_time.update(time.time() - end)
##########################################################################3
inputs, targets = inputs.to(device), targets.to(device)
#optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
#loss.backward()
###########################################################
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1[0], inputs.size(0))
top5.update(prec5[0], inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.print_freq == 0:
f.write('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss.val:.4f}({loss.avg:.4f})\t'
'Prec@1 {top1.val:.2f}({top1.avg:.2f})\t'
'Prec@5 {top5.val:.2f}({top5.avg:.2f})\r\n'.format(
epoch, batch_idx, len(train_loader),
loss=losses, top1=top1, top5=top5))
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.2f} ({top1.avg:.2f})\t'
'Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(
epoch, batch_idx, len(train_loader),
loss=losses, top1=top1, top5=top5))
return losses.avg, top1.avg
#######################################################################
# optimizer.step()
# train_loss += loss.item()
# _, predicted = outputs.max(1)
# total += targets.size(0)
# correct += predicted.eq(targets).sum().item()
# progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
# % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
#def test(epoch):
def test(val_loader, net, criterion,f):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
end = time.time()
for batch_idx, (inputs, targets) in enumerate(val_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
#####################################################
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs, targets, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1[0], inputs.size(0))
top5.update(prec5[0], inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.print_freq == 0:
f.write('Test: [{0}/{1}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\r\n'.format(
batch_idx, len(val_loader), loss=losses,
top1=top1, top5=top5))
print('Test: [{0}/{1}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
batch_idx, len(val_loader), loss=losses,
top1=top1, top5=top5))
f.write(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}\r\n'
.format(top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return losses.avg, top1.avg
##############################################################################3
# test_loss += loss.item()
# _, predicted = outputs.max(1)
# total += targets.size(0)
# correct += predicted.eq(targets).sum().item()
# progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
# % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Save checkpoint.
# acc = 100.*correct/total
# if acc > best_acc:
# print('Saving..')
# state = {
# 'net': net.state_dict(),
# 'acc': acc,
# 'epoch': epoch,
# }
# if not os.path.isdir('checkpoint'):
# os.mkdir('checkpoint')
# torch.save(state, './checkpoint/ckpt.t7')
# best_acc = acc
for epoch in range(start_epoch, args.epochs):
# train(epoch)
# test(epoch)
adjust_learning_rate(optimizer, epoch)
# train for one epoch
train_loss, train_prec1 = train(trainloader, net, criterion, optimizer, epoch,f)
# evaluate on validation set
    eval_loss, eval_prec1 = test(testloader, net, criterion, f)
train_losses[epoch] = train_loss
train_prec1s[epoch] = train_prec1
eval_losses[epoch] = eval_loss
eval_prec1s[epoch] = eval_prec1
x_epoch[epoch] = epoch
# train_losses =np.append( train_losses + train_loss
# train_prec1s = train_prec1s + train_prec1
# eval_losses = eval_losses + eval_loss
# eval_prec1s = eval_prec1s + eval_prec1
##
# train_loss.append(train_losses)
# train_prec1.append(train_prec1s)
# eval_loss.append(eval_losses)
# eval_prec1.append(eval_prec1s)
# remember best prec@1 and save checkpoint
is_best = eval_prec1 > best_prec1
best_prec1 = max(eval_prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': 'ResNext',
        'state_dict': net.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best)
matplotlib.use('Agg')
plt.clf()
plt.close()
fig_loss = plt.figure()
ax_loss = fig_loss.add_subplot(1,1,1)
ax_loss.plot(x_epoch,train_losses,label='Train Loss')
ax_loss.plot(x_epoch,eval_losses,label='Test Loss')
ax_loss.legend(loc=1)
ax_loss.set_xlabel('epoch')
ax_loss.set_ylabel('loss')
ax_loss.set_title('Loss of Train and Test')
plot_loss_filename = 'VGGloss.png'
path_loss_file = os.path.join(path_current,path_subdir,plot_loss_filename)
fig_loss.savefig(path_loss_file)
plt.clf()
plt.close()
fig_prec = plt.figure()
ax_prec = fig_prec.add_subplot(1,1,1)
ax_prec.plot(x_epoch,train_prec1s,label='Train Best1')
ax_prec.plot(x_epoch,eval_prec1s,label='Test Best1')
ax_prec.legend(loc=1)
ax_prec.set_xlabel('epoch')
ax_prec.set_ylabel('Best1 Precision')
ax_prec.set_title('Best1 of Train and Test')
plot_prec_filename = 'VGGprec.png'
path_prec_file = os.path.join(path_current,path_subdir,plot_prec_filename)
fig_prec.savefig(path_prec_file)
f.close()
``` |
{
"source": "jj314/omnipy",
"score": 3
} |
#### File: jj314/omnipy/batt_check.py
```python
import time
import RPi.GPIO as GPIO
from threading import Thread, RLock
class SpiBatteryVoltageChecker:
def __init__(self):
# Hardware setup
self.adcs = [0] # voltage divider connected to channel 0 of mcp3002
self.cutoff = 3 # low battery cutoff (when LipoShim shuts dowwn)
self.maxvolts = 4.2 # max voltage for the battery, equivalent to 100% charge
self.vref = 3.3 # vref of the ADC
self.res1 = 180 # resistor connected to VBATT (/1000)
self.res2 = 100 # resistor connected to GND (/1000)
self.reps = 10 # how many times to take each measurement for averaging
self.pcround = 1 # round % battery to nearest
# Define Pins/Ports on ADC
self.SPICLK = 16
self.SPIMISO = 20
self.SPIMOSI = 21
self.SPICS = 13
self.battery_level = -1
self.adc_readings = []
self.sync_lock = RLock()
try:
# Set up set up GPIO & SPI interface pins
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.SPIMOSI, GPIO.OUT)
GPIO.setup(self.SPIMISO, GPIO.IN)
GPIO.setup(self.SPICLK, GPIO.OUT)
GPIO.setup(self.SPICS, GPIO.OUT)
# initial reading to determine availability
average = 0
for i in range(0, 10):
average = self._get_moving_average()
bp = self._get_percentage(average)
if bp <= 0.01:
print("spi reader not detected")
GPIO.cleanup()
else:
self.battery_level = bp
self.service_thread = Thread(target=self._service_loop)
self.service_thread.setDaemon(True)
self.service_thread.start()
except:
print("Failed to set up GPIO pins for battery level reading")
GPIO.cleanup()
def get_measurement(self):
with self.sync_lock:
return self.battery_level
def _service_loop(self):
while True:
time.sleep(60)
try:
with self.sync_lock:
self.battery_level = self._get_percentage(self._get_moving_average())
except:
pass
# ADC code based on an adafruit example for mcp3008
def _readadc(self, adcnum, clockpin, mosipin, misopin, cspin):
if ((adcnum > 1) or (adcnum < 0)):
return -1
if (adcnum == 0):
commandout = 0x6
else:
commandout = 0x7
GPIO.output(cspin, True)
GPIO.output(clockpin, False) # start clock low
GPIO.output(cspin, False) # bring CS low
commandout <<= 5 # we only need to send 3 bits here
for i in range(3):
if (commandout & 0x80):
GPIO.output(mosipin, True)
else:
GPIO.output(mosipin, False)
commandout <<= 1
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout = 0
# read in one empty bit, one null bit and 10 ADC bits
for i in range(12):
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout <<= 1
if (GPIO.input(misopin)):
adcout |= 0x1
GPIO.output(cspin, True)
adcout /= 2 # first bit is 'null' so drop it
return adcout
def _get_moving_average(self):
self.adc_readings.append(self._get_adc_reading())
reading_count = len(self.adc_readings)
if reading_count > self.reps:
self.adc_readings = self.adc_readings[reading_count-self.reps:reading_count]
return float(sum(self.adc_readings) / len(self.adc_readings))
def _get_adc_reading(self):
adc_sum = 0
for adcnum in self.adcs:
try:
adc_sum += self._readadc(adcnum, self.SPICLK, self.SPIMOSI, self.SPIMISO, self.SPICS)
except:
print("Error reading adc value")
time.sleep(0.05)
return float(adc_sum / len(self.adcs))
def _get_percentage(self, adc_reading):
# convert analogue reading to volts and %, accounting for vref and setup of resistor bridge
volts = adc_reading * ( self.vref / 1024 ) * (self.res1 + self.res2) / self.res2
voltspc = int ( 100 * ( volts - self.cutoff ) / ( self.maxvolts - self.cutoff ) )
voltspcround = self.pcround * round( voltspc / self.pcround )
if (voltspcround > 100):
voltspcround = 100
if (voltspcround < 0):
voltspcround = 0
return voltspcround
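    # Worked example of the conversion above (illustrative numbers): an averaged
    # ADC reading of 450 gives 450 * (3.3 / 1024) * (180 + 100) / 100 ~= 4.06 V,
    # which maps to int(100 * (4.06 - 3) / (4.2 - 3)) = 88 percent.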
if __name__ == '__main__':
sbc = SpiBatteryVoltageChecker()
while True:
try:
print("Battery is now at %d percent" % sbc.get_measurement())
time.sleep(10)
except KeyboardInterrupt:
break
```
#### File: omnipy/podcomm/manchester.py
```python
import random
def encodeSingleByte(d):
e = 0
for i in range(0, 8):
e = e >> 2
if d & 0x01 == 0:
e |= 0x8000
else:
e |= 0x4000
d = d >> 1
return bytes([(e >> 8), e & 0xff])
class ManchesterCodec:
def __init__(self):
#self.preamble = bytes([0x65,0x66]) * 20 + bytes([0xa5, 0x5a])
self.preamble = bytes()
self.decode_dict = dict()
self.encode_dict = dict()
for i in range(0, 256):
enc = encodeSingleByte(i)
self.decode_dict[enc] = i
self.encode_dict[i] = enc
self.noiseSeq = 0
noiseNibbles = '0123478bcdef'
self.noiseLines = []
for x in range(0, 32):
noiseLine = "f"
for i in range(0, 79):
noiseLine += random.choice(noiseNibbles)
self.noiseLines.append(bytearray.fromhex(noiseLine))
def decode(self, data):
decoded = bytes()
for i in range(0, len(data), 2):
word = data[i:i+2]
if word in self.decode_dict:
decoded += bytes([self.decode_dict[word]])
else:
break
return decoded
def encode(self, data):
encoded = self.preamble
for i in data:
encoded += self.encode_dict[i]
encoded += self.noiseLines[self.noiseSeq]
self.noiseSeq += 1
self.noiseSeq %= 32
return encoded[0:80]
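# Usage sketch (added note, not part of the original module): each input byte becomes two
# Manchester-coded bytes (a 0 bit -> "10", a 1 bit -> "01"), so encodeSingleByte(0x00) == b'\xaa\xaa'
# and encodeSingleByte(0xff) == b'\x55\x55'. A round trip with the codec would look like:
# codec = ManchesterCodec()
# encoded = codec.encode(b'\xa5') # two code bytes followed by noise padding, 80 bytes in total
# codec.decode(encoded[:2]) # -> b'\xa5' (decode() stops at the first word not in decode_dict)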
```
#### File: omnipy/podcomm/pdm.py
```python
from .protocol import *
from .protocol_radio import PdmRadio
from .nonce import *
from .exceptions import PdmError, OmnipyError, PdmBusyError, StatusUpdateRequired
from .definitions import *
from .packet_radio import TxPower
from decimal import *
from datetime import datetime, timedelta
from threading import RLock
import time
g_lock = RLock()
class PdmLock():
def __init__(self, timeout=2):
self.fd = None
self.timeout = timeout
def __enter__(self):
if not g_lock.acquire(blocking=True, timeout=self.timeout):
raise PdmBusyError()
def __exit__(self, exc_type, exc_val, exc_tb):
g_lock.release()
class Pdm:
def __init__(self, pod):
if pod is None:
raise PdmError("Cannot instantiate pdm without pod")
self.pod = pod
self.nonce = None
self.radio = None
self.time_adjustment = 0
self.debug_status_skip = False
self.logger = getLogger()
def stop_radio(self):
if self.radio is not None:
self.radio.stop()
self.radio = None
def start_radio(self):
self.get_radio(new=True)
def get_nonce(self):
if self.nonce is None:
if self.pod.id_lot is None or self.pod.id_t is None:
return None
if self.pod.nonce_last is None or self.pod.nonce_seed is None:
self.nonce = Nonce(self.pod.id_lot, self.pod.id_t)
else:
self.nonce = Nonce(self.pod.id_lot, self.pod.id_t, self.pod.nonce_last, self.pod.nonce_seed)
return self.nonce
def get_radio(self, new=False):
if self.radio is not None and new:
self.radio.stop()
self.radio = None
if self.radio is None:
if self.pod.radio_message_sequence is None or self.pod.radio_packet_sequence is None:
self.pod.radio_message_sequence = 0
self.pod.radio_packet_sequence = 0
self.radio = PdmRadio(self.pod.radio_address,
msg_sequence=self.pod.radio_message_sequence,
pkt_sequence=self.pod.radio_packet_sequence)
return self.radio
def send_request(self, request, with_nonce=False, double_take=False,
expect_critical_follow_up=False,
tx_power=TxPower.Normal):
nonce_obj = self.get_nonce()
if with_nonce:
nonce_val = nonce_obj.getNext()
request.set_nonce(nonce_val)
self.pod.nonce_syncword = None
response = self.get_radio().send_message_get_message(request, double_take=double_take,
expect_critical_follow_up=expect_critical_follow_up,
tx_power=tx_power)
response_parse(response, self.pod)
if with_nonce and self.pod.nonce_syncword is not None:
self.logger.info("Nonce resync requested")
nonce_obj.sync(self.pod.nonce_syncword, request.sequence)
nonce_val = nonce_obj.getNext()
request.set_nonce(nonce_val)
self.pod.nonce_syncword = None
self.get_radio().message_sequence = request.sequence
response = self.get_radio().send_message_get_message(request, double_take=double_take,
expect_critical_follow_up=expect_critical_follow_up)
response_parse(response, self.pod)
if self.pod.nonce_syncword is not None:
self.get_nonce().reset()
raise PdmError("Nonce sync failed")
def _internal_update_status(self, update_type=0):
if self.debug_status_skip:
return
self._assert_pod_address_assigned()
self.send_request(request_status(update_type))
def update_status(self, update_type=0):
try:
with PdmLock():
self.logger.info("Updating pod status, request type %d" % update_type)
self.pod.last_command = { "command": "STATUS", "type": update_type, "success": False }
self._internal_update_status(update_type)
self.pod.last_command["success"] = True
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.update_status(update_type=update_type)
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
def acknowledge_alerts(self, alert_mask):
try:
with PdmLock():
self.logger.info("Acknowledging alerts with bitmask %d" % alert_mask)
self.pod.last_command = {"command": "ACK_ALERTS", "mask": alert_mask, "success": False}
self._assert_pod_address_assigned()
self._internal_update_status()
self._assert_immediate_bolus_not_active()
self._assert_can_acknowledge_alerts()
if self.pod.state_alert | alert_mask != self.pod.state_alert:
raise PdmError("Bitmask invalid for current alert state")
request = request_acknowledge_alerts(alert_mask)
self.send_request(request, with_nonce=True)
if self.pod.state_alert & alert_mask != 0:
raise PdmError("Failed to acknowledge one or more alerts")
self.pod.last_command["success"] = True
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.acknowledge_alerts(alert_mask)
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
# def configure_reservoir_alarm(self, iu_reservoir_level=None):
# try:
# with PdmLock(0):
# if iu_reservoir_level is None:
# request = request_clear_low_reservoir_alert()
# else:
# request = request_set_low_reservoir_alert(self.pod.var_alert_low_reservoir)
# self.send_request(request, with_nonce=True)
# self.pod.var_alert_low_reservoir_set = True
# except OmnipyError:
# raise
# except Exception as e:
# raise PdmError("Unexpected error") from e
#
# def configure_pod_expiry_alarm(self, minutes_after_activation=None):
# try:
# with PdmLock(0):
# if minutes_after_activation is None:
# request = request_clear_pod_expiry_alert()
# else:
# request = request_set_pod_expiry_alert(minutes_after_activation)
# self.send_request(request, with_nonce=True)
# self.pod.var_alert_low_reservoir_set = True
# except OmnipyError:
# raise
# except Exception as e:
# raise PdmError("Unexpected error") from e
def hf_silence_will_fall(self):
try:
with PdmLock():
self._internal_update_status()
self._assert_immediate_bolus_not_active()
if self.pod.state_alert > 0:
self.logger.info("Acknowledging alerts with bitmask %d" % self.pod.state_alert)
self.pod.last_command = {"command": "ACK_ALERTS", "mask": self.pod.state_alert, "success": False}
request = request_acknowledge_alerts(self.pod.state_alert)
self.send_request(request, with_nonce=True)
self.pod.last_command = {"command": "ACK_ALERTS", "mask": self.pod.state_alert, "success": False}
self._internal_update_status(1)
active_alerts = []
if self.pod.state_alerts is not None:
for ai in range(0,8):
if self.pod.state_alerts[ai] > 0:
active_alerts.append(ai)
if len(active_alerts) == 0:
self.logger.info("No alerts active")
else:
self.logger.info("Clearing alerts: %s" % str(active_alerts))
acs = []
for i in active_alerts:
ac = AlertConfiguration()
ac.activate = False
ac.alert_after_minutes = 0
ac.alert_duration = 0
ac.alert_index = i
acs.append(ac)
request = request_acknowledge_alerts(self.pod.state_alert)
self.send_request(request, with_nonce=True)
self.pod.last_command["success"] = True
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.hf_silence_will_fall()
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
def is_busy(self):
try:
with PdmLock(0):
return self._is_bolus_running(no_live_check=True)
except PdmBusyError:
return True
except OmnipyError:
raise
except Exception as e:
raise PdmError("Unexpected error") from e
def bolus(self, bolus_amount):
try:
with PdmLock():
self.pod.last_command = {"command": "BOLUS", "units": bolus_amount, "success": False}
self._assert_pod_address_assigned()
self._assert_can_generate_nonce()
self._internal_update_status()
self._assert_immediate_bolus_not_active()
self._assert_not_faulted()
self._assert_status_running()
if self.pod.var_maximum_bolus is not None and bolus_amount > self.pod.var_maximum_bolus:
raise PdmError("Bolus exceeds defined maximum bolus of %.2fU" % self.pod.var_maximum_bolus)
if bolus_amount < DECIMAL_0_05:
raise PdmError("Cannot do a bolus less than 0.05U")
if self._is_bolus_running():
raise PdmError("A previous bolus is already running")
if bolus_amount > self.pod.insulin_reservoir:
raise PdmError("Cannot bolus %.2f units, insulin_reservoir capacity is at: %.2f")
self.logger.debug("Bolusing %0.2f" % float(bolus_amount))
request = request_bolus(bolus_amount)
self.send_request(request, with_nonce=True)
if self.pod.state_bolus != BolusState.Immediate:
raise PdmError("Pod did not confirm bolus")
self.pod.last_enacted_bolus_start = self.get_time()
self.pod.last_enacted_bolus_amount = float(bolus_amount)
self.pod.last_command["success"] = True
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.bolus(bolus_amount)
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
def cancel_bolus(self):
try:
with PdmLock():
self.logger.debug("Canceling bolus")
self.pod.last_command = {"command": "BOLUS_CANCEL", "canceled": 0, "success": False}
self._assert_pod_address_assigned()
self._assert_can_generate_nonce()
self._internal_update_status()
self._assert_not_faulted()
self._assert_status_running()
if self._is_bolus_running():
request = request_cancel_bolus()
self.send_request(request, with_nonce=True)
if self.pod.state_bolus == BolusState.Immediate:
raise PdmError("Failed to cancel bolus")
else:
self.pod.last_enacted_bolus_amount = float(-1)
self.pod.last_enacted_bolus_start = self.get_time()
self.pod.last_command["success"] = True
self.pod.last_command["canceled"] = self.pod.insulin_canceled
else:
raise PdmError("Bolus is not running")
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.cancel_bolus()
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
def cancel_temp_basal(self):
try:
with PdmLock():
self.logger.debug("Canceling temp basal")
self.pod.last_command = {"command": "TEMPBASAL_CANCEL", "success": False}
if not self.debug_status_skip:
self._assert_pod_address_assigned()
self._assert_can_generate_nonce()
self._internal_update_status()
self._assert_immediate_bolus_not_active()
self._assert_not_faulted()
self._assert_status_running()
if self._is_temp_basal_active() or self.debug_status_skip:
request = request_cancel_temp_basal()
self.send_request(request, with_nonce=True)
if self.pod.state_basal == BasalState.TempBasal:
raise PdmError("Failed to cancel temp basal")
else:
self.pod.last_enacted_temp_basal_duration = float(-1)
self.pod.last_enacted_temp_basal_start = self.get_time()
self.pod.last_enacted_temp_basal_amount = float(-1)
self.pod.last_command["success"] = True
else:
self.logger.warning("Cancel temp basal received, while temp basal was not active. Ignoring.")
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.cancel_temp_basal()
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
def set_temp_basal(self, basalRate, hours, confidenceReminder=False):
try:
with PdmLock():
self.logger.debug("Setting temp basal %02.2fU/h for %02.1fh"% (float(basalRate), float(hours)))
self.pod.last_command = {"command": "TEMPBASAL",
"duration_hours": hours,
"hourly_rate": basalRate,
"success": False}
if not self.debug_status_skip:
self._assert_pod_address_assigned()
self._assert_can_generate_nonce()
self._internal_update_status()
self._assert_immediate_bolus_not_active()
self._assert_not_faulted()
self._assert_status_running()
if hours > 12 or hours < 0.5:
raise PdmError("Requested duration is not valid")
if self.pod.var_maximum_temp_basal_rate is not None and \
basalRate > Decimal(self.pod.var_maximum_temp_basal_rate):
raise PdmError("Requested rate exceeds maximum temp basal setting")
if basalRate > Decimal(30):
raise PdmError("Requested rate exceeds maximum temp basal capability")
if self._is_temp_basal_active():
self.logger.debug("Canceling active temp basal before setting a new temp basal")
request = request_cancel_temp_basal()
self.send_request(request, with_nonce=True)
if self.pod.state_basal == BasalState.TempBasal:
raise PdmError("Failed to cancel running temp basal")
request = request_temp_basal(basalRate, hours)
self.send_request(request, with_nonce=True)
if self.pod.state_basal != BasalState.TempBasal:
raise PdmError("Failed to set temp basal")
else:
self.pod.last_enacted_temp_basal_duration = float(hours)
self.pod.last_enacted_temp_basal_start = self.get_time()
self.pod.last_enacted_temp_basal_amount = float(basalRate)
self.pod.last_command["success"] = True
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.set_temp_basal(basalRate=basalRate, hours=hours)
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
def set_basal_schedule(self, schedule):
try:
with PdmLock():
self.logger.debug("Setting basal schedule: %s"% schedule)
self.pod.last_command = {"command": "BASALSCHEDULE",
"hourly_rates": schedule,
"success": False}
self._assert_pod_address_assigned()
self._assert_can_generate_nonce()
self._internal_update_status()
self._assert_immediate_bolus_not_active()
self._assert_not_faulted()
self._assert_status_running()
if self._is_temp_basal_active():
raise PdmError("Cannot change basal schedule while a temp. basal is active")
self._assert_basal_schedule_is_valid(schedule)
pod_date = datetime.utcnow() + timedelta(minutes=self.pod.var_utc_offset) \
+ timedelta(seconds=self.time_adjustment)
hours = pod_date.hour
minutes = pod_date.minute
seconds = pod_date.second
request = request_set_basal_schedule(schedule, hour=hours, minute=minutes, second=seconds)
self.send_request(request, with_nonce=True, double_take=True)
if self.pod.state_basal != BasalState.Program:
raise PdmError("Failed to set basal schedule")
else:
self.pod.var_basal_schedule = schedule
self.pod.last_command["success"] = True
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.set_basal_schedule(schedule)
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
def deactivate_pod(self):
try:
with PdmLock():
self._assert_immediate_bolus_not_active()
self.logger.debug("Deactivating pod")
self.pod.last_command = {"command": "DEACTIVATE", "success": False}
self._internal_update_status()
self._assert_can_deactivate()
request = request_deactivate()
self.send_request(request, with_nonce=True)
if self.pod.state_progress != PodProgress.Inactive:
raise PdmError("Failed to deactivate")
else:
self.pod.last_command["success"] = True
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.deactivate_pod()
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
def pair_pod(self, candidate_address, utc_offset):
try:
with PdmLock():
self.logger.debug("Activating pod")
self.pod.last_command = {"command": "PAIR",
"address": candidate_address,
"utc_offset": utc_offset,
"success": False}
if self.pod.state_progress > PodProgress.PairingSuccess:
raise PdmError("Pod is already paired")
self.pod.var_utc_offset = utc_offset
radio = None
if self.pod.state_progress is None or \
self.pod.state_progress < PodProgress.TankFillCompleted:
self.pod.radio_address = 0xffffffff
radio = self.get_radio(new=True)
radio.radio_address = 0xffffffff
request = request_assign_address(candidate_address)
response = self.get_radio().send_message_get_message(request, message_address=0xffffffff,
ack_address_override=candidate_address,
tx_power=TxPower.Low)
response_parse(response, self.pod)
self._assert_pod_can_activate()
else:
self._internal_update_status()
if self.pod.state_progress == PodProgress.TankFillCompleted:
self.pod.var_activation_date = self.get_time()
pod_date = datetime.utcfromtimestamp(self.pod.var_activation_date) \
+ timedelta(minutes=self.pod.var_utc_offset)
year = pod_date.year
month = pod_date.month
day = pod_date.day
hour = pod_date.hour
minute = pod_date.minute
if radio is None:
radio = self.get_radio(new=True)
radio.radio_address = 0xffffffff
radio.message_sequence = 1
request = request_setup_pod(self.pod.id_lot, self.pod.id_t, candidate_address,
year, month, day, hour, minute)
response = self.get_radio().send_message_get_message(request, message_address=0xffffffff,
ack_address_override=candidate_address,
tx_power=TxPower.Low)
response_parse(response, self.pod)
self._assert_pod_paired()
self.pod.last_command["success"] = True
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.pair_pod(candidate_address, utc_offset)
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
def activate_pod(self):
try:
with PdmLock():
self.logger.debug("Activating pod")
self.pod.last_command = {"command": "ACTIVATE",
"success": False}
radio = self.get_radio(new=True)
self._internal_update_status()
if self.pod.state_progress > PodProgress.ReadyForInjection:
raise PdmError("Pod is already activated")
if self.pod.state_progress == PodProgress.PairingSuccess:
if radio is not None:
self.pod.radio_packet_sequence = radio.packet_sequence
radio.radio_address = self.pod.radio_address
radio.message_sequence = 2
self.pod.nonce_seed = 0
self.pod.nonce_last = None
# if self.pod.var_alert_low_reservoir is not None:
# if not self.pod.var_alert_low_reservoir_set:
# request = request_set_low_reservoir_alert(self.pod.var_alert_low_reservoir)
# self.send_request(request, with_nonce=True, tx_power=TxPower.Low)
# self.pod.var_alert_low_reservoir_set = True
#
ac = AlertConfiguration()
ac.activate = True
ac.alert_index = 7
ac.alert_after_minutes = 5
ac.alert_duration = 55
ac.beep_type = BeepType.BipBeepFourTimes
ac.beep_repeat_type = BeepPattern.OnceEveryFiveMinutes
acs = [ac]
request = request_alert_setup(acs)
self.send_request(request, with_nonce=True)
# request = request_delivery_flags(0, 0)
# self.send_request(request, with_nonce=True)
request = request_delivery_flags(0, 0)
self.send_request(request, with_nonce=True)
request = request_prime_cannula()
self.send_request(request, with_nonce=True)
time.sleep(55)
self._internal_update_status()
while self.pod.state_progress != PodProgress.ReadyForInjection:
time.sleep(5)
self._internal_update_status()
# if self.pod.state_progress == PodProgress.ReadyForInjection:
# if self.pod.var_alert_replace_pod is not None:
# if not self.pod.var_alert_replace_pod_set:
# request = request_set_pod_expiry_alert(self.pod.var_alert_replace_pod - self.pod.state_active_minutes)
# self.send_request(request, with_nonce=True, tx_power=TxPower.Low)
# self.pod.var_alert_replace_pod_set = True
self.pod.last_command["success"] = True
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.activate_pod()
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
def inject_and_start(self, basal_schedule):
try:
with PdmLock():
self.logger.debug("Starting pod")
self.pod.last_command = {"command": "START",
"hourly_rates": basal_schedule,
"success": False}
self._internal_update_status()
if self.pod.state_progress >= PodProgress.Running:
raise PdmError("Pod has passed the injection stage")
if self.pod.state_progress < PodProgress.ReadyForInjection:
raise PdmError("Pod is not ready for injection")
if self.pod.state_progress == PodProgress.ReadyForInjection:
self._assert_basal_schedule_is_valid(basal_schedule)
pod_date = datetime.utcnow() + timedelta(minutes=self.pod.var_utc_offset) \
+ timedelta(seconds=self.time_adjustment)
hour = pod_date.hour
minute = pod_date.minute
second = pod_date.second
request = request_set_basal_schedule(basal_schedule, hour, minute, second)
self.send_request(request, with_nonce=True, double_take=True, expect_critical_follow_up=True)
if self.pod.state_progress != PodProgress.BasalScheduleSet:
raise PdmError("Pod did not acknowledge basal schedule")
if self.pod.state_progress == PodProgress.BasalScheduleSet:
# if not self.pod.var_alert_after_prime_set:
ac1 = AlertConfiguration()
ac1.activate = False
ac1.alert_index = 7
ac1.alert_duration = 0
ac1.alert_after_minutes = 0
ac1.beep_type = 0
ac1.beep_repeat_type = 0
ac2 = AlertConfiguration()
ac2.activate = False
ac2.alert_index = 0
ac2.trigger_auto_off = True
ac2.duration = 15
ac2.beep_repeat_type = 2
ac2.beep_type = 2
ac2.alert_duration = 0
acs = [ac1, ac2]
request = request_alert_setup(acs)
self.send_request(request, with_nonce=True)
request = request_insert_cannula()
self.send_request(request, with_nonce=True)
if self.pod.state_progress != PodProgress.Inserting:
raise PdmError("Pod did not acknowledge cannula insertion start")
if self.pod.state_progress == PodProgress.Inserting:
time.sleep(13)
self._internal_update_status()
if self.pod.state_progress != PodProgress.Running:
raise PdmError("Pod did not get to running state")
self.pod.var_insertion_date = self.get_time()
self.pod.last_command["success"] = True
except StatusUpdateRequired:
self.logger.info("Requesting status update first")
self._internal_update_status()
self.inject_and_start(basal_schedule)
except Exception as e:
raise PdmError("Unexpected error") from e
finally:
self._savePod()
def _savePod(self):
try:
radio = self.get_radio()
if radio is not None:
self.pod.radio_message_sequence = radio.message_sequence
self.pod.radio_packet_sequence = radio.packet_sequence
nonce = self.get_nonce()
if nonce is not None:
self.pod.nonce_last = nonce.lastNonce
self.pod.nonce_seed = nonce.seed
return self.pod.Save()
except Exception as e:
raise PdmError("Pod status was not saved") from e
def _is_bolus_running(self, no_live_check=False):
if self.pod.state_last_updated is not None and self.pod.state_bolus != BolusState.Immediate:
return False
if self.pod.last_enacted_bolus_amount is not None \
and self.pod.last_enacted_bolus_start is not None:
if self.pod.last_enacted_bolus_amount < 0:
return False
now = self.get_time()
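# Assumption (added note): an immediate bolus delivers roughly 0.05 U every 2 s (about 40 s per unit),
# so the 39/41 factors below bound the earliest/latest expected completion time with some tolerance.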
bolus_end_earliest = (self.pod.last_enacted_bolus_amount * 39) + 1 + self.pod.last_enacted_bolus_start
bolus_end_latest = (self.pod.last_enacted_bolus_amount * 41) + 3 + self.pod.last_enacted_bolus_start
if now > bolus_end_latest:
return False
elif now < bolus_end_earliest:
return True
if no_live_check:
return True
self._internal_update_status()
return self.pod.state_bolus == BolusState.Immediate
def _is_basal_schedule_active(self):
if self.pod.state_last_updated is not None and self.pod.state_basal == BasalState.NotRunning:
return False
self._internal_update_status()
return self.pod.state_basal == BasalState.Program
def _is_temp_basal_active(self):
if self.pod.state_last_updated is not None and self.pod.state_basal != BasalState.TempBasal:
return False
if self.pod.last_enacted_temp_basal_start is not None \
and self.pod.last_enacted_temp_basal_duration is not None:
if self.pod.last_enacted_temp_basal_amount < 0:
return False
now = self.get_time()
temp_basal_end_earliest = self.pod.last_enacted_temp_basal_start + \
(self.pod.last_enacted_temp_basal_duration * 3600) - 60
temp_basal_end_latest = self.pod.last_enacted_temp_basal_start + \
(self.pod.last_enacted_temp_basal_duration * 3660) + 60
if now > temp_basal_end_latest:
return False
elif now < temp_basal_end_earliest:
return True
self._internal_update_status()
return self.pod.state_basal == BasalState.TempBasal
def _assert_pod_activate_can_start(self):
self._assert_pod_address_not_assigned()
def _assert_basal_schedule_is_valid(self, schedule):
if schedule is None:
raise PdmError("No basal schedule defined")
if len(schedule) != 48:
raise PdmError("A full schedule of 48 half hours is needed")
min_rate = Decimal("0.05")
max_rate = Decimal("30")
for entry in schedule:
if entry < min_rate:
raise PdmError("A basal rate schedule entry cannot be less than 0.05U/h")
if entry > max_rate:
raise PdmError("A basal rate schedule entry cannot be more than 30U/h")
def _assert_pod_address_not_assigned(self):
if self.pod is None:
raise PdmError("No pod instance created")
if self.pod.radio_address is not None and self.pod.radio_address != 0xffffffff:
raise PdmError("Radio radio_address already set")
def _assert_pod_address_assigned(self):
if self.pod.radio_address is None:
raise PdmError("Radio address not set")
def _assert_pod_can_activate(self):
if self.pod is None:
raise PdmError("No pod instance created")
if self.pod.id_lot is None:
raise PdmError("Lot number unknown")
if self.pod.id_t is None:
raise PdmError("Serial number unknown")
if self.pod.state_progress != PodProgress.TankFillCompleted:
raise PdmError("Pod is not at the expected state of Tank Fill Completed")
def _assert_pod_paired(self):
if self.pod.radio_address is None or self.pod.radio_address == 0 \
or self.pod.radio_address == 0xffffffff:
raise PdmError("Radio radio_address not accepted")
if self.pod.state_progress != PodProgress.PairingSuccess:
raise PdmError("Progress does not indicate pairing success")
def _assert_can_deactivate(self):
self._assert_pod_address_assigned()
self._assert_can_generate_nonce()
if self.pod.state_progress < PodProgress.PairingSuccess:
raise PdmError("Pod is not paired")
if self.pod.state_progress > PodProgress.AlertExpiredShuttingDown:
raise PdmError("Pod already deactivated")
def _assert_can_acknowledge_alerts(self):
self._assert_pod_address_assigned()
if self.pod.state_progress < PodProgress.PairingSuccess:
raise PdmError("Pod not paired completely yet.")
if self.pod.state_progress == PodProgress.ErrorShuttingDown:
raise PdmError("Pod is shutting down, cannot acknowledge alerts.")
if self.pod.state_progress == PodProgress.AlertExpiredShuttingDown:
raise PdmError("Acknowledgement period expired, pod is shutting down")
if self.pod.state_progress > PodProgress.AlertExpiredShuttingDown:
raise PdmError("Pod is not active")
def _assert_can_generate_nonce(self):
if self.pod.id_lot is None:
raise PdmError("Lot number is not defined")
if self.pod.id_t is None:
raise PdmError("Pod serial number is not defined")
def _assert_status_running(self):
if self.pod.state_progress < PodProgress.Running:
raise PdmError("Pod is not yet running")
if self.pod.state_progress > PodProgress.RunningLow:
raise PdmError("Pod has stopped")
def _assert_not_faulted(self):
if self.pod.state_faulted:
raise PdmError("Pod is state_faulted")
def _assert_no_active_alerts(self):
if self.pod.state_alert != 0:
raise PdmError("Pod has active alerts")
def _assert_immediate_bolus_not_active(self):
if self._is_bolus_running():
raise PdmError("Pod is busy delivering a bolus")
def set_time_adjustment(self, adjustment):
self.time_adjustment = adjustment
def get_time(self):
return time.time() + self.time_adjustment
```
#### File: omnipy/podcomm/protocol.py
```python
from podcomm.protocol_common import *
from podcomm.definitions import *
from enum import IntEnum
from decimal import Decimal
import struct
import time
DECIMAL_0_05 = Decimal("0.05")
DECIMAL_2_00 = Decimal("2")
class StatusRequestType(IntEnum):
Standard = 0
def request_assign_address(address):
cmd_body = struct.pack(">I", address)
return PdmMessage(PdmRequest.AssignAddress, cmd_body)
def request_setup_pod(lot, tid, address, year, month, day, hour, minute):
cmd_body = struct.pack(">I", address)
cmd_body += bytes([0x14, 0x04])
cmd_body += bytes([month, day, year - 2000, hour, minute])
cmd_body += struct.pack(">I", lot)
cmd_body += struct.pack(">I", tid)
return PdmMessage(PdmRequest.SetupPod, cmd_body)
def request_alert_setup(alert_configurations):
cmd_body = bytes()
for ac in alert_configurations:
if ac.alert_after_minutes is None and ac.alert_after_reservoir is None and ac.activate:
raise PdmError("Either alert_after_minutes or alert_after_reservoir must be set")
elif ac.alert_after_minutes is not None and ac.alert_after_reservoir is not None:
raise PdmError("Only one of alert_after_minutes or alert_after_reservoir must be set")
if ac.alert_duration is not None:
if ac.alert_duration > 0x1FF:
raise PdmError("Alert duration in minutes cannot be more than %d" % 0x1ff)
elif ac.alert_duration < 0:
raise PdmError("Invalid alert duration value")
if ac.alert_after_minutes is not None and ac.alert_after_minutes > 4800:
raise PdmError("Alert cannot be set beyond 80 hours")
if ac.alert_after_minutes is not None and ac.alert_after_minutes < 0:
raise PdmError("Invalid value for alert_after_minutes")
if ac.alert_after_reservoir is not None and ac.alert_after_reservoir > 50:
raise PdmError("Alert cannot be set for more than 50 units")
if ac.alert_after_reservoir is not None and ac.alert_after_reservoir < 0:
raise PdmError("Invalid value for alert_after_reservoir")
b0 = ac.alert_index << 4
if ac.activate:
b0 |= 0x08
if ac.alert_after_reservoir is not None:
b0 |= 0x04
if ac.trigger_auto_off:
b0 |= 0x02
b0 |= (ac.alert_duration >> 8) & 0x0001
b1 = ac.alert_duration & 0x00ff
b2 = 0
b3 = 0
if ac.alert_after_reservoir is not None:
reservoir_limit = int(ac.alert_after_reservoir * 10)
b2 = reservoir_limit >> 8
b3 = reservoir_limit & 0x00ff
if ac.alert_after_minutes is not None:
b2 = ac.alert_after_minutes >> 8
b3 = ac.alert_after_minutes & 0x00ff
cmd_body += bytes([b0, b1, b2, b3, ac.beep_repeat_type, ac.beep_type])
return PdmMessage(PdmRequest.ConfigureAlerts, cmd_body)
def request_set_basal_schedule(schedule, hour, minute, second):
halved_schedule = []
for entry in schedule:
halved_schedule.append(entry / DECIMAL_2_00)
current_hh = hour * 2
if minute < 30:
seconds_past_hh = minute * 60
else:
seconds_past_hh = (minute - 30) * 60
current_hh += 1
seconds_past_hh += second
seconds_to_hh = 1800 - seconds_past_hh
pulse_list = getPulsesForHalfHours(halved_schedule)
ise_list = getInsulinScheduleTableFromPulses(pulse_list)
ise_body = getStringBodyFromTable(ise_list)
pulse_body = getStringBodyFromTable(pulse_list)
command_body = bytes([0])
body_checksum = bytes([current_hh])
current_hh_pulse_count = pulse_list[current_hh]
remaining_pulse_count = int(current_hh_pulse_count * seconds_to_hh / 1800)
body_checksum += struct.pack(">H", seconds_to_hh * 8)
body_checksum += struct.pack(">H", remaining_pulse_count)
checksum = getChecksum(body_checksum + pulse_body)
command_body += struct.pack(">H", checksum)
command_body += body_checksum
command_body += ise_body
msg = PdmMessage(PdmRequest.InsulinSchedule, command_body)
reminders = 0
# if confidenceReminder:
# reminders |= 0x40
command_body = bytes([reminders])
pulse_entries = getPulseIntervalEntries(halved_schedule)
table_index = 0
for pulses10, interval, indices in pulse_entries:
if current_hh in indices:
command_body += bytes([table_index])
ii = indices.index(current_hh)
pulses_past_intervals = int(ii * 1800000000 / interval)
pulses_past_this_interval = int(seconds_past_hh * 1000000 / interval) + 1
remaining_pulses_this_interval = pulses10 - pulses_past_this_interval - pulses_past_intervals
microseconds_to_next_interval = interval - (seconds_past_hh * 1000000 % interval)
command_body += struct.pack(">H", remaining_pulses_this_interval)
command_body += struct.pack(">I", microseconds_to_next_interval)
break
else:
table_index += 1
for pulse_count, interval, _ in pulse_entries:
command_body += struct.pack(">H", pulse_count)
command_body += struct.pack(">I", interval)
msg.add_part(PdmRequest.BasalSchedule, command_body)
return msg
def request_prime_cannula():
return _bolus_message(52, pulse_speed=8, delivery_delay=1)
def request_insert_cannula():
return _bolus_message(10, pulse_speed=8, delivery_delay=1)
def request_status(status_request_type=0):
cmd_body = bytes([status_request_type])
return PdmMessage(PdmRequest.Status, cmd_body)
def request_acknowledge_alerts(alert_mask):
return PdmMessage(PdmRequest.AcknowledgeAlerts, bytes([alert_mask]))
def request_purge_insulin(iu_to_purge):
return _bolus_message(pulse_count=int(iu_to_purge / DECIMAL_0_05),
pulse_speed=8,
delivery_delay=1)
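# Example (sketch added for illustration): request_bolus(Decimal("1.00")) translates to
# int(1.00 / 0.05) = 20 pulses, using the default pulse_speed (16) and delivery_delay (2).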
def request_bolus(iu_bolus):
return _bolus_message(pulse_count=int(iu_bolus / DECIMAL_0_05))
def request_cancel_bolus():
return _cancel_activity_message(bolus=True)
def request_temp_basal(basal_rate_iuhr, duration_hours):
half_hour_count = int(duration_hours * DECIMAL_2_00)
hh_units = [basal_rate_iuhr / DECIMAL_2_00] * half_hour_count
pulseList = getPulsesForHalfHours(hh_units)
iseList = getInsulinScheduleTableFromPulses(pulseList)
iseBody = getStringBodyFromTable(iseList)
pulseBody = getStringBodyFromTable(pulseList)
cmd_body = bytes([0x01])
body_checksum = bytes([half_hour_count])
body_checksum += struct.pack(">H", 0x3840)
body_checksum += struct.pack(">H", pulseList[0])
checksum = getChecksum(body_checksum + pulseBody)
cmd_body += struct.pack(">H", checksum)
cmd_body += body_checksum
cmd_body += iseBody
msg = PdmMessage(PdmRequest.InsulinSchedule, cmd_body)
reminders = 0
# if confidenceReminder:
# reminders |= 0x40
cmd_body = bytes([reminders, 0x00])
pulseEntries = getPulseIntervalEntries(hh_units)
firstPulseCount, firstInterval, _ = pulseEntries[0]
cmd_body += struct.pack(">H", firstPulseCount)
cmd_body += struct.pack(">I", firstInterval)
for pulseCount, interval, _ in pulseEntries:
cmd_body += struct.pack(">H", pulseCount)
cmd_body += struct.pack(">I", interval)
msg.add_part(PdmRequest.TempBasalSchedule, cmd_body)
return msg
def request_cancel_temp_basal():
return _cancel_activity_message(temp_basal=True)
def request_stop_basal_insulin():
return _cancel_activity_message(basal=True)
def request_deactivate():
return PdmMessage(PdmRequest.DeactivatePod, bytes())
def request_delivery_flags(byte16, byte17):
cmd_body = bytes([byte16, byte17])
return PdmMessage(PdmRequest.SetDeliveryFlags, cmd_body)
def response_parse(response, pod):
pod.nonce_syncword = None
parts = response.get_parts()
for response_type, response_body in parts:
if response_type == PodResponse.VersionInfo:
parse_version_response(response_body, pod)
elif response_type == PodResponse.DetailInfo:
parse_information_response(response_body, pod)
elif response_type == PodResponse.ResyncRequest:
parse_resync_response(response_body, pod)
elif response_type == PodResponse.Status:
parse_status_response(response_body, pod)
else:
raise ProtocolError("Unknown response type %02X" % response_type)
def parse_information_response(response, pod):
if response[0] == 0x01:
pod.state_alerts = struct.unpack(">8H", response[3:])
elif response[0] == 0x02:
pod.state_last_updated = time.time()
pod.state_faulted = True
pod.state_progress = response[1]
parse_delivery_state(pod, response[2])
pod.insulin_canceled = struct.unpack(">H", response[3:5])[0] * 0.05
pod.radio_message_sequence = response[5]
pod.insulin_delivered = struct.unpack(">H", response[6:8])[0] * 0.05
pod.fault_event = response[8]
pod.fault_event_rel_time = struct.unpack(">H", response[9:11])[0]
pod.insulin_reservoir = struct.unpack(">H", response[11:13])[0] * 0.05
pod.state_active_minutes = struct.unpack(">H", response[13:15])[0]
pod.state_alert = response[15]
pod.fault_table_access = response[16]
pod.fault_insulin_state_table_corruption = response[17] >> 7
pod.fault_internal_variables = (response[17] & 0x60) >> 6
pod.fault_immediate_bolus_in_progress = (response[17] & 0x10) >> 4
pod.fault_progress_before = (response[17] & 0x0F)
pod.radio_low_gain = (response[18] & 0xC0) >> 6
pod.radio_rssi = response[18] & 0x3F
pod.fault_progress_before_2 = (response[19] & 0x0F)
pod.fault_information_type2_last_word = struct.unpack(">H", response[20:22])[0]
elif response[0] == 0x03:
pass
elif response[0] == 0x05:
pass
elif response[0] == 0x06:
pass
elif response[0] == 0x46:
pass
elif response[0] == 0x50:
pass
elif response[0] == 0x51:
pass
else:
raise ProtocolError("Failed to parse the information response of type 0x%2X with content: %s"
% (response[0], response.hex()))
def parse_resync_response(response, pod):
if response[0] == 0x14:
pod.nonce_syncword = struct.unpack(">H", response[1:])[0]
else:
raise ProtocolError("Unknown resync request 0x%2x from pod" % response[0])
def parse_status_response(response, pod):
pod.state_last_updated = time.time()
s = struct.unpack(">BII", response)
parse_delivery_state(pod, s[0] >> 4)
pod.state_progress = PodProgress(s[0] & 0xF)
pod.radio_message_sequence = (s[1] & 0x00007800) >> 11
pod.insulin_delivered = ((s[1] & 0x0FFF8000) >> 15) * 0.05
pod.insulin_canceled = (s[1] & 0x000007FF) * 0.05
pod.state_faulted = ((s[2] >> 31) != 0)
pod.state_alert = (s[2] >> 23) & 0xFF
pod.state_active_minutes = (s[2] & 0x007FFC00) >> 10
pod.insulin_reservoir = (s[2] & 0x000003FF) * 0.05
def parse_delivery_state(pod, delivery_state):
if delivery_state & 8 > 0:
pod.state_bolus = BolusState.Extended
elif delivery_state & 4 > 0:
pod.state_bolus = BolusState.Immediate
else:
pod.state_bolus = BolusState.NotRunning
if delivery_state & 2 > 0:
pod.state_basal = BasalState.TempBasal
elif delivery_state & 1 > 0:
pod.state_basal = BasalState.Program
else:
pod.state_basal = BasalState.NotRunning
def parse_version_response(response, pod):
pod.state_last_updated = time.time()
if len(response) == 27:
pod.id_version_unknown_7_bytes = response[0:7].hex()
response = response[7:]
mx = response[0]
my = response[1]
mz = response[2]
pod.id_version_pm = "%d.%d.%d" % (mx, my, mz)
ix = response[3]
iy = response[4]
iz = response[5]
pod.id_version_pi = "%d.%d.%d" % (ix, iy, iz)
pod.id_version_unknown_byte = "%d" % response[6]
pod.state_progress = response[7] & 0x0F
pod.id_lot = struct.unpack(">I", response[8:12])[0]
pod.id_t = struct.unpack(">I", response[12:16])[0]
if len(response) == 21:
pod.radio_low_gain = response[17] >> 6
pod.radio_rssi = response[17] & 0b00111111
pod.radio_address = struct.unpack(">I", response[17:21])[0]
else:
pod.radio_address = struct.unpack(">I", response[16:20])[0]
def _bolus_message(pulse_count, pulse_speed=16, reminders=0, delivery_delay=2):
commandBody = bytes([0x02])
bodyForChecksum = b"\x01"
pulse_span = pulse_speed * pulse_count
bodyForChecksum += struct.pack(">H", pulse_span)
bodyForChecksum += struct.pack(">H", pulse_count)
bodyForChecksum += struct.pack(">H", pulse_count)
checksum = getChecksum(bodyForChecksum)
commandBody += struct.pack(">H", checksum)
commandBody += bodyForChecksum
msg = PdmMessage(PdmRequest.InsulinSchedule, commandBody)
commandBody = bytes([reminders])
commandBody += struct.pack(">H", pulse_count * 10)
commandBody += struct.pack(">I", delivery_delay * 100000)
commandBody += b"\x00\x00\x00\x00\x00\x00"
msg.add_part(PdmRequest.BolusSchedule, commandBody)
return msg
def _cancel_activity_message(basal=False, bolus=False, temp_basal=False):
c = 0
if bolus:
c = c | 0x04
if temp_basal:
c = c | 0x02
if basal:
c = c | 0x01
cmd_body = bytes([c])
msg = PdmMessage(PdmRequest.CancelDelivery, cmd_body)
return msg
``` |
{
"source": "jj358mhz/ScannerPi",
"score": 2
} |
#### File: ScannerPi/alerts/alerts_slack.py
```python
import json
import requests
import syslog
# The Broadcastify API endpoint URL ** DO NOT ALTER **
BROADCASTIFY_API_URL = 'https://api.broadcastify.com/owner/?a=feed&feedId='
# Enter the account data for your Broadcastify feed
FEED_ID = '' # ENTER YOUR BROADCASTIFY FEED ID HERE
USERNAME = '' # ENTER YOUR BROADCASTIFY USERNAME HERE
PASSWORD = '' # ENTER YOUR BROADCASTIFY PASSWORD HERE
# This threshold amount is the number of listeners that need to be exceeded before Slack alerts are sent out
ALERT_THRESHOLD = 0 # ENTER YOUR DESIRED ALERT LISTENER THRESHOLD HERE
# The Slack endpoint URL
WEBHOOK_URL = '' # ENTER SLACK WEBHOOK URL
def broadcastify_request():
"""Fetches the response from the Broadcastify feed API"""
global BROADCASTIFY_API_URL, FEED_ID, USERNAME, PASSWORD
url = BROADCASTIFY_API_URL + FEED_ID + '&type=json&u=' + USERNAME + '&p=' + PASSWORD
data = {} # Sets empty data dictionary
try:
r = requests.get(url)
data = r.json()
syslog.syslog(syslog.LOG_INFO, f"Broadcastify API endpoint healthy, response data is: {data}")
except requests.exceptions.RequestException as error:
syslog.syslog(syslog.LOG_ALERT, f"Broadcastify API request failed: {error}")
return data
def slack_post(slack_payload):
"""Posts the message to the Slack webhook"""
global WEBHOOK_URL
sp = requests.post(WEBHOOK_URL, data=json.dumps(slack_payload), headers={'Content-Type': 'application/json'})
if sp.status_code != 200:
raise ValueError(f"Request to Slack returned an error {sp.status_code}, the response is: {sp.text}")
syslog.syslog(syslog.LOG_ALERT, f"Request to Slack returned a {sp.status_code}, the response is: {sp.text}")
return sp.status_code
def main():
"""Main executable"""
global ALERT_THRESHOLD, FEED_ID
# Parses the Broadcastify JSON response
response = broadcastify_request()
descr = response['Feed'][0]['descr']
listeners = response['Feed'][0]['listeners']
status = response['Feed'][0]['status']
# Slack status message payloads
slack_payload_feed_up = {
"text": f"*{descr} Broadcastify Alert* :cop::fire:\n"
f"Listener threshold *{ALERT_THRESHOLD}* exceeded, the number of listeners = *{listeners}*\n"
f"Broadcastify status code is: {status} <healthy is 1, unhealthy is 0>\n"
f"Listen to the feed here: <https://www.broadcastify.com/listen/feed/{FEED_ID}>\n"
f"Manage the feed here: <https://www.broadcastify.com/manage/feed/{FEED_ID}>"
}
slack_payload_feed_down = {
"text": f"*{descr} Broadcastify Alert* :ghost:\n"
"*FEED IS DOWN*\n"
f"Broadcastify status code is: {status} <healthy is 1, unhealthy is 0>\n"
f"Manage the feed here: <https://www.broadcastify.com/manage/feed/{FEED_ID}>"
}
# Calls the Slack webhook for message POST'ing
if not status:
slack_post(slack_payload_feed_down)
syslog.syslog(syslog.LOG_ALERT, "Feed is down")
else:
if listeners >= ALERT_THRESHOLD:
slack_post(slack_payload_feed_up)
syslog.syslog(syslog.LOG_INFO, f"Listener threshold {ALERT_THRESHOLD} exceeded,\n"
f"the number of listeners = {listeners}, firing a Slack alert")
if __name__ == '__main__':
main()
``` |
{
"source": "jj46/cisco_acl",
"score": 3
} |
#### File: cisco_acl/cisco_acl/convert_mask.py
```python
import ipaddress
import re
from cisco_acl.regexes import ace_match
def translate_mask(acl_lines, from_type, to_type):
""" Translate between various mask definitions in ACEs
Args:
acl_lines: list of ACEs
from_type: "wc" or "subnet"
to_type: "wc", "subnet", or "cidr"
Returns:
list of ACEs with subnet masks translated
Examples:
>>> acl_lines = ['permit tcp 10.0.1.0 0.0.0.255 any eq 443']
>>> translate_mask(acl_lines, 'wc', 'subnet')
['permit tcp 10.0.1.0 255.255.255.0 any eq 443']
>>> translate_mask(acl_lines, 'wc', 'cidr')
['permit tcp 10.0.1.0/24 any eq 443']
>>> acl_lines = ['permit tcp 172.16.1.0 255.255.255.0 any eq 80']
>>> translate_mask(acl_lines, 'subnet', 'wc')
['permit tcp 172.16.1.0 0.0.0.255 any eq 80']
>>> translate_mask(acl_lines, 'subnet', 'cidr')
['permit tcp 172.16.1.0/24 any eq 80']
"""
output_lines = []
types = ['wc', 'subnet', 'cidr']
if from_type not in types or to_type not in types:
raise TypeError
# determine if we have a subnet in the ACL line
subnet_regex = r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
subnets_to_process = {}
for acl_line in acl_lines:
output_line = acl_line
m = ace_match(acl_line.strip())
if m:
source_host = m['source']
destination_host = m['destination']
for network in [source_host, destination_host]:
if re.match(subnet_regex, network):
try:
ip_object = ipaddress.ip_network('/'.join(network.split()))
subnets_to_process[network] = ip_object
except ValueError as e:
continue
for subnet in subnets_to_process:
if from_type == 'wc' and to_type == 'cidr':
output_line = re.sub(subnet, subnets_to_process[subnet].with_prefixlen, output_line)
elif from_type == 'wc' and to_type == 'subnet':
output_line = re.sub(subnet, ' '.join(subnets_to_process[subnet].with_netmask.split('/')), output_line)
elif from_type == 'subnet' and to_type == 'wc':
output_line = re.sub(subnet, ' '.join(subnets_to_process[subnet].with_hostmask.split('/')), output_line)
elif from_type == 'subnet' and to_type == 'cidr':
output_line = re.sub(subnet, subnets_to_process[subnet].with_prefixlen, output_line)
output_lines.append(output_line)
return output_lines
```
#### File: cisco_acl/cisco_acl/port_translations.py
```python
import re
import json
import os
import logging
from cisco_acl.regexes import ace_match
logging.getLogger(__name__)
translation_file = os.path.join(os.path.dirname(__file__), 'port_translations.json')
with open(translation_file, mode='rt') as f:
translation_groups = json.loads(f.read())
def translate_port(acl_format, protocol, ports, conversion_type):
"""
Translate a port from name to number or vice versa
Args:
acl_format (str): 'ios' or 'asa'
protocol (str): protocol from ACE
ports (str): ports from ace (ex. '80', '80 443')
conversion_type (str): 'to_name' or 'to_number'
Returns:
list: translated ports
"""
def translate(port):
if conversion_type == 'to_name':
for port_name, num in translation_groups[acl_format][protocol].items():
try:
if int(num) == int(port):
return port_name
except:
return port
elif conversion_type == 'to_number':
logging.debug(list(translation_groups[acl_format][protocol].keys()))
if port in translation_groups[acl_format][protocol]:
return translation_groups[acl_format][protocol][port]
return port
translated_ports = [translate(port) for port in ports]
if not translated_ports:
return ports
else:
return translated_ports
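# Usage sketch (added note; actual results depend on the bundled port_translations.json, assumed here):
# translate_port('ios', 'tcp', ['80', '443'], 'to_name') might return ['www', '443'] if only port 80
# has a name mapping, while 'to_number' performs the reverse lookup for named ports.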
class PortTranslator:
def __init__(self, ace):
self.ace = ace
self.formats = ['ios', 'asa']
self.permission = dict()
self._parse_ace()
def _parse_ace(self):
"""
Parse an ACE using the cisco_acl.regexes library
"""
permission = ace_match(self.ace)
if not permission:
raise SyntaxError('Invalid ACE: {0}'.format(self.ace))
self.permission = permission
def translate_ace(self, acl_format='ios', conversion_type='to_name'):
"""
Translate ports in an ACE between names and numbers
Args:
acl_format (str): 'ios' or 'asa'
conversion_type (str): 'to_name' or 'to_number'
Returns:
str: ace with ports translated
"""
if self.permission['protocol'].lower() not in ['tcp', 'udp']:
return self.ace
if acl_format not in translation_groups:
raise ValueError('ACL format "{0}" not in {1}'.format(acl_format, list(translation_groups.keys())))
conversion_types = ['to_name', 'to_number']
if conversion_type not in conversion_types:
raise ValueError('Unknown conversion type: {0} Acceptable types: {1}'.format(
conversion_type, conversion_types))
# We have two possible conversions to make (source ports, destination ports)
ports = [self.permission['source_ports'], self.permission['destination_ports']]
line = self.ace
for port in ports:
if port is None:
continue
if re.match('^object-group', port, flags=re.I):
continue
port = port.lower()
_ports = port.split()[1:]
_translated_ports = translate_port(
acl_format, self.permission['protocol'].lower(), _ports, conversion_type
)
if len(_translated_ports) > 1:
_translated_ports = ' '.join(_translated_ports)
else:
_translated_ports = _translated_ports[0]
if port.startswith('e'): # eq
line = re.sub(port, 'eq {}'.format(_translated_ports), line, flags=re.I)
elif port.startswith('gt'): # gt
line = re.sub(port, 'gt {}'.format(_translated_ports), line, flags=re.I)
elif port.startswith('ge'): # ge
line = re.sub(port, 'ge {}'.format(_translated_ports), line, flags=re.I)
elif port.startswith('lt'): # lt
line = re.sub(port, 'lt {}'.format(_translated_ports), line, flags=re.I)
elif port.startswith('le'): # le
line = re.sub(port, 'le {}'.format(_translated_ports), line, flags=re.I)
elif port.startswith('n'): # neq
line = re.sub(port, 'neq {}'.format(_translated_ports), line, flags=re.I)
elif port.startswith('r'): # range
line = re.sub(port, 'range {}'.format(_translated_ports), line, flags=re.I)
else:
raise ValueError('Invalid Port: {0} in ACE: {1}'.format(port, self.ace))
logging.debug('ACE "{0}" translated to "{1}"'.format(self.ace, line))
return line
```
#### File: cisco_acl/tests/test_acl_audit.py
```python
import os.path
from cisco_acl.acl_audit import AclAuditor
def test_acl_audit():
aclfile = os.path.join(os.path.dirname(__file__), 'data/acl2')
a = AclAuditor(acl=aclfile)
assert a.errors[9].startswith('Invalid ACE')
assert a.errors[11].startswith('Invalid subnet')
assert a.errors[12].startswith('Invalid host IP')
assert a.errors[13].startswith('Invalid port')
assert len(a.errors) == 4
```
#### File: cisco_acl/tests/test_acl_regexes.py
```python
import os
from cisco_acl.regexes import ace_match
def test_acl_regexes():
acl_file = os.path.join(os.path.dirname(__file__), 'data/acl1')
with open(acl_file) as f:
acl_lines = f.readlines()
for line in acl_lines:
# allow for comments
if line.startswith('#') or line.startswith('!'):
continue
assert ace_match(line)
def test_bad_ace():
ace = 'permit tcp any host eq 80'
assert ace_match(ace) is False
``` |
{
"source": "jj48642/spyfor",
"score": 2
} |
#### File: spyfor/spyfor/workbook.py
```python
import pandas as pd
import xlsxwriter as xl
from .Format_Regression_Output import PrintRegressions as PrintRegressions
from .Reg_Obj import RegObject as RegObject
from .Stata_Reg_Obj import res_obj as res_obj
from .Stata_Reg_Obj import spearman_obj as spearman_obj
from .Stata_Reg_Obj import pearson_obj as pearson_obj
class tableWorkBook:
def __init__(self, print_directory, appendix_dir=None):
if appendix_dir is not None:
try:
self.df_appendix = pd.read_csv(filepath_or_buffer=appendix_dir)
except UnicodeDecodeError:
try:
self.df_appendix = pd.read_csv(filepath_or_buffer=appendix_dir, encoding="ISO-8859-1")
except:
self.df_appendix = None
print("Error Loading Appendix. Proceeding with no Appendix")
else:
self.df_appendix = None
print("No Appendix Loaded")
self.workbook = xl.Workbook(print_directory)
self.res_list = []
self.sheet_counter = 1
self.printer = None
self.pearson = None
self.spearman = None
def capture_regression_information(self):
""""Captures regression information from Stata and stores information in Python"""
self.res_list.append(res_obj())
def capture_pearson(self):
self.pearson = pearson_obj()
def capture_spearman(self):
""" Captures the spearman matrix from stata. The user needs to use pwcorr , sig for this comman to work"""
self.spearman = spearman_obj()
def compile_corr(self):
if self.pearson is None:
print("No pearson correlations collected")
if self.spearman is None:
print("No spearman correlations collected")
def compile_worksheet(self, sheet_title=None, sheet_sub_title=None, display_control=True, display_se=False, interest_variables=[], control_variables=[]):
"""Compiles information from regressions into a worksheet and clears temporary regression storage"""
if sheet_title is None:
sheet_title = "Table " + str(self.sheet_counter)
if sheet_sub_title is None:
sheet_sub_title = "Insert Table Description Here"
regression_object = RegObject(self.res_list, interest_variables, control_variables)
self.printer = PrintRegressions(reg_obj=regression_object, print_workbook=self.workbook, sheet_title=sheet_title,
sheet_sub_title=sheet_sub_title, df_appendix=self.df_appendix,
display_control=display_control, display_se=display_se)
# adds 1 to the sheet counter in case the user does not specify table names
self.sheet_counter += 1
# clears out the results list so that the list of specifications does not keep growing.
self.res_list = []
def print_workbook(self):
"""Prints the excel workbook to the specified filename"""
self.workbook.close()
``` |
{
"source": "jj4jj/pbdc",
"score": 2
} |
#### File: pbdc/pbdc/pbdc.py
```python
from __future__ import unicode_literals
from jinja2 import Template, Environment, TemplateSyntaxError
import os
import sys
import traceback
import datetime
########################################################################################################
gtpl_env = Environment()
cex_ext_type_2_pb2type = {
'uint8':'uint32',
'uint16':'uint32',
'int8':'int32',
'int16':'int32',
}
cex_ext_type_max_value = {
'uint8':255,
'uint16':65535,
'int8':128,
'int16': 32768,
}
cex_ext_type_min_value = {
'uint8':0,
'uint16':0,
'int8':-127,
'int16': -32767,
}
cex_ext_types = cex_ext_type_2_pb2type.keys()
pb_int_types = ('uint32','uint64','int32','int64','fixed64')
pb_error_types = ('sfixed64','sfixed32','fixed32')
pb_reserve_names = set(("construct","hash","alignas","alignof","and","and_eq","asm","atomic_cancel","atomic_commit","atomic_noexcept","auto","bitand","bitor","bool","break","case","catch","char","char16_t","char32_t","class","compl","concept","const","constexpr","const_cast","continue","co_await","co_return","co_yield","decltype","default","delete","do","double","dynamic_cast","else","enum","explicit","export","extern","false","float","for","friend","goto","if","import","inline","int","long","module","mutable","namespace","new","noexcept","not","not_eq","nullptr","operator","or","or_eq","private","protected","public","reinterpret_cast","register","requires","return","short","signed","sizeof","static","static_assert","static_cast","struct","switch","synchronized","template","this","thread_local","throw","true","try","typedef","typeid","typename","union","unsigned","using","virtual","void","volatile","wchar_t","while","xor","xor_eq"))
primitive_types_integer = set(list(cex_ext_types) + list(pb_int_types))
primitive_types_real = set(('float','double'))
primitive_types_number = primitive_types_integer | primitive_types_real
primitive_types = set(('string','bool','bytes')) | primitive_types_number
cex_primitive_types_2_cxx_types = {
'uint8':'uint8_t',
'int8':'int8_t',
'uint16':'uint16_t',
'int16':'int16_t',
'uint32':'uint32_t',
'int32':'int32_t',
'uint64':'uint64_t',
'int64':'int64_t',
'float':'float',
'double':'double',
'bool':'bool',
'string':'error type',
'bytes':'error type',
'fixed32':'uint32_t',
'sfixed32':'int32_t',
'fixed64':'uint64_t',
'sfixed64':'int64_t',
}
declare_text='//!!! PLEASE DO NOT EDIT THIS FILE !!!\n'\
'//This file is generated by the pbdc library tools\n'\
'//Any problem in use, please let me know (contact mail:<EMAIL>)\n'\
'//Last generated at: %s. \n' % datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
class CodeGenerater(object):
def __init__(self, ext_name, template):
self.ext_name=ext_name
self.template=template
template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'templates')
backends = {
'pb2': CodeGenerater('.proto','pb2.proto'),
'res.s.h': CodeGenerater('.store.h','restb.store.h'),
'res.s.c': CodeGenerater('.store.cc','restb.store.cc'),
'res.i.h': CodeGenerater('.idx.h','restb.idx.h'),
'res.i.c': CodeGenerater('.idx.cc','restb.idx.cc'),
'res.v.h': CodeGenerater('.verify.h','restb.verify.h'),
'res.v.c': CodeGenerater('.verify.cc','restb.verify.cc'),
'cex': CodeGenerater('.cex.hpp','cex.hpp'),
'srpc.h': CodeGenerater('.rpc.h', 'srpc.h'),
'crpc.h': CodeGenerater('.rpc.h', 'crpc.h'),
'rpcc.c': CodeGenerater('.rpc.cc', 'rpc.cc'),
}
########################################################################################################
def camel_style_trans(cname):
pre_is_lower = False
rname = ''
for ch in cname:
if ch.isupper():
if pre_is_lower:
rname += '_'
pre_is_lower = False
rname += ch.lower()
else:
rname += ch
pre_is_lower = True
return rname
def camel_style_name(name):
rname = ''
trans = True
for ch in name:
if ch == '_':
trans = True
else:
if ch.islower() and trans:
rname += ch.upper()
trans = False
else:
rname += ch
return rname
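# Examples (sketch added for illustration): camel_style_trans('LoginQuery') -> 'login_query',
# camel_style_name('login_query') -> 'LoginQuery'.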
#CSMsgLoginQuery->LoginQuery->login_query
def rpc_name(entry_type_name, service_name='Msg'):
pos = entry_type_name.find(service_name)
if pos == -1:
raise Exception('rpc define error: pdE type does not have a service type prefix, type: %s' % entry_type_name)
return camel_style_trans(entry_type_name[pos+len(service_name):])
def rpc_svc_name(cmdtype):
idx = 1
is_upper=True
pos = cmdtype.find('Msg')
beg = 0
if pos != -1:
for ch in cmdtype[pos+3+2:]:
idx = idx+1
if ch.islower():
is_upper=False
if beg == 0 and ch.isupper() and not is_upper:
beg = pos+3
is_upper = True
if beg > 0 and not is_upper and ch.isupper():
return cmdtype[beg:pos+3+idx]
raise Exception('error rpc type:"%s" for rpc type name should be like "<ProtoName>Msg<Family><Action>" such as "SSMsgRoleLogin" and it will gen FaimilySvc::Action() method' % cmdtype)
def rpc_method_name(cmdtype):
svc_name = rpc_svc_name(cmdtype)
svc_name_pos = cmdtype.find(svc_name)
assert svc_name_pos != -1, 'rpc type name must include service name'
return cmdtype[svc_name_pos+len(svc_name):]
###################################################################################################
def rpc_cmd(entry_type_name, service_name, cmd_prefix):
#print cmd_prefix
rname = rpc_name(entry_type_name, service_name)
return cmd_prefix + rname.upper()
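# Example (sketch, hypothetical names): rpc_cmd('CSMsgLoginQuery', 'CSMsg', 'CS_CMD_') -> 'CS_CMD_LOGIN_QUERY'.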
def cex_is_num(field):
return field['t'] in primitive_types_number
def cex_is_ext_primitive(field):
return field['cex_type'] in cex_ext_types
def cex_ext_max_value(field):
return cex_ext_type_max_value[field['cex_type']]
def cex_ext_min_value(field):
return cex_ext_type_min_value[field['cex_type']]
def cex_is_msg(field):
if field['t'] in primitive_types:
return False
tm = gtx.meta.get(field['t'], None)
if tm is None:
pt = gtx.pragma['type'].get(field['t'],None)
if pt is None or pt == 'msg':
return True
return False
else:
if tm.type == 'msg' or tm.type == 'table':
return True
assert tm.type == 'enum', 'field type:%s(%s) is not a msg and not enum in cex ref is error !' % (tm.name, tm.type)
return False
def cex_is_enum(field):
if field['t'] in primitive_types:
return False
tm = gtx.meta.get(field['t'], None)
if tm is None:
pt = gtx.pragma['type'].get(field['t'],None)
return pt == 'enum'
else:
return tm.type == 'enum'
def cex_name(field):
return field['n']
def cex_type(field):
mtype = field['t']
if cex_is_msg(field):
mtype = field['t']+'Cx'
elif cex_is_enum(field):
mtype = field['t']
elif field['t'] == 'string':
mtype = 'pbdcex::string_t<%s>' % field['length']
elif field['t'] == 'bytes':
mtype = 'pbdcex::bytes_t<%s>' % field['length']
else:
mtype = cex_primitive_types_2_cxx_types[field['cex_type']]
mtype = mtype.replace('.','::')
##################
if field['repeat']:
return 'pbdcex::array_t<%s,%s>' % (mtype, field['count'])
return mtype
def cex_has_default_value(msgdef):
for fd in msgdef.fields:
if fd.get('v', 0) != 0:
return True
return False
def length(a):
return len(a)
gtpl_env.filters={'rpc_name':rpc_name,
'rpc_cmd':rpc_cmd,
'camel_style_name':camel_style_name,
'cex_type': cex_type,
'cex_name': cex_name,
'cex_is_num': cex_is_num,
'cex_is_msg': cex_is_msg,
'cex_is_enum': cex_is_enum,
'cex_is_ext_primitive': cex_is_ext_primitive,
'cex_ext_min_value': cex_ext_min_value,
'cex_ext_max_value': cex_ext_max_value,
'cex_has_default_value': cex_has_default_value,
'length': length,
'rpc_svc_name': rpc_svc_name,
'rpc_method_name': rpc_method_name,
}
###################################################################################################
class DefStage(dict):
def __init__(self, type, name, options={}):
self.type = type
self.name = name
self.options = options
self.fields = []
self.req = []
self.res = []
self.pkeys = []
#####################
self.begin()
def on_table_end(self):
if self.options.get('limit_size', 0) == 0:
self.options['limit_size'] = 1000*1000*10
if self.options.get('id', None) is True:
id_alias = self.options.get('id_alias', 'id')
self.fields.insert(0, dict(t='int32',n=id_alias, required=True))
self.options['ks'] = id_alias
self.options['idx'] = 'id'
if self.options.get('id_max', None) is None:
self.options['id_max'] = 255
else:
if self.options.get('idx', None) is None:
self.options['idx'] = 'list'
if self.options.get('ks',None) is None:
self.options['ks'] = self.fields[0]['n']
for k in self.options['ks'].split(','):
for fd in self.fields:
if k == fd['n']:
self.pkeys.append(fd)
break
def on_msg_end(self):
if self.options.get('ks',None) is None:
self.options['ks'] = ','.join([fd['n'] for fd in self.fields if not fd.get('repeat',False)])
self.pkeys = self.fields
else:
fks = self.options['ks'].split(',')
self.pkeys = []
for fk in fks:
for fd in self.fields:
if fd['n'] != fk:
continue
                    assert fd.get('repeat',False) is False, 'msg:"%s" key fields must not be repeated members, def:"%s"' % (self.name, str(fd))
self.pkeys.append(fd)
break
            assert len(self.pkeys) > 0, 'error defining ks for msg:"%s", property ks:"%s"' % (self.name, self.options['ks'])
if self.options.get('cex',None) is True:
gtx.cexs.append(self)
def begin(self):
hook = getattr(self, 'on_%s_begin' % self.type, None)
if hook:
hook()
def end(self):
hook = getattr(self, 'on_%s_end' % self.type, None)
if hook:
hook()
def __getattr__(self, attr):
return self.get(attr, None)
def __setattr__(self, attr, val):
self[attr]=val
class Ctx(dict):
def __init__(self):
self.reset()
def reset(self, name=''):
self.file = name
self.imports = []
self.package = ''
self.stack = []
self.meta = {}
self.defs = []
self.cexs = []
self.pragma = {
'type':{}
}
################cex extension#####
self.CEX_MIN_MEMSET_BLOCK_SIZE=(4*1024)
self.codegen = 'pb2'
self.declare = declare_text
####
self.rpc_no_login = []
self.rpc_type_cmd = {}
def __getattr__(self, attr):
return self.get(attr, None)
def __setattr__(self, attr, val):
self[attr]=val
def on_file_begin(self):
for bkd in backends:
ext = bkd.replace('.','_')
hook = getattr(self, 'on_%s_begin' % ext, None)
if hook:
hook()
def on_rpcc_c_end(self):
cmds = []
for mprefix in self.rpc_no_login:
cmds.append(self.rpc_type_cmd[mprefix])
self.rpc_no_login = cmds
def on_file_end(self):
for bkd in backends:
ext = bkd.replace('.','_')
hook = getattr(self, 'on_%s_end' % ext, None)
if hook:
hook()
def cex_check_msg_constraints(self, msgdef):
for fd in msgdef.fields:
if (fd['t'] == 'string' or fd['t'] == 'bytes') and fd.get('length', None) is None:
raise Exception('cex define %s.%s property "length" is required !' % (msgdef.name, fd['n']))
            if fd['repeat'] and fd.get('count', None) is None:
raise Exception('cex define %s.%s property "count" is required !' % (msgdef.name, fd['n']))
###
def cex_build_type_reference(self):
checked = set()
tunkown = set()
queue = [cex for cex in self.cexs]
while len(queue) > 0:
ct = queue.pop()
if ct.name in checked:
continue
self.cex_check_msg_constraints(ct)
checked.add(ct.name)
ct.cex_refs = []
for fd in ct.fields:
if fd['t'] in primitive_types:
continue
tr = self.meta.get(fd['t'], None)
if tr is None:
tunkown.add(fd['t'])
else:
if tr.type == 'enum':
continue
else:
if tr.type != 'msg':
                            raise Exception('cex parsing error: type "%s" "%s" '\
                                'must be defined by pdMsg, pdTab or pdEnum' % (tr.type, tr.name))
if fd['t'] in ct.cex_refs:
continue
if ct.pack and not tr.pack:
tr.pack = ct.pack
ct.cex_refs.append(fd['t'])
queue.insert(0, tr)
#
return tunkown
def cex_topology_sort(self):
def find_unrefs(root, excepts, meta):
if root.name in excepts:
return []
if len(set(root.cex_refs)-set(excepts)) == 0:
return [root.name]
res = []
for cr in root.cex_refs:
if cr in excepts:
continue
res.extend(find_unrefs(meta[cr], excepts, meta))
return res
################################################
stypes = []
excepts = []
while True:
#find unrefs
unref_types = []
for df in self.cexs:
dunref = find_unrefs(df, excepts, self.meta)
excepts.extend(dunref)
unref_types.extend(dunref)
if len(unref_types) == 0:
break
stypes.extend(unref_types)
return stypes
def on_cex_end(self):
#build a graph########################################
self.cex_unknowns = self.cex_build_type_reference()
for unk in self.cex_unknowns:
unkt = self.pragma['type'].get(unk, None)
if unkt is None:
                print 'WARNING: cex reference type:"%s" is an extern type and no '\
                      'pragma info was found, assuming it is a message type' % (unk,)
#######################################################
stypes = self.cex_topology_sort()
#print stypes, self.cex_unknowns
self.cex_defs = []
for st in stypes:
self.cex_defs.append(self.meta[st])
########################################################################################################
gtx = Ctx()
def pdPragma(p, key, val=None):
gtx.pragma[p][key] = val
def pdFile(name, codegen='pb2'):
gtx.reset(name)
gtx.codegen=codegen
gtx.on_file_begin()
def pdImport(name):
gtx.imports.append(name)
def pdPackage(name):
gtx.package = name
def pdMsg(name, **kwargs):
gtx.stack.append(DefStage('msg', name, kwargs))
def pdService(name, **kwargs):
gtx.stack.append(DefStage('service', name, kwargs))
def pdConfig(name, **kwargs):
kwargs['cex'] = True
kwargs['config'] = True
gtx.stack.append(DefStage('msg', name, kwargs))
def pdCex(name, **kwargs):
kwargs['cex'] = True
gtx.stack.append(DefStage('msg', name, kwargs))
def pdEnum(name, **kwargs):
#print gtx.stack
gtx.stack.append(DefStage('enum', name, kwargs))
def pdTab(name, **kwargs):
gtx.stack.append(DefStage('table', name, kwargs))
def pdRpc(name, **kwargs):
if kwargs.get('no_login',False) is True:
gtx.rpc_no_login.append(name)
gtx.stack.append(DefStage('rpc', name, kwargs))
def check_keys(kwargs, keys=[]):
for k in keys:
v = kwargs.get(k, None)
if v is None:
            raise Exception('key "%s" not found in define context "%s:%s"...' % (k, str(gtx.stack[-1].name), str(gtx.stack[-1].type)))
def pdRes(**kwargs):
if 'rpc' != gtx.stack[-1].type:
raise Exception('def response should be in a Rpc context (need a pdRpc before)')
check_keys(kwargs, ['t','n'])
gtx.stack[-1].res.append(kwargs)
def pdReq(**kwargs):
if 'rpc' != gtx.stack[-1].type:
raise Exception('def request should be in a Rpc context (need a pdRpc before)')
check_keys(kwargs, ['t','n'])
gtx.stack[-1].req.append(kwargs)
def pdE(*args, **kwargs):
assert len(gtx.stack) > 0,'define field (entry) not in a context'
if kwargs.get('repeat',None) is None:
kwargs['repeat'] = False
if kwargs.get('required',None) is None:
kwargs['required'] = False
current_ctype = gtx.stack[-1].type
if current_ctype == 'msg' or current_ctype == 'table':
if(len(args) > 0):
kwargs['t']=args[0]
if(len(args) > 1):
kwargs['n']=args[1]
check_keys(kwargs, ['n','t'])
###################################cex##############
ft = kwargs['t']
kwargs['cex_type'] = ft
        if kwargs['t'] == 'string' and kwargs.get('length', 0) == 0:
kwargs['length'] = 512
if ft in cex_ext_types:
kwargs['t'] = cex_ext_type_2_pb2type[ft]
if current_ctype == 'service':
if(len(args) > 0):
kwargs['t']=args[0]
kwargs.pop('n',None)
check_keys(kwargs, ['t'])
gtx.rpc_type_cmd[kwargs['t']] = rpc_cmd(kwargs['t'], gtx.stack[-1].name, gtx.stack[-1].options['cmd_pre'])
if current_ctype == 'enum':
if(len(args) > 0):
kwargs['n']=args[0]
if(len(args) > 1):
kwargs['v']=args[1]
check_keys(kwargs, ['n','v'])
dt = kwargs.get('t',None)
    assert (dt not in pb_error_types),'pbdc deprecates the protobuf type "%s" used in field define "%s"' % (dt, str(kwargs))
    dn = kwargs.get('n',None)
    assert (dn not in pb_reserve_names),'pbdc can not use reserved keywords like "%s" in field define "%s"' % (dn, str(kwargs))
gtx.stack[-1].fields.append(kwargs)
def pdA(*args, **kwargs):
assert len(gtx.stack) > 0,'define field (entry) not in a context'
kwargs['repeat'] = True
pdE(*args, **kwargs)
def pdGenerate(ctx, codegen, outdir):
customs = set(codegen.split(','))
supports = set(ctx.codegen.split(','))
for cg in backends:
found = False
for gen in supports:
if gen == 'all' or cg.find(gen) != -1:
found=True
break
if not found:
continue
found = False
for gen in customs:
if gen == 'all' or cg.find(gen) != -1:
found=True
break
if not found:
continue
tp = os.path.join(template_dir,backends[cg].template)
ext_name = backends[cg].ext_name
path = os.path.join(outdir, ctx.file+ext_name)
try:
print 'generating [%s@%s]' % (cg, ctx.file),os.path.abspath(path),'...'
data = gtpl_env.from_string(open(tp).read()).render(ctx).encode('utf-8')
except TemplateSyntaxError, e:
##exception jinja2.TemplateSyntaxError(message, lineno, name=None, filename=None)
raise Exception('syntax error for def for "#%d ---> %s" code gen type:%s' % (e.lineno, e.message, cg))
except Exception as e:
traceback.print_exc()
#raise Exception('python syntax error for "%s %s" code gen type : "%s:%s"' % (str(type(e)),str(e), ctx.file, cg))
raise
open(path,'wb+').write(data)
def getopt(k):
if len(sys.argv) < 2:
return None
kvopts = [opt.strip().split('=') for opt in sys.argv[1:]]
for kv in kvopts:
if len(kv) == 2 and (kv[0] == k or kv[0] == '--%s' % k):
return kv[1]
return None
def pdEnd():
    assert len(gtx.stack) >= 0, 'definition does not match for end'
outdir = getopt('outdir') or './'
codegen = getopt('codegen') or 'all'
if len(gtx.stack) == 0:
gtx.on_file_end()
#print gtx
        assert len(gtx.file) > 0, 'end of file generation error. Did you forget "pdFile(...)"?'
pdGenerate(gtx, codegen, outdir)
gtx.reset()
else:
ddef = gtx.stack.pop()
ddef.end()
gtx.meta[ddef.name] = ddef
gtx.defs.append(ddef)
``` |
{
"source": "jj4jj/sdv",
"score": 2
} |
#### File: jj4jj/sdv/log.py
```python
import logging
import logging.handlers
import config
import sys
def get_function_frame_info(depth=1):
frame = sys._getframe(depth)
code = frame.f_code
return (code.co_filename, code.co_firstlineno, code.co_name)
class MYLogger:
def __init__(self, logfile):
self.logger = logging.getLogger()
#debug lv
self.logger.setLevel(getattr(logging, config.LOG_LEVEL))
rh = logging.handlers.TimedRotatingFileHandler(logfile, 'D')
fm = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
rh.setFormatter(fm)
self.logger.addHandler(rh)
def get(self):
return self.logger
_logger = None
def init_logger(logfile):
global _logger
if _logger is None:
_logger = MYLogger(logfile)
def set_level(lv):
#todo
pass
def get_internal_logger():
if _logger is None:
return logging
return _logger.get()
def debug(msg, layer=2):
fn, no, fc = get_function_frame_info(layer)
dmsg = '%s:%i (%s)-' % (fn, no, fc)
get_internal_logger().debug(dmsg+msg)
def info(msg, layer=2):
fn, no, fc = get_function_frame_info(layer)
dmsg = '%s:%i (%s)-' % (fn, no, fc)
get_internal_logger().info(dmsg+msg)
def warn(msg, layer=2):
fn, no, fc = get_function_frame_info(layer)
dmsg = '%s:%i (%s)-' % (fn, no, fc)
get_internal_logger().warn(dmsg+msg)
def error(msg, layer=2):
fn, no, fc = get_function_frame_info(layer)
dmsg = '%s:%i (%s)-' % (fn, no, fc)
get_internal_logger().error(dmsg+msg)
def fatal(msg, layer=2):
fn, no, fc = get_function_frame_info(layer)
dmsg = '%s:%i (%s)-' % (fn, no, fc)
get_internal_logger().critical(dmsg+msg)
``` |
{
"source": "jj4jj/TurboBuildUE4",
"score": 3
} |
#### File: fastbuild-v1.02/Scripts/FixWhitespace.py
```python
import os
import sys
def getfiles(outFiles):
# traverse root directory, and list directories as dirs and files as files
for root, dirs, files in os.walk("."):
path = root.split('/')
for file in files:
if file.endswith(".bff") or file.endswith(".cpp") or file.endswith(".h"):
fullPath = root + '/' + file
outFiles.append(fullPath)
for root, dirs, files in os.walk("../External/"):
path = root.split('/')
for file in files:
if file.endswith(".bff"):
fullPath = root + '/' + file
outFiles.append(fullPath)
files = []
print('Getting list of files...')
getfiles(files)
print('Checking files...')
for file in files:
fixedFile = []
fixed = False
f = open(file, 'r')
for line in f.readlines():
newLine = ""
# does line need tab fixup
if line.find("\t") != -1:
for i in range(0, len(line)):
c = line[i]
j = len(newLine)
if c == "\t":
if (j % 4) == 0:
newLine += ' '
elif (j % 4) == 1:
newLine += ' '
elif (j % 4) == 2:
newLine += ' '
elif (j % 4) == 3:
newLine += ' '
else:
newLine += c
else:
newLine = line
# does line need trimming?
newLine = newLine.rstrip();
newLine += '\n'
if newLine != line:
fixed = True
        fixedFile.append(newLine)
f.close()
# any changes made?
if fixed == True:
try:
f = open(file, 'w')
f.writelines(fixedFile)
f.close()
print(' - FIXED: {}'.format(file))
except:
print(' - FIX FAILED: {}'.format(file))
``` |
{
"source": "jj6725/SoilServer",
"score": 3
} |
#### File: jj6725/SoilServer/server.py
```python
from flask import Flask, jsonify, make_response, abort
from flask_cors import CORS
from sht31d import SHT31D
from soilsensor import SoilSensor
##########################################################################################
# Python Home Soil Server
##########################################################################################
app = Flask(__name__, static_url_path = "")
CORS(app)
sht31d = None
soil = None
try:
sht31d = SHT31D()
except:
print("SHT31D not present")
pass
try:
soil = SoilSensor()
except:
print("Soil Sensor not present")
pass
@app.errorhandler(400)
def bad_request(error):
return make_response(jsonify( { 'error': 'Bad request' } ), 400)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify( { 'error': 'Not found' } ), 404)
@app.route('/devices', methods = ['GET'])
def getDevices():
sht31dStatus = "Offline"
soilStatus = "Offline"
try:
sht31d.getStatus()
sht31dStatus = "OK"
except:
sht31dStatus = "Offline"
try:
if soil is not None:
soilStatus = "OK"
except:
        soilStatus = "Offline"
return jsonify({'sht31d': sht31dStatus, 'i2cSoil': soilStatus})
@app.route('/data', methods = ['GET'])
def getData():
try:
return jsonify({'temperature': sht31d.getTemperature(),'humidity': sht31d.getHumidity()})
except:
return offlineError()
@app.route('/temp', methods = ['GET'])
def getTemp():
try:
return jsonify( {'temperature': sht31d.getTemperature()} )
except:
return offlineError()
@app.route('/humidity', methods = ['GET'])
def getHumidity():
try:
return jsonify( {'humidity': sht31d.getHumidity()} )
except:
return offlineError()
@app.route('/moisture', methods = ['GET'])
def getMoisture():
try:
return jsonify( {'moisture': soil.getMoisture()} )
except:
return offlineError()
def offlineError():
return make_response(jsonify( {'error': 'Devices are offline'} ), 500)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=6725, debug=True)
```
#### File: jj6725/SoilServer/sht31d.py
```python
import time
import board
import busio
import adafruit_sht31d
class SHT31D:
count = 0
def getTemperature(self):
self.count += 1
temperature = 0
try:
temperature = self.sensor.temperature
except:
raise("Device error")
else:
return temperature
def getHumidity(self):
self.count += 1
humidity = 0
try:
humidity = self.sensor.relative_humidity
except:
raise("Device error")
else:
return humidity
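    # Pulse the sensor's built-in heater after ~10 reads (typically used to drive off condensation).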
def heat(self):
if self.count > 10:
self.sensor.heater = True
time.sleep(1)
self.sensor.heater = False
self.count = 0
def getStatus(self):
status = 0
try:
status = self.sensor.status
except:
raise("Device error")
else:
return status
def __init__(self):
        self.count = 0
self.sensor = adafruit_sht31d.SHT31D(busio.I2C(board.SCL, board.SDA),address=0x44)
if __name__ == "__main__":
sensor = SHT31D()
print("Status: {0:b}".format(sensor.getStatus()))
print("Temperature: {0:0.1f} C".format(sensor.getTemperature()))
print("Humidity: {0:0.1f} %%".format(sensor.getHumidity()))
``` |
{
"source": "jjaakko/MCMC_cipher",
"score": 3
} |
#### File: jjaakko/MCMC_cipher/cli.py
```python
from pathlib import Path
import json
import argparse
import adjacent_letters
import MCMC
import cipher
import alphabets
def compute_stats(args):
print(f"Computing adjacent letter counts from {args.src[0]} ...")
letter_counts = adjacent_letters.get_count_from_file(
filename=Path(args.src[0]), language_code=args.lang[0]
)
destination = f"{args.dest[0]}_{args.lang[0]}.json"
letter_counts_as_json: str = json.dumps(letter_counts, indent=4)
with open(Path(destination), "w") as f:
f.write(letter_counts_as_json)
print(f"Adjacent letter counts written in {destination}")
def mcmc(args):
with open(args.src[0]) as f:
encrypted_text = f.read()
mcmc = MCMC.SubstitutionCipherMCMC(args.lang[0])
initial_cipher = cipher.create_random_cipher(mcmc.alphabets)
mcmc_cipher, acr = mcmc.MCMC(
encrypted_text, initial_cipher, iters=int(args.iters[0]), logging=500
)
print("\n\n")
print(cipher.apply(encrypted_text, mcmc_cipher, mcmc.alphabets))
print(f"ACR: {acr}")
def encrypt(args):
alphabets_ = alphabets.get(args.lang[0])
cipher_str: str = cipher.create_random_cipher(alphabets_)
with open(args.src[0]) as f:
text_to_encrypt = f.read()
encrypted_text = cipher.apply(text_to_encrypt, cipher_str, alphabets_)
with open(args.dest[0], "w") as f:
f.write(encrypted_text)
print("Encryted text: ")
print(encrypted_text)
print(f"Cipher: {cipher_str}")
if __name__ == "__main__":
# Define command and options for computing adjacent letter counts.
parser = argparse.ArgumentParser(
description="Decrypt substitution cipher with MCMC"
)
subparsers = parser.add_subparsers()
parser_a = subparsers.add_parser(
"count_from_file", help="Count adjacent letters from a specified file."
)
parser_a.add_argument("--src", nargs=1, required=True)
parser_a.add_argument("--dest", nargs=1, required=True)
parser_a.add_argument("--lang", choices=["fi", "en"], nargs=1, required=True)
parser_a.set_defaults(func=compute_stats)
parser_b = subparsers.add_parser(
"decrypt", help="Use MCMC to decrypt substitution cipher."
)
parser_b.add_argument(
"--src", nargs=1, required=True, help="Filename containing the text to decrypt."
)
parser_b.add_argument(
"--iters", nargs=1, required=True, help="Iterations to be used with MCMC."
)
parser_b.add_argument("--lang", choices=["fi", "en"], nargs=1, required=True)
parser_b.set_defaults(func=mcmc)
parser_c = subparsers.add_parser(
"encrypt", help="Use MCMC to encrypt with a random substitution cipher."
)
parser_c.add_argument(
"--src", nargs=1, required=True, help="Filename containing the text to encrypt."
)
parser_c.add_argument(
"--dest", nargs=1, required=True, help="Filename for saving encrypted text."
)
parser_c.add_argument("--lang", choices=["fi", "en"], nargs=1, required=True)
parser_c.set_defaults(func=encrypt)
args = parser.parse_args()
args.func(args)
``` |
{
"source": "jjaakko/sniptly",
"score": 3
} |
#### File: src/sniptly/file_line_wrapper.py
```python
class FileLineWrapper(object):
def __init__(self, f):
self.f = f
self.line = 0
def close(self):
return self.f.close()
def readline(self):
self.line += 1
return self.f.readline()
# to allow using in 'with' statements.
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
```
#### File: src/sniptly/output.py
```python
from typing import Any
from click import echo, style
def out(message: str, new_line: bool = True, **styles: Any) -> None:
if "bold" not in styles:
styles["bold"] = True
message = style(message, **styles)
echo(message, nl=new_line)
def err(message: str, new_line: bool = True, **styles: Any) -> None:
if "fg" not in styles:
styles["fg"] = "red"
message = style(message, **styles)
echo(message, nl=new_line)
```
#### File: src/sniptly/utils.py
```python
from typing import List
def extensions_to_glob_patterns(extensions: List) -> List[str]:
"""Generate a list of glob patterns from a list of extensions.
"""
patterns: List[str] = []
for ext in extensions:
pattern = ext.replace(".", "*.")
patterns.append(pattern)
return patterns
``` |
{
"source": "jjaakola/bang-a-gong",
"score": 3
} |
#### File: api_status_monitor/consumer/database_connection.py
```python
import logging
import psycopg2
class DatabaseConnection():
"""Database connection manager.
"""
def __init__(self, host, port, user, dbname, password, sslmode):
self._conn = None
self._host = host
self._port = port
self._user = user
self._dbname = dbname
self._password = password
self._sslmode = "require" if sslmode else None
def get_connection(self):
if not self._conn or self._conn.closed:
try:
self._conn = psycopg2.connect(dbname=self._dbname,
user=self._user,
host=self._host,
port=self._port,
password=self._password,
sslmode=self._sslmode)
except Exception:
logging.error("Unable to connect to PostgreSQL database.", exc_info=1)
self._conn = None
return self._conn
def close(self):
try:
if self._conn:
self._conn.close()
except Exception:
logging.warning("Database connection close failed.")
```
#### File: api_status_monitor/consumer/database_migrator.py
```python
import logging
from datetime import datetime, timedelta
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
CHECK_DATABASE = """
SELECT 1 FROM pg_database WHERE pg_database.datname = 'api_statuses'
"""
CREATE_DATABASE = "CREATE DATABASE api_statuses"
ENABLE_PGCRYPTO = "CREATE EXTENSION IF NOT EXISTS \"pgcrypto\""
CREATE_STATUS_TABLE = """
CREATE TABLE IF NOT EXISTS api_status (
site_id uuid NOT NULL,
endpoint varchar(255) NOT NULL,
status_code smallint NOT NULL,
response_time_ms smallint NOT NULL,
sla_ms smallint NOT NULL,
success boolean NOT NULL,
ts timestamp without time zone NOT NULL,
log varchar(255),
error varchar(255),
CONSTRAINT fk_site FOREIGN KEY (site_id) REFERENCES sites(id)
) PARTITION BY RANGE (EXTRACT(YEAR FROM ts), EXTRACT(MONTH FROM ts))
"""
"""
The sla_ms could live in the sites table instead. The difference is that
changing it there would also change the history: the change would be
global when viewed from the rows in api_status. That is why it is kept
in api_status, even though it is redundant in this exercise.
Data is partitioned by month.
If an API/webpage status is monitored twice a minute, the row count
per month is ~87600 for each monitored API/webpage.
Old partitions can be detached and archived.
"""
CREATE_API_STATUS_PARTITIONS_TMPL = """
CREATE TABLE IF NOT EXISTS api_status_{year}_{month}
PARTITION OF api_status (ts default '{year}-{month}-01')
FOR VALUES FROM ({year}, {month}) TO ({year_n}, {month_n})
"""
CREATE_SITES_TABLE = """
CREATE TABLE IF NOT EXISTS sites (
id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
site varchar(255) UNIQUE
)
"""
"""
The primary key is a random UUID to counter enumeration attacks.
"""
CREATE_SITES_INDEX = """
CREATE INDEX IF NOT EXISTS sites_index ON sites (site)
"""
class DatabaseMigrator():
"""DatabaseMigrator requires the connection manager to be
created with administrative privileges.
"""
def __init__(self, connection_manager):
self.connection_manager = connection_manager
def create_database(self):
conn = self.connection_manager.get_connection()
if conn:
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur = self.connection_manager.get_connection().cursor()
cur.execute(CHECK_DATABASE)
res = cur.fetchone()
if not res:
logging.info("Database api_statuses does not exist. Creating.")
cur.execute(CREATE_DATABASE)
conn.commit()
self.connection_manager.close()
else:
raise MigrationException("Could not create the database.")
def migrate(self):
conn = self.connection_manager.get_connection()
if conn:
cur = conn.cursor()
cur.execute(ENABLE_PGCRYPTO)
cur.execute(CREATE_SITES_TABLE)
cur.execute(CREATE_SITES_INDEX)
cur.execute(CREATE_STATUS_TABLE)
# Create api status data partitions twelve months ahead.
# Get the first day of the month.
current_month = datetime.today().date().replace(day=1)
for i in range(12):
year = current_month.strftime("%Y")
month = current_month.strftime("%m")
# Add 32 days to move datetime enough in future
# to roll the month. The current month always has
# the first day set so it is safe to add 32 days.
# The boundary test would be on January due to
# February having 28/29 days and others 31 or 30
# days. So 32 days will always roll to next month.
next_month = (current_month + timedelta(days=32)) \
.replace(day=1)
year_of_next_month = next_month.strftime("%Y")
month_of_next_month = next_month.strftime("%m")
create_partition = CREATE_API_STATUS_PARTITIONS_TMPL.format(
year=year, month=month, year_n=year_of_next_month,
month_n=month_of_next_month)
cur.execute(create_partition)
current_month = next_month
conn.commit()
self.connection_manager.close()
else:
raise MigrationException("Could not migrate the database.")
class MigrationException(Exception):
pass
```
#### File: api_status_monitor/consumer/kafkaconsumer.py
```python
import logging
class APIStatusKafkaConsumer():
"""APIStatusKafkaConsumer is tied to consume single topic given
as :status_topic.
All parameters are required to be valid objects.
"""
def __init__(self, status_topic, kafka_consumer, database_writer):
assert status_topic is not None
assert kafka_consumer is not None
assert database_writer is not None
self.consumer = kafka_consumer
self.status_topic = status_topic
self.consumer.subscribe(self.status_topic)
self.database_writer = database_writer
def close(self):
"""Closes the Kafka consumer and the database writer.
"""
logging.info("Closing consumer.")
try:
self.consumer.close()
self.database_writer.close()
except Exception:
logging.warning("Consumer close raised an exception.", exc_info=1)
logging.info("Consumer closed.")
def consume(self):
"""Poll messages instead of using the consumer iterator.
Using poll gives better control to shutdown after reading
a batch of messages.
"""
topics = self.consumer.poll(timeout_ms=100)
for topic, messages in topics.items():
for message in messages:
logging.debug("Topic: %s and value %s", topic, message.value)
try:
self.database_writer.persist(message.value)
except Exception:
logging.warning("Database writing failed.", exc_info=1)
self. consumer.commit()
```
#### File: tests/consumer/test_database_writer.py
```python
import unittest
from unittest.mock import patch, Mock
from api_status_monitor.common.apistatus import APIStatusInformation
from api_status_monitor.consumer.database_writer import DatabaseWriter
class TestDatabaseWriter(unittest.TestCase):
@patch(
'api_status_monitor.consumer.database_connection.DatabaseConnection')
def test_persist_ok(self, mock_cm):
status = APIStatusInformation("site", "endpoint",
200, "data", "", 1500, 2000)
db_writer = DatabaseWriter(mock_cm)
mock_conn = Mock()
mock_cm.get_connection.return_value = mock_conn
mock_cur = Mock()
mock_conn.cursor.return_value = mock_cur
db_writer.persist(status)
mock_cm.get_connection.assert_called_once()
mock_conn.cursor.assert_called_once()
mock_cur.execute.assert_called_once()
@patch(
'api_status_monitor.consumer.database_connection.DatabaseConnection')
def test_persist_no_connection(self, mock_cm):
status = APIStatusInformation("site", "endpoint",
200, "data", "", 1500, 2000)
db_writer = DatabaseWriter(mock_cm)
mock_cm.get_connection.return_value = None
db_writer.persist(status)
mock_cm.get_connection.assert_called_once()
@patch(
'api_status_monitor.consumer.database_connection.DatabaseConnection')
def test_persist_execute_raises_exception(self, mock_cm):
status = APIStatusInformation("site", "endpoint",
200, "data", "", 1500, 2000)
db_writer = DatabaseWriter(mock_cm)
mock_conn = Mock()
mock_cm.get_connection.return_value = mock_conn
mock_cur = Mock()
mock_conn.cursor.return_value = mock_cur
mock_cur.execute.side_effect = Exception("error")
db_writer.persist(status)
mock_cm.get_connection.assert_called_once()
mock_conn.rollback.assert_called_once()
```
#### File: tests/producer/test_producer.py
```python
import asyncio
import unittest
from unittest.mock import patch, Mock
from kafka.admin import NewTopic
from kafka.errors import TopicAlreadyExistsError
from api_status_monitor.producer import Producer
class TestProducer(unittest.IsolatedAsyncioTestCase):
@patch('api_status_monitor.producer.Producer._get_admin_client')
def test_create_topic(self, mock_admin_client_getter):
admin_client_mock = Mock()
mock_admin_client_getter.return_value = admin_client_mock
producer = Producer({}, "bs_servers", "topic", {'kw': 'args'})
producer.create_topic()
topic = NewTopic(name="topic", num_partitions=1, replication_factor=1)
admin_client_mock.create_topics.assert_called_once()
admin_client_mock.close.assert_called_once()
@patch('api_status_monitor.producer.Producer._get_admin_client')
def test_create_topic_raises_exception(self, mock_admin_client_getter):
admin_client_mock = Mock()
mock_admin_client_getter.return_value = admin_client_mock
admin_client_mock.create_topics.side_effect = \
TopicAlreadyExistsError("error")
producer = Producer({}, "bs_servers", "topic", {'kw': 'args'})
producer.create_topic()
topic = NewTopic(name="topic", num_partitions=1, replication_factor=1)
admin_client_mock.create_topics.assert_called_once()
admin_client_mock.close.assert_called_once()
class TestProducerAsync(unittest.IsolatedAsyncioTestCase):
@patch('api_status_monitor.producer.Producer._get_kafka_producer')
@patch('api_status_monitor.producer.Producer.running')
async def test_read_task(self, mock_running, mock_get_kafka_producer):
# just one round.
mock_running.side_effect = [True, False]
mock_kafka_producer = Mock()
mock_get_kafka_producer.return_value = mock_kafka_producer
mock_reader = Mock()
mock_reader.url.return_value = "url"
result_future = asyncio.Future()
result_future.set_result("ok")
mock_reader.read.return_value = result_future
loop = asyncio.get_event_loop()
producer = Producer({}, "bs_servers", "topic", {'kw': 'args'})
s = await producer.reader_task(mock_reader, 1)
mock_reader.url.assert_called_once()
mock_reader.read.assert_called_once()
mock_get_kafka_producer.assert_called_once()
mock_kafka_producer.send.assert_called_once()
mock_kafka_producer.send.assert_called_once_with("topic", "ok")
@patch('api_status_monitor.producer.Producer._get_kafka_producer')
@patch('api_status_monitor.producer.Producer.running')
async def test_read_task_publish_raises_exception(
self, mock_running, mock_get_kafka_producer):
# just one round.
mock_running.side_effect = [True, False]
mock_kafka_producer = Mock()
mock_get_kafka_producer.return_value = mock_kafka_producer
mock_reader = Mock()
mock_reader.url.return_value = "url"
result_future = asyncio.Future()
result_future.set_result("ok")
mock_reader.read.return_value = result_future
mock_kafka_producer.send.side_effect = Exception("error")
loop = asyncio.get_event_loop()
producer = Producer({}, "bs_servers", "topic", {'kw': 'args'})
s = await producer.reader_task(mock_reader, 1)
mock_reader.url.assert_called_once()
mock_reader.read.assert_called_once()
mock_get_kafka_producer.assert_called_once()
mock_kafka_producer.send.assert_called_once_with("topic", "ok")
``` |
{
"source": "jjaapro/TwitterSentiment",
"score": 3
} |
#### File: TwitterSentiment/lib/TwitterSentiment.py
```python
import pandas as pd
import sqlite3
import string
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import GridSearchCV
import joblib
import tweepy
import re
from string import punctuation
class TwitterSentiment:
def __init__(self, **kwargs):
self.consumer_key = kwargs.get('consumer_key', '')
self.consumer_secret = kwargs.get('consumer_secret', '')
self.access_token = kwargs.get('access_token', '')
self.access_token_secret = kwargs.get('access_token_secret', '')
self.auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
self.auth.set_access_token(self.access_token, self.access_token_secret)
self.api = tweepy.API(self.auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
try:
self.api.verify_credentials()
print("Authentication OK")
except tweepy.error.TweepError as ex:
print("Error during authentication")
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.conn = sqlite3.connect('./data/data.db')
cursor = self.conn.cursor()
cursor.execute("DROP TABLE IF EXISTS tweets")
self.conn.commit()
cursor.execute("CREATE TABLE tweets(date none, location none, followers none, friends none, message none)")
self.conn.commit()
cursor.close()
def train(self, train_dataset_path: string = './data/training/training_data.csv'):
data = pd.read_csv(train_dataset_path, error_bad_lines=False)
data.columns = ['label', 'id', 'date', 'source', 'user', 'text']
data = data.drop(['id', 'source', 'date', 'user'], axis=1)
positives = data['label'][data.label == 4]
neutrals = data['label'][data.label == 2]
negatives = data['label'][data.label == 0]
print('Number of positive tagged sentences is: {}'.format(len(positives)))
print('Number of neutral tagged sentences is: {}'.format(len(neutrals)))
print('Number of negative tagged sentences is: {}'.format(len(negatives)))
print('Total length of the data is: {}'.format(data.shape[0]))
print("\nTraining...")
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(data['text'], data['label'], test_size=0.2)
pipeline = Pipeline([
('bow', CountVectorizer(strip_accents='ascii', stop_words='english', lowercase=True)),
('tfidf', TfidfTransformer()),
('classifier', MultinomialNB()),
])
parameters = {'bow__ngram_range': [(1, 1), (1, 2)],
'tfidf__use_idf': (True, False),
'classifier__alpha': (1e-2, 1e-3),
}
grid = GridSearchCV(pipeline, cv=10, param_grid=parameters, verbose=1)
grid.fit(self.X_train, self.y_train)
print("\nBest Model: %f using %s" % (grid.best_score_, grid.best_params_))
print('\n')
means = grid.cv_results_['mean_test_score']
stds = grid.cv_results_['std_test_score']
params = grid.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("Mean: %f Stdev:(%f) with: %r" % (mean, stdev, param))
print("\nTraining finished!")
print("\n")
joblib.dump(grid, "./data/twitter_sentiment.pkl")
def clean_message(self, tweet):
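        # Strip HTML entities, @mentions, cashtags, URLs, hashtags, punctuation and 1-2 character words.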
tweet = re.sub(r'\&\w*;', '', tweet)
tweet = re.sub('@[^\s]+', '', tweet)
tweet = re.sub(r'\$\w*', '', tweet)
tweet = tweet.lower()
tweet = re.sub(r'https?:\/\/.*\/\w*', '', tweet)
tweet = re.sub(r'#\w*', '', tweet)
tweet = re.sub(r'[' + punctuation.replace('@', '') + ']+', ' ', tweet)
tweet = re.sub(r'\b\w{1,2}\b', '', tweet)
tweet = re.sub(r'\s\s+', ' ', tweet)
tweet = tweet.lstrip(' ')
tweet = ''.join(c for c in tweet if c <= '\uFFFF')
return tweet
def search_tweets(self, query, item_limit: int = 100):
cursor = self.conn.cursor()
cursor.execute("DELETE FROM tweets")
self.conn.commit()
tweets = tweepy.Cursor(self.api.search, q=query, lang="en", tweet_mode='extended').items(item_limit)
for tweet in tweets:
try:
fields = [tweet.created_at, tweet.user.location, tweet.user.followers_count, tweet.user.friends_count,
tweet.full_text]
cursor.execute("INSERT INTO tweets VALUES (?,?,?,?,?)", fields)
except tweepy.error.TweepError as ex:
pass
cursor.close()
df_twtr = pd.read_sql_query("SELECT * FROM tweets", self.conn)
df_twtr['date'] = pd.to_datetime(df_twtr['date'])
df_twtr = df_twtr.sort_values(by='date', ascending=True)
df_twtr = df_twtr.reset_index().drop('index', axis=1)
df_twtr.head()
df_twtr['message'] = df_twtr['message'].apply(self.clean_message)
return df_twtr
def prediction(self, query, item_limit: int = 100):
df_twtr = self.search_tweets(query, item_limit)
model_NB = joblib.load("./data/twitter_sentiment.pkl")
y_predictions = model_NB.predict(self.X_test)
print('\n')
print('Accuracy score: ', accuracy_score(self.y_test, y_predictions))
print('Confusion matrix: \n', confusion_matrix(self.y_test, y_predictions))
print('\n')
print('0 = negative, 2 = neutral, 4 = positive')
print(classification_report(self.y_test, y_predictions))
tweet_preds = model_NB.predict(df_twtr['message'])
df_tweet_predictions = df_twtr.copy()
df_tweet_predictions['predictions'] = tweet_preds
neg = df_tweet_predictions.predictions.value_counts()[0]
neu = df_tweet_predictions.predictions.value_counts()[2]
pos = df_tweet_predictions.predictions.value_counts()[4]
print('Model predictions: Positives - {}, Neutrals - {}, Negatives - {}'.format(pos, neu, neg))
df_tweet_predictions.to_pickle('./data/tweet_predicts_df.p')
``` |
{
"source": "jjac111/Flower-Identifier",
"score": 3
} |
#### File: jjac111/Flower-Identifier/image_utils.py
```python
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import PIL
import numpy as np
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
to_pil = transforms.ToPILImage(mode= 'RGB')
image = to_pil(image)
width, height = image.width, image.height
if height <= width:
width = int(width*256/height)
height = 256
else:
height = int(height*256/width)
width = 256
image = image.resize((width, height))
left = (width - 224)/2
top = (height - 224)/2
right = (width + 224)/2
bottom = (height + 224)/2
image = image.crop((left, top, right, bottom))
np_image = np.array(image)
np_image = np_image/255
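    # Normalize with the ImageNet mean/std expected by torchvision's pretrained models.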
for ij in np.ndindex(np_image.shape[:2]):
np_image[ij] = (np_image[ij] - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]
return np_image.transpose((2,0,1))
def imshow(image, ax=None, title=None):
"""Imshow for Tensor."""
if ax is None:
fig, ax = plt.subplots()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.cpu().numpy().transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
```
#### File: jjac111/Flower-Identifier/predict.py
```python
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import json
import PIL
import numpy as np
from collections import OrderedDict
import time
import argparse
from train_utils import *
from image_utils import *
def predict(image_path, model, topk=5, cat_to_name=None):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
image = PIL.Image.open(image_path)
to_tensor = transforms.ToTensor()
image = to_tensor(image)
image = torch.tensor(process_image(image))
image = image.unsqueeze(dim=0)
image = image.to(device).float()
model = model.to(device).float()
log_ps = model(image)
ps = torch.exp(log_ps)
top_p, top_classes = ps.topk(topk, dim=1)
idx_to_class = {value:key for key, value in model.class_to_idx.items()}
labels = []
if cat_to_name:
for clss in top_classes[0]:
labels.append(cat_to_name[idx_to_class[clss.item()]])
else:
for clss in top_classes[0]:
labels.append(str(idx_to_class[clss.item()]))
return top_p, labels
parser = argparse.ArgumentParser(description='Use a pretrained Artificial Neural Network to predict the type of a flower image input.')
parser.add_argument('input', type=str,
                    help='Path to the input image (only tested with JPG files).')
parser.add_argument('--checkpoint', type=str,
help='Directory of the ANN training checkpoint. If not specified, \'checkpoint.pth\' will be searched in the local directory.')
parser.add_argument('--top_k', type=int,
help='The top number of category probabilities calculated for the input.')
parser.add_argument('--category_names', type=str,
                    help='Path to a JSON file mapping category labels to flower names.')
parser.add_argument('--gpu', action='store_true',
help='If specified, the network training will take place in the GPU which drastically accelerates the process. If GPU is not available, CPU will be used instead.')
args = parser.parse_args()
image_path = args.input
cat_to_name = None
device = 'cpu'
checkpoint = args.checkpoint
top_k = args.top_k
category_names = args.category_names
if args.category_names:
with open(category_names, 'r') as f:
cat_to_name = json.load(f)
else:
print('No category file specified. Categorical labels will not be translated.\n')
if args.gpu:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device == 'cpu': print('There is no GPU available. Using CPU instead.\n')
if not checkpoint:
checkpoint = 'checkpoint.pth'
if not top_k:
top_k = 5
model = load_model(checkpoint)
model.eval()
probs, class_names = predict(image_path, model, top_k, cat_to_name)
print(class_names)
print('\n')
print(probs)
#PLOTTING
'''
x = np.arange(len(class_names))
y = probs.tolist()[0]
plt.subplot(2,1,1)
ax = plt.gca()
imshow(image= image, ax= ax)
plt.show()
plt.subplot(2,1,2)
plt.barh(x, y, align='center')
plt.yticks(x, class_names)
plt.xticks(rotation=30)
ax = plt.gca()
ax.set_xlim(min(y)*0.95, max(y)*1.05)
plt.show()
'''
```
#### File: jjac111/Flower-Identifier/train_utils.py
```python
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import numpy as np
from collections import OrderedDict
def save_model(model, in_features, hidden_neurons, out_features, epochs, optimizer, arch, dataset, save_dir=None):
model.class_to_idx = dataset.class_to_idx
model_checkpoint = {'state_dict': model.state_dict(),
'input': in_features,
'hidden': [int(hidden_neurons*2/3), int(hidden_neurons*1/3)],
'output': out_features,
'epochs': epochs,
'optimizer_state': optimizer.state_dict(),
'class_to_idx': model.class_to_idx,
'arch': arch}
if not save_dir:
save_dir = ''
torch.save(model_checkpoint, save_dir+'checkpoint.pth')
def test_model(model, testloader, device='cpu', criterion=torch.nn.NLLLoss()):
test_loss = 0
accuracy = 0
with torch.no_grad():
model.eval()
for inputs, labels in testloader:
inputs, labels = inputs.to(device), labels.to(device)
log_ps = model(inputs)
ps = torch.exp(log_ps)
test_loss += criterion(log_ps, labels)
top_prob, top_class = ps.topk(1, dim=1)
equality = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equality.type(torch.FloatTensor))
print(f"Test Loss: {test_loss/len(testloader)}")
print(f"Tested Accuracy: {round(float(accuracy*100/len(testloader)), 2)}%")
def load_model(filename, path= ""):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_cp = torch.load(path+filename, map_location=lambda storage, loc: storage)
layers = OrderedDict()
layers['0'] = nn.Linear(model_cp['input'], model_cp['hidden'][0])
layers['relu0'] = nn.ReLU()
layers['d0'] = nn.Dropout(p=0.2)
for i in range(len(model_cp['hidden'])):
if i != len(model_cp['hidden']) -1:
layers[str(i+1)] = nn.Linear(model_cp['hidden'][i], model_cp['hidden'][i+1])
layers['relu'+str(i+1)] = nn.ReLU()
layers['d'+str(i+1)] = nn.Dropout(p=0.2)
else:
layers[str(len(model_cp['hidden']))] = nn.Linear(model_cp['hidden'][-1], model_cp['output'])
layers['logsoftmax'] = nn.LogSoftmax(dim=1)
#Works only for this type of architecture for now
model = models.densenet121(pretrained= True)
for param in model.parameters():
param.requires_grad = False
model.classifier = nn.Sequential(layers)
model.load_state_dict(model_cp['state_dict'], True)
model.class_to_idx = model_cp['class_to_idx']
return model.to(device)
``` |
{
"source": "JJack27/BLEHub",
"score": 3
} |
#### File: JJack27/BLEHub/gateway.py
```python
import pygatt
import bluepy.btle as btle
import os
import psutil
import time
class Gateway:
# Constructor of the Gateway
# Arguments:
# - sub_proc: the subprocess to raise when new devices connected
# - between_scan: integer, the number of seconds that main process pauses before the next scan
# Return:
# - Gateway
def __init__(self, sub_proc, between_scan=5, debug=False):
# maps mac address to pid of sub-process
# with the format: {mac_address: pid}
self._mac_proc_table = {}
# Scanner for discovering devices
self._scanner = pygatt.GATTToolBackend()
self._scanner.reset()
# the sub-process function to raise when new deveice connected
self._sub_proc = sub_proc
self._debug = debug
self._between_scan = between_scan
self._total_rounds = 0
self._broke_rounds = 0
# validate if the given mac address is a bracelet
# Arugments:
# - mac_addr: string, mac address
# Return:
# - bool
def _validate_mac_addr(self, mac_addr):
test_mac_addr = ['66:55:44:33:22:11']
if ( mac_addr.lower() in test_mac_addr):
print("Found!")
            return True
        return False
# update self._mac_proc_table
# arguments:
# - mac_addrs: a list of dictionaries with format: {'address': xxx.
# 'name':}
# return:
# - None
def _update_mac_table(self, mac_addrs):
self._total_rounds += 1
mac_addr_list = [i['address'] for i in mac_addrs]
# remove and kill the process when the corresponding bracelet is not detected
removing = []
for mac_addr, pid in self._mac_proc_table.items():
# Check if given process is a zombie process
# print(psutil.Process(pid).status() == psutil.STATUS_ZOMBIE)
if(psutil.Process(pid).status() == psutil.STATUS_ZOMBIE):
os.waitpid(pid, 0)
removing.append(mac_addr)
if(self._debug):
print("Process (%s) is killed!"%pid)
if(len(removing) > 0):
self._broke_rounds += 1
if(self._debug):
print("Round %d, %s. (%d/%d, %f)" %
(self._total_rounds, str(bool(len(removing))),
self._total_rounds, self._broke_rounds,
self._total_rounds / self._broke_rounds))
for addr in removing:
self._mac_proc_table.pop(addr)
# add new bracelet
for mac_addr in mac_addr_list:
valid_addr = self._validate_mac_addr(mac_addr)
# new bracelet that haven't been detected yet.
if valid_addr and mac_addr not in self._mac_proc_table.keys():
# raise a sub-process to receive the bluetooth data
pid = os.fork()
if(pid == 0):
# in sub-process
self._sub_proc(mac_addr)
else:
# in parent process
# update self._mac_proc_table
self._mac_proc_table[mac_addr] = pid
# Sleep for X seconds, then continue scanning. Default = 10
time.sleep(self._between_scan)
# Print and return the list of mac address of connected devices
# Arguments:
# - None
# Return:
# - list
def get_connected_device(self):
if(self._debug):
for addr in self._mac_proc_table.keys():
print(addr)
return self._mac_proc_table.keys()
# Print and return the self._mac_proc_table
# Arguments:
# - None
# Return:
# - Dict
def get_mac_proc_table(self):
if(self._debug):
for addr, pid in self._mac_proc_table.items():
print("%s %s"%(addr, pid))
return self._mac_proc_table
# Scanning and return all nearby devices
# Arguments:
# - None
# Returns:
# - List<dictionary>
def scan(self):
print("Scanning...")
return self._scanner.scan()
# The interface to start running the gateway
# - Constantly discover the new devices
# - update the self.mac_proc_table
def run(self):
self._scanner.start()
while True:
if(self._debug):
print("=============")
self.get_mac_proc_table()
if(self._mac_proc_table == {}):
devices = self.scan()
self._update_mac_table(devices)
if(self._debug):
print("found %d devices" % len(devices))
#self._update_mac_table(devices)
```
#### File: JJack27/BLEHub/runserver.py
```python
from gateway import Gateway
from sub_process import sub_proc
def main(*args):
print("Starting Gateway...")
gateway = Gateway(sub_proc, debug=False)
gateway.run()
if __name__ == '__main__':
main()
```
#### File: JJack27/BLEHub/test.py
```python
import pygatt
import struct
adapter = pygatt.GATTToolBackend()
def print_data(handle, value):
print("Received data: %s" % struct.unpack('f', value)[0])
try:
adapter.start()
device = adapter.connect('c7:36:e0:31:ab:ae',
address_type=pygatt.BLEAddressType.random)
print(device)
#device.subscribe('401dc6f1-3f8d-11e5-afbb-0002a5d5c51b', callback=print_data)
while True:
value = device.char_read('401dc6f1-3f8d-11e5-afbb-0002a5d5c51b')
print(struct.unpack('f', value)[0])
finally:
adapter.stop()
``` |
{
"source": "JJackol/AIIR_Do_dna",
"score": 2
} |
#### File: backend_api/process/views.py
```python
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.template import loader
from django.core.files.storage import FileSystemStorage
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
import string
import random
from process.models import User
def index(request):
"""
"Strona główna" umożlwiająca upload pliku
"""
    # TODO: Check here whether the user is logged in !!!
    # And, if not, redirect them to the /login page
if request.method == 'POST' and request.FILES['myfile']:
myfile = request.FILES['myfile']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
random_identifier = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
return render(request, 'load_file.html', {
'uploaded_file_url': uploaded_file_url,
'calculation_id': random_identifier
})
return render(request, 'load_file.html')
@csrf_exempt
def login(request):
username = request.POST.get('username')
password = request.POST.get('password')
for user in User.objects.all():
if user.username == username and user.password == password:
return redirect('/')
return render(request, 'pages/login.html')
@csrf_exempt
def register(request):
if request.method == 'GET':
return render(request, 'pages/register.html')
else:
username = request.POST.get('username')
password = request.POST.get('password')
User.objects.create(username=username, password=password)
return render(request, 'pages/login.html')
def track_progress(request, calculation_name):
"""Strona na której można śledzić postęp obliczeń
Arguments:
calculation_name {string} -- nazwa kanału komunikacyjnego
"""
return render(request, 'progress_tracker.html', {
'calculation_name': calculation_name
})
def websocket_sending_message_demo(request, calculation_name, message):
"""Demonstracja wykorzystania websocketów do rozgłaszania statusu obliczeń
Arguments:
calculation_name {string} -- nazwa kanału komunikacyjnego
message {string} -- wiadomość do rozgłoszenia
"""
# pobranie instancji kanału komunikacyjnego
channel_layer = get_channel_layer()
# wygenerowanie nazwy kanału
tracker_group_name = 'tracker_%s' % calculation_name
# wysłanie wiadomości do nasłuchujących użytkowników
async_to_sync(channel_layer.group_send)(
tracker_group_name,
{
'type': 'progress_update', 'message': message
}
)
return HttpResponse("OK")
``` |
{
"source": "jjacob/DailyPythonScripts",
"score": 2
} |
#### File: experimental/BLTUnfold/getScaleFactors.py
```python
import pickle
def get7TeVMuonScaleFactors():
f = open('MuonEfficiencies2011_44X.pkl', 'r')
data = pickle.load(f)
ptBins = data['combRelPFISO12_2011A']['pt_abseta>1.2'].keys()
etaBins = ['pt_abseta>1.2','pt_abseta<1.2']
lumi_2011A = 2.311
lumi_2011B = 2.739
lumi_2011 = 5.050
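    # Combine the 2011A and 2011B data/MC efficiency ratios, weighted by their luminosity fractions.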
for etaBin in etaBins:
for ptBin in ptBins:
a = data['combRelPFISO12_2011A'][etaBin][ptBin]['data/mc']['efficiency_ratio']
b = data['combRelPFISO12_2011B'][etaBin][ptBin]['data/mc']['efficiency_ratio']
correction = ((lumi_2011A/lumi_2011) * a) + ((lumi_2011B/lumi_2011)* b)
# Eta bins
lowEta = -1
highEta = -2
if etaBin.find('<')>=0:
lowEta = 0
highEta = 1.2
elif etaBin.find('>')>=0:
lowEta = 1.2
highEta = 10
pass
# Pt bin
lowPt = ptBin.split('_')[0]
highPt = ptBin.split('_')[-1]
print 'scaleFactor( %s, %s, %s, %s, %s ),' % ( lowEta, highEta, lowPt, highPt, correction )
pass
pass
pass
def get8TeVMuonScaleFactors():
fID = open('MuonEfficiencies_Run2012ReReco_53X.pkl', 'r')
fISO = open('MuonEfficiencies_ISO_Run_2012ReReco_53X.pkl', 'r')
fTRIG = open('SingleMuonTriggerEfficiencies_eta2p1_Run2012ABCD_v5trees.pkl', 'r')
dataID = pickle.load(fID)['Tight']
dataISO = pickle.load(fISO)['combRelIsoPF04dBeta<012_Tight']
dataTRIG = pickle.load(fTRIG)['IsoMu24']['TightID_IsodB']
ptBins = dataID['ptabseta<0.9'].keys()
etaBins = ['ptabseta<0.9','ptabseta0.9-1.2','ptabseta1.2-2.1','ptabseta2.1-2.4']
# Different just to be annoying
trigEtaBins = {
'ptabseta2.1-2.4':'PT_ABSETA_Endcaps_1p2to2p1',
'ptabseta1.2-2.1':'PT_ABSETA_Endcaps_1p2to2p1',
'ptabseta0.9-1.2':'PT_ABSETA_Transition_0p9to1p2',
'ptabseta<0.9':'PT_ABSETA_Barrel_0to0p9'}
for etaBin in etaBins:
for ptBin in ptBins:
idCorrection = dataID[etaBin][ptBin]['data/mc']['efficiency_ratio']
isoCorrection = dataISO[etaBin][ptBin]['data/mc']['efficiency_ratio']
trigCorrection = 1.
if not( ptBin == '10_20' or ptBin == '20_25' ):
if ( ptBin == '140_300'):
trigCorrection = dataTRIG[ trigEtaBins[etaBin] ]['140_500']['data/mc']['efficiency_ratio']
else:
trigCorrection = dataTRIG[ trigEtaBins[etaBin] ][ptBin]['data/mc']['efficiency_ratio']
correction = idCorrection*isoCorrection*trigCorrection
# Eta bins
lowEta = -1
highEta = -2
if etaBin.find('<0.9')>=0:
lowEta = 0
highEta = 0.9
elif etaBin.find('0.9-1.2')>=0:
lowEta = 0.9
highEta = 1.2
elif etaBin.find('1.2-2.1')>=0:
lowEta = 1.2
highEta = 2.1
elif etaBin.find('2.1-2.4')>=0:
lowEta = 2.1
highEta = 2.4
pass
# Pt bin
lowPt = ptBin.split('_')[0]
highPt = ptBin.split('_')[-1]
print 'scaleFactor( %s, %s, %s, %s, %s ),' % ( lowEta, highEta, lowPt, highPt, correction )
pass
pass
pass
print 'MUON 7TEV\n'
get7TeVMuonScaleFactors()
print '\nMUON 8TEV\n'
get8TeVMuonScaleFactors()
```
#### File: experimental/BLTUnfold/produceUnfoldingHistograms.py
```python
from rootpy.tree import Tree
from rootpy.plotting import Hist, Hist2D, Canvas
from rootpy.io import root_open, File
from rootpy.interactive import wait
from optparse import OptionParser
from config.variable_binning import bin_edges
from scaleFactors import *
class channel:
def __init__(self, channelName, treeName, outputDirName):
self.channelName = channelName
self.treeName = treeName
self.outputDirName = outputDirName
pass
pass
# For debug
def setup_canvas():
canvas = Canvas(width=700, height=500)
canvas.SetLeftMargin(0.15)
canvas.SetBottomMargin(0.15)
canvas.SetTopMargin(0.10)
canvas.SetRightMargin(0.05)
return canvas
# Top pt weight
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/TopPtReweighting
def topPtWeight( is7TeV ):
if is7TeV:
return 'sqrt( exp(0.174-0.00137*unfolding.hadronicTopPt) * exp(0.174-0.00137*unfolding.leptonicTopPt) )'
else:
return 'sqrt( exp(0.159-0.00141*unfolding.hadronicTopPt) * exp(0.159-0.00141*unfolding.leptonicTopPt) )'
# Get the lepton scale factors
def getScaleFactor( is7TeV, channelName ):
if is7TeV:
if channelName is 'ePlusJets':
return '(1)'
else:
return convertScaleFactorsToString(muon7TeVScaleFactors)
else:
if channelName is 'ePlusJets':
return convertScaleFactorsToString(electron8TeVScaleFactors)
else:
return convertScaleFactorsToString(muon8TeVScaleFactors)
pass
# Convert the scale factors into a string for Tree::Draw
def convertScaleFactorsToString( scaleFactors ):
firstScaleFactor = True
sfString = '( '
for scaleFactor in scaleFactors:
if ( firstScaleFactor ):
sfString += '( ( ( abs( unfolding.leptonEta ) > '+scaleFactor.etaLowEdge+') && ( abs( unfolding.leptonEta ) < '+scaleFactor.etaHighEdge+') && ( unfolding.leptonPt > '+scaleFactor.ptLowEdge+') && ( unfolding.leptonPt < '+scaleFactor.ptHighEdge+') ) * '+scaleFactor.factor+') '
firstScaleFactor = False
else :
sfString += '+ ( ( ( abs( unfolding.leptonEta ) > '+scaleFactor.etaLowEdge+') && ( abs( unfolding.leptonEta ) < '+scaleFactor.etaHighEdge+') && ( unfolding.leptonPt > '+scaleFactor.ptLowEdge+') && ( unfolding.leptonPt < '+scaleFactor.ptHighEdge+') ) * '+scaleFactor.factor+') '
sfString += ')'
return sfString
def copyEventFilterHist( inputFile, outputFile ):
eventFilter = inputFile.Get('EventFilter/EventCounter')
outputFile.cd()
eventFilterOutputDir = outputFile.mkdir('EventFilter')
eventFilterOutputDir.cd()
eventFilter.Write()
inputFile.cd()
pass
fileNames = {
'8TeV' : {
'central' : '/hdfs/TopQuarkGroup/mc/8TeV/NoSkimUnfolding/BLT/TTJets_MassiveBinDECAY_TuneZ2star_8TeV/TTJets_nTuple_53X_mc_merged.root',
'scaleup' : '/hdfs/TopQuarkGroup/mc/8TeV/NoSkimUnfolding/BLT/TTJets_scaleup_TuneZ2star_8TeV-madgraph-tauola/TTJets-scaleup_nTuple_53X_mc_merged.root',
'scaledown' : '/hdfs/TopQuarkGroup/mc/8TeV/NoSkimUnfolding/BLT/TTJets_scaledown_TuneZ2star_8TeV-madgraph-tauola/TTJets-scaledown_nTuple_53X_mc_merged.root',
'matchingup' : '/hdfs/TopQuarkGroup/mc/8TeV/NoSkimUnfolding/BLT/TTJets_matchingup_TuneZ2star_8TeV-madgraph-tauola/TTJets-matchingup_nTuple_53X_mc_merged.root',
'matchingdown' : '/hdfs/TopQuarkGroup/mc/8TeV/NoSkimUnfolding/BLT/TTJets_matchingdown_TuneZ2star_8TeV-madgraph-tauola/TTJets-matchingdown_nTuple_53X_mc_merged.root',
'powheg' : '/hdfs/TopQuarkGroup/mc/8TeV/NoSkimUnfolding/BLT/TT_CT10_TuneZ2star_8TeV-powheg-tauola/TTJets_nTuple_53X_mc_merged.root',
'powhegherwig' : '/hdfs/TopQuarkGroup/mc/8TeV/NoSkimUnfolding/BLT/TT_CT10_AUET2_8TeV-powheg-herwig/TTJets_nTuple_53X_mc_merged.root',
'mcatnlo' : '/hdfs/TopQuarkGroup/mc/8TeV/NoSkimUnfolding/BLT/TT_8TeV-mcatnlo/TTJets_nTuple_53X_mc_merged.root',
},
'7TeV' : {
'central' : '/hdfs/TopQuarkGroup/mc/7TeV/v11/NoSkimUnfolding/BLT/TTJets_MSDecays_central_TuneZ2_7TeV-madgraph-tauola/TTJets_nTuple_53X_mc_merged.root',
'scaledown' :'/hdfs/TopQuarkGroup/mc/7TeV/v11/NoSkimUnfolding/BLT/TTJets_MSDecays_scaledown_TuneZ2star_7TeV-madgraph-tauola/TTJets-scaledown_nTuple_53X_mc_merged.root',
'scaleup' : '/hdfs/TopQuarkGroup/mc/7TeV/v11/NoSkimUnfolding/BLT/TTJets_MSDecays_scaleup_TuneZ2star_7TeV-madgraph-tauola/TTJets-scaleup_nTuple_53X_mc_merged.root',
}
}
channels = [
channel( 'ePlusJets', 'rootTupleTreeEPlusJets', 'electron'),
channel( 'muPlusJets', 'rootTupleTreeMuPlusJets', 'muon')
]
def main():
parser = OptionParser()
parser.add_option('--topPtReweighting', action='store_true', dest='applyTopPtReweighting', default=False )
parser.add_option('--is7TeV', action='store_true', dest='is7TeVData', default=False )
parser.add_option('-p', '--pdfWeight', type='int', dest='pdfWeight', default=0 )
parser.add_option('-s', '--sample', dest='sample', default='central')
parser.add_option('-d', '--debug', action='store_true', dest='debug', default=False)
parser.add_option('-n', action='store_true', dest='donothing', default=False)
parser.add_option('-e', action='store_true', dest='extraHists', default=False)
parser.add_option('-f',action='store_true', dest='fineBinned', default=False)
(options, _) = parser.parse_args()
# Input file name
file_name = 'crap.root'
if options.is7TeVData:
file_name = fileNames['7TeV'][options.sample]
else:
file_name = fileNames['8TeV'][options.sample]
# Output file name
outputFileName = 'crap.root'
outputFileDir = 'unfolding/'
energySuffix = '8TeV'
if options.is7TeVData:
energySuffix = '7TeV'
if options.applyTopPtReweighting:
outputFileName = outputFileDir+'/unfolding_TTJets_%s_asymmetric_withTopPtReweighting.root' % energySuffix
elif options.pdfWeight != 0:
outputFileName = outputFileDir+'/unfolding_TTJets_%s_asymmetric_pdfWeight_%i.root' % ( energySuffix, options.pdfWeight )
elif options.sample != 'central':
outputFileName = outputFileDir+'/unfolding_TTJets_%s_%s_asymmetric.root' % ( energySuffix, options.sample )
else:
outputFileName = outputFileDir+'/unfolding_TTJets_%s_asymmetric.root' % energySuffix
with root_open( file_name, 'read' ) as f, root_open( outputFileName, 'recreate') as out:
copyEventFilterHist( f, out )
for channel in channels:
if options.debug and channel.channelName != 'muPlusJets' : continue
print 'Channel : ',channel.channelName
# Get the tree
tree = f.Get(channel.treeName+'/'+channel.channelName+'Tree')
# Keep record of pdf weight
if options.pdfWeight != 0:
pdfWeight = '( unfolding.PDFWeights[%i]/unfolding.PDFWeights[0] )' % options.pdfWeight
pdfWeightHist = Hist( 10, 0.8, 1.2, name='pdfWeights_'+channel.channelName )
if not options.donothing:
tree.Draw( pdfWeight, hist=pdfWeightHist)
outputDir = 0
if not ( out.FindObject('pdfWeights') ):
outputDir = out.mkdir('pdfWeights')
else :
outputDir = out.Get('pdfWeights')
outputDir.cd()
pdfWeightHist.Write()
pass
for variable in bin_edges:
if options.debug and variable != 'HT' : continue
print '--->Doing variable :',variable
# Output dir name
metSuffix='_patType1CorrectedPFMet'
                if variable == 'HT':
metSuffix=''
pass
# Make dir in output file
outputDir = out.mkdir('unfolding_'+variable+'_analyser_'+channel.outputDirName+'_channel'+metSuffix)
# Variable names in tree
genSelection = '( unfolding.GenSelection == 1 )'
genWeight = '( unfolding.puWeight )'
# PU weights dodgy for 7 TeV, but should be v. close to 1
if options.is7TeVData:
genWeight = '( 1 )'
offlineSelection = '( unfolding.OfflineSelection == 1 )'
offlineWeight = '( unfolding.bTagWeight * unfolding.puWeight )'
# PU weights dodgy for 7 TeV, but should be v. close to 1
if options.is7TeVData:
offlineWeight = '( unfolding.bTagWeight )'
fakeSelection = '( ' + offlineSelection+"&&!"+genSelection +' ) '
genVariable = 'unfolding.gen'+variable
recoVariable = 'unfolding.reco'+variable
# Weights derived from variables in tree
if options.applyTopPtReweighting:
ptWeight = topPtWeight( options.is7TeVData )
offlineWeight += ' * '+ptWeight
genWeight += ' * '+ptWeight
pass
# Apply pdf weight
if options.pdfWeight != 0:
pdfWeight = '( unfolding.PDFWeights[%i]/unfolding.PDFWeights[0] )' % options.pdfWeight
offlineWeight += ' * '+pdfWeight
genWeight += ' * '+pdfWeight
pass
# Scale factors
scaleFactor = getScaleFactor( options.is7TeVData, channel.channelName )
offlineWeight += ' * '+scaleFactor
# Histograms to fill
# 1D histograms
truth = Hist( bin_edges[variable], name='truth')
measured = Hist( bin_edges[variable], name='measured')
fake = Hist( bin_edges[variable], name='fake')
# 2D histograms
response = Hist2D( bin_edges[variable], bin_edges[variable], name='response')
response_without_fakes = Hist2D( bin_edges[variable], bin_edges[variable], name='response_without_fakes')
response_only_fakes = Hist2D( bin_edges[variable], bin_edges[variable], name='response_only_fakes')
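                # 'truth' holds the generator-level spectrum, 'measured' the reconstructed one and 'fake' the reconstructed
                # events that fail the generator-level selection; the 2D response matrices (reco vs gen) feed the unfolding.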
if options.fineBinned:
minVar = bin_edges[variable][0]
maxVar = bin_edges[variable][-1]
nBins = int(maxVar - minVar)
truth = Hist( nBins, minVar, maxVar, name='truth')
measured = Hist( nBins, minVar, maxVar, name='measured')
fake = Hist( nBins, minVar, maxVar, name='fake')
response = Hist2D( nBins, minVar, maxVar, nBins, minVar, maxVar, name='response')
response_without_fakes = Hist2D( nBins, minVar, maxVar, nBins, minVar, maxVar, name='response_without_fakes')
response_only_fakes = Hist2D( nBins, minVar, maxVar, nBins, minVar, maxVar, name='response_only_fakes')
# Some interesting histograms
puOffline = Hist( 20, 0, 2, name='puWeights_offline')
# Fill histograms
# 1D
if not options.donothing:
tree.Draw(genVariable,genWeight+'*'+genSelection,hist=truth)
tree.Draw(recoVariable,offlineWeight+'*'+offlineSelection,hist=measured)
tree.Draw(recoVariable,offlineWeight+'*'+fakeSelection,hist=fake)
# 2D
tree.Draw(recoVariable+':'+genVariable,offlineWeight+'*'+offlineSelection,hist=response)
tree.Draw(recoVariable+':'+genVariable,offlineWeight+'* ('+offlineSelection+'&&'+genSelection +')',hist=response_without_fakes)
tree.Draw(recoVariable+':'+genVariable,offlineWeight+'*'+fakeSelection,hist=response_only_fakes)
if options.extraHists:
tree.Draw( 'unfolding.puWeight','unfolding.OfflineSelection',hist=puOffline)
pass
                # Output histograms to file
outputDir.cd()
truth.Write()
measured.Write()
fake.Write()
response.Write()
response_without_fakes.Write()
response_only_fakes.Write()
if options.extraHists:
puOffline.Write()
pass
pass
pass
pass
if __name__ == '__main__':
main()
```
#### File: experimental/BLTUnfold/scaleFactors.py
```python
class scaleFactor:
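    # Bin edges and the factor are stored as strings so they can be concatenated directly into a TTree::Draw weight expression.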
def __init__( self, etaLowEdge, etaHighEdge, ptLowEdge, ptHighEdge, factor ):
self.etaLowEdge = str(etaLowEdge)
self.etaHighEdge = str(etaHighEdge)
self.ptLowEdge = str(ptLowEdge)
self.ptHighEdge = str(ptHighEdge)
self.factor = str(factor)
pass
muon8TeVScaleFactors = [
scaleFactor( 0, 0.9, 10, 20, 0.912386496723 ),
scaleFactor( 0, 0.9, 25, 30, 0.972452055282 ),
scaleFactor( 0, 0.9, 30, 35, 0.970834089244 ),
scaleFactor( 0, 0.9, 20, 25, 0.965862226978 ),
scaleFactor( 0, 0.9, 60, 90, 0.972944008855 ),
scaleFactor( 0, 0.9, 50, 60, 0.972142437533 ),
scaleFactor( 0, 0.9, 40, 50, 0.970196629776 ),
scaleFactor( 0, 0.9, 90, 140, 0.984919198606 ),
scaleFactor( 0, 0.9, 35, 40, 0.971533870084 ),
scaleFactor( 0, 0.9, 140, 300, 0.997372291394 ),
scaleFactor( 0.9, 1.2, 10, 20, 0.950076238802 ),
scaleFactor( 0.9, 1.2, 25, 30, 0.963613089993 ),
scaleFactor( 0.9, 1.2, 30, 35, 0.959083862361 ),
scaleFactor( 0.9, 1.2, 20, 25, 0.980396102182 ),
scaleFactor( 0.9, 1.2, 60, 90, 0.949572545054 ),
scaleFactor( 0.9, 1.2, 50, 60, 0.955914704027 ),
scaleFactor( 0.9, 1.2, 40, 50, 0.957735548464 ),
scaleFactor( 0.9, 1.2, 90, 140, 0.974531465976 ),
scaleFactor( 0.9, 1.2, 35, 40, 0.95863318261 ),
scaleFactor( 0.9, 1.2, 140, 300, 0.983740867212 ),
scaleFactor( 1.2, 2.1, 10, 20, 0.989958505637 ),
scaleFactor( 1.2, 2.1, 25, 30, 1.00650756009 ),
scaleFactor( 1.2, 2.1, 30, 35, 1.00196917262 ),
scaleFactor( 1.2, 2.1, 20, 25, 0.990402317676 ),
scaleFactor( 1.2, 2.1, 60, 90, 0.981360591419 ),
scaleFactor( 1.2, 2.1, 50, 60, 0.986171182488 ),
scaleFactor( 1.2, 2.1, 40, 50, 0.991245531521 ),
scaleFactor( 1.2, 2.1, 90, 140, 1.00374013398 ),
scaleFactor( 1.2, 2.1, 35, 40, 0.994337896106 ),
scaleFactor( 1.2, 2.1, 140, 300, 0.965619755393 ),
scaleFactor( 2.1, 2.4, 10, 20, 1.12236018183 ),
scaleFactor( 2.1, 2.4, 25, 30, 1.09821221889 ),
scaleFactor( 2.1, 2.4, 30, 35, 1.07688652489 ),
scaleFactor( 2.1, 2.4, 20, 25, 1.1134083821 ),
scaleFactor( 2.1, 2.4, 60, 90, 0.992520345769 ),
scaleFactor( 2.1, 2.4, 50, 60, 1.00908832239 ),
scaleFactor( 2.1, 2.4, 40, 50, 1.0267340195 ),
scaleFactor( 2.1, 2.4, 90, 140, 1.04942096158 ),
scaleFactor( 2.1, 2.4, 35, 40, 1.04866260908 ),
scaleFactor( 2.1, 2.4, 140, 300, 0.894756597947 ),
# Dummy scale factors for where none are provided
scaleFactor( 0, 2.4, 300, 1000000, 1 ), # High pt
scaleFactor( 2.4, 10, 10, 1000000, 1 ), # Large eta
]
muon7TeVScaleFactors = [
scaleFactor( 1.2, 10, 10, 20, 0.994514170516 ),
scaleFactor( 1.2, 10, 80, 250, 0.990876331253 ),
scaleFactor( 1.2, 10, 20, 30, 1.0014788215 ),
scaleFactor( 1.2, 10, 50, 60, 0.998511209452 ),
scaleFactor( 1.2, 10, 40, 50, 1.00204337349 ),
scaleFactor( 1.2, 10, 60, 80, 0.994487765818 ),
scaleFactor( 1.2, 10, 30, 40, 1.00232011278 ),
scaleFactor( 0, 1.2, 10, 20, 0.964435887378 ),
scaleFactor( 0, 1.2, 80, 250, 0.996798528141 ),
scaleFactor( 0, 1.2, 20, 30, 0.979669551678 ),
scaleFactor( 0, 1.2, 50, 60, 0.991581251912 ),
scaleFactor( 0, 1.2, 40, 50, 0.992806774333 ),
scaleFactor( 0, 1.2, 60, 80, 0.991186501183 ),
scaleFactor( 0, 1.2, 30, 40, 0.987894599962 ),
scaleFactor( 1.2, 10, 250, 1000000, 1 ),
# Dummy scale factors for where none are provided
scaleFactor( 0, 10, 250, 1000000, 1 ), # High pt
]
electron8TeVScaleFactors = [
scaleFactor( 0, 0.8, 20, 30, 0.949*0.987),
scaleFactor( 0, 0.8, 30, 40, 0.939*0.987),
scaleFactor( 0, 0.8, 40, 50, 0.950*0.997),
scaleFactor( 0, 0.8, 50, 200, 0.957*0.998),
scaleFactor( 0.8, 1.478, 20, 30, 0.990*0.964),
scaleFactor( 0.8, 1.478, 30, 40, 0.920*0.964),
scaleFactor( 0.8, 1.478, 40, 50, 0.949*0.980),
scaleFactor( 0.8, 1.478, 50, 200, 0.959*0.988),
scaleFactor( 1.478, 2.5, 20, 30, 0.857*1.004),
scaleFactor( 1.478, 2.5, 30, 40, 0.907*1.004),
scaleFactor( 1.478, 2.5, 40, 50, 0.937*1.033),
scaleFactor( 1.478, 2.5, 50, 200, 0.954*0.976),
# Dummy scale factors for where none are provided
scaleFactor( 0, 2.5, 200, 1000000, 1 ), # High pt
scaleFactor( 2.5, 10, 20, 1000000, 1 ), # Large eta
]
```
#### File: experimental/fitTest/newSimpleFitTest.py
```python
import unittest
from tools.Fitting import Minuit, FitData, FitDataCollection
from rootpy.plotting import Hist, Canvas, Legend
from math import sqrt
from ROOT import TH1, gStyle, TVirtualPad
from templates_electron import *
from initial_values_electron_eta import *
gStyle.SetOptStat(0)
useT1 = 1
useT2 = 1
useT3 = 1
useT4 = 1
useTemplatesFromFile = True
useDataFromFile = False
variable = 'absolute_eta'
whichBinFromFile = 1
t1Name = 'TTJet'
t2Name = 'SingleTop'
t3Name = 'V+Jets'
t4Name = 'QCD'
nBins = len( inputTemplates[variable]['data'][whichBinFromFile] )
drawScancan = False
nTemplates = useT1 + useT2 + useT3 + useT4
nData = 20000
nTemplate = 1000000
t1Scale = 1
t2Scale = 1
t3Scale = 1
t4Scale = 1
# import numpy as np
def getMax(histograms):
maximum = 0
for hist in histograms:
current_max = hist.GetMaximum()
if current_max > maximum:
maximum = current_max
return maximum
def checkFitResults( target, fitResult ):
if not (abs( target - fitResult[0] ) < fitResult[1]):
if (abs( target - fitResult[0] ) < 2*fitResult[1]):
print 'Almost ok FIT'
else:
print 'BAD FIT'
print target, fitResult[0]
pass
return
def getTemplatesFromFile():
t1Template = inputTemplates[variable][t1Name][whichBinFromFile]
t2Template = inputTemplates[variable][t2Name][whichBinFromFile]
t3Template = inputTemplates[variable][t3Name][whichBinFromFile]
t4Template = inputTemplates[variable][t4Name][whichBinFromFile]
return t1Template, t2Template, t3Template, t4Template
def getDataFromFile():
dataTemplate = inputTemplates[variable]['data'][whichBinFromFile]
h_data = Hist(nBins,0,nBins, title = 'data' )
for bin in range( 1, nBins+1 ):
h_data.SetBinContent( bin, dataTemplate[bin-1])
pass
h_data.Scale(absolute_eta_initialValues['data'][whichBinFromFile][0])
return h_data
# Define the templates
h_t1Shape = Hist(nBins,0,nBins, title ='t1Shape' )
h_t2Shape = Hist(nBins,0,nBins, title ='t2Shape' )
h_t3Shape = Hist(nBins,0,nBins, title ='t3Shape' )
h_t4Shape = Hist(nBins,0,nBins, title ='t4Shape' )
h_t1 = Hist(nBins,0,nBins, title =t1Name )
h_t2 = Hist(nBins,0,nBins, title =t2Name )
h_t3 = Hist(nBins,0,nBins, title =t3Name )
h_t4 = Hist(nBins,0,nBins, title =t4Name )
h_data = Hist(nBins,0,nBins, title = 'data' )
if useTemplatesFromFile:
templates = getTemplatesFromFile()
for bin in range( 1, nBins+1 ):
h_t1.SetBinContent( bin, templates[0][bin-1] )
h_t2.SetBinContent( bin, templates[1][bin-1] )
h_t3.SetBinContent( bin, templates[2][bin-1] )
h_t4.SetBinContent( bin, templates[3][bin-1] )
pass
h_t1.Scale( absolute_eta_initialValues[t1Name][whichBinFromFile][0])
h_t2.Scale( absolute_eta_initialValues[t2Name][whichBinFromFile][0])
h_t3.Scale( absolute_eta_initialValues[t3Name][whichBinFromFile][0])
h_t4.Scale( absolute_eta_initialValues[t4Name][whichBinFromFile][0])
else :
# Fill the histograms
h_t1Shape.SetBinContent(1,20)
h_t1Shape.SetBinContent(2,20)
h_t1Shape.SetBinContent(3,20)
h_t1Shape.SetBinContent(4,50)
h_t1Shape.SetBinContent(5,50)
h_t1Shape.SetBinContent(6,100)
h_t1Shape.SetBinContent(7,100)
h_t1Shape.SetBinContent(8,50)
h_t1Shape.SetBinContent(9,50)
h_t1Shape.SetBinContent(10,40)
h_t2Shape.SetBinContent(1,0)
h_t2Shape.SetBinContent(2,90)
h_t2Shape.SetBinContent(3,0)
h_t2Shape.SetBinContent(4,30)
h_t2Shape.SetBinContent(5,30)
h_t2Shape.SetBinContent(6,20)
h_t2Shape.SetBinContent(7,20)
h_t2Shape.SetBinContent(8,20)
h_t2Shape.SetBinContent(9,10)
h_t2Shape.SetBinContent(10,10)
h_t3Shape.SetBinContent(1,20)
h_t3Shape.SetBinContent(2,20)
h_t3Shape.SetBinContent(3,20)
h_t3Shape.SetBinContent(4,50)
h_t3Shape.SetBinContent(5,50)
h_t3Shape.SetBinContent(6,100)
h_t3Shape.SetBinContent(7,100)
h_t3Shape.SetBinContent(8,50)
h_t3Shape.SetBinContent(9,50)
h_t3Shape.SetBinContent(10,40)
if useT1: h_t1.FillRandom( h_t1Shape, nTemplate )
if useT2: h_t2.FillRandom( h_t2Shape, nTemplate )
if useT3: h_t3.FillRandom( h_t3Shape, nTemplate )
if useT4: h_t4.FillRandom( h_t4Shape, nTemplate )
pass
# h_data.Scale(nData)
# h_data.FillRandom( t1Scale * h_t1Shape + t2Scale * h_t2Shape + t3Scale * h_t3Shape, nData )
fillingHistogram = 0
if useT1: fillingHistogram += t1Scale * h_t1Shape
if useT2: fillingHistogram += t2Scale * h_t2Shape
if useT3: fillingHistogram += t3Scale * h_t3Shape
if useT4: fillingHistogram += t4Scale * h_t4Shape
if useDataFromFile:
h_data = getDataFromFile()
else:
# h_data.FillRandom( ( h_t1 * t1Scale + h_t2 * t2Scale + h_t3 * t3Scale ), nData )
# print 'Integral :',h_data.Integral()
# h_data = h_t1 * t1Scale + h_t2 * t2Scale + h_t3 * t3Scale
# h_data.FillRandom( h_t3, nData )
dataFillingHistogram=0
if useT1: dataFillingHistogram += h_t1.Clone()
if useT2: dataFillingHistogram += h_t2.Clone()
if useT3: dataFillingHistogram += h_t3.Clone()
if useT4: dataFillingHistogram += h_t4.Clone()
# h_data = dataFillingHistogram
h_data = h_t1 * 1.3
# h_data.Scale(absolute_eta_initialValues['data'][whichBinFromFile][0] / h_data.Integral() )
# h_data.FillRandom( dataFillingHistogram, int(absolute_eta_initialValues['data'][whichBinFromFile][0]) )
# h_data.FillRandom( dataFillingHistogram, int(absolute_eta_initialValues['data'][whichBinFromFile][0]) )
pass
# for bin in range (0,nBins+1):
# # h_data.SetBinContent( bin, t1Scale * h_t1.GetBinContent( bin ) + t2Scale*h_t2.GetBinContent( bin ) + t3Scale*h_t3.GetBinContent( bin ) )
# h_data.SetBinError(bin, sqrt(h_data.GetBinContent(bin)))
# h_t1.SetBinError( bin, sqrt(h_t1.GetBinContent(bin)))
# h_t2.SetBinError( bin, sqrt(h_t2.GetBinContent(bin)))
# h_t3.SetBinError( bin, sqrt(h_t3.GetBinContent(bin)))
# h_t4.SetBinError( bin, sqrt(h_t4.GetBinContent(bin)))
# pass
# Make pretty
h_t1.SetLineColor(4)
h_t2.SetLineColor(8)
h_t3.SetLineColor(6)
h_t4.SetLineColor(7)
h_data.SetLineColor(1)
h_data.SetMarkerColor(1)
ymax = getMax( [h_data, h_t1, h_t2, h_t3] )
ymax = ymax*1.1
h_data.GetYaxis().SetRangeUser(0,ymax)
h_t1.GetYaxis().SetRangeUser(0,ymax)
h_t2.GetYaxis().SetRangeUser(0,ymax)
h_t3.GetYaxis().SetRangeUser(0,ymax)
h_t4.GetYaxis().SetRangeUser(0,ymax)
c = Canvas()
c.Divide(2)
c.cd(1)
h_data.Draw('PE')
h_t1.Draw('SAME HIST')
h_t2.Draw('SAME HIST')
h_t3.Draw('SAME HIST')
h_t4.Draw('SAME HIST')
templates = {
}
if useT1: templates['t1'] = h_t1
if useT2: templates['t2'] = h_t2
if useT3: templates['t3'] = h_t3
if useT4: templates['t4'] = h_t4
fit_data = FitData( h_data, templates, fit_boundaries = ( 0, h_data.nbins() ) )
fit_collection = FitDataCollection()
fit_collection.add( fit_data )
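# Run a binned log-likelihood template fit; the fitted yield, its uncertainty and a likelihood scan for each template are read back via readResults().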
minuit_fitter = Minuit( fit_collection, method = 'logLikelihood', verbose = True )
minuit_fitter.fit()
results = minuit_fitter.readResults()
c.cd(2)
ymax = h_data.GetBinContent( h_data.GetMaximumBin() ) * 1.1
h_data.GetYaxis().SetRangeUser(0,ymax)
h_data.Draw('PE')
leg = Legend(nTemplates+2)
leg.AddEntry( h_data, style='LEP')
h_tSumAfter=0
print '----> Target \t Fit Result'
if useT1:
h_t1After = h_t1.Clone()
h_t1After.Scale( results['t1'][0] / h_t1.Integral() )
h_t1After.Draw('SAME HIST')
h_tSumAfter += h_t1After
leg.AddEntry( h_t1After, style='L')
t1AfterCont = h_t1.Integral() * t1Scale * h_data.Integral() / ( h_t1.Integral() * t1Scale + h_t2.Integral() * t2Scale + h_t3.Integral() * t3Scale )
print '%s : \t %.3g \t %.3g +/- %.3g' % ( t1Name, t1AfterCont,results['t1'][0],results['t1'][1] )
scan1 = results['t1'][2]
pass
if useT2:
h_t2After = h_t2.Clone()
h_t2After.Scale( results['t2'][0] / h_t2.Integral() )
h_t2After.Draw('SAME HIST')
h_tSumAfter += h_t2After
leg.AddEntry( h_t2After, style='L')
t2AfterCont = h_t2.Integral() * t2Scale * h_data.Integral() / ( h_t1.Integral() * t1Scale + h_t2.Integral() * t2Scale + h_t3.Integral() * t3Scale )
print '%s : \t %.3g \t %.3g +/- %.3g' % ( t2Name, t2AfterCont,results['t2'][0],results['t2'][1] )
scan2 = results['t2'][2]
pass
if useT3:
h_t3After = h_t3.Clone()
h_t3After.Scale( results['t3'][0] / h_t3.Integral() )
h_t3After.Draw('SAME HIST')
h_tSumAfter += h_t3After
leg.AddEntry( h_t3After, style='L')
t3AfterCont = h_t3.Integral() * t3Scale * h_data.Integral() / ( h_t1.Integral() * t1Scale + h_t2.Integral() * t2Scale + h_t3.Integral() * t3Scale )
print '%s : \t %.3g \t %.3g +/- %.3g' % ( t3Name, t3AfterCont, results['t3'][0],results['t3'][1] )
scan3 = results['t3'][2]
pass
if useT4:
h_t4After = h_t4.Clone()
h_t4After.Scale( results['t4'][0] / h_t4.Integral() )
h_t4After.Draw('SAME HIST')
h_tSumAfter += h_t4After
leg.AddEntry( h_t4After, style='L')
t4AfterCont = h_t4.Integral() * t4Scale * h_data.Integral() / ( h_t1.Integral() * t1Scale + h_t2.Integral() * t2Scale + h_t4.Integral() * t4Scale )
print '%s : \t %.3g \t %.3g +/- %.3g' % ( t4Name, t4AfterCont, results['t4'][0],results['t4'][1] )
scan4 = results['t4'][2]
pass
h_tSumAfter.SetLineColor(2)
h_tSumAfter.SetLineStyle(7)
h_tSumAfter.SetMarkerSize(0)
h_tSumAfter.Draw('SAME HIST')
chis = 0
for bin in range(1, h_data.nbins()+1):
# print ( h_data.GetBinContent(bin) - h_tSumAfter.GetBinContent(bin) )**2
chis += ( h_data.GetBinContent(bin) - h_tSumAfter.GetBinContent(bin) )**2
pass
print 'CHI 2 :',chis
# ymax = getMax( [h_data, h_t1After, h_t2After, h_t3After] )
# h_data.GetYaxis().SetRangeUser(0,ymax)
# h_t1After.GetYaxis().SetRangeUser(0,ymax)
# h_t2After.GetYaxis().SetRangeUser(0,ymax)
# h_t3After.GetYaxis().SetRangeUser(0,ymax)
leg.AddEntry( h_tSumAfter, style='L', label='Sum')
leg.Draw()
c.Update()
if drawScancan:
scancan = Canvas()
scancan.Divide(nTemplates)
scancan.SetLogy()
nCan = 1
if useT1:
scancan.cd(nCan)
# scan1.SetMaximum(scan1.GetMaximum()/100)
scan1.SetMarkerStyle(20)
# scan1.SetMarkerSize(1)
scan1.Draw('AP')
nCan = nCan+1
pass
if useT2:
scancan.cd(nCan)
scan2.SetMaximum(1000)
scan2.SetMarkerStyle(20)
# scan2.SetMarkerSize(20)
scan2.Draw('AP')
nCan = nCan+1
pass
if useT3:
scancan.cd(nCan)
scan3.SetMaximum(1000)
scan3.SetMarkerStyle(20)
# scan3.SetMarkerSize(20)
scan3.Draw('AP')
nCan = nCan+1
pass
if useT4:
scancan.cd(nCan)
scan4.SetMaximum(1000)
scan4.SetMarkerStyle(20)
# scan3.SetMarkerSize(20)
scan4.Draw('AP')
nCan = nCan+1
pass
scancan.Update()
pass
# contour1 = results['contour'][0]
# contour2 = results['contour'][1]
# concan = Canvas()
# contour2.SetFillColor(42)
# contour2.Draw('ALF')
# contour1.SetFillColor(38)
# contour1.Draw('LF')
# concan.Update()
# checkFitResults( t1AfterCont, results['t1'] )
# checkFitResults( t2AfterCont, results['t2'] )
# checkFitResults( t3AfterCont, results['t3'] )
raw_input()
```
#### File: DailyPythonScripts/experimental/parsing_script_events_number.py
```python
import sys, string
import glob
from math import *
events = [ ]
def getList(file):
input = open(file, "r")
lines = input.readlines()
N_events = 0
for line in lines:
columns = string.split(line)
N_events += int(columns[5])
print "Number of processed events: ", N_events
if __name__ == "__main__":
print "Welcome to crab number of processed events parsing script."
args = sys.argv
if not len(args) >= 2:
print "Please specify a file with a table to get the numbers of events from."
sys.exit()
file = sys.argv[1]
print "Looking at the table in the file", file
getList(file)
```
#### File: DailyPythonScripts/experimental/roofit_expert.py
```python
from __future__ import division
from optparse import OptionParser
from math import sqrt
import sys
# rootpy
from rootpy.io import File
from ROOT import RooFit, RooRealVar, RooDataHist, RooArgList, RooHistPdf, RooArgSet, RooAddPdf
from ROOT import RooChi2Var, RooFormulaVar, RooMinuit, TCanvas, RooPlot, RooGaussian, RooProdPdf, RooLinkedList
from config.variable_binning import variable_bins_ROOT
from tools.Calculation import decombine_result
from uncertainties import ufloat
from config import XSectionConfig
from config.summations_common import b_tag_summations
# copied from 01_get_fit_results.py
def get_histogram(input_file, histogram_path, b_tag_bin=''):
b_tag_bin_sum_rules = b_tag_summations
histogram = None
if b_tag_bin in b_tag_bin_sum_rules.keys(): # summing needed
b_tag_bins_to_sum = b_tag_bin_sum_rules[b_tag_bin]
histogram = input_file.Get(histogram_path + '_' + b_tag_bins_to_sum[0]).Clone()
for bin_i in b_tag_bins_to_sum[1:]:
histogram += input_file.Get(histogram_path + '_' + bin_i)
else:
if b_tag_bin == '':
histogram = input_file.Get(histogram_path)
else:
histogram = input_file.Get(histogram_path + '_' + b_tag_bin)
return histogram.Clone()
def get_histograms(channel, input_files, variable, met_type, variable_bin, b_tag_bin, rebin=1):
global b_tag_bin_VJets
global electron_control_region, muon_control_region
histograms = {}
if not variable in measurement_config.histogram_path_templates.keys():
print 'Fatal Error: unknown variable ', variable
sys.exit()
abs_eta = ''
abs_eta_data = ''
abs_eta_template = measurement_config.histogram_path_templates[variable]
if variable == 'HT':
abs_eta = abs_eta_template % (analysis_type[channel], variable_bin, channel)
abs_eta_data = abs_eta
else:
if measurement_config.centre_of_mass == 8:
abs_eta = abs_eta_template % (analysis_type[channel], met_type, variable_bin, channel)
else: # hot fix for 2011 data. Needs reprocessing for nicer paths
lepton = channel.title()
abs_eta = abs_eta_template % (analysis_type[channel], lepton, met_type, variable_bin, channel)
if 'JetRes' in met_type:
abs_eta_data = abs_eta.replace('JetResDown', '')
abs_eta_data = abs_eta_data.replace('JetResUp', '')
if 'patPFMet' in met_type:
abs_eta = abs_eta.replace('patPFMet', 'PFMET')
else:
abs_eta_data = abs_eta
for sample, file_name in input_files.iteritems():
h_abs_eta = None
if sample == 'data':
h_abs_eta = get_histogram(file_name, abs_eta_data, b_tag_bin)
elif sample == 'V+Jets':
# extracting the V+Jets template from its specific b-tag bin (>=0 by default) and scaling it to analysis b-tag bin
h_abs_eta = get_histogram(file_name, abs_eta, b_tag_bin)
h_abs_eta_VJets_specific_b_tag_bin = get_histogram(file_name, abs_eta, b_tag_bin_VJets)
try:
h_abs_eta_VJets_specific_b_tag_bin.Scale(h_abs_eta.Integral() / h_abs_eta_VJets_specific_b_tag_bin.Integral())
h_abs_eta = h_abs_eta_VJets_specific_b_tag_bin
except:
print 'WARNING: V+Jets template from ' + str(file_name) + ', histogram ' + abs_eta + ' in ' + b_tag_bin_VJets + \
' b-tag bin is empty. Using central bin (' + b_tag_bin + '), integral = ' + str(h_abs_eta.Integral())
else:
h_abs_eta = get_histogram(file_name, abs_eta, b_tag_bin)
h_abs_eta.Rebin(rebin)
histograms[sample] = h_abs_eta
if channel == 'electron':
global electron_QCD_MC_file
h_abs_eta_mc = get_histogram(electron_QCD_MC_file, abs_eta, b_tag_bin)
h_abs_eta_mc.Rebin(rebin)
# data-driven QCD template extracted from all-inclusive eta distributions
abs_eta = 'TTbar_plus_X_analysis/%s/Ref selection/Electron/electron_AbsEta' % (analysis_type[channel])
abs_eta = abs_eta.replace('Ref selection', electron_control_region)
h_abs_eta = get_histogram(input_files['data'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['V+Jets'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['TTJet'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['SingleTop'], abs_eta, '0btag')
electron_QCD_normalisation_factor = 1
h_abs_eta.Rebin(20)
if measurement_config.centre_of_mass == 8:
electron_QCD_normalisation_factor = h_abs_eta_mc.Integral() / h_abs_eta.Integral()
if electron_QCD_normalisation_factor == 0:
electron_QCD_normalisation_factor = 1 / h_abs_eta.Integral()
if measurement_config.centre_of_mass == 7:
# scaling to 10% of data
electron_QCD_normalisation_factor = 0.1 * histograms['data'].Integral() / h_abs_eta.Integral()
h_abs_eta.Scale(electron_QCD_normalisation_factor)
histograms['QCD'] = h_abs_eta
if channel == 'muon':
# data-driven QCD template extracted from all-inclusive eta distributions
global muon_QCD_file, muon_QCD_MC_file
h_abs_eta_mc = get_histogram(muon_QCD_MC_file, abs_eta, b_tag_bin)
h_abs_eta_mc.Rebin(rebin)
abs_eta = 'TTbar_plus_X_analysis/%s/Ref selection/Muon/muon_AbsEta' % (analysis_type[channel])
abs_eta = abs_eta.replace('Ref selection', muon_control_region)
# abs_eta = measurement_config.special_muon_histogram
# h_abs_eta = get_histogram(muon_QCD_file, abs_eta, '')
h_abs_eta = get_histogram(input_files['data'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['TTJet'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['V+Jets'], abs_eta, '0btag')
h_abs_eta = h_abs_eta - get_histogram(input_files['SingleTop'], abs_eta, '0btag')
muon_QCD_normalisation_factor = 1
h_abs_eta.Rebin(20)
if measurement_config.centre_of_mass == 8:
muon_QCD_normalisation_factor = h_abs_eta_mc.Integral() / h_abs_eta.Integral()
if muon_QCD_normalisation_factor == 0:
muon_QCD_normalisation_factor = 1 / h_abs_eta.Integral()
if measurement_config.centre_of_mass == 7:
muon_QCD_normalisation_factor = 0.05 * histograms['data'].Integral() / h_abs_eta.Integral()
h_abs_eta.Scale(muon_QCD_normalisation_factor)
histograms['QCD'] = h_abs_eta
return histograms
def get_fitted_normalisation_from_ROOT(channel, input_files, variable, met_type, b_tag_bin):
results = {}
initial_values = {}
templates = {}
for variable_bin in variable_bins_ROOT[variable]:
histograms = get_histograms(channel,
input_files,
variable=variable,
met_type=met_type,
variable_bin=variable_bin,
b_tag_bin=b_tag_bin,
rebin=measurement_config.rebin
)
# create signal histograms
h_eta_signal = histograms['TTJet'] + histograms['SingleTop']
N_ttbar_before_fit = histograms['TTJet'].Integral()
N_SingleTop_before_fit = histograms['SingleTop'].Integral()
N_vjets_before_fit = histograms['V+Jets'].Integral()
N_qcd_before_fit = histograms['QCD'].Integral()
N_signal_before_fit = N_ttbar_before_fit + N_SingleTop_before_fit
N_ttbar_error_before_fit = sum(histograms['TTJet'].errors())
N_SingleTop_error_before_fit = sum(histograms['SingleTop'].errors())
N_vjets_error_before_fit = sum(histograms['V+Jets'].errors())
N_QCD_error_before_fit = sum(histograms['QCD'].errors())
if (N_SingleTop_before_fit != 0):
TTJet_SingleTop_ratio = N_ttbar_before_fit / N_SingleTop_before_fit
else:
print 'Bin ', variable_bin, ': ttbar/singleTop ratio undefined for %s channel! Setting to 0.' % channel
TTJet_SingleTop_ratio = 0
leptonAbsEta = RooRealVar("leptonAbsEta", "leptonAbsEta", 0., 2.4)
# this has to move to tools/Fitting.py
vars = RooArgList()
vars.add(leptonAbsEta)
vars_set = RooArgSet()
vars_set.add(leptonAbsEta)
n_event_obs = histograms['data'].Integral()
lowerBound = 0.
upperBound = n_event_obs + 10 * sqrt(n_event_obs)
n_init = n_event_obs / 2.
data = RooDataHist("data", "dataset with leptonAbsEta", vars, histograms['data'])
rh_vj = RooDataHist("rh_vj", "vj", vars, histograms['V+Jets'])
rh_qcd = RooDataHist("rh_qcd", "qcd", vars, histograms['QCD'])
rh_signal = RooDataHist("rh_signal", "signal", vars, h_eta_signal)
pdf_vj = RooHistPdf ("pdf_vj", "V+Jets pdf", vars_set, rh_vj, 0)
pdf_qcd = RooHistPdf("pdf_qcd", "QCD pdf ", vars_set, rh_qcd, 0)
pdf_signal = RooHistPdf("pdf_signal", "single top pdf", vars_set, rh_signal, 0)
# RooRealVar(const char *name, const char *title, Double_t value, Double_t minValue, Double_t maxValue, const char *unit) :
nSignal = RooRealVar("nSignal", "number of single top + ttbar events", N_signal_before_fit, lowerBound, upperBound, "event")
nvj = RooRealVar ("nvj", "number of V+Jets bgnd events", N_vjets_before_fit, lowerBound, upperBound, "event")
        nqcd = RooRealVar("nqcd", "number of QCD bgnd events", N_qcd_before_fit, lowerBound, upperBound, "event") # start the fit from the pre-fit QCD yield, not from its uncertainty
model = RooAddPdf("model", "sig+vj+qcd",
RooArgList(pdf_signal, pdf_vj, pdf_qcd),
RooArgList(nSignal, nvj, nqcd)
)
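        # Gaussian constraint terms keep the background yields near their pre-fit estimates:
        # V+Jets within 50% of its expectation, QCD within a factor of two (one-sigma widths of the constraints).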
vj_constraint = RooGaussian("nvj_constraint", "nvj_constraint", nvj, RooFit.RooConst(N_vjets_before_fit), RooFit.RooConst(0.5 * N_vjets_before_fit))
qcd_constraint = RooGaussian("nqcd_constraint", "nqcd_constraint", nqcd, RooFit.RooConst(N_qcd_before_fit), RooFit.RooConst(2 * N_qcd_before_fit))
model_with_constraints = RooProdPdf("model_with_constraints", "model with gaussian constraints",
RooArgSet(model, vj_constraint, qcd_constraint), RooLinkedList())
model_with_constraints.fitTo(data, RooFit.Minimizer("Minuit2", "Migrad")) #WARNING: number of cores changes the results!!!
# nll = model.createNLL(data, RooFit.NumCPU(2))
# RooMinuit(nll).migrad()
# frame1 = nSignal.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nSignal"))
# nll.plotOn(frame1, RooFit.ShiftToZero())
# frame2 = nvj.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nvj"))
# nll.plotOn(frame2, RooFit.ShiftToZero())
# frame3 = nqcd.frame(RooFit.Bins(100), RooFit.Range(lowerBound, n_event_obs), RooFit.Title("LL and profileLL in nqcd"))
# nll.plotOn(frame3, RooFit.ShiftToZero())
#
# pll_nSignal = nll.createProfile(nSignal)
# pll_nSignal.plotOn(frame1, RooFit.LineColor(2))
# frame1.SetMinimum(0)
# frame1.SetMaximum(3)
#
# pll_nvj = nll.createProfile(nvj)
# pll_nvj.plotOn(frame2, RooFit.LineColor(2))
# frame2.SetMinimum(0)
# frame2.SetMaximum(3)
#
# pll_nqcd = nll.createProfile(nqcd)
# pll_nqcd.plotOn(frame3, RooFit.LineColor(2))
# frame3.SetMinimum(0)
# frame3.SetMaximum(3)
# c = TCanvas("profilell","profilell",1200, 400)
# c.Divide(3)
# c.cd(1)
# frame1.Draw()
# c.cd(2)
# frame2.Draw()
# c.cd(3)
# frame3.Draw()
# c.SaveAs('profileLL.png')
# model.fitTo(data, RooFit.Minimizer("Minuit2", "Migrad"), RooFit.NumCPU(1))#WARNING: number of cores changes the results!!!
fit_results = {}
fit_results['signal'] = (nSignal.getVal(), nSignal.getError())
fit_results['QCD'] = ufloat(nqcd.getVal(), nqcd.getError())
fit_results['V+Jets'] = ufloat(nvj.getVal(), nvj.getError())
N_ttbar, N_SingleTop = decombine_result(fit_results['signal'], TTJet_SingleTop_ratio)
fit_results['signal'] = ufloat(nSignal.getVal(), nSignal.getError())
fit_results['TTJet'] = ufloat(N_ttbar)
fit_results['SingleTop'] = ufloat(N_SingleTop)
if results == {}: # empty
for sample in fit_results.keys():
results[sample] = [fit_results[sample]]
else:
for sample in fit_results.keys():
results[sample].append(fit_results[sample])
return results, None, None
if __name__ == '__main__':
# setup
parser = OptionParser()
parser.add_option("-p", "--path", dest="path", default='data',
help="set output path for JSON files")
parser.add_option("-v", "--variable", dest="variable", default='MET',
help="set the variable to analyse (MET, HT, ST, MT)")
parser.add_option("-b", "--bjetbin", dest="bjetbin", default='2m',
help="set b-jet multiplicity for analysis. Options: exclusive: 0-3, inclusive (N or more): 0m, 1m, 2m, 3m, 4m")
parser.add_option("--bjetbin-vjets", dest="bjetbin_VJets", default='0m',
help="set b-jet multiplicity for V+Jets samples. Options: exclusive: 0-3, inclusive (N or more): 0m, 1m, 2m, 3m, 4m")
parser.add_option("-m", "--metType", dest="metType", default='type1',
help="set MET type for analysis of MET, ST or MT")
parser.add_option("-c", "--centre-of-mass-energy", dest="CoM", default=8, type=int,
help="set the centre of mass energy for analysis. Default = 8 [TeV]")
(options, args) = parser.parse_args()
measurement_config = XSectionConfig(options.CoM)
# caching of variables for shorter access
ttbar_theory_systematic_prefix = measurement_config.ttbar_theory_systematic_prefix
vjets_theory_systematic_prefix = measurement_config.vjets_theory_systematic_prefix
generator_systematics = measurement_config.generator_systematics
categories_and_prefixes = measurement_config.categories_and_prefixes
met_systematics_suffixes = measurement_config.met_systematics_suffixes
analysis_types = measurement_config.analysis_types
translate_options = measurement_config.translate_options
generator_systematics = measurement_config.generator_systematics
categories_and_prefixes = measurement_config.categories_and_prefixes
met_systematics_suffixes = met_systematics_suffixes
analysis_type = analysis_types
variable = options.variable
met_type = translate_options[options.metType]
b_tag_bin = translate_options[options.bjetbin]
b_tag_bin_VJets = translate_options[options.bjetbin_VJets]
path_to_files = measurement_config.path_to_files
output_path = options.path
# possible options:
# --continue : continue from saved - skips ROOT files, reads from JSON?
# get data from histograms or JSON files
# data and muon_QCD file with SFs are the same for central measurement and all systematics
data_file_electron = File(measurement_config.data_file_electron)
data_file_muon = File(measurement_config.data_file_muon)
muon_QCD_file = File(measurement_config.muon_QCD_file)
SingleTop_file = File(measurement_config.SingleTop_file)
muon_QCD_MC_file = File(measurement_config.muon_QCD_MC_file)
electron_QCD_MC_file = File(measurement_config.electron_QCD_MC_file)
TTJet_file = File(measurement_config.ttbar_category_templates['central'])
VJets_file = File(measurement_config.VJets_category_templates['central'])
electron_control_region = measurement_config.electron_control_region
muon_control_region = measurement_config.muon_control_region
input_files = {
'TTJet': TTJet_file,
'SingleTop': SingleTop_file,
'V+Jets': VJets_file,
'data': data_file_electron,
}
fit_results_electron, initial_values_electron, templates_electron = get_fitted_normalisation_from_ROOT('electron',
input_files={
'TTJet': TTJet_file,
'SingleTop': SingleTop_file,
'V+Jets': VJets_file,
'data': data_file_electron,
},
variable=variable,
met_type=met_type,
b_tag_bin=b_tag_bin,
)
print 'TTJet:', fit_results_electron['TTJet']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['TTJet']))
print
print 'SingleTop:', fit_results_electron['SingleTop']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['SingleTop']))
print
print 'V+Jets:', fit_results_electron['V+Jets']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['V+Jets']))
print
print 'QCD:', fit_results_electron['QCD']
print 'Sum = {:10.2f}'.format(sum(fit_results_electron['QCD']))
```
#### File: DailyPythonScripts/experimental/sum_files_and_histograms.py
```python
from rootpy.logger import logging
from ROOT import TFile, gROOT
from argparse import ArgumentParser
from tools.file_utilities import make_folder_if_not_exists, get_files_in_path, merge_ROOT_files, get_process_from_file
File = TFile.Open
gcd = gROOT.cd
def sum_b_tag_bins_in_file(file_in_path):
global existing_bins, to_be_created, existing_histogram_file, input_folder, output_folder
logging.debug('Processing file %s' % file_in_path)
output_file_name = file_in_path.replace('.root', '_summed.root')
output_file_name = output_file_name.replace(input_folder, output_folder)
#run rootinfo on file
#or read the output (histogram list)
input_file = open(existing_histogram_file)
seen_summed_hists = False
    histogram_set, seen_summed_hists = get_set_of_histogram_paths(input_file, seen_summed_hists)
logging.debug('Found %d unique b-tag binned histograms' %len(histogram_set))
if seen_summed_hists:
logging.warn('Summed histograms have been detected. Will skip this part')
return
input_file.close()
directories = []
for path in histogram_set:
histogram_path, histogram_name, b_tag_bin = get_histogram_info_tuple(path)
directories.append(histogram_path)
logging.debug('opening file %s ' % output_file_name)
output_file = File(output_file_name, 'recreate')
cd = output_file.cd
logging.debug( 'creating folder structure')
create_folder_structure(output_file, directories)
logging.debug( 'created folder structure')
logging.debug('opening file %s ' % file_in_path)
input_file = File(file_in_path, 'read')
get_histogram = input_file.Get
logging.debug('opened file')
new_histograms = {}
for histogram in histogram_set:
cd()
logging.debug('Processing histogram: %s' % histogram)
histogram_path, histogram_name, b_tag_bin = get_histogram_info_tuple(histogram)
logging.debug('Found histogram_path %s' % histogram_path)
logging.debug('Found histogram_name %s' % histogram_name)
cd(histogram_path)
existing_histograms = [get_histogram(histogram + '_' + existing_bin).Clone() for existing_bin in existing_bins]
for bin_i, b_tag_bin in enumerate(existing_bins):#write existing b-tag bins
current_histogram_name = histogram_name + '_' + b_tag_bin
existing_histograms[bin_i].Write(current_histogram_name)
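        # Each inclusive bin ('NorMoreBtag(s)') is the cumulative sum of the exclusive bins from this index upwards.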
for bin_i, b_tag_bin in enumerate(to_be_created):#write new b-tag bins
current_histogram_name = histogram_name + '_' + b_tag_bin
new_histogram = existing_histograms[bin_i].Clone(current_histogram_name)
for existing_histogram in existing_histograms[bin_i + 1:]:
new_histogram.Add(existing_histogram)
new_histogram.Write(current_histogram_name)
input_file.Close()
output_file.Close()
logging.debug( 'Finished %s' % file_in_path)
logging.debug( 'Output: %s' % output_file_name)
del new_histograms, histogram_set, input_file, output_file
return
def get_set_of_histogram_paths(input_file, seen_summed_hists = False):
global existing_bins, to_be_created, filter_on_folders, filter_on_histograms
histogram_list = []
add_histogram = histogram_list.append
checked_n_entries = 0
for histogram_path in input_file.readlines():
checked_n_entries += 1
if checked_n_entries % 10000 == 0:
logging.debug( 'Checked %d' %checked_n_entries)
if not filter_string(histogram_path, filter_on_folders):
continue
if not filter_string(histogram_path, filter_on_histograms):
continue
histogram_path = histogram_path.rstrip(' \n')
directory, histogram_name, b_tag_bin = get_histogram_info_tuple(histogram_path)
logging.debug('Searching %s' % histogram_path)
logging.debug('Found directory %s' % directory)
logging.debug('Found histogram_name %s' % histogram_name)
logging.debug('Found b_tag_bin %s' % b_tag_bin)
if b_tag_bin in existing_bins:
histogram_name = '_'.join(histogram_name.split('_')[:-1])
logging.debug('Adding histogram %s' % (directory + histogram_name))
add_histogram(directory + histogram_name)
if b_tag_bin in to_be_created:
seen_summed_hists = True
    return set(histogram_list), seen_summed_hists #only unique paths; the flag is returned explicitly since a boolean argument cannot be modified in place
def filter_string(input_string, filter_list):
accept = False
if not filter_list: #empty list
accept = True
else:
for filter_item in filter_list:
if filter_item in input_string:
accept = True #found a matching entry
break
return accept
def create_folder_structure(root_file, path_list):
get_directory = root_file.Get
for path in path_list:
directories = path.split('/')
current_path = ''
root_file.cd()
for directory in directories:
if current_path == '':
if not get_directory(directory):
root_file.mkdir(directory)
current_path = directory
else:
current_dir = get_directory(current_path)
if not current_dir.Get(directory):
current_dir.mkdir(directory)
current_path += "/" + directory
def get_histogram_info_tuple(histogram_in_path):
histogram_name = histogram_in_path.split('/')[-1]
directory = ''.join(histogram_in_path.rsplit(histogram_name, 1)[:-1])
b_tag_bin = histogram_name.split('_')[-1]
return directory, histogram_name, b_tag_bin
def merge_files_by_process(root_files):
global input_folder, output_folder
electron_qcd_samples = [ 'QCD_Pt-20to30_BCtoE',
'QCD_Pt-30to80_BCtoE',
'QCD_Pt-80to170_BCtoE',
'QCD_Pt-20to30_EMEnriched',
'QCD_Pt-30to80_EMEnriched',
'QCD_Pt-80to170_EMEnriched',
'GJets_HT-40To100',
'GJets_HT-100To200',
'GJets_HT-200']
singleTop_samples = [ 'T_tW-channel',
'T_t-channel',
'T_s-channel',
'Tbar_tW-channel',
'Tbar_t-channel',
'Tbar_s-channel']
wplusjets_samples = [ 'W1Jet', 'W2Jets', 'W3Jets', 'W4Jets']
    # take a copy of the W+Jets list before extending it, otherwise 'WPlusJets' would also pick up the Drell-Yan sample
    vplusjets_samples = wplusjets_samples + ['DYJetsToLL']
diboson_samples = [ 'WWtoAnything', 'WZtoAnything', 'ZZtoAnything']
signal_samples = [ 'TTJet', 'SingleTop']
summations = {
'QCD_Electron':electron_qcd_samples,
'SingleTop' : singleTop_samples,
'WPlusJets' : wplusjets_samples,
'VPlusJets' : vplusjets_samples,
'DiBoson': diboson_samples,
'Signal': signal_samples
}
summation_files = {}
file_template = ''
template_token = '<temp>'
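    # Group the input files by physics process and merge each group into a single ROOT file named after that process.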
for summation, samples in summations.iteritems():
summation_files[summation] = []
for file_in_path in root_files:
process_name = get_process_from_file(file_in_path)
if not file_template:
file_template = file_in_path.replace(process_name, template_token)
file_template = file_template.replace(input_folder, output_folder)
if process_name in samples:
summation_files[summation].append(file_in_path)
for summation, files in summation_files.iteritems():
output_file = file_template.replace(template_token, summation)
merge_ROOT_files(files, output_file)
if __name__ == "__main__":
parser = ArgumentParser(description='Sum b-tag binned histograms and merge files into main processes (e.g. 6 single top samples into one).')
parser.add_argument('input_folder', metavar='input_folder',
help="input folder for histogram files to be summed")
parser.add_argument('output_folder', metavar='output_folder',
help="output folder for the summed histogram files to be written to.",
default = '', nargs='?')
parser.add_argument('--debug',
help="Turn on debug output",
action='store_true')
args = parser.parse_args()
if args.debug:
logging.basicConfig()#fancy logger for better error messages
else:
logging.basicConfig(level=logging.WARNING)#turn debug off
existing_histogram_file = 'data/list_of_7TeV_histograms.txt'
existing_bins = ['0btag', '1btag', '2btags','3btags','4orMoreBtags']
to_be_created = ['0orMoreBtag','1orMoreBtag','2orMoreBtags','3orMoreBtags']
filter_on_folders = ['TTbarPlusMetAnalysis']
filter_on_histograms = []
#first sum up the histograms
input_folder = args.input_folder
output_folder = args.output_folder
if not output_folder:
output_folder = input_folder
make_folder_if_not_exists(output_folder)
root_files = get_files_in_path(input_folder, file_ending='.root')
# from multiprocessing import Pool
# pool = Pool(processes=4)
# pool.map_async(sum_b_tag_bins_in_file, root_files)
# pool.close()
# pool.join()
# sum_b_tag_bins_in_file(input_folder + '/' + 'TTJet_5050pb_PFElectron_PFMuon_PF2PATJets_PFMET_MCatNLO.root')
map(sum_b_tag_bins_in_file, root_files)
```
#### File: DailyPythonScripts/experimental/tau_value_determination.py
```python
from __future__ import division
from math import log10, pow
from rootpy.io import File
import matplotlib.pyplot as plt
import matplotlib
from copy import deepcopy
from ROOT import Double, TH1F, TGraph
from config.variable_binning import bin_edges
from tools.file_utilities import read_data_from_JSON
from tools.hist_utilities import value_error_tuplelist_to_hist
from tools.Unfolding import Unfolding, get_unfold_histogram_tuple
from tools.ROOT_utililities import set_root_defaults
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 28}
matplotlib.rc( 'font', **font )
def drange( start, stop, step ):
r = start
while r < stop:
yield r
r += step
def get_tau_from_global_correlation( h_truth, h_measured, h_response, h_data = None ):
tau_0 = 1e-7
tau_max = 0.2
number_of_iterations = 10000
# tau_step = ( tau_max - tau_0 ) / number_of_iterations
optimal_tau = 0
minimal_rho = 9999
bias_scale = 0.
unfolding = Unfolding( h_truth,
h_measured,
h_response,
method = 'RooUnfoldTUnfold',
tau = tau_0 )
if h_data:
unfolding.unfold( h_data )
else: # closure test
unfolding.unfold( h_measured )
# cache functions and save time in the loop
Unfold = unfolding.unfoldObject.Impl().DoUnfold
GetRho = unfolding.unfoldObject.Impl().GetRhoI
# create lists
tau_values = []
rho_values = []
add_tau = tau_values.append
add_rho = rho_values.append
# for current_tau in drange(tau_0, tau_max, tau_step):
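    # Scan tau on a logarithmically spaced grid and keep the value that minimises the average global correlation coefficient rho.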
for current_tau in get_tau_range( tau_0, tau_max, number_of_iterations ):
Unfold( current_tau, h_data, bias_scale )
current_rho = GetRho( TH1F() )
add_tau( current_tau )
add_rho( current_rho )
if current_rho < minimal_rho:
minimal_rho = current_rho
optimal_tau = current_tau
return optimal_tau, minimal_rho, tau_values, rho_values
def draw_global_correlation( tau_values, rho_values, tau, rho, channel, variable ):
plt.figure( figsize = ( 16, 16 ), dpi = 200, facecolor = 'white' )
plt.plot( tau_values, rho_values )
plt.xscale('log')
plt.title(r'best $\tau$ from global correlation')
plt.xlabel( r'$\tau$', fontsize = 40 )
plt.ylabel( r'$\bar{\rho}(\tau)$', fontsize = 40 )
ax = plt.axes()
ax.annotate( r"$\tau = %.3g$" % tau,
xy = ( tau, rho ), xycoords = 'data',
xytext = ( 0.0010, 0.5 ), textcoords = 'data',
arrowprops = dict( arrowstyle = "fancy,head_length=0.4,head_width=0.4,tail_width=0.4",
connectionstyle = "arc3" ),
size = 40,
)
if use_data:
plt.savefig( 'plots/tau_from_global_correlation_%s_channel_%s_DATA.png' % ( channel, variable ) )
else:
plt.savefig( 'plots/tau_from_global_correlation_%s_channel_%s_MC.png' % ( channel, variable ) )
def get_tau_from_L_shape( h_truth, h_measured, h_response, h_data = None ):
tau_min = 1e-7
tau_max = 0.2
number_of_scans = 10000
# the best values depend on the variable!!!
# number_of_scans = 60
# tau_min = 1e-6
# tau_max = 1e-7 * 20000 + tau_min
# tau_min = 1e-7
# tau_max = 1e-2
unfolding = Unfolding( h_truth,
h_measured,
h_response,
method = 'RooUnfoldTUnfold',
tau = tau_min )
if h_data:
unfolding.unfold( h_data )
else: # closure test
unfolding.unfold( h_measured )
l_curve = TGraph()
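    # ScanLcurve scans tau in [tau_min, tau_max]; the tau at the kink (maximum curvature) of the L-curve is read back with GetTau().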
unfolding.unfoldObject.Impl().ScanLcurve( number_of_scans, tau_min, tau_max, l_curve )
best_tau = unfolding.unfoldObject.Impl().GetTau()
x_value = unfolding.unfoldObject.Impl().GetLcurveX()
y_value = unfolding.unfoldObject.Impl().GetLcurveY()
return best_tau, l_curve, x_value, y_value
def draw_l_shape( l_shape, best_tau, x_value, y_value, channel, variable ):
total = l_shape.GetN()
x_values = []
y_values = []
add_x = x_values.append
add_y = y_values.append
for i in range( 0, total ):
x = Double( 0 )
y = Double( 0 )
l_shape.GetPoint( i, x, y )
add_x( x )
add_y( y )
plt.figure( figsize = ( 16, 16 ), dpi = 200, facecolor = 'white' )
plt.plot( x_values, y_values )
plt.xlabel( r'log10($\chi^2$)', fontsize = 40 )
plt.ylabel( 'log10(curvature)', fontsize = 40 )
ax = plt.axes()
ax.annotate( r"$\tau = %.3g$" % best_tau,
xy = ( x_value, y_value ), xycoords = 'data',
xytext = ( 0.3, 0.3 ), textcoords = 'figure fraction',
arrowprops = dict( arrowstyle = "fancy,head_length=0.4,head_width=0.4,tail_width=0.4",
connectionstyle = "arc3" ),
size = 40,
)
if use_data:
plt.savefig( 'plots/tau_from_L_shape_%s_channel_%s_DATA.png' % ( channel, variable ) )
else:
plt.savefig( 'plots/tau_from_L_shape_%s_channel_%s_MC.png' % ( channel, variable ) )
def get_data_histogram( channel, variable, met_type ):
fit_result_input = '../../data/8TeV/%(variable)s/fit_results/central/fit_results_%(channel)s_%(met_type)s.txt'
fit_results = read_data_from_JSON( fit_result_input % {'channel': channel, 'variable': variable, 'met_type':met_type} )
fit_data = fit_results['TTJet']
h_data = value_error_tuplelist_to_hist( fit_data, bin_edges[variable] )
return h_data
def get_tau_range( tau_min, tau_max, number_of_points ):
# Use 3 scan points minimum
if number_of_points < 3:
number_of_points = 3
# Setup Vector
result = [0] * number_of_points
# Find the scan points
# Use equidistant steps on a logarithmic scale
    step = ( log10( tau_max ) - log10( tau_min ) ) / ( number_of_points - 1 )
    for i in range ( 0, number_of_points ):
        result[i] = pow( 10., ( log10( tau_min ) + i * step ) )
    return result
if __name__ == '__main__':
set_root_defaults()
use_data = True
input_file_8Tev = '/storage/TopQuarkGroup/mc/8TeV/NoSkimUnfolding/v10/TTJets_MassiveBinDECAY_TuneZ2star_8TeV-madgraph-tauola/unfolding_v10_Summer12_DR53X-PU_S10_START53_V7C-v1_NoSkim/TTJets_nTuple_53X_mc_merged_001.root'
met_type = 'patType1CorrectedPFMet'
# ST and HT have the problem of the overflow bin in the truth/response matrix
# 7 input bins and 8 output bins (includes 1 overflow bin)
variables = ['MET', 'WPT', 'MT' , 'ST', 'HT']
centre_of_mass = 8
ttbar_xsection = 225.2
luminosity = 19712
input_file = File( input_file_8Tev )
    taus_from_global_correlation = {}
taus_from_L_shape = {}
for channel in ['electron', 'muon']:
        taus_from_global_correlation[channel] = {}
taus_from_L_shape[channel] = {}
for variable in variables:
            print 'Doing variable "', variable, '" in', channel, '-channel'
h_truth, h_measured, h_response, _ = get_unfold_histogram_tuple(
inputfile = input_file,
variable = variable,
channel = channel,
met_type = met_type,
centre_of_mass = centre_of_mass,
ttbar_xsection = ttbar_xsection,
luminosity = luminosity )
h_data = None
if use_data:
h_data = get_data_histogram( channel, variable, met_type )
else:
h_data = deepcopy( h_measured )
tau, rho, tau_values, rho_values = get_tau_from_global_correlation( h_truth, h_measured, h_response, h_data )
draw_global_correlation( tau_values, rho_values, tau, rho, channel, variable )
tau, l_curve, x, y = get_tau_from_L_shape( h_truth, h_measured, h_response, h_data )
draw_l_shape( l_curve, tau, x, y, channel, variable )
```
#### File: DailyPythonScripts/experimental/unfoldAndMeasure_2012.py
```python
from __future__ import division
from optparse import OptionParser
import sys, os
from array import array
# rootpy
from ROOT import TFile
from rootpy import asrootpy
from rootpy.io import File
from rootpy.plotting import Hist, Hist2D
# DailyPythonScripts
from config.variable_binning_8TeV import bin_widths, bin_edges
from tools.Calculation import calculate_xsection, calculate_normalised_xsection
from tools.hist_utilities import hist_to_value_error_tuplelist, value_error_tuplelist_to_hist
from tools.Unfolding import Unfolding
from tools.file_utilities import read_data_from_JSON, write_data_to_JSON, make_folder_if_not_exists
import config.RooUnfold as unfoldCfg
luminosity = 5814
ttbar_xsection = 225.19
path_to_files = '/storage/TopQuarkGroup/results/histogramfiles/AN-13-015_V3/'
file_for_unfolding = File(path_to_files + 'unfolding_merged.root', 'read')
file_for_powheg = File(path_to_files + 'unfolding_TTJets_8TeV_powheg.root', 'read')
file_for_mcatnlo = File(path_to_files + 'unfolding_TTJets_8TeV_mcatnlo.root', 'read')
file_for_scaledown = File(path_to_files + 'unfolding_TTJets_8TeV_scaledown.root', 'read')
file_for_scaleup = File(path_to_files + 'unfolding_TTJets_8TeV_scaleup.root', 'read')
file_for_matchingdown = File(path_to_files + 'unfolding_TTJets_8TeV_matchingdown.root', 'read')
file_for_matchingup = File(path_to_files + 'unfolding_TTJets_8TeV_matchingup.root', 'read')
def unfold_results(results, category, channel, h_truth, h_measured, h_response, method):
global variable, path_to_JSON
h_data = value_error_tuplelist_to_hist(results, bin_edges[variable])
unfolding = Unfolding(h_truth, h_measured, h_response, method=method)
#turning off the unfolding errors for systematic samples
if category != 'central':
unfoldCfg.Hreco = 0
h_unfolded_data = unfolding.unfold(h_data)
#export the D and SV distributions
SVD_path = path_to_JSON + '/' + variable + '/unfolding_objects/' + channel + '/kv_' + str(unfoldCfg.SVD_k_value) + '/'
make_folder_if_not_exists(SVD_path)
if method == 'TSVDUnfold':
SVDdist = TFile(SVD_path + method + '_SVDdistributions_' + category + '.root', 'recreate')
directory = SVDdist.mkdir('SVDdist')
directory.cd()
unfolding.unfoldObject.GetD().Write()
unfolding.unfoldObject.GetSV().Write()
# unfolding.unfoldObject.GetUnfoldCovMatrix(data_covariance_matrix(h_data), unfoldCfg.SVD_n_toy).Write()
SVDdist.Close()
else:
SVDdist = TFile(SVD_path + method + '_SVDdistributions_Hreco' + str(unfoldCfg.Hreco) + '_' + category + '.root', 'recreate')
directory = SVDdist.mkdir('SVDdist')
directory.cd()
unfolding.unfoldObject.Impl().GetD().Write()
unfolding.unfoldObject.Impl().GetSV().Write()
h_truth.Write()
h_measured.Write()
h_response.Write()
# unfolding.unfoldObject.Impl().GetUnfoldCovMatrix(data_covariance_matrix(h_data), unfoldCfg.SVD_n_toy).Write()
SVDdist.Close()
#export the whole unfolding object if it doesn't exist
if method == 'TSVDUnfold':
unfolding_object_file_name = SVD_path + method + '_unfoldingObject_' + category + '.root'
else:
unfolding_object_file_name = SVD_path + method + '_unfoldingObject_Hreco' + str(unfoldCfg.Hreco) + '_' + category + '.root'
if not os.path.isfile(unfolding_object_file_name):
unfoldingObjectFile = TFile(unfolding_object_file_name, 'recreate')
directory = unfoldingObjectFile.mkdir('unfoldingObject')
directory.cd()
if method == 'TSVDUnfold':
unfolding.unfoldObject.Write()
else:
unfolding.unfoldObject.Impl().Write()
unfoldingObjectFile.Close()
del unfolding
return hist_to_value_error_tuplelist(h_unfolded_data)
def data_covariance_matrix(data):
values = list(data)
get_bin_error = data.GetBinError
cov_matrix = Hist2D(len(values), -10, 10, len(values), -10, 10, type = 'D')
for bin_i in range(len(values)):
error = get_bin_error(bin_i+1)
cov_matrix.SetBinContent(bin_i+1, bin_i+1, error*error)
return cov_matrix
def get_unfold_histogram_tuple(inputfile, variable, channel, met_type):
folder = None
if not 'HT' in variable:
folder = inputfile.Get('unfolding_%s_analyser_%s_channel_%s' % (variable, channel, met_type))
else:
folder = inputfile.Get('unfolding_%s_analyser_%s_channel' % (variable, channel))
n_bins = len(bin_edges[variable]) - 1
bin_edge_array = array('d', bin_edges[variable])
#h_fakes = asrootpy(folder.fake_AsymBins)
#h_truth = asrootpy(folder.truth.Rebin(n_bins, 'truth', bin_edge_array))
h_truth = asrootpy(folder.truth_AsymBins).Clone()
#h_measured = asrootpy(folder.measured.Rebin(n_bins, 'measured', bin_edge_array))
h_measured = asrootpy(folder.measured_AsymBins).Clone()
h_response = folder.response_without_fakes_AsymBins.Clone() # response_AsymBins
nEvents = inputfile.EventFilter.EventCounter.GetBinContent(1)#number of processed events
lumiweight = ttbar_xsection * luminosity / nEvents #ttbar x-section = 225.2pb, lumi = 5814pb-1
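    # e.g. 225.2 pb * 5814 pb^-1 ~= 1.3e6 expected ttbar events in data; with, say,
    # 7e6 processed simulation events the per-event weight would be ~0.19
    # (numbers purely illustrative)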
h_truth.Scale(lumiweight)
h_measured.Scale(lumiweight)
h_response.Scale(lumiweight)
return h_truth, h_measured, h_response
def get_unfolded_normalisation(TTJet_fit_results, category, channel):
global variable, met_type, path_to_JSON
h_truth, h_measured, h_response = get_unfold_histogram_tuple(file_for_unfolding, variable, channel, met_type)
MADGRAPH_results = hist_to_value_error_tuplelist(h_truth)
POWHEG_results = hist_to_value_error_tuplelist(get_unfold_histogram_tuple(file_for_powheg, variable, channel, met_type)[0])
MCATNLO_results = hist_to_value_error_tuplelist(get_unfold_histogram_tuple(file_for_mcatnlo, variable, channel, met_type)[0])
matchingdown_results = hist_to_value_error_tuplelist(get_unfold_histogram_tuple(file_for_matchingdown, variable, channel, met_type)[0])
matchingup_results = hist_to_value_error_tuplelist(get_unfold_histogram_tuple(file_for_matchingup, variable, channel, met_type)[0])
scaledown_results = hist_to_value_error_tuplelist(get_unfold_histogram_tuple(file_for_scaledown, variable, channel, met_type)[0])
scaleup_results = hist_to_value_error_tuplelist(get_unfold_histogram_tuple(file_for_scaleup, variable, channel, met_type)[0])
TTJet_fit_results_unfolded = unfold_results(TTJet_fit_results,
category,
channel,
h_truth,
h_measured,
h_response,
'RooUnfoldSvd'
# 'TSVDUnfold'
)
normalisation_unfolded = {
'TTJet_measured' : TTJet_fit_results,
'TTJet_unfolded' : TTJet_fit_results_unfolded,
'MADGRAPH': MADGRAPH_results,
#other generators
'POWHEG': POWHEG_results,
'MCATNLO': MCATNLO_results,
#systematics
'matchingdown': matchingdown_results,
'matchingup': matchingup_results,
'scaledown': scaledown_results,
'scaleup': scaleup_results
}
write_data_to_JSON(normalisation_unfolded, path_to_JSON + '/' + variable + '/xsection_measurement_results' + '/kv' + str(unfoldCfg.SVD_k_value) + '/' + category + '/normalisation_' + channel + '_' + met_type + '.txt')
return normalisation_unfolded
def calculate_xsections(normalisation, category, channel):
global variable, met_type, path_to_JSON
# calculate the x-sections
    TTJet_xsection = calculate_xsection(normalisation['TTJet_measured'], luminosity, 0.15) # L in pb^-1
    TTJet_xsection_unfolded = calculate_xsection(normalisation['TTJet_unfolded'], luminosity, 0.15) # L in pb^-1
    MADGRAPH_xsection = calculate_xsection(normalisation['MADGRAPH'], luminosity, 0.15) # L in pb^-1
    POWHEG_xsection = calculate_xsection(normalisation['POWHEG'], luminosity, 0.15) # L in pb^-1
    MCATNLO_xsection = calculate_xsection(normalisation['MCATNLO'], luminosity, 0.15) # L in pb^-1
    matchingdown_xsection = calculate_xsection(normalisation['matchingdown'], luminosity, 0.15) # L in pb^-1
    matchingup_xsection = calculate_xsection(normalisation['matchingup'], luminosity, 0.15) # L in pb^-1
    scaledown_xsection = calculate_xsection(normalisation['scaledown'], luminosity, 0.15) # L in pb^-1
    scaleup_xsection = calculate_xsection(normalisation['scaleup'], luminosity, 0.15) # L in pb^-1
xsection_unfolded = {'TTJet_measured' : TTJet_xsection,
'TTJet_unfolded' : TTJet_xsection_unfolded,
'MADGRAPH': MADGRAPH_xsection,
'POWHEG': POWHEG_xsection,
'MCATNLO': MCATNLO_xsection,
#systematics
'matchingdown': matchingdown_xsection,
'matchingup': matchingup_xsection,
'scaledown': scaledown_xsection,
'scaleup': scaleup_xsection
}
write_data_to_JSON(xsection_unfolded, path_to_JSON + '/' + variable + '/xsection_measurement_results' + '/kv' + str(unfoldCfg.SVD_k_value) + '/' + category + '/xsection_' + channel + '_' + met_type + '.txt')
def calculate_normalised_xsections(normalisation, category, channel, normalise_to_one = False):
global variable, met_type, path_to_JSON
TTJet_normalised_xsection = calculate_normalised_xsection(normalisation['TTJet_measured'], bin_widths[variable], normalise_to_one)
TTJet_normalised_xsection_unfolded = calculate_normalised_xsection(normalisation['TTJet_unfolded'], bin_widths[variable], normalise_to_one)
MADGRAPH_normalised_xsection = calculate_normalised_xsection(normalisation['MADGRAPH'], bin_widths[variable], normalise_to_one)
POWHEG_normalised_xsection = calculate_normalised_xsection(normalisation['POWHEG'], bin_widths[variable], normalise_to_one)
MCATNLO_normalised_xsection = calculate_normalised_xsection(normalisation['MCATNLO'], bin_widths[variable], normalise_to_one)
matchingdown_normalised_xsection = calculate_normalised_xsection(normalisation['matchingdown'], bin_widths[variable], normalise_to_one)
matchingup_normalised_xsection = calculate_normalised_xsection(normalisation['matchingup'], bin_widths[variable], normalise_to_one)
scaledown_normalised_xsection = calculate_normalised_xsection(normalisation['scaledown'], bin_widths[variable], normalise_to_one)
scaleup_normalised_xsection = calculate_normalised_xsection(normalisation['scaleup'], bin_widths[variable], normalise_to_one)
normalised_xsection = {'TTJet_measured' : TTJet_normalised_xsection,
'TTJet_unfolded' : TTJet_normalised_xsection_unfolded,
'MADGRAPH': MADGRAPH_normalised_xsection,
'POWHEG': POWHEG_normalised_xsection,
'MCATNLO': MCATNLO_normalised_xsection,
#systematics
'matchingdown': matchingdown_normalised_xsection,
'matchingup': matchingup_normalised_xsection,
'scaledown': scaledown_normalised_xsection,
'scaleup': scaleup_normalised_xsection
}
filename = path_to_JSON + '/' + variable + '/xsection_measurement_results' + '/kv' + str(unfoldCfg.SVD_k_value) + '/' + category + '/normalised_xsection_' + channel + '_' + met_type + '.txt'
if normalise_to_one:
filename = filename.replace('normalised_xsection', 'normalised_to_one_xsection')
write_data_to_JSON(normalised_xsection, filename)
if __name__ == '__main__':
# setup
parser = OptionParser()
parser.add_option("-p", "--path", dest="path", default='data/',
help="set path to JSON files")
parser.add_option("-v", "--variable", dest="variable", default='MET',
help="set the variable to analyse (MET, HT, ST, MT)")
parser.add_option("-b", "--bjetbin", dest="bjetbin", default='2m',
help="set b-jet multiplicity for analysis. Options: exclusive: 0-3, inclusive (N or more): 0m, 1m, 2m, 3m, 4m")
parser.add_option("-m", "--metType", dest="metType", default='type1',
help="set MET type for analysis of MET, ST or MT")
parser.add_option("-k", "--k_value", type='int',
dest="k_value", default=6,
help="k-value for SVD unfolding")
parser.add_option("-H", "--hreco", type='int',
dest="Hreco", default=2,
help="Hreco parameter for error treatment in RooUnfold")
translateOptions = {
'0':'0btag',
'1':'1btag',
'2':'2btags',
'3':'3btags',
'0m':'0orMoreBtag',
'1m':'1orMoreBtag',
'2m':'2orMoreBtags',
'3m':'3orMoreBtags',
'4m':'4orMoreBtags',
#mettype:
'pf':'PFMET',
'type1':'patType1CorrectedPFMet'
}
categories = [ 'central', 'matchingup', 'matchingdown', 'scaleup', 'scaledown', 'BJet_down', 'BJet_up', 'JES_down', 'JES_up', 'LightJet_down', 'LightJet_up', 'PU_down', 'PU_up' ]
(options, args) = parser.parse_args()
variable = options.variable
unfoldCfg.SVD_k_value = options.k_value
unfoldCfg.Hreco = options.Hreco
met_type = translateOptions[options.metType]
b_tag_bin = translateOptions[options.bjetbin]
path_to_JSON = options.path
for category in categories:
#Setting up systematic MET for JES up/down samples
met_type = translateOptions[options.metType]
if category == 'JES_up':
met_type += 'JetEnUp'
if met_type == 'PFMETJetEnUp':
met_type = 'patPFMetJetEnUp'
elif category == 'JES_down':
met_type += 'JetEnDown'
if met_type == 'PFMETJetEnDown':
met_type = 'patPFMetJetEnDown'
#read fit results from JSON
TTJet_fit_results_electron = read_data_from_JSON(path_to_JSON + '/' + variable + '/fit_results/' + category + '/fit_results_electron_' + met_type + '.txt')['TTJet']
TTJet_fit_results_muon = read_data_from_JSON(path_to_JSON + '/' + variable + '/fit_results/' + category + '/fit_results_muon_' + met_type + '.txt')['TTJet']
#change back to original MET type for the unfolding
met_type = translateOptions[options.metType]
#ad-hoc switch for PFMET -> patMETsPFlow
if met_type == 'PFMET':
met_type = 'patMETsPFlow'
#get unfolded normalisation
unfolded_normalisation_electron = get_unfolded_normalisation(TTJet_fit_results_electron, category, 'electron')
unfolded_normalisation_muon = get_unfolded_normalisation(TTJet_fit_results_muon, category, 'muon')
#measure xsection
calculate_xsections(unfolded_normalisation_electron, category, 'electron')
calculate_xsections(unfolded_normalisation_muon, category, 'muon')
calculate_normalised_xsections(unfolded_normalisation_electron, category, 'electron')
calculate_normalised_xsections(unfolded_normalisation_muon, category, 'muon')
normalise_to_one = True
calculate_normalised_xsections(unfolded_normalisation_electron, category, 'electron', normalise_to_one)
calculate_normalised_xsections(unfolded_normalisation_muon, category, 'muon', normalise_to_one)
```
#### File: DailyPythonScripts/legacy/makeHLTPlots.py
```python
from ROOT import *
import HistGetter
import HistPlotter
import inputFiles
outputFormats = ['png', 'pdf']
outputFolder = '/storage/results/plots/ElectronHad/'
saveAs = HistPlotter.saveAs
triggers = [
# 'HLT_Ele25_CaloIdVT_TrkIdT_CentralJet30',
# 'HLT_Ele25_CaloIdVT_TrkIdT_DiCentralJet30',
'HLT_Ele25_CaloIdVT_TrkIdT_TriCentralJet30',
# 'HLT_Ele25_CaloIdVT_TrkIdT_QuadCentralJet30',
# 'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralJet30',
# 'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_DiCentralJet30',
'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_TriCentralJet30',
# 'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_TriCentralPFJet30',
# 'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_QuadCentralJet30'
]
triggerVariables = ['jet_pt',
'jet_eta',
'jet_phi',
'jet_eta_PtGT45',
'jet_phi_PtGT45']
triggerModifiers = ['visited', 'fired']
def makeHLTPlots(hists, rebin=1):
print 'Making HLT plots'
data = hists['data']
ttbar = hists['ttbar']
plots = ['HLTStudy/' + trigger + '/' + variable for trigger in triggers for variable in triggerVariables]
efficiency = {}
mc_efficiency = {}
for jetbin in HistPlotter.allJetBins:
for plot in plots: #make all plots
if 'Quad' in plot and not '4' in jetbin:#only >=4 jet bin for QuadJet trigger
continue
elif 'Tri' in plot and ((not '3' in jetbin and not '4' in jetbin) or '3orMoreJets' in jetbin):
#only ==3, >=4 jet bins for TriJet trigger
continue
elif 'Di' in plot and not '2' in jetbin:
continue
print plot + '_' + jetbin
fired = data[plot + '_' + 'fired_' + jetbin]
visited = data[plot + '_' + 'visited_' + jetbin]
mc_fired = ttbar[plot + '_' + 'fired_' + jetbin]
mc_visited = ttbar[plot + '_' + 'visited_' + jetbin]
# calculate the sum of weights for correct error calculation
#http://root.cern.ch/root/html/TH1.html#TH1:Sumw2
fired.Sumw2()
visited.Sumw2()
mc_fired.Sumw2()
mc_visited.Sumw2()
xlimits, xTitle, yTitle, fitfunction, fitRange = getParams(plot, rebin)
fired.GetXaxis().SetRangeUser(xlimits[0], xlimits[1])
visited.GetXaxis().SetRangeUser(xlimits[0], xlimits[1])
mc_fired.GetXaxis().SetRangeUser(xlimits[0], xlimits[1])
mc_visited.GetXaxis().SetRangeUser(xlimits[0], xlimits[1])
fired.Rebin(rebin)
visited.Rebin(rebin)
mc_fired.Rebin(rebin)
mc_visited.Rebin(rebin)
efficiency[plot + jetbin] = TEfficiency(fired, visited)
mc_efficiency[plot + jetbin] = TEfficiency(mc_fired, mc_visited)
eff = efficiency[plot + jetbin].Clone("Copy")
mceff = mc_efficiency[plot + jetbin].Clone("CopyMC")
setStyles(eff, mceff)
saveName = plot + '_' + 'efficiency'
saveName = saveName.replace('Jet30/', 'Jet30_')
legend = getLegend(eff, mceff)
caption = getCaption()
c = TCanvas("cname" + plot + jetbin, 'cname', 900, 900)
eff.Draw('P0')
mceff.Draw('SAMEP0')
legend.Draw('same')
caption.Draw('same')
saveAs(c, saveName + '_' + jetbin, outputFolder = outputFolder)
def getParams(plot, rebin):
xlimits = [10,200]
xTitle = 'jet p_{T} (GeV)'
yTitle = 'efficiency/(GeV)'
fitfunction = ''
fitRange = [-9999, 9999]
if 'jet_pt' in plot:
xlimits = [10,200]
xTitle = 'jet p_{T} (GeV)'
yTitle = 'efficiency/(%d GeV)' % (1*rebin)
fitfunction = "[0]*exp([1]*exp([2]*x))"
fitRange = [20,200]
elif 'jet_eta' in plot:
xlimits = [-3,3]
        xTitle = 'jet #eta'
yTitle = 'efficiency/(%0.1f)' % (0.1*rebin)
fitfunction = 'pol2'
fitRange = [-3,3]
elif 'jet_phi' in plot:
xlimits = [-4.,4.]
        xTitle = 'jet #phi'
yTitle = 'efficiency/(%0.1f)' % (0.1*rebin)
fitfunction = 'pol0'
fitRange = [-3.1,3.1]
return xlimits, xTitle, yTitle, fitfunction, fitRange
def setStyles(dataPlot, mcPlot):
mcPlot.SetLineColor(2)
mcPlot.SetMarkerColor(2)
mcPlot.SetMarkerStyle(22)
def getLegend(dataPlot, mcPlot):
leg = TLegend(0.7, 0.2, 0.8, 0.3)
leg.SetBorderSize(0);
leg.SetLineStyle(0);
leg.SetTextFont(42);
leg.SetFillStyle(0);
leg.AddEntry(dataPlot, 'data', 'P')
leg.AddEntry(mcPlot, 'MC', 'P')
return leg
def getCaption():
tex = TLatex(0.18,1,"CMS Preliminary 2011, #sqrt{s} = 7 TeV, L = 4.69 fb^{-1}");
tex.SetNDC();
tex.SetTextAlign(13);
tex.SetTextFont(42);
tex.SetTextSize(0.04);
tex.SetLineWidth(2);
return tex
if __name__ == '__main__':
gROOT.SetBatch(True)
gROOT.ProcessLine('gErrorIgnoreLevel = 1001;')
files = inputFiles.files
hltFiles = {}
hltFiles['data'] = inputFiles.files['data']
hltFiles['ttbar'] = inputFiles.files['ttbar']
triggerPlots = ['HLTStudy/' + trigger + '/' + variable + '_' + modifier for trigger in triggers for variable in triggerVariables for modifier in triggerModifiers]
HistPlotter.setStyle()
hists = HistGetter.getHistsFromFiles(triggerPlots, hltFiles, jetBins=HistPlotter.allJetBins)
makeHLTPlots(hists)
```
#### File: DailyPythonScripts/legacy/makePrettyPlots.py
```python
import tools.PlottingUtilities as plotting
import FILES
import ROOTFileReader as reader
import QCDRateEstimation
def plot(histpath, qcdShapeFrom, qcdShapeForSystematics, qcdRateEstimate, rebin=1, suffixes=[]):
inputFiles = FILES.files
#get histograms
if len(suffixes) > 0:
for suffix in suffixes:
hist = histpath + '_' + suffix
            histograms = reader.getHistogramDictionary(hist, inputFiles)
else:
histograms = reader.getHistogramDictionary(histpath, inputFiles)
if __name__ == "__main__":
inputFiles = FILES.files
estimateQCD = QCDRateEstimation.estimateQCDWithRelIso
plot(histpath='TTbarEplusJetsPlusMetAnalysis/Ref selection/MET/patMETsPFlow/Angle_lepton_MET',
qcdShapeFrom ='TTbarEplusJetsPlusMetAnalysis/Ref selection/QCDConversions/MET/patMETsPFlow/Angle_lepton_MET',
qcdShapeForSystematics = 'TTbarEplusJetsPlusMetAnalysis/Ref selection/QCD non iso e+jets/MET/patMETsPFlow/Angle_lepton_MET',
qcdRateEstimate=estimateQCD,
rebin=1,
suffixes=['0btag', '1btag', '2orMoreBtags'])
```
#### File: DailyPythonScripts/legacy/nTupleInfo.py
```python
from __future__ import division
from ROOT import *
import sys, os
from math import sqrt
def getBranchInfo(listOfBranches):
branches = []
bapp = branches.append
for branch in listOfBranches:
info = {}
info['name'] = branch.GetName()
info['totalSize'] = branch.GetTotalSize()
info['totalBytes'] = branch.GetTotBytes()
info['zippedBytes'] = branch.GetZipBytes()
bapp(info)
return branches
def printTwikiTable(branches, filesize):
prevObj = ' '
info = {}
info['totalSize'] = 0
info['zippedBytes'] = 0
info['totalBytes'] = 0
for branch in sorted(branches):
name = branch['name']
size = branch['totalSize'] / 1024 / 1024 #MB
zipSize = branch['zippedBytes'] / 1024 / 1024#MB
compression = size / zipSize
totalBytes = branch['totalBytes'] / 1024 / 1024#MB
buffer = (size - totalBytes) * 1024#KB
fraction = zipSize / filesize * 100#%
obj = ' '
if '.' in name:
obj = name.split('.')[0] + '.'
else:
obj = name.capitalize()
if not name.startswith(prevObj):
if '.' in prevObj:
Osize = info['totalSize']
OzipSize = info['zippedBytes']
Ocompression = Osize / OzipSize
Obuffer = (size - info['totalBytes'] / 1024 / 1024) * 1024#KB
Ofraction = OzipSize / filesize * 100
print '| *Total* | %.3f | %.3f | %.2f | %.3f | %.2f%% |' % (Osize, OzipSize, Ocompression, Obuffer, Ofraction)
print '%ENDTWISTY%'
print
#print summary
print '---+++ %s' % obj.replace('.', '')
print '%TWISTY{mode="div" showlink="Show " hidelink="Hide " firststart="hide" showimgright="%ICONURLPATH{toggleopen-small}%" hideimgright="%ICONURLPATH{toggleclose-small}%"}%'
print '| *%s* ||||||' % obj.replace('.', '')
print '| *Name* | *Total Size (MB)* | *Compressed size (MB)* | *compression factor* | *Buffer (KB)* | *Fraction of file size* |'
info['totalSize'] = 0
info['zippedBytes'] = 0
info['totalBytes'] = 0
else:
info['totalSize'] += size
info['zippedBytes'] += zipSize
info['totalBytes'] += totalBytes
print '| !%s | %.3f | %.3f | %.2f | %.3f | %.2f%% |' % (name, size, zipSize, compression, buffer, fraction)
prevObj = obj
print '%ENDTWISTY%'
def printBiggestConsumers(branches, filesize):
consumers = []
for branch in sorted(branches):
consumer = {}
zipSize = branch['zippedBytes'] / 1024 / 1024#MB
fraction = zipSize / filesize * 100#%
consumer[branch['zippedBytes']] = branch
consumers.append(consumer)
top = 10
current = 1
print '| *Name* | *Compressed size (MB)* | *Fraction of file size* |'
for consumer in sorted(consumers, reverse=True):
if current > top:
break
current += 1
branch = consumer[consumer.keys()[0]]
zipSize = branch['zippedBytes'] / 1024 / 1024#MB
print '| !%s | %.3f | %.3f%% |' %(branch['name'], zipSize, zipSize / filesize * 100)#%)
# print branch['name'], zipSize, zipSize / filesize * 100#%
def getTriggers(chain):
for event in chain:
triggers = event.__getattr__("Trigger.HLTNames")
for trigger in triggers:
if not 'not found' in trigger:
print ' * ' + trigger
break
if __name__ == '__main__':
gROOT.SetBatch(1);
chain = TChain("rootTupleTree/tree");
filesize = 0
    if len(sys.argv) < 2:
        print 'wrong usage: please provide at least one input ROOT file'
        sys.exit(1)
files = sys.argv[1:]
add = chain.Add
size = os.path.getsize
for file in files:
add(file)
filesize += size(file)
filesize = filesize/ 1024 / 1024#MB
branches = getBranchInfo(chain.GetListOfBranches())
numberOfEvents = chain.GetEntries()
if '_data_' in files[0]:
print '---++ DATA content'
else:
print '---++ MC content'
sizePerEvent = filesize/numberOfEvents*1024
print 'Size of event: %.3f KB +- %.3f' % (sizePerEvent, 1/sqrt(numberOfEvents)*sizePerEvent)
printTwikiTable(branches, filesize)
print '---+++ Biggest consumers'
printBiggestConsumers(branches, filesize)
print '---+++ Available Triggers'
getTriggers(chain)
```
#### File: DailyPythonScripts/legacy/prescaleTableParser.py
```python
import csv
import sys
''' Summarises the results given by the makeHLTPrescaleTable.py script in
https://twiki.cern.ch/twiki/bin/viewauth/CMS/HLTriggerTools#make_HLTPrescaleTable_py
'''
def getTriggers(csvFileName):
file = open(csvFileName, 'rb')
data = csv.reader(file, delimiter = ';')
fieldNames = getFieldNames(data)
triggers, prescaledTriggers = createTriggerDictionaries(fieldNames)
reader = csv.DictReader( file, fieldnames = fieldNames ,delimiter = ';')
triggers, prescaledTriggers = fillTriggers(reader, triggers, prescaledTriggers)
return triggers, prescaledTriggers
def getFieldNames(data):
fieldNames = []
for row in data:
#look for first row starting with 'run'
if len(row) > 0 and row[0] == 'run':
fieldNames = row#this has the format: run, '', listoftriggers
break
return fieldNames
def createTriggerDictionaries(fieldNames):
triggers = {}
prescaledTriggers = {}
for name in fieldNames[2:]:
triggers[name] = []
prescaledTriggers[name] = {'prescale': '', 'runs': []}
return triggers, prescaledTriggers
def fillTriggers(data, triggers, prescaledTriggers):
for row in data:
for name, value in row.iteritems():
            if not name == '' and not name == 'run':#omit empty and run columns
if value == '1':#exists in the menu and has prescale = 1
if triggers.has_key(name):
triggers[name].append(row['run'])
elif value:#exists in the menu and has prescale !=1
if prescaledTriggers.has_key(name):
prescaledTriggers[name]['prescale'] = value
prescaledTriggers[name]['runs'].append(row['run'])
return triggers, prescaledTriggers
def printTriggersAsTwikiTable(triggers, prescaledTriggers):
print '| *trigger* | *runs* |'
for key in sorted(triggers.keys()):
runs = sorted(triggers[key])
if len(runs) > 0:
print '| =%s= |' % key, runs[0], '-', runs[-1], '|'
print
print '| *trigger* | *runs* | *prescales* |'
for key in sorted(prescaledTriggers.keys()):
runs =sorted(prescaledTriggers[key]['runs'])
prescale = prescaledTriggers[key]['prescale']
if len(runs) > 0:
print '| =%s= |' % key, runs[0], '-', runs[-1], '|', prescale, '|'
if __name__ == "__main__":
csvFileName = '/Users/lkreczko/Dropbox/Documents/Analysis/trigger/out2.csv'
if len(sys.argv) > 1:
csvFileName = sys.argv[1]
triggers, prescaledTriggers = getTriggers(csvFileName)
printTriggersAsTwikiTable(triggers, prescaledTriggers)
```
#### File: DailyPythonScripts/legacy/printCutFlow.py
```python
from __future__ import division
from ROOT import *
import tools.ROOTFileReader as FileReader
import tools.PlottingUtilities as plotting
import FILES
from math import sqrt
import QCDRateEstimation
cuts = None
cuts_electrons = [
"Skim", #
"Event cleaning and HLT", #
"Electron", #
"Muon Veto", #
"Electron veto", #
"Conversion veto", #
"$\geq 3$ jets", #
"$\geq 4$ jets", #
"$\geq 1$ CSV b-tag", #
"$\geq 2$ CSV b-tag" #
]
cuts_muons = [
"Skim", #
"Event cleaning and HLT", #
"Muon", #
"Electron veto", #
"Muon Veto", #
"$\geq 3$ jets", #
"$\geq 4$ jets", #
"$\geq 1$ CSV b-tag", #
"$\geq 2$ CSV b-tag" #
]
def printCutFlow(hist, analysis, outputFormat='Latex'):
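    # presumably rescales the simulated ttbar yield from the cross-section used for
    # the MC normalisation (157.5 pb) to a measured value (164.4 pb)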
scale_ttbar = 164.4 / 157.5
used_data = 'ElectronHad'
lepton = 'Electron/electron'
if 'Mu' in analysis:
used_data = 'SingleMu'
lepton = 'Muon/muon'
hist_1mBtag = 'TTbarPlusMetAnalysis/' + analysis + '/Ref selection/' + lepton + '_AbsEta_1orMoreBtag'
hist_2mBtag = 'TTbarPlusMetAnalysis/' + analysis + '/Ref selection/' + lepton + '_AbsEta_2orMoreBtags'
hist_names = [hist, #due to b-tag scale factors these are not as simple any more
hist_1mBtag,
hist_2mBtag
]
inputfiles = {}
for sample in FILES.samplesToLoad:
inputfiles[sample] = FILES.files[sample]
hists = FileReader.getHistogramsFromFiles(hist_names, inputfiles)
for sample in hists.keys():
for histname in hists[sample].keys():
hists[sample][histname].Sumw2()
if analysis == 'EPlusJets':
hists['QCD'] = plotting.sumSamples(hists, plotting.qcd_samples)
else:
hists['QCD'] = hists['QCD_Pt-20_MuEnrichedPt-15']
hists['SingleTop'] = plotting.sumSamples(hists, plotting.singleTop_samples)
hists['Di-Boson'] = plotting.sumSamples(hists, plotting.diboson_samples)
hists['W+Jets'] = plotting.sumSamples(hists, plotting.wplusjets_samples)
# hists['SumMC'] = plotting.sumSamples(hists, plotting.allMC_samples)
header = "| Step | TTJet | W+jets | DY + Jets | single top | Di-boson | QCD | Sum MC | Data |"
row = " | %s | %d +- %d | %d +- %d | %d +- %d | %d +- %d | %d +- %d | %d +- %d | %d +- %d | %d | "
if outputFormat == 'Latex':
header = "Selection step & \\ttbar & W + Jets & Z + Jets & Single-top & Di-boson & QCD~ & Sum MC & Data\\\\"
row = " %s & $%d \pm %d$ & $%d \pm %d$ & $%d \pm %d$ & $%d \pm %d$ & $%d \pm %d$ & $%d \pm %d$ & $%d \pm %d$ & %d \\\\ "
print header
numbers, errors = getEventNumbers(hists, hist, hist_1mBtag, hist_2mBtag)# + '_0orMoreBtag')
for step in range(len(cuts)):
nums = numbers[step]
errs = errors[step]
nums['TTJet'] = nums['TTJet'] * scale_ttbar
errs['TTJet'] = errs['TTJet'] * scale_ttbar
if analysis == 'EPlusJets' and step >= len(cuts) - 3:#have only estimates for >= 4 jet and beyond
histForEstimation = 'TTbarPlusMetAnalysis/EPlusJets/QCD e+jets PFRelIso/Electron/electron_pfIsolation_03_0orMoreBtag'
if step == len(cuts) - 2:
histForEstimation = 'TTbarPlusMetAnalysis/EPlusJets/QCD e+jets PFRelIso/Electron/electron_pfIsolation_03_1orMoreBtag'
if step == len(cuts) - 1:
histForEstimation = 'TTbarPlusMetAnalysis/EPlusJets/QCD e+jets PFRelIso/Electron/electron_pfIsolation_03_2orMoreBtags'
estimate = QCDRateEstimation.estimateQCDWithRelIso(FILES.files, histForEstimation)
nums['QCD'], errs['QCD'] = estimate['estimate'], estimate['absoluteError']
if analysis == 'MuPlusJets' and step >= len(cuts) - 3:#have only estimates for >= 4 jet and beyond
scale = 1.21
nums['QCD'], errs['QCD'] = nums['QCD'] * scale, errs['QCD'] * scale
sumMC = nums['TTJet'] + nums['W+Jets'] + nums['DYJetsToLL'] + nums['SingleTop'] + nums['QCD'] + nums['Di-Boson']
sumMC_err = sqrt(errs['TTJet'] ** 2 + errs['W+Jets'] ** 2 + errs['DYJetsToLL'] ** 2 + errs['SingleTop'] ** 2 + errs['QCD'] ** 2 + errs['Di-Boson'] ** 2)
print row % (cuts[step], nums['TTJet'], errs['TTJet'], nums['W+Jets'], errs['W+Jets'], nums['DYJetsToLL'], errs['DYJetsToLL'],
nums['SingleTop'], errs['SingleTop'], nums['Di-Boson'], errs['Di-Boson'], nums['QCD'], errs['QCD'], sumMC, sumMC_err, nums[used_data])
def getEventNumbers(hists, histname, hist_1mBtag, hist_2mBtag):
eventNumbers = []
errorValues = []
for step in range(len(cuts)):
events = {}
errors = {}
for sample in hists.keys():
events[sample] = hists[sample][histname].GetBinContent(step + 1)
errors[sample] = hists[sample][histname].GetBinError(step + 1)
if step == len(cuts) - 2:
events[sample] = hists[sample][hist_1mBtag].Integral()
entries = hists[sample][hist_1mBtag].GetEntries()
if not entries == 0:
errors[sample] = sqrt(entries) / entries * events[sample]
else:
errors[sample] = 0
if step == len(cuts) - 1:
events[sample] = hists[sample][hist_2mBtag].Integral()
entries = hists[sample][hist_2mBtag].GetEntries()
if not entries == 0:
errors[sample] = sqrt(entries) / entries * events[sample]
else:
errors[sample] = 0
eventNumbers.append(events)
errorValues.append(errors)
return eventNumbers, errorValues
if __name__ == "__main__":
gROOT.SetBatch(True)
gROOT.ProcessLine('gErrorIgnoreLevel = 1001;')
cuts = cuts_electrons
print '=' * 120
print 'TTbarEplusJetsRefSelection'
printCutFlow('EventCount/TTbarEplusJetsRefSelection', 'EPlusJets')
print '=' * 120
cuts = cuts_muons
print '=' * 120
print 'TTbarMuPlusJetsRefSelection'
printCutFlow('EventCount/TTbarMuPlusJetsRefSelection', 'MuPlusJets')
print '=' * 120
```
#### File: DailyPythonScripts/legacy/purityAndStability_METbins.py
```python
import FILES
import tools.ROOTFileReader as FileReader
from ROOT import gROOT
import tools.FileUtilities as FileUtils
fileTemplate = 'data/correctionFactors/correctionFactors_%s_%s_JSON.txt'
samples = [
'TTJet',
'POWHEG',
'PYTHIA6',
'MCatNLO',
'TTJets-matchingdown',
'TTJets-matchingup',
'TTJets-scaledown',
'TTJets-scaleup',
]
metbins = [
'0-25',
'25-45',
'45-70',
'70-100',
'100-inf'
]
metTypes = ['patMETsPFlow', 'patType1CorrectedPFMet', 'patType1p2CorrectedPFMet' ]
metsystematics_sources = [
"patType1p2CorrectedPFMetElectronEnUp",
"patType1p2CorrectedPFMetElectronEnDown",
"patType1p2CorrectedPFMetMuonEnUp",
"patType1p2CorrectedPFMetMuonEnDown",
"patType1p2CorrectedPFMetTauEnUp",
"patType1p2CorrectedPFMetTauEnDown",
"patType1p2CorrectedPFMetJetResUp",
"patType1p2CorrectedPFMetJetResDown",
"patType1p2CorrectedPFMetJetEnUp",
"patType1p2CorrectedPFMetJetEnDown",
"patType1p2CorrectedPFMetUnclusteredEnUp",
"patType1p2CorrectedPFMetUnclusteredEnDown",
"patType1CorrectedPFMetElectronEnUp",
"patType1CorrectedPFMetElectronEnDown",
"patType1CorrectedPFMetMuonEnUp",
"patType1CorrectedPFMetMuonEnDown",
"patType1CorrectedPFMetTauEnUp",
"patType1CorrectedPFMetTauEnDown",
"patType1CorrectedPFMetJetResUp",
"patType1CorrectedPFMetJetResDown",
"patType1CorrectedPFMetJetEnUp",
"patType1CorrectedPFMetJetEnDown",
"patType1CorrectedPFMetUnclusteredEnUp",
"patType1CorrectedPFMetUnclusteredEnDown",
"patPFMetElectronEnUp",
"patPFMetElectronEnDown",
"patPFMetMuonEnUp",
"patPFMetMuonEnDown",
"patPFMetTauEnUp",
"patPFMetTauEnDown",
"patPFMetJetResUp",
"patPFMetJetResDown",
"patPFMetJetEnUp",
"patPFMetJetEnDown",
"patPFMetUnclusteredEnUp",
"patPFMetUnclusteredEnDown",
]
metTypes.extend(metsystematics_sources)
def getMETVariablesFrom2DPlot(analysisType, sample, metType, bjetbin):
hist = 'TTbarPlusMetAnalysis/' + analysisType + '/Ref selection/MET/%s/RecoMET_vs_GenMET_%s' % (metType, bjetbin)
correctionFactors = {}
purities = {}
stabilities = {}
numberOfGenEvents = {}
numberOfRecoEvents = {}
recoMETvsGenMET = FileReader.getHistogramFromFile(hist, FILES.files[sample])
for metbin in metbins:
lowerLimit, upperLimit = metbin.split('-')
lowerLimit, upperLimit = float(lowerLimit), float(upperLimit)
lowerXbin = recoMETvsGenMET.GetXaxis().FindBin(lowerLimit + 0.001)
lowerYbin = recoMETvsGenMET.GetYaxis().FindBin(lowerLimit + 0.001)
upperXbin, upperYbin = 0,0
overflowX, overflowY = recoMETvsGenMET.GetNbinsX()+1, recoMETvsGenMET.GetNbinsY()+1
        if not upperLimit == float('inf'):
upperXbin = recoMETvsGenMET.GetXaxis().FindBin(upperLimit - 0.001)
upperYbin = recoMETvsGenMET.GetYaxis().FindBin(upperLimit - 0.001)
else:
upperXbin = overflowX
upperYbin = overflowY
N_reco = recoMETvsGenMET.Integral(0, overflowX, lowerYbin, upperYbin)
N_gen = recoMETvsGenMET.Integral(lowerXbin, upperXbin, 0, overflowY)
nRecoPlusnGen = recoMETvsGenMET.Integral(lowerXbin, upperXbin, lowerYbin, upperYbin)
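        # purity: fraction of events reconstructed in this MET bin that were also generated in it
        # stability: fraction of events generated in this MET bin that were also reconstructed in it
        # correction factor: ratio of generated to reconstructed events in the bin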
purity = nRecoPlusnGen/N_reco
stability = nRecoPlusnGen/N_gen
correctionFactor = N_gen/N_reco
correctionFactors[metbin] = correctionFactor
purities[metbin] = purity
stabilities[metbin] = stability
numberOfGenEvents[metbin] = N_gen
numberOfRecoEvents[metbin] = N_reco
result = {
'correctionFactors': correctionFactors,
'purities': purities,
'stabilities': stabilities,
'numberOfGenEvents': numberOfGenEvents,
'numberOfRecoEvents':numberOfRecoEvents
}
return result
def getMETVariables(analysisType, sample, metType, bjetbin):
base = 'TTbarPlusMetAnalysis/' + analysisType + '/Ref selection/BinnedMETAnalysis/'
analyser = 'Electron_%s_bin_%s/electron_AbsEta_%s'
if 'Mu' in analysisType:
analyser = 'Muon_%s_bin_%s/muon_AbsEta_%s'
correctionFactors = {}
purities = {}
stabilities = {}
numberOfGenEvents = {}
numberOfRecoEvents = {}
for metbin in metbins:
genMET = base + analyser % ('GenMET', metbin, bjetbin)
PFMET = base + analyser % (metType, metbin, bjetbin)
genMETs = FileReader.getHistogramFromFile(genMET, FILES.files[sample])
PFMETs = FileReader.getHistogramFromFile(PFMET, FILES.files[sample])
N_gen = genMETs.Integral()
N_reco = PFMETs.Integral()
purity = (N_gen + N_reco) / N_reco
stability = (N_gen + N_reco) / N_gen
correctionFactor = N_gen / N_reco
correctionFactors[metbin] = correctionFactor
purities[metbin] = purity
stabilities[metbin] = stability
numberOfGenEvents[metbin] = N_gen
numberOfRecoEvents[metbin] = N_reco
result = {
'correctionFactors': correctionFactors,
'purities': purities,
'stabilities': stabilities,
'numberOfGenEvents': numberOfGenEvents,
'numberOfRecoEvents':numberOfRecoEvents
}
return result
def saveToFile(correctionFactors, analysisType, bjetbin):
stringForFile = ''
fileName = fileTemplate % (analysisType, bjetbin)
stringForFile += str(correctionFactors) + '\n'
import json
stringForFile = json.dumps(correctionFactors, sort_keys=True, indent=4)
FileUtils.writeStringToFile(stringForFile, fileName)
if __name__ == "__main__":
from optparse import OptionParser
gROOT.SetBatch(True)
gROOT.ProcessLine('gErrorIgnoreLevel = 1001;')
parser = OptionParser()
parser.add_option("-b", "--bjetbin", dest="bjetbin", default='2m',
help="set b-jet multiplicity for analysis. Options: exclusive: 0-3, inclusive (N or more): 0m, 1m, 2m, 3m, 4m")
parser.add_option("-a", "--analysisType", dest="analysisType", default='EPlusJets',
help="set analysis type: EPlusJets or MuPlusJets")
parser.add_option("-t", "--test",
action="store_true", dest="test", default=False,
help="Run test")
translateOptions = {
'0':'0btag',
'1':'1btag',
'2':'2btags',
'3':'3btags',
'0m':'0orMoreBtag',
'1m':'1orMoreBtag',
'2m':'2orMoreBtags',
'3m':'3orMoreBtags',
'4m':'4orMoreBtags',
}
(options, args) = parser.parse_args()
bjetbin = translateOptions[options.bjetbin]
analysisType = options.analysisType
correctionFactors = {}
for sample in samples:
correctionFactors[sample] = {}
for metType in metTypes:
# variables = getMETVariables(analysisType, sample, metType, bjetbin)
variables = getMETVariablesFrom2DPlot(analysisType, sample, metType, bjetbin)
correctionFactors[sample][metType] = variables['correctionFactors']
saveToFile(correctionFactors, analysisType, bjetbin)
```
#### File: DailyPythonScripts/legacy/QCDRateEstimation.py
```python
from __future__ import division
from math import sqrt
import tools.ROOTFileReader as FileReader
import tools.PlottingUtilities as plotting
import FILES
try:
from uncertainties import ufloat
from uncertainties import umath
except:
print "Could not find uncertainties package, please install for full functionality"
print 'http://packages.python.org/uncertainties/'
ufloatEnabled = False
from ROOT import Double
DEBUG = False
allMC = ['TTJet', 'DYJetsToLL', 'QCD_Pt-20to30_BCtoE', 'QCD_Pt-30to80_BCtoE',
'QCD_Pt-80to170_BCtoE', 'QCD_Pt-20to30_EMEnriched', 'QCD_Pt-30to80_EMEnriched',
'QCD_Pt-80to170_EMEnriched', 'GJets_HT-40To100', 'GJets_HT-100To200',
'GJets_HT-200', 'WWtoAnything', 'WZtoAnything', 'ZZtoAnything', 'T_tW-channel',
'T_t-channel', 'T_s-channel', 'Tbar_tW-channel', 'Tbar_t-channel',
'Tbar_s-channel', 'W1Jet', 'W2Jets', 'W3Jets', 'W4Jets'
]
qcd = ['QCD_Pt-20to30_BCtoE', 'QCD_Pt-30to80_BCtoE',
'QCD_Pt-80to170_BCtoE', 'QCD_Pt-20to30_EMEnriched', 'QCD_Pt-30to80_EMEnriched',
'QCD_Pt-80to170_EMEnriched', 'GJets_HT-40To100', 'GJets_HT-100To200',
'GJets_HT-200']
btag_latex = {
'0orMoreBtag':'$ \geq 0$ b-tags',
'0btag':'0 b-tag',
'1btag':'1 b-tag',
'2orMoreBtags':'$\geq 2$ b-tags'
}
#relIso method is overestimating by 45%
defaultHistogram = 'TTbarEplusJetsPlusMetAnalysis/Ref selection/QCD e+jets PFRelIso/Electron/electron_pfIsolation_03_0orMoreBtag'
relIsoBias = 0.0
def estimateQCDWithRelIso(inputFiles, histogramForEstimation=defaultHistogram, function='expo',
fitRange=(0.3, 1.6), fitRangesForSystematics=[(0.2, 1.6), (0.4, 1.6)]):
inputFile = inputFiles['SingleElectron']
if DEBUG:
print '*' * 120
print "Estimating QCD using a fit to RelIso"
print 'Input file = ', inputFile
print 'Histogram = ', histogramForEstimation
print 'Fit function = ', function
print 'Fit range = ', fitRange
print 'Fit ranges for systematics = ', fitRangesForSystematics
print '*' * 120
histogramForEstimation = FileReader.getHistogramFromFile(histogramForEstimation, inputFile)
result = relIsoMethodWithSystematics(histogramForEstimation, function, fitRange=fitRange, fitRangesForSystematics=fitRangesForSystematics,
applyBiasCorrection=True)
return result
def relIsoMethodWithSystematics(histogramForEstimation=defaultHistogram, function='expo',
fitRange=(0.3, 1.6), fitRangesForSystematics=[(0.2, 1.6), (0.4, 1.6)], applyBiasCorrection=True):
centralResult = relIsoMethod(histogramForEstimation, function, fitRange=fitRange)
centralEstimate, centralAbsoluteError = centralResult['estimate'], centralResult['absoluteError']
absoluteStatisticError = centralAbsoluteError
centralRelativeError = 0
if not centralEstimate == 0:
centralRelativeError = centralAbsoluteError / centralEstimate
centralRelativeErrorSquared = centralRelativeError ** 2
#systematic errors
systematicErrorFromOtherFitRangesSquared = 0
for currentRange in fitRangesForSystematics:
currentResult = relIsoMethod(histogramForEstimation, function, fitRange=currentRange)
currentEstimate, err = currentResult['estimate'], currentResult['absoluteError']
deviation = currentEstimate - centralEstimate
if not centralEstimate == 0:
systematicErrorFromOtherFitRangesSquared += (deviation / centralEstimate) ** 2
centralRelativeErrorSquared += systematicErrorFromOtherFitRangesSquared
relativeSytematicErrorSquared = systematicErrorFromOtherFitRangesSquared
relativeErrorFromBiasCorrection = 0
if applyBiasCorrection:
reductionFromBias = 1 - relIsoBias
centralEstimate = centralEstimate * reductionFromBias
relativeErrorFromBiasCorrection = relIsoBias
centralRelativeErrorSquared += relativeErrorFromBiasCorrection ** 2
relativeSytematicErrorSquared += relativeErrorFromBiasCorrection ** 2
absoluteSystematicError = sqrt(relativeSytematicErrorSquared) * centralEstimate
centralAbsoluteError = sqrt(absoluteSystematicError ** 2 + absoluteStatisticError ** 2)
# absoluteStatisticError = centralRelativeError * centralEstimate
result = {
'estimate':centralEstimate,
'absoluteError': centralAbsoluteError,
'absoluteSystematicError': absoluteSystematicError,
'absoluteStatisticError': absoluteStatisticError,
'fit':centralResult['fit']}
return result
def relIsoMethod(histogramForEstimation, function='expo',
fitRange=(0.3, 1.6), signalRegion=(0., 0.1)):
histogramForEstimation = histogramForEstimation.Clone('tmp')
#investigate them
binWidthOfOriginalHistoram = 0.01
rebinOfOriginalHistogram = 10
estimate = 0
relativeErrorSquared = 0
histogramForEstimation.Rebin(rebinOfOriginalHistogram)
fit = None
fit = performFit(histogramForEstimation, function, fitRange)
if fit:
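        # integrate the fitted function over the signal region and divide by the
        # rebinned bin width to convert the function integral into an event count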
estimate = fit.Integral(signalRegion[0], signalRegion[1]) / (binWidthOfOriginalHistoram * rebinOfOriginalHistogram)
for parErr in range(0, fit.GetNumberFreeParameters()):
par = fit.GetParameter(parErr)
err = fit.GetParError(parErr)
if not par == 0:
relativeErrorSquared += (err / par) ** 2
result = {'estimate': estimate,
'absoluteError':sqrt(relativeErrorSquared) * estimate,
'relativeError':sqrt(relativeErrorSquared),
'fit':fit}
return result
def performFit(histogram, function, fitRange):
histogram = histogram.Clone('fitting')
numberOfFreeParameters = -1
fit = None
histogram.Fit(function, "Q0", "ah", fitRange[0], fitRange[1])
fit = histogram.GetFunction(function)
if fit:
return fit.Clone()
else:
return None
#Estimate the bias on MC only
def getRelIsoCalibrationCurve(inputFiles, histogramForEstimation=defaultHistogram, function='expo',
fitRanges=[(0.2, 1.6), (0.3, 1.6), (0.4, 1.6)]):
if DEBUG:
print '*' * 120
print "Estimating QCD using a fit to RelIso"
print 'Input files = ', inputFiles
print 'Histogram = ', histogramForEstimation
print 'Fit function = ', function
print 'Fit ranges = ', fitRanges
print '*' * 120
#get histograms
#instead of data use sum MC
def doPerformanceStudyOnMCOnly(inputFiles,
histogramForEstimation=defaultHistogram,
function='expo',
fitRanges=[(0.2, 1.6), (0.3, 1.6), (0.4, 1.6)]):
if DEBUG:
print '*' * 120
print "Estimating QCD using a fit to RelIso"
print 'Histogram = ', histogramForEstimation
print 'Fit functions = ', function
print 'Fit ranges = ', fitRanges
print '*' * 120
#get histograms
histograms = FileReader.getHistogramDictionary(histogramForEstimation, inputFiles)
global allMC, qcd
histograms['SumMC'] = plotting.sumSamples(histograms, allMC)
histograms['QCD'] = plotting.sumSamples(histograms, qcd)
# qcdInSignalRegion = histograms['QCD'].Integral()
# qcdError = 0
# if not qcdInSignalRegion == 0:
# qcdError = qcdInSignalRegion / sqrt(qcdInSignalRegion)
import copy
results = {}
qcdInSignalRegion, qcdError = getIntegral(histograms['QCD'], (0, 0.1))
# getRelIsoCalibrationCurve(inputFiles, histogramForEstimation, function, fitRanges)
for fitRange in fitRanges:
#take all other fit ranges as systematics
fitRangesForSystematics = copy.deepcopy(fitRanges)
fitRangesForSystematics.remove(fitRange)
#instead of data use sum MC
resultFromMethod = relIsoMethodWithSystematics(histograms['SumMC'], function, fitRange, fitRangesForSystematics, False)
estimate, absoluteError = resultFromMethod['estimate'], resultFromMethod['absoluteError']
N_est = ufloat((estimate, absoluteError))
N_qcd = ufloat((qcdInSignalRegion, qcdError))
relativeDeviation = N_est / N_qcd
result = {}
result['performance'] = (relativeDeviation.nominal_value, relativeDeviation.std_dev())
result['estimate'] = (estimate, absoluteError)
result['qcdInSignalRegion'] = (qcdInSignalRegion, qcdError)
result['fitfunction'] = function
result['fitRange'] = fitRange
result['fitRangesForSystematics'] = fitRangesForSystematics
result['fit'] = resultFromMethod['fit']
results[str(fitRange)] = result
return results
def printPerformanceResults(btag_results, btagBins):
print 'function, range & $N_{est,\,QCD}^{data}$ & $N_{true,\,QCD}^{MC}$ & $f_{est./true}$ \\\\'
for btag in btagBins:
print '\hline'
results = btag_results[btag]
print '%s & & & \\\\' % btag_latex[btag]
print '\hline'
for fitrange, result in results.iteritems():
N_qcd, N_qcd_error = result['qcdInSignalRegion']
est, err = result['estimate']
performance, performanceError = result['performance']
format = (result['fitfunction'], str(result['fitRange']), N_qcd, N_qcd_error, est, err, performance, performanceError)
print '%s, %s & $%.0f \pm %.0f$ & $%.0f \pm %.0f$ & $ %.3f \pm %.3f$ \\\\' % format
def getIntegral(histogram, integralRange=(0, 0.1)):
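    # integrate the histogram between the bins containing the range limits;
    # IntegralAndError fills the passed Double with the propagated statistical uncertainty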
firstBin = histogram.GetXaxis().FindBin(integralRange[0])
lastBin = histogram.GetXaxis().FindBin(integralRange[1])
integral = 0
absoluteError = Double(0)
integral = histogram.IntegralAndError(firstBin, lastBin, absoluteError)
return integral, absoluteError
def printResults(results, btagBins):
header = "region & $N_{est,\,QCD}^{MC}$ & $N_{est,\,QCD}^{data}$ &"
header += "$\\frac{(data - MC(QCD^{MC}))}{data}$ &"
header += "$\\frac{(data -MC(QCD^{data}))}{data}$ \\\\"
print header
print '\hline'
rowTemplate = "%s & $%.0f \pm %.0f$ & "
rowTemplate += "$%.0f \pm %.0f$ (stat.) $\pm %.0f$ (syst.) &"
rowTemplate += "%s & %s \\\\"
global btag_latex
for btag in btagBins:
result = results[btag]
ndata = result['N_data']
nQCD = result['N_QCD']
nSumMC = result['N_SumMC']
nSumMC_QCDFromData = nSumMC - nQCD + result['estimate']
data_MC_diff = (ndata - nSumMC) / ndata * 100
data_MC_diff_QCDFromData = (ndata - nSumMC_QCDFromData) / ndata * 100
result['data-MC(QCDFromMC)'] = data_MC_diff
result['data-MC(QCDFromData)'] = data_MC_diff_QCDFromData
formatting = (btag_latex[btag], result['N_QCD'], result['N_QCD_Error'], result['estimate'],
result['absoluteStatisticError'], result['absoluteSystematicError'],
('%.2f' % data_MC_diff) + '\%', ('%.2f' % data_MC_diff_QCDFromData) + '\%')
print rowTemplate % formatting
def plotFits(results):
pass
def getStuff(histogramForEstimation, inputFiles):
histograms = FileReader.getHistogramDictionary(histogramForEstimation, inputFiles)
global allMC, qcd
histograms['SumMC'] = plotting.sumSamples(histograms, allMC)
histograms['QCD'] = plotting.sumSamples(histograms, qcd)
qcdInSignalRegion, qcdError = getIntegral(histograms['QCD'], (0, 0.1))
data, dataError = getIntegral(histograms['SingleElectron'], (0, 0.1))
sumMC, sumMCError = getIntegral(histograms['SumMC'], (0, 0.1))
result = {
'N_data': data,
'N_QCD': qcdInSignalRegion,
'N_QCD_Error': qcdError,
'N_SumMC': sumMC
}
return result
if __name__ == '__main__':
btagBins = [
'0orMoreBtag',
'0btag',
'1btag',
'2orMoreBtags'
]
histBase = 'TTbar_plus_X_analysis/EPlusJets/QCD e+jets PFRelIso/Electron/electron_pfIsolation_03_%s'
results = {}
mcresults = {}
for btag in btagBins:
hist = histBase % btag
results[btag] = estimateQCDWithRelIso(FILES.files, histogramForEstimation=hist)
results[btag].update(getStuff(hist, FILES.files))
mcresults[btag] = doPerformanceStudyOnMCOnly(FILES.files, hist, function='expo')
print
printResults(results, btagBins)
print
printPerformanceResults(mcresults, btagBins)
```
#### File: DailyPythonScripts/legacy/QCDShapeExtraction.py
```python
from math import fabs
import ROOTFileReader as reader
def getQCDShape(file, histname, histnameForSystematics, rebin=1, suffix=''):
errors = None
if not suffix == '':
histogramForShape = histname + '_' + suffix
histogramForComparison = histnameForSystematics + '_' + suffix
histogramForShape = reader.getHistogramFromFile(histogramForShape, file)
histogramForComparison = reader.getHistogramFromFile(histogramForComparison, file)
#sum weights for correct error calculation
histogramForShape.Sumw2()
histogramForComparison.Sumw2()
#rebin
histogramForShape.Rebin(rebin)
histogramForComparison.Rebin(rebin)
#get normalisation
nShape = histogramForShape.Integral()
nCompare = histogramForComparison.Integral()
def getShapeErrorHistogram(files,
histogramForShape='topReconstruction/backgroundShape/mttbar_conversions_withMETAndAsymJets',
histogramForComparison='topReconstruction/backgroundShape/mttbar_antiIsolated_withMETAndAsymJets',
rebin=1,
suffix=''):
errors = None
# for suffix in suffixes:
# if suffix in histname:
if not suffix == '':
histogramForShape = histogramForShape + '_' + suffix
histogramForComparison = histogramForComparison + '_' + suffix
hists = [histogramForShape, histogramForComparison]
hists = getHistsFromFiles(hists, files)
histogramForShape = hists['data'][histogramForShape]
histogramForComparison = hists['data'][histogramForComparison]
histogramForShape.Sumw2()
histogramForComparison.Sumw2()
histogramForShape.Rebin(rebin)
histogramForComparison.Rebin(rebin)
nShape = histogramForShape.Integral()
nCompare = histogramForComparison.Integral()
if nShape > 0 and nCompare > 0:
histogramForShape.Scale(1 / nShape)
histogramForComparison.Scale(1 / nCompare)
errors = histogramForShape.Clone('ShapeErrors')
errors.Add(histogramForComparison, -1)#subtraction
for bin in range(1, errors.GetNbinsX()):
errors.SetBinContent(bin, fabs(errors.GetBinContent(bin)))
errors.Divide(histogramForShape)
return errors
```
#### File: legacy/tools/ErrorCalculation.py
```python
def getResultFromConstantRatio(result, ratio):
return 0, 0
```
#### File: legacy/tools/ROOTFileReader.py
```python
from ROOT import TFile, gROOT
import tools.Log as Log
from config.sampleSummations import btag_bins_inclusive, btag_sums
openRootFile = TFile.Open
gcd = gROOT.cd
#Reads a single histogram from a single file
def getHistogramFromFile(histname, filename):
rootFile = TFile.Open(filename)
    testIfFalidFile(rootFile, filename)
    getHist = rootFile.Get
btag_found = ''
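    # if the requested name contains an inclusive b-tag bin (e.g. '1orMoreBtag'),
    # the histogram is built by summing the corresponding exclusive b-tag histograms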
for btag in btag_bins_inclusive:
if btag in histname:
btag_found = btag
break
rootHist = None
# sumEvents = 0
if btag_found == '':
rootHist = getHist(histname)
if not isValidHist(rootHist, histname, filename):
return
else:
listOfExclusiveBins = btag_sums[btag_found]
exclhists = []
for excbin in listOfExclusiveBins:
hist = getHist(histname.replace(btag_found, excbin))
if not isValidHist(hist, histname.replace(btag_found, excbin), filename):
return
exclhists.append(hist)
rootHist = exclhists[0].Clone()
for hist in exclhists[1:]:
rootHist.Add(hist)
gcd()
histogram = rootHist.Clone()
rootFile.Close()
return histogram
def testIfFalidFile(rootFile, filename):
if not rootFile:
Log.logErrorMessage('Could not find rootFile: ' + filename)
def isValidHist(rootHist, histname, filename):
if not rootHist:
Log.logErrorMessage('Histogram \n"%s" \ncould not be found in rootFile:\n%s' % (histname, filename))
return False
return True
#Reads a single histogram from each given rootFile
#and returns a dictionary with the same naming as 'files'
def getHistogramDictionary(histname, files={}):
hists = {}
for sample, filename in files.iteritems():
hists[sample] = getHistogramFromFile(histname, filename)
return hists
#Reads a list of histograms from each given file
def getHistogramsFromFiles(histnames=[], files={}, verbose = False):
histograms = {}
nHistograms = 0
for sample, filename in files.iteritems():
rootFile = openRootFile(filename)
getHist = rootFile.Get
histograms[sample] = {}
for histname in histnames:
btag_found = ''
for btag in btag_bins_inclusive:
if btag in histname:
btag_found = btag
break
rootHist = None
if btag_found == '':
rootHist = getHist(histname)
if not isValidHist(rootHist, histname, filename):
continue
else:
listOfExclusiveBins = btag_sums[btag_found]
exclhists = []
for excbin in listOfExclusiveBins:
hist = getHist(histname.replace(btag_found, excbin))
if not isValidHist(hist, histname.replace(btag_found, excbin), filename):
continue
exclhists.append(hist)
rootHist = exclhists[0].Clone()
for hist in exclhists[1:]:
rootHist.Add(hist)
gcd()
nHistograms += 1
histograms[sample][histname] = rootHist.Clone()
if verbose and nHistograms % 5000 == 0:
print 'Read', nHistograms, 'histograms'
rootFile.Close()
return histograms
```
#### File: src/cross_section_measurement/00_pick_bins.py
```python
from rootpy import asrootpy
from rootpy.io import File
from tools.Calculation import calculate_purities, calculate_stabilities
from tools.hist_utilities import rebin_2d
from config.variable_binning import bin_edges as old_binning
from config import XSectionConfig
def main():
'''
Step 1: Get the 2D histogram for every sample (channel and/or centre of mass energy)
Step 2: Change the size of the first bin until it fulfils the minimal criteria
    Step 3: Check if it is true for all other histograms. If not, go back to step 2
    Step 4: Repeat steps 2 & 3 until no more bins can be created
'''
p_min = 0.5
s_min = 0.5
    # we also want the statistical error to be small;
    # with error = 1/sqrt(N), a 5% error translates to N >= (1/0.05)^2 = 400
    n_min = 100 # N = 100 -> 10 % stat error
# n_min = 200 # N = 200 -> 7.1 % stat error
for variable in ['MET', 'HT', 'ST', 'MT', 'WPT']:
histogram_information = get_histograms( variable )
best_binning, histogram_information = get_best_binning( histogram_information , p_min, s_min, n_min )
print 'The best binning for', variable, 'is:'
print 'bin edges =', best_binning
print 'N_bins =', len( best_binning ) - 1
print '-' * 120
print 'The corresponding purities and stabilities are:'
for info in histogram_information:
# old_hist = rebin_2d( info['hist'], old_binning[variable], old_binning[variable] )
# old_purities = calculate_purities( old_hist.Clone() )
# old_stabilities = calculate_stabilities( old_hist.Clone() )
# print_console(info, old_purities, old_stabilities, print_old = True)
print_latex_table(info, variable, best_binning)
print '=' * 120
def get_histograms( variable ):
config_7TeV = XSectionConfig( 7 )
config_8TeV = XSectionConfig( 8 )
path_electron = ''
path_muon = ''
histogram_name = 'response_without_fakes'
if variable == 'MET':
path_electron = 'unfolding_MET_analyser_electron_channel_patType1CorrectedPFMet/%s' % histogram_name
path_muon = 'unfolding_MET_analyser_muon_channel_patType1CorrectedPFMet/%s' % histogram_name
elif variable == 'HT':
path_electron = 'unfolding_HT_analyser_electron_channel/%s' % histogram_name
path_muon = 'unfolding_HT_analyser_muon_channel/%s' % histogram_name
elif variable == 'ST':
path_electron = 'unfolding_ST_analyser_electron_channel_patType1CorrectedPFMet/%s' % histogram_name
path_muon = 'unfolding_ST_analyser_muon_channel_patType1CorrectedPFMet/%s' % histogram_name
elif variable == 'MT':
path_electron = 'unfolding_MT_analyser_electron_channel_patType1CorrectedPFMet/%s' % histogram_name
path_muon = 'unfolding_MT_analyser_muon_channel_patType1CorrectedPFMet/%s' % histogram_name
elif variable == 'WPT':
path_electron = 'unfolding_WPT_analyser_electron_channel_patType1CorrectedPFMet/%s' % histogram_name
path_muon = 'unfolding_WPT_analyser_muon_channel_patType1CorrectedPFMet/%s' % histogram_name
histogram_information = [
{'file': config_7TeV.unfolding_madgraph_raw,
'CoM': 7,
'path':path_electron,
'channel':'electron'},
{'file':config_7TeV.unfolding_madgraph_raw,
'CoM': 7,
'path':path_muon,
'channel':'muon'},
{'file':config_8TeV.unfolding_madgraph_raw,
'CoM': 8,
'path':path_electron,
'channel':'electron'},
{'file':config_8TeV.unfolding_madgraph_raw,
'CoM': 8,
'path':path_muon,
'channel':'muon'},
]
for histogram in histogram_information:
f = File( histogram['file'] )
# scale to lumi
nEvents = f.EventFilter.EventCounter.GetBinContent( 1 ) # number of processed events
config = XSectionConfig( histogram['CoM'] )
lumiweight = config.ttbar_xsection * config.new_luminosity / nEvents
histogram['hist'] = f.Get( histogram['path'] ).Clone()
histogram['hist'].Scale( lumiweight )
# change scope from file to memory
histogram['hist'].SetDirectory( 0 )
f.close()
return histogram_information
def get_best_binning( histogram_information, p_min, s_min, n_min ):
'''
Step 1: Change the size of the first bin until it fulfils the minimal criteria
    Step 2: Check if it is true for all other histograms. If not, go back to step 1
    Step 3: Repeat steps 1 & 2 until no more bins can be created
'''
histograms = [info['hist'] for info in histogram_information]
bin_edges = []
purities = {}
stabilities = {}
current_bin_start = 0
current_bin_end = 0
first_hist = histograms[0]
n_bins = first_hist.GetNbinsX()
while current_bin_end < n_bins:
current_bin_end, _, _, _ = get_next_end( histograms, current_bin_start, current_bin_end, p_min, s_min, n_min )
if not bin_edges:
# if empty
bin_edges.append( first_hist.GetXaxis().GetBinLowEdge( current_bin_start + 1 ) )
bin_edges.append( first_hist.GetXaxis().GetBinLowEdge( current_bin_end ) + first_hist.GetXaxis().GetBinWidth( current_bin_end ) )
current_bin_start = current_bin_end
# add the purity and stability values for the final binning
for info in histogram_information:
new_hist = rebin_2d( info['hist'], bin_edges, bin_edges ).Clone( info['channel'] + '_' + str( info['CoM'] ) )
get_bin_content = new_hist.GetBinContent
purities = calculate_purities( new_hist.Clone() )
stabilities = calculate_stabilities( new_hist.Clone() )
n_events = [int( get_bin_content( i, i ) ) for i in range( 1, len( bin_edges ) )]
# Now check if the last bin also fulfils the requirements
if purities[-1] < p_min or stabilities[-1] < s_min or n_events[-1] < n_min:
# if not, merge last two bins
bin_edges[-2] = bin_edges[-1]
bin_edges = bin_edges[:-1]
new_hist = rebin_2d( info['hist'], bin_edges, bin_edges ).Clone()
get_bin_content = new_hist.GetBinContent
purities = calculate_purities( new_hist.Clone() )
stabilities = calculate_stabilities( new_hist.Clone() )
n_events = [int( get_bin_content( i, i ) ) for i in range( 1, len( bin_edges ) )]
info['p_i'] = purities
info['s_i'] = stabilities
info['N'] = n_events
return bin_edges, histogram_information
def get_next_end( histograms, bin_start, bin_end, p_min, s_min, n_min ):
current_bin_start = bin_start
current_bin_end = bin_end
for gen_vs_reco_histogram in histograms:
reco = asrootpy( gen_vs_reco_histogram.ProjectionX() )
gen = asrootpy( gen_vs_reco_histogram.ProjectionY() )
reco_i = list( reco.y() )
gen_i = list( gen.y() )
# keep the start bin the same but roll the end bin
for bin_i in range ( current_bin_end, len( reco_i ) + 1 ):
n_reco = sum( reco_i[current_bin_start:bin_i] )
n_gen = sum( gen_i[current_bin_start:bin_i] )
n_gen_and_reco = 0
if bin_i < current_bin_start + 1:
n_gen_and_reco = gen_vs_reco_histogram.Integral( current_bin_start + 1, bin_i + 1, current_bin_start + 1, bin_i + 1 )
else:
# this is necessary to synchronise the integral with the rebin method
                # only if the bin before is taken is it equivalent to rebinning
# the histogram and taking the diagonal elements (which is what we want)
n_gen_and_reco = gen_vs_reco_histogram.Integral( current_bin_start + 1, bin_i , current_bin_start + 1, bin_i )
p, s = 0, 0
if n_reco > 0:
p = round( n_gen_and_reco / n_reco, 3 )
if n_gen > 0:
s = round( n_gen_and_reco / n_gen, 3 )
# find the bin range that matches
if p >= p_min and s >= s_min and n_gen_and_reco >= n_min:
current_bin_end = bin_i
break
# if it gets to the end, this is the best we can do
current_bin_end = bin_i
return current_bin_end, p, s, n_gen_and_reco
def print_console(info, old_purities, old_stabilities, print_old = False):
print 'CoM =', info['CoM'], 'channel =', info['channel']
print 'p_i =', info['p_i']
if print_old:
print 'p_i (old) =', old_purities
print 's_i =', info['s_i']
if print_old:
print 's_i (old) =', old_stabilities
print 'N =', info['N']
print '*' * 120
def print_latex_table( info, variable, best_binning ):
print 'CoM =', info['CoM'], 'channel =', info['channel']
header = """\%s bin (\GeV) & purity & stability & number of events\\\\
\hline""" % variable.lower()
print header
for i in range( len( best_binning ) - 1 ):
bin_range = ""
if i == len( best_binning ) - 2:
bin_range = '$\geq %d$' % best_binning[i]
else:
bin_range = '%d - %d' % ( best_binning[i], best_binning[i + 1] )
print '%s & %.3f & %.3f & %d\\\\' % (bin_range, info['p_i'][i], info['s_i'][i], info['N'][i])
print '\hline'
if __name__ == '__main__':
main()
```
#### File: src/cross_section_measurement/04_make_plots_matplotlib.py
```python
from __future__ import division # the result of the division will be always a float
from optparse import OptionParser
import os, gc
from copy import deepcopy
from config.latex_labels import variables_latex, measurements_latex, \
met_systematics_latex, b_tag_bins_latex, fit_variables_latex
from config.variable_binning import bin_edges, variable_bins_ROOT, fit_variable_bin_edges
from config import XSectionConfig
from tools.file_utilities import read_data_from_JSON, make_folder_if_not_exists
from tools.hist_utilities import value_error_tuplelist_to_hist, \
value_tuplelist_to_hist, value_errors_tuplelist_to_graph, graph_to_value_errors_tuplelist
from math import sqrt
# rootpy & matplotlib
from ROOT import kRed, kGreen, kMagenta, kBlue, kBlack
from tools.ROOT_utils import set_root_defaults
import matplotlib as mpl
from tools.plotting import get_best_max_y
mpl.use( 'agg' )
import rootpy.plotting.root2matplotlib as rplt
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.ticker import MultipleLocator
from config import CMS
from matplotlib import rc
rc( 'font', **CMS.font )
rc( 'text', usetex = True )
def read_xsection_measurement_results( category, channel ):
global path_to_JSON, variable, k_values, met_type
filename = ''
if category in met_uncertainties and variable == 'HT':
filename = path_to_JSON + '/xsection_measurement_results/' + channel + '/kv' + str( k_values[channel] ) + '/central/normalised_xsection_' + met_type + '.txt'
else:
filename = path_to_JSON + '/xsection_measurement_results/' + channel + '/kv' + str( k_values[channel] ) + '/' + category + '/normalised_xsection_' + met_type + '.txt'
if channel == 'combined':
filename = filename.replace( 'kv' + str( k_values[channel] ), '' )
normalised_xsection_unfolded = read_data_from_JSON( filename )
h_normalised_xsection = value_error_tuplelist_to_hist( normalised_xsection_unfolded['TTJet_measured'], bin_edges[variable] )
h_normalised_xsection_unfolded = value_error_tuplelist_to_hist( normalised_xsection_unfolded['TTJet_unfolded'], bin_edges[variable] )
histograms_normalised_xsection_different_generators = {'measured':h_normalised_xsection,
'unfolded':h_normalised_xsection_unfolded}
histograms_normalised_xsection_systematics_shifts = {'measured':h_normalised_xsection,
'unfolded':h_normalised_xsection_unfolded}
if category == 'central':
# true distributions
h_normalised_xsection_MADGRAPH = value_error_tuplelist_to_hist( normalised_xsection_unfolded['MADGRAPH'], bin_edges[variable] )
h_normalised_xsection_MADGRAPH_ptreweight = value_error_tuplelist_to_hist( normalised_xsection_unfolded['MADGRAPH_ptreweight'], bin_edges[variable] )
h_normalised_xsection_POWHEG_PYTHIA = value_error_tuplelist_to_hist( normalised_xsection_unfolded['POWHEG_PYTHIA'], bin_edges[variable] )
h_normalised_xsection_POWHEG_HERWIG = value_error_tuplelist_to_hist( normalised_xsection_unfolded['POWHEG_HERWIG'], bin_edges[variable] )
h_normalised_xsection_MCATNLO = value_error_tuplelist_to_hist( normalised_xsection_unfolded['MCATNLO'], bin_edges[variable] )
        h_normalised_xsection_matchingup = value_error_tuplelist_to_hist( normalised_xsection_unfolded['matchingup'], bin_edges[variable] )
        h_normalised_xsection_matchingdown = value_error_tuplelist_to_hist( normalised_xsection_unfolded['matchingdown'], bin_edges[variable] )
h_normalised_xsection_scaleup = value_error_tuplelist_to_hist( normalised_xsection_unfolded['scaleup'], bin_edges[variable] )
h_normalised_xsection_scaledown = value_error_tuplelist_to_hist( normalised_xsection_unfolded['scaledown'], bin_edges[variable] )
histograms_normalised_xsection_different_generators.update( {'MADGRAPH':h_normalised_xsection_MADGRAPH,
'MADGRAPH_ptreweight':h_normalised_xsection_MADGRAPH_ptreweight,
'POWHEG_PYTHIA':h_normalised_xsection_POWHEG_PYTHIA,
'POWHEG_HERWIG':h_normalised_xsection_POWHEG_HERWIG,
'MCATNLO':h_normalised_xsection_MCATNLO} )
        histograms_normalised_xsection_systematics_shifts.update( {'MADGRAPH':h_normalised_xsection_MADGRAPH,
                                                                   'MADGRAPH_ptreweight':h_normalised_xsection_MADGRAPH_ptreweight,
                                                                   'matchingdown': h_normalised_xsection_matchingdown,
                                                                   'matchingup': h_normalised_xsection_matchingup,
                                                                   'scaledown': h_normalised_xsection_scaledown,
                                                                   'scaleup': h_normalised_xsection_scaleup} )
file_template = path_to_JSON + '/xsection_measurement_results/' + channel + '/kv' + str( k_values[channel] ) + '/' + category + '/normalised_xsection_' + met_type
if channel == 'combined':
file_template = file_template.replace( 'kv' + str( k_values[channel] ), '' )
# normalised_xsection_unfolded_with_errors = read_data_from_JSON( file_template + '_with_errors.txt' )
normalised_xsection_unfolded_with_errors_with_systematics_but_without_ttbar_theory = read_data_from_JSON( file_template + '_with_systematics_but_without_ttbar_theory_errors.txt' )
normalised_xsection_unfolded_with_errors_with_systematics_but_without_generator = read_data_from_JSON( file_template + '_with_systematics_but_without_generator_errors.txt' )
# a rootpy.Graph with asymmetric errors!
h_normalised_xsection_with_systematics_but_without_ttbar_theory = value_errors_tuplelist_to_graph(
normalised_xsection_unfolded_with_errors_with_systematics_but_without_ttbar_theory['TTJet_measured'],
bin_edges[variable] )
h_normalised_xsection_with_systematics_but_without_ttbar_theory_unfolded = value_errors_tuplelist_to_graph(
normalised_xsection_unfolded_with_errors_with_systematics_but_without_ttbar_theory['TTJet_unfolded'],
bin_edges[variable] )
h_normalised_xsection_with_systematics_but_without_generator = value_errors_tuplelist_to_graph(
normalised_xsection_unfolded_with_errors_with_systematics_but_without_generator['TTJet_measured'],
bin_edges[variable] )
h_normalised_xsection_with_systematics_but_without_generator_unfolded = value_errors_tuplelist_to_graph(
normalised_xsection_unfolded_with_errors_with_systematics_but_without_generator['TTJet_unfolded'],
bin_edges[variable] )
histograms_normalised_xsection_different_generators['measured_with_systematics'] = h_normalised_xsection_with_systematics_but_without_ttbar_theory
histograms_normalised_xsection_different_generators['unfolded_with_systematics'] = h_normalised_xsection_with_systematics_but_without_ttbar_theory_unfolded
histograms_normalised_xsection_systematics_shifts['measured_with_systematics'] = h_normalised_xsection_with_systematics_but_without_generator
histograms_normalised_xsection_systematics_shifts['unfolded_with_systematics'] = h_normalised_xsection_with_systematics_but_without_generator_unfolded
return histograms_normalised_xsection_different_generators, histograms_normalised_xsection_systematics_shifts
def read_fit_templates_and_results_as_histograms( category, channel ):
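    '''
    Read the fit templates and fit results for the given category and channel
    from JSON and return them as histograms, keyed by fit variable and
    variable bin.
    '''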
global path_to_JSON, variable, met_type
templates = read_data_from_JSON( path_to_JSON + '/fit_results/' + category + '/templates_' + channel + '_' + met_type + '.txt' )
data_values = read_data_from_JSON( path_to_JSON + '/fit_results/' + category + '/initial_values_' + channel + '_' + met_type + '.txt' )['data']
fit_results = read_data_from_JSON( path_to_JSON + '/fit_results/' + category + '/fit_results_' + channel + '_' + met_type + '.txt' )
fit_variables = templates.keys()
template_histograms = {fit_variable: {} for fit_variable in fit_variables}
fit_results_histograms = {fit_variable: {} for fit_variable in fit_variables}
for bin_i, variable_bin in enumerate( variable_bins_ROOT[variable] ):
for fit_variable in fit_variables:
h_template_data = value_tuplelist_to_hist( templates[fit_variable]['data'][bin_i], fit_variable_bin_edges[fit_variable] )
h_template_ttjet = value_tuplelist_to_hist( templates[fit_variable]['TTJet'][bin_i], fit_variable_bin_edges[fit_variable] )
h_template_singletop = value_tuplelist_to_hist( templates[fit_variable]['SingleTop'][bin_i], fit_variable_bin_edges[fit_variable] )
h_template_VJets = value_tuplelist_to_hist( templates[fit_variable]['V+Jets'][bin_i], fit_variable_bin_edges[fit_variable] )
h_template_QCD = value_tuplelist_to_hist( templates[fit_variable]['QCD'][bin_i], fit_variable_bin_edges[fit_variable] )
template_histograms[fit_variable][variable_bin] = {
'TTJet' : h_template_ttjet,
'SingleTop' : h_template_singletop,
'V+Jets':h_template_VJets,
'QCD':h_template_QCD
}
h_data = h_template_data.Clone()
h_ttjet = h_template_ttjet.Clone()
h_singletop = h_template_singletop.Clone()
h_VJets = h_template_VJets.Clone()
h_QCD = h_template_QCD.Clone()
data_normalisation = data_values[bin_i][0]
n_ttjet = fit_results['TTJet'][bin_i][0]
n_singletop = fit_results['SingleTop'][bin_i][0]
VJets_normalisation = fit_results['V+Jets'][bin_i][0]
QCD_normalisation = fit_results['QCD'][bin_i][0]
h_data.Scale( data_normalisation )
h_ttjet.Scale( n_ttjet )
h_singletop.Scale( n_singletop )
h_VJets.Scale( VJets_normalisation )
h_QCD.Scale( QCD_normalisation )
h_background = h_VJets + h_QCD + h_singletop
for bin_i_data in range( len( h_data ) ):
h_data.SetBinError( bin_i_data + 1, sqrt( h_data.GetBinContent( bin_i_data + 1 ) ) )
fit_results_histograms[fit_variable][variable_bin] = {
'data' : h_data,
'signal' : h_ttjet,
'background' : h_background
}
return template_histograms, fit_results_histograms
def make_template_plots( histograms, category, channel ):
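    '''
    Plot the fit templates (TTJet, SingleTop, V+Jets, QCD) for every variable
    bin and fit variable and save them under the output folder.
    '''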
global variable, output_folder
fit_variables = histograms.keys()
for variable_bin in variable_bins_ROOT[variable]:
path = output_folder + str( measurement_config.centre_of_mass_energy ) + 'TeV/' + variable + '/' + category + '/fit_templates/'
make_folder_if_not_exists( path )
for fit_variable in fit_variables:
plotname = path + channel + '_' + fit_variable + '_template_bin_' + variable_bin
            # skip this template if plots already exist in all output formats
            if all( os.path.isfile( plotname + '.' + output_format ) for output_format in output_formats ):
                continue
# plot with matplotlib
h_ttjet = histograms[fit_variable][variable_bin]['TTJet']
h_single_top = histograms[fit_variable][variable_bin]['SingleTop']
h_VJets = histograms[fit_variable][variable_bin]['V+Jets']
h_QCD = histograms[fit_variable][variable_bin]['QCD']
h_ttjet.linecolor = 'red'
h_single_top.linecolor = 'magenta'
h_VJets.linecolor = 'green'
h_QCD.linecolor = 'gray'
h_VJets.linestyle = 'dashed'
h_QCD.linestyle = 'dotted' # currently not working
# bug report: http://trac.sagemath.org/sage_trac/ticket/13834
h_ttjet.linewidth = 5
h_single_top.linewidth = 5
h_VJets.linewidth = 5
h_QCD.linewidth = 5
plt.figure( figsize = ( 16, 16 ), dpi = 200, facecolor = 'white' )
axes = plt.axes()
axes.minorticks_on()
plt.xlabel( fit_variables_latex[fit_variable], CMS.x_axis_title )
plt.ylabel( 'normalised to unit area/(%s)' % get_unit_string(fit_variable), CMS.y_axis_title )
plt.tick_params( **CMS.axis_label_major )
plt.tick_params( **CMS.axis_label_minor )
rplt.hist( h_ttjet, axes = axes, label = 'signal' )
rplt.hist( h_single_top, axes = axes, label = 'Single Top' )
if ( h_VJets.Integral() != 0 ):
rplt.hist( h_VJets, axes = axes, label = 'V+Jets' )
else:
print "WARNING: in %s bin %s, %s category, %s channel, V+Jets template is empty: not plotting." % ( variable, variable_bin, category, channel )
if ( h_QCD.Integral() != 0 ):
rplt.hist( h_QCD, axes = axes, label = 'QCD' )
else:
print "WARNING: in %s bin %s, %s category, %s channel, QCD template is empty: not plotting." % ( variable, variable_bin, category, channel )
y_max = get_best_max_y([h_ttjet, h_single_top, h_VJets, h_QCD])
axes.set_ylim( [0, y_max * 1.1] )
axes.set_xlim( measurement_config.fit_boundaries[fit_variable] )
plt.legend( numpoints = 1, loc = 'upper right', prop = CMS.legend_properties )
plt.title( get_cms_labels( channel ), CMS.title )
plt.tight_layout()
for output_format in output_formats:
plt.savefig( plotname + '.' + output_format )
plt.close()
gc.collect()
def plot_fit_results( histograms, category, channel ):
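    '''
    Plot data against the fitted signal and background for every variable bin
    and fit variable using make_data_mc_comparison_plot.
    '''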
global variable, b_tag_bin, output_folder
from tools.plotting import Histogram_properties, make_data_mc_comparison_plot
fit_variables = histograms.keys()
for variable_bin in variable_bins_ROOT[variable]:
path = output_folder + str( measurement_config.centre_of_mass_energy ) + 'TeV/' + variable + '/' + category + '/fit_results/'
make_folder_if_not_exists( path )
for fit_variable in fit_variables:
plotname = channel + '_' + fit_variable + '_bin_' + variable_bin
            # skip this plot if it already exists in all output formats
            # (assumes make_data_mc_comparison_plot saves the plot as <path><plotname>.<format>)
            if all( os.path.isfile( path + plotname + '.' + output_format ) for output_format in output_formats ):
                continue
# plot with matplotlib
h_data = histograms[fit_variable][variable_bin]['data']
h_signal = histograms[fit_variable][variable_bin]['signal']
h_background = histograms[fit_variable][variable_bin]['background']
histogram_properties = Histogram_properties()
histogram_properties.name = plotname
histogram_properties.x_axis_title = fit_variables_latex[fit_variable]
histogram_properties.y_axis_title = 'Events/(%s)' % get_unit_string(fit_variable)
histogram_properties.title = get_cms_labels( channel )
histogram_properties.x_limits = measurement_config.fit_boundaries[fit_variable]
make_data_mc_comparison_plot( [h_data, h_background, h_signal],
['data', 'background', 'signal'],
['black', 'green', 'red'], histogram_properties,
save_folder = path, save_as = output_formats )
def get_cms_labels( channel ):
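    '''
    Build the plot title for the given channel: CMS label, luminosity,
    centre-of-mass energy and lepton selection.
    '''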
global b_tag_bin
lepton = 'e'
if channel == 'electron':
lepton = 'e + jets'
elif channel == 'muon':
lepton = '$\mu$ + jets'
else:
lepton = 'e, $\mu$ + jets combined'
# channel_label = '%s, $\geq$ 4 jets, %s' % ( lepton, b_tag_bins_latex[b_tag_bin] )
channel_label = lepton
template = 'CMS Preliminary, %.1f fb$^{-1}$ (%d TeV), %s'
label = template % ( measurement_config.new_luminosity / 1000., measurement_config.centre_of_mass_energy, channel_label )
return label
def make_plots( histograms, category, output_folder, histname, show_ratio = True, show_before_unfolding = False ):
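    '''
    Draw the unfolded normalised cross section (with statistical and, for the
    central measurement, systematic uncertainties) together with the other
    histograms in the input dictionary, optionally adding a theory/data ratio
    panel, and save the plot in all output formats.
    '''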
global variable, k_values
channel = 'electron'
if 'electron' in histname:
channel = 'electron'
elif 'muon' in histname:
channel = 'muon'
else:
channel = 'combined'
# plot with matplotlib
hist_data = histograms['unfolded']
if category == 'central':
hist_data_with_systematics = histograms['unfolded_with_systematics']
hist_measured = histograms['measured']
hist_data.markersize = 2
hist_data.marker = 'o'
if category == 'central':
hist_data_with_systematics.markersize = 2
hist_data_with_systematics.marker = 'o'
hist_measured.markersize = 2
hist_measured.marker = 'o'
hist_measured.color = 'red'
plt.figure( figsize = CMS.figsize, dpi = CMS.dpi, facecolor = CMS.facecolor )
if show_ratio:
gs = gridspec.GridSpec( 2, 1, height_ratios = [5, 1] )
axes = plt.subplot( gs[0] )
else:
axes = plt.axes()
plt.xlabel( '$%s$ [GeV]' % variables_latex[variable], CMS.x_axis_title )
axes.minorticks_on()
plt.ylabel( r'$\frac{1}{\sigma} \frac{d\sigma}{d' + variables_latex[variable] + '} \left[\mathrm{GeV}^{-1}\\right]$', CMS.y_axis_title )
plt.tick_params( **CMS.axis_label_major )
plt.tick_params( **CMS.axis_label_minor )
hist_data.visible = True
if category == 'central':
hist_data_with_systematics.visible = True
rplt.errorbar( hist_data_with_systematics, axes = axes, label = 'do_not_show', xerr = None, capsize = 0, elinewidth = 2, zorder = len( histograms ) + 1 )
rplt.errorbar( hist_data, axes = axes, label = 'do_not_show', xerr = None, capsize = 15, capthick = 3, elinewidth = 2, zorder = len( histograms ) + 2 )
rplt.errorbar( hist_data, axes = axes, label = 'data', xerr = False, yerr = False, zorder = len( histograms ) + 3 ) # this makes a nicer legend entry
if show_before_unfolding:
rplt.errorbar( hist_measured, axes = axes, label = 'data (before unfolding)', xerr = None, zorder = len( histograms ) )
for key, hist in sorted( histograms.iteritems() ):
if not 'unfolded' in key and not 'measured' in key:
hist.linewidth = 2
# setting colours
if 'POWHEG_PYTHIA' in key or 'matchingdown' in key:
hist.linestyle = 'longdashdot'
hist.SetLineColor( kBlue )
elif 'POWHEG_HERWIG' in key or 'scaledown' in key:
hist.linestyle = 'dashed'
hist.SetLineColor( kGreen )
elif 'MADGRAPH_ptreweight' in key:
hist.linestyle = 'solid'
hist.SetLineColor( kBlack )
elif 'MADGRAPH' in key:
hist.linestyle = 'solid'
hist.SetLineColor( kRed + 1 )
elif 'matchingup' in key:
hist.linestyle = 'verylongdashdot'
hist.linecolor = 'orange'
elif 'MCATNLO' in key or 'scaleup' in key:
hist.linestyle = 'dotted'
hist.SetLineColor( kMagenta + 3 )
rplt.hist( hist, axes = axes, label = measurements_latex[key], zorder = sorted( histograms, reverse = True ).index( key ) )
handles, labels = axes.get_legend_handles_labels()
# making data first in the list
data_label_index = labels.index( 'data' )
data_handle = handles[data_label_index]
labels.remove( 'data' )
handles.remove( data_handle )
labels.insert( 0, 'unfolded data' )
handles.insert( 0, data_handle )
new_handles, new_labels = [], []
for handle, label in zip( handles, labels ):
if not label == 'do_not_show':
new_handles.append( handle )
new_labels.append( label )
legend_location = 'upper right'
if variable == 'MT':
legend_location = 'upper left'
plt.legend( new_handles, new_labels, numpoints = 1, loc = legend_location, prop = CMS.legend_properties )
plt.title( get_cms_labels( channel ), CMS.title )
if show_ratio:
plt.setp( axes.get_xticklabels(), visible = False )
ax1 = plt.subplot( gs[1] )
ax1.minorticks_on()
#ax1.grid( True, 'major', linewidth = 1 )
# setting the x_limits identical to the main plot
x_limits = axes.get_xlim()
ax1.set_xlim(x_limits)
ax1.yaxis.set_major_locator( MultipleLocator( 0.5 ) )
ax1.yaxis.set_minor_locator( MultipleLocator( 0.1 ) )
plt.xlabel( '$%s$ [GeV]' % variables_latex[variable], CMS.x_axis_title )
plt.tick_params( **CMS.axis_label_major )
plt.tick_params( **CMS.axis_label_minor )
plt.ylabel( '$\\frac{\\textrm{theory}}{\\textrm{data}}$', CMS.y_axis_title_small )
ax1.yaxis.set_label_coords(-0.115, 0.8)
#draw a horizontal line at y=1 for data
plt.axhline(y = 1, color = 'black', linewidth = 1)
for key, hist in sorted( histograms.iteritems() ):
if not 'unfolded' in key and not 'measured' in key:
ratio = hist.Clone()
ratio.Divide( hist_data ) #divide by data
rplt.hist( ratio, axes = ax1, label = 'do_not_show' )
stat_lower = hist_data.Clone()
stat_upper = hist_data.Clone()
syst_lower = hist_data.Clone()
syst_upper = hist_data.Clone()
# plot error bands on data in the ratio plot
for bin_i in range( 1, hist_data.GetNbinsX() + 1 ):
stat_errors = graph_to_value_errors_tuplelist(hist_data)
stat_lower.SetBinContent( bin_i, 1 - stat_errors[bin_i-1][1]/stat_errors[bin_i-1][0] )
stat_upper.SetBinContent( bin_i, 1 + stat_errors[bin_i-1][2]/stat_errors[bin_i-1][0] )
if category == 'central':
syst_errors = graph_to_value_errors_tuplelist(hist_data_with_systematics)
syst_lower.SetBinContent( bin_i, 1 - syst_errors[bin_i-1][1]/syst_errors[bin_i-1][0] )
syst_upper.SetBinContent( bin_i, 1 + syst_errors[bin_i-1][2]/syst_errors[bin_i-1][0] )
if category == 'central':
rplt.fill_between( syst_lower, syst_upper, ax1, facecolor = 'yellow', alpha = 0.5 )
rplt.fill_between( stat_upper, stat_lower, ax1, facecolor = '0.75', alpha = 0.5 )
# p1 = plt.Rectangle((0, 0), 1, 1, fc="yellow")
# p2 = plt.Rectangle((0, 0), 1, 1, fc="0.75")
# plt.legend([p1, p2], ['Stat. $\\oplus$ Syst.', 'Stat.'], loc = 'upper left', prop = {'size':20})
ax1.set_ylim( ymin = 0.5, ymax = 1.5 )
if CMS.tight_layout:
plt.tight_layout()
path = output_folder + str( measurement_config.centre_of_mass_energy ) + 'TeV/' + variable + '/' + category
make_folder_if_not_exists( path )
for output_format in output_formats:
filename = path + '/' + histname + '_kv' + str( k_values[channel] ) + '.' + output_format
if channel == 'combined':
filename = filename.replace( '_kv' + str( k_values[channel] ), '' )
plt.savefig( filename )
del hist_data, hist_measured
plt.close()
gc.collect()
def plot_central_and_systematics( channel, systematics, exclude = [], suffix = 'altogether' ):
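    '''
    Overlay the central unfolded result with the unfolded results for the
    given systematics (skipping any listed in exclude) and save the plot.
    '''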
global variable, k_values, b_tag_bin, met_type
plt.figure( figsize = ( 16, 16 ), dpi = 200, facecolor = 'white' )
axes = plt.axes()
axes.minorticks_on()
hist_data_central = read_xsection_measurement_results( 'central', channel )[0]['unfolded_with_systematics']
hist_data_central.markersize = 2 # points. Imagine, tangible units!
hist_data_central.marker = 'o'
plt.xlabel( '$%s$ [GeV]' % variables_latex[variable], CMS.x_axis_title )
plt.ylabel( r'$\frac{1}{\sigma} \frac{d\sigma}{d' + variables_latex[variable] + '} \left[\mathrm{GeV}^{-1}\\right]$', CMS.y_axis_title )
plt.tick_params( **CMS.axis_label_major )
plt.tick_params( **CMS.axis_label_minor )
rplt.errorbar( hist_data_central, axes = axes, label = 'data', xerr = True )
for systematic in sorted( systematics ):
if systematic in exclude or systematic == 'central':
continue
hist_data_systematic = read_xsection_measurement_results( systematic, channel )[0]['unfolded']
hist_data_systematic.markersize = 2
hist_data_systematic.marker = 'o'
colour_number = systematics.index( systematic ) + 2
if colour_number == 10:
colour_number = 42
hist_data_systematic.SetMarkerColor( colour_number )
if 'PDF' in systematic:
rplt.errorbar( hist_data_systematic, axes = axes, label = systematic.replace( 'Weights_', ' ' ), xerr = False )
elif met_type in systematic:
rplt.errorbar( hist_data_systematic, axes = axes, label = met_systematics_latex[systematic.replace( met_type, '' )], xerr = False )
else:
rplt.errorbar( hist_data_systematic, axes = axes, label = measurements_latex[systematic], xerr = False )
plt.legend( numpoints = 1, loc = 'upper right', prop = {'size':25}, ncol = 2 )
plt.title( get_cms_labels( channel ), CMS.title )
plt.tight_layout()
path = output_folder + str( measurement_config.centre_of_mass_energy ) + 'TeV/' + variable
make_folder_if_not_exists( path )
for output_format in output_formats:
filename = path + '/normalised_xsection_' + channel + '_' + suffix + '_kv' + str( k_values[channel] ) + '.' + output_format
if channel == 'combined':
filename = filename.replace( '_kv' + str( k_values[channel] ), '' )
plt.savefig( filename )
plt.close()
gc.collect()
def get_unit_string(fit_variable):
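    '''
    Return the fit variable bin width, including its unit if it has one,
    for use in axis labels.
    '''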
unit = measurement_config.fit_variable_unit[fit_variable]
fit_variable_bin_width = measurement_config.fit_variable_bin_width[fit_variable]
unit_string = ''
if unit == '':
unit_string = fit_variable_bin_width
else:
unit_string = '%f %s' % (fit_variable_bin_width, unit)
return unit_string
if __name__ == '__main__':
set_root_defaults()
parser = OptionParser()
parser.add_option( "-p", "--path", dest = "path", default = 'data/',
help = "set path to JSON files" )
parser.add_option( "-o", "--output_folder", dest = "output_folder", default = 'plots/',
help = "set path to save plots" )
parser.add_option( "-v", "--variable", dest = "variable", default = 'MET',
help = "set variable to plot (MET, HT, ST, MT)" )
parser.add_option( "-m", "--metType", dest = "metType", default = 'type1',
help = "set MET type used in the analysis of MET, ST or MT" )
parser.add_option( "-b", "--bjetbin", dest = "bjetbin", default = '2m',
help = "set b-jet multiplicity for analysis. Options: exclusive: 0-3, inclusive (N or more): 0m, 1m, 2m, 3m, 4m" )
parser.add_option( "-c", "--centre-of-mass-energy", dest = "CoM", default = 8, type = int,
help = "set the centre of mass energy for analysis. Default = 8 [TeV]" )
parser.add_option( "-a", "--additional-plots", action = "store_true", dest = "additional_plots",
help = "creates a set of plots for each systematic (in addition to central result)." )
output_formats = ['png', 'pdf']
( options, args ) = parser.parse_args()
measurement_config = XSectionConfig( options.CoM )
# caching of variables for shorter access
translate_options = measurement_config.translate_options
ttbar_theory_systematic_prefix = measurement_config.ttbar_theory_systematic_prefix
vjets_theory_systematic_prefix = measurement_config.vjets_theory_systematic_prefix
met_systematics_suffixes = measurement_config.met_systematics_suffixes
variable = options.variable
output_folder = options.output_folder
if not output_folder.endswith( '/' ):
output_folder += '/'
k_values = {'electron' : measurement_config.k_values_electron[variable],
'muon' : measurement_config.k_values_muon[variable],
'combined' : 'None'
}
met_type = translate_options[options.metType]
b_tag_bin = translate_options[options.bjetbin]
path_to_JSON = options.path + '/' + str( measurement_config.centre_of_mass_energy ) + 'TeV/' + variable + '/'
categories = deepcopy( measurement_config.categories_and_prefixes.keys() )
ttbar_generator_systematics = [ttbar_theory_systematic_prefix + systematic for systematic in measurement_config.generator_systematics]
vjets_generator_systematics = [vjets_theory_systematic_prefix + systematic for systematic in measurement_config.generator_systematics]
categories.extend( ttbar_generator_systematics )
categories.extend( vjets_generator_systematics )
# Add mass systematics
ttbar_mass_systematics = measurement_config.topMass_systematics
categories.extend( measurement_config.topMass_systematics )
# Add k value systematic
kValue_systematics = measurement_config.kValueSystematic
categories.extend( measurement_config.kValueSystematic )
pdf_uncertainties = ['PDFWeights_%d' % index for index in range( 1, 45 )]
pdf_uncertainties_1_to_11 = ['PDFWeights_%d' % index for index in range( 1, 12 )]
pdf_uncertainties_12_to_22 = ['PDFWeights_%d' % index for index in range( 12, 23 )]
pdf_uncertainties_23_to_33 = ['PDFWeights_%d' % index for index in range( 23, 34 )]
pdf_uncertainties_34_to_45 = ['PDFWeights_%d' % index for index in range( 34, 45 )]
# all MET uncertainties except JES as this is already included
met_uncertainties = [met_type + suffix for suffix in met_systematics_suffixes if not 'JetEn' in suffix and not 'JetRes' in suffix]
new_uncertainties = ['QCD_shape']
rate_changing_systematics = [systematic + '+' for systematic in measurement_config.rate_changing_systematics.keys()]
rate_changing_systematics.extend( [systematic + '-' for systematic in measurement_config.rate_changing_systematics.keys()] )
all_measurements = deepcopy( categories )
all_measurements.extend( pdf_uncertainties )
all_measurements.extend( met_uncertainties )
all_measurements.extend( new_uncertainties )
all_measurements.extend( rate_changing_systematics )
for channel in ['electron', 'muon', 'combined']:
for category in all_measurements:
if not category == 'central' and not options.additional_plots:
continue
if variable == 'HT' and category in met_uncertainties:
continue
# setting up systematic MET for JES up/down samples for reading fit templates
met_type = translate_options[options.metType]
if category == 'JES_up':
met_type += 'JetEnUp'
if met_type == 'PFMETJetEnUp':
met_type = 'patPFMetJetEnUp'
elif category == 'JES_down':
met_type += 'JetEnDown'
if met_type == 'PFMETJetEnDown':
met_type = 'patPFMetJetEnDown'
if not channel == 'combined':
fit_templates, fit_results = read_fit_templates_and_results_as_histograms( category, channel )
make_template_plots( fit_templates, category, channel )
plot_fit_results( fit_results, category, channel )
# change back to original MET type
met_type = translate_options[options.metType]
if met_type == 'PFMET':
met_type = 'patMETsPFlow'
histograms_normalised_xsection_different_generators, histograms_normalised_xsection_systematics_shifts = read_xsection_measurement_results( category, channel )
make_plots( histograms_normalised_xsection_different_generators, category, output_folder, 'normalised_xsection_' + channel + '_different_generators' )
make_plots( histograms_normalised_xsection_systematics_shifts, category, output_folder, 'normalised_xsection_' + channel + '_systematics_shifts' )
del histograms_normalised_xsection_different_generators, histograms_normalised_xsection_systematics_shifts
plot_central_and_systematics( channel, categories, exclude = ttbar_generator_systematics )
plot_central_and_systematics( channel, ttbar_generator_systematics, suffix = 'ttbar_generator_only' )
exclude = set( pdf_uncertainties ).difference( set( pdf_uncertainties_1_to_11 ) )
plot_central_and_systematics( channel, pdf_uncertainties_1_to_11, exclude = exclude, suffix = 'PDF_1_to_11' )
exclude = set( pdf_uncertainties ).difference( set( pdf_uncertainties_12_to_22 ) )
plot_central_and_systematics( channel, pdf_uncertainties_12_to_22, exclude = exclude, suffix = 'PDF_12_to_22' )
exclude = set( pdf_uncertainties ).difference( set( pdf_uncertainties_23_to_33 ) )
plot_central_and_systematics( channel, pdf_uncertainties_23_to_33, exclude = exclude, suffix = 'PDF_23_to_33' )
exclude = set( pdf_uncertainties ).difference( set( pdf_uncertainties_34_to_45 ) )
plot_central_and_systematics( channel, pdf_uncertainties_34_to_45, exclude = exclude, suffix = 'PDF_34_to_45' )
plot_central_and_systematics( channel, met_uncertainties, suffix = 'MET_only' )
plot_central_and_systematics( channel, new_uncertainties, suffix = 'new_only' )
plot_central_and_systematics( channel, rate_changing_systematics, suffix = 'rate_changing_only' )
```
#### File: src/cross_section_measurement/06_compare_energies.py
```python
from optparse import OptionParser
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import rootpy.plotting.root2matplotlib as rplt
from src.cross_section_measurement.lib import read_xsection_measurement_results
from config import XSectionConfig
from config.variable_binning import bin_edges
from config.latex_labels import variables_latex
from config import CMS
from rootpy.plotting import Graph
from ROOT import kRed, kMagenta, kBlue
from matplotlib.ticker import MultipleLocator
from tools.ROOT_utils import set_root_defaults
output_formats = ['pdf', 'png']
def main():
set_root_defaults()
options, _ = parse_arguments()
variable = 'ST'
config_7TeV = XSectionConfig(7)
config_8TeV = XSectionConfig(8)
path_to_JSON_7TeV = options.path + '/7TeV/' + variable + '/'
path_to_JSON_8TeV = options.path + '/8TeV/' + variable + '/'
# we need the generators
# and the central samples + errors
results_7TeV, _ = read_xsection_measurement_results( path_to_JSON_7TeV,
variable,
bin_edges,
category = 'central',
channel = 'combined',
k_values = {
'combined': config_7TeV.k_values_combined}
)
results_8TeV, _ = read_xsection_measurement_results( path_to_JSON_8TeV,
variable,
bin_edges,
category = 'central',
channel = 'combined',
k_values = {
'combined': config_8TeV.k_values_combined}
)
plot_results(results_7TeV, results_8TeV, variable)
def parse_arguments():
parser = OptionParser()
parser.add_option( "-p", "--path", dest = "path", default = 'data/',
help = "set path to JSON files" )
parser.add_option( "-o", "--output_folder", dest = "output_folder", default = 'plots/',
help = "set path to save plots" )
parser.add_option( "-v", "--variable", dest = "variable", default = 'MET',
help = "set variable to plot (MET, HT, ST, MT, WPT)" )
( options, args ) = parser.parse_args()
return options, args
def plot_results( results_7TeV, results_8TeV, variable ):
# first we need the central graphs
# the results are
# unfolded_with_systematics, MADGRAPH, POWHEG, MCATNLO
plt.figure( figsize = (16,32), dpi = CMS.dpi, facecolor = CMS.facecolor )
gs = gridspec.GridSpec( 3, 1, height_ratios = [7, 7, 1] )
axes = plt.subplot( gs[0] )
plt.setp( axes.get_xticklabels(), visible = False )
plt.tick_params( **CMS.axis_label_major )
plt.tick_params( **CMS.axis_label_minor )
plt.ylabel( r'$\frac{1}{\sigma} \frac{d\sigma}{d' + variables_latex[variable] + '} \left[\mathrm{GeV}^{-1}\\right]$', CMS.y_axis_title )
draw_result( results_7TeV, axes )
plt.legend(numpoints = 1, loc = 'upper right', prop = CMS.legend_properties )
axes = plt.subplot( gs[1] )
plt.tick_params( **CMS.axis_label_major )
plt.tick_params( **CMS.axis_label_minor )
plt.ylabel( r'$\frac{1}{\sigma} \frac{d\sigma}{d' + variables_latex[variable] + '} \left[\mathrm{GeV}^{-1}\\right]$', CMS.y_axis_title )
plt.setp( axes.get_xticklabels(), visible = False )
draw_result( results_8TeV, axes )
plt.legend(numpoints = 1, loc = 'upper right', prop = CMS.legend_properties )
ratios = get_ratios(results_7TeV, results_8TeV)
axes = plt.subplot( gs[2] )
plt.xlabel( '$%s$ [GeV]' % variables_latex[variable], CMS.x_axis_title )
plt.tick_params( **CMS.axis_label_major )
plt.tick_params( **CMS.axis_label_minor )
axes.yaxis.set_major_locator( MultipleLocator( 0.2 ) )
axes.yaxis.set_minor_locator( MultipleLocator( 0.1 ) )
plt.ylabel( r'$\frac{7\, TeV}{8\, TeV}$', CMS.y_axis_title )
# axes.grid( True, 'major', linewidth = 1 )
draw_result( ratios, axes )
axes.set_ylim( ymin = 0.8, ymax = 1.2 )
plt.tight_layout()
path = 'plots/'
histname = variable + '_comparison'
for output_format in output_formats:
filename = path + '/' + histname + '.' + output_format
plt.savefig( filename )
def draw_result( result, axes ):
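    '''
    Draw the unfolded data (with and without systematic uncertainties) and the
    MADGRAPH, POWHEG and MCATNLO predictions onto the given axes.
    '''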
graph = result['unfolded']
graph_with_systematics = result['unfolded_with_systematics']
madgraph = result['MADGRAPH']
powheg = result['POWHEG']
mcatnlo = result['MCATNLO']
# styles
graph.markersize = 2
graph.marker = 'o'
graph_with_systematics.markersize = 2
graph_with_systematics.marker = 'o'
powheg.linestyle = 'longdashdot'
powheg.SetLineColor( kBlue )
madgraph.linestyle = 'solid'
madgraph.SetLineColor( kRed + 1 )
mcatnlo.linestyle = 'dotted'
mcatnlo.SetLineColor( kMagenta + 3 )
rplt.errorbar( graph, xerr = False, emptybins = False, axes = axes, elinewidth = 2, capsize = 10, capthick = 2, zorder = 6)
rplt.errorbar( graph_with_systematics, xerr = False, emptybins = False, axes = axes, elinewidth = 2, capsize = 0, zorder = 5, label = 'unfolded data')
rplt.hist( madgraph, axes = axes, label = 'MADGRAPH', zorder = 1 )
rplt.hist( powheg, axes = axes, label = 'POWHEG', zorder = 2 )
rplt.hist( mcatnlo, axes = axes, label = 'MCATNLO', zorder = 3 )
def get_ratios(results_7TeV, results_8TeV):
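    '''
    Return the 7 TeV / 8 TeV ratio for every entry of the result dictionaries,
    using Graph division for graphs and histogram division otherwise.
    '''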
ratios = {}
for key in results_7TeV.keys():
ratio = None
if 'Graph' in str(type(results_7TeV[key])):
ratio = Graph.divide(results_7TeV[key], results_8TeV[key], False)
else:
ratio = results_7TeV[key].Clone( 'ratio_' + key )
ratio.Divide(results_8TeV[key])
ratios[key] = ratio
return ratios
if __name__ == '__main__':
main()
```
#### File: src/cross_section_measurement/99_QCD_cross_checks.py
```python
from config import CMS
from optparse import OptionParser
from config.latex_labels import b_tag_bins_latex
from config.variable_binning import bin_edges, variable_bins_ROOT
from config import XSectionConfig
from tools.ROOT_utils import get_histograms_from_files
from tools.file_utilities import read_data_from_JSON
from tools.plotting import Histogram_properties, make_control_region_comparison
from tools.hist_utilities import value_error_tuplelist_to_hist, rebin_asymmetric
from ROOT import Double
from uncertainties import ufloat
def get_fit_results(variable, channel):
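    '''
    Read the fit results for the given variable and channel from JSON.
    '''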
global path_to_JSON, category, met_type
fit_results = read_data_from_JSON(path_to_JSON + variable + '/fit_results/' + category + '/fit_results_' + channel + '_' + met_type + '.txt')
return fit_results
def get_fit_inputs(template, variable, channel):
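    '''
    Build fit inputs per variable bin from the histogram files: integral and
    error for data, TTJet, V+Jets and SingleTop, with QCD estimated as data
    minus the sum of the other samples.
    '''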
inputs = {}
for var_bin in variable_bins_ROOT[variable]:
print var_bin
histogram = template % var_bin
histograms = get_histograms_from_files([histogram], histogram_files)
for sample in [channel, 'TTJet', 'V+Jets', 'SingleTop']:
n_bins = histograms[sample][histogram].GetNbinsX()
error = Double(0)
integral = histograms[sample][histogram].IntegralAndError(1, n_bins, error)
if inputs.has_key(sample):
inputs[sample].append((integral, error))
else:
inputs[sample] = [(integral, error)]
inputs['QCD'] = []
for data,ttjet, vjets, singletop in zip(inputs[channel], inputs['TTJet'], inputs['V+Jets'], inputs['SingleTop']):
qcd = ufloat(data) - ufloat(ttjet) - ufloat(vjets) - ufloat(singletop)
inputs['QCD'].append((qcd.nominal_value, qcd.std_dev))
print inputs
return inputs
def do_shape_check(channel, control_region_1, control_region_2, variable, normalisation, title, x_title, y_title, x_limits, y_limits,
name_region_1='conversions' , name_region_2='non-isolated electrons', name_region_3='fit results', rebin=1):
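    '''
    Compare the QCD shape between the two control regions (electron channel
    only) and compare each control region to the fit results or fit inputs
    for the given variable, saving the comparison plots.
    '''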
global b_tag_bin
# QCD shape comparison
if channel == 'electron':
histograms = get_histograms_from_files([control_region_1, control_region_2], histogram_files)
region_1 = histograms[channel][control_region_1].Clone() - histograms['TTJet'][control_region_1].Clone() - histograms['V+Jets'][control_region_1].Clone() - histograms['SingleTop'][control_region_1].Clone()
region_2 = histograms[channel][control_region_2].Clone() - histograms['TTJet'][control_region_2].Clone() - histograms['V+Jets'][control_region_2].Clone() - histograms['SingleTop'][control_region_2].Clone()
region_1.Rebin(rebin)
region_2.Rebin(rebin)
histogram_properties = Histogram_properties()
histogram_properties.name = 'QCD_control_region_comparison_' + channel + '_' + variable + '_' + b_tag_bin
histogram_properties.title = title + ', ' + b_tag_bins_latex[b_tag_bin]
histogram_properties.x_axis_title = x_title
histogram_properties.y_axis_title = 'arbitrary units/(0.1)'
histogram_properties.x_limits = x_limits
histogram_properties.y_limits = y_limits[0]
histogram_properties.mc_error = 0.0
histogram_properties.legend_location = 'upper right'
make_control_region_comparison(region_1, region_2,
name_region_1=name_region_1, name_region_2=name_region_2,
histogram_properties=histogram_properties, save_folder=output_folder)
# QCD shape comparison to fit results
histograms = get_histograms_from_files([control_region_1], histogram_files)
region_1_tmp = histograms[channel][control_region_1].Clone() - histograms['TTJet'][control_region_1].Clone() - histograms['V+Jets'][control_region_1].Clone() - histograms['SingleTop'][control_region_1].Clone()
region_1 = rebin_asymmetric(region_1_tmp, bin_edges[variable])
fit_results_QCD = normalisation[variable]['QCD']
region_2 = value_error_tuplelist_to_hist(fit_results_QCD, bin_edges[variable])
histogram_properties = Histogram_properties()
histogram_properties.name = 'QCD_control_region_comparison_' + channel + '_' + variable + '_fits_with_conversions_' + b_tag_bin
histogram_properties.title = title + ', ' + b_tag_bins_latex[b_tag_bin]
histogram_properties.x_axis_title = x_title
histogram_properties.y_axis_title = 'arbitrary units/(0.1)'
histogram_properties.x_limits = x_limits
histogram_properties.y_limits = y_limits[1]
histogram_properties.mc_error = 0.0
histogram_properties.legend_location = 'upper right'
make_control_region_comparison(region_1, region_2,
name_region_1=name_region_1, name_region_2=name_region_3,
histogram_properties=histogram_properties, save_folder=output_folder)
histograms = get_histograms_from_files([control_region_2], histogram_files)
region_1_tmp = histograms[channel][control_region_2].Clone() - histograms['TTJet'][control_region_2].Clone() - histograms['V+Jets'][control_region_2].Clone() - histograms['SingleTop'][control_region_2].Clone()
region_1 = rebin_asymmetric(region_1_tmp, bin_edges[variable])
fit_results_QCD = normalisation[variable]['QCD']
region_2 = value_error_tuplelist_to_hist(fit_results_QCD, bin_edges[variable])
histogram_properties = Histogram_properties()
histogram_properties.name = 'QCD_control_region_comparison_' + channel + '_' + variable + '_fits_with_noniso_' + b_tag_bin
histogram_properties.title = title + ', ' + b_tag_bins_latex[b_tag_bin]
histogram_properties.x_axis_title = x_title
histogram_properties.y_axis_title = 'arbitrary units/(0.1)'
histogram_properties.x_limits = x_limits
histogram_properties.y_limits = y_limits[1]
histogram_properties.mc_error = 0.0
histogram_properties.legend_location = 'upper right'
make_control_region_comparison(region_1, region_2,
name_region_1=name_region_2, name_region_2=name_region_3,
histogram_properties=histogram_properties, save_folder=output_folder)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-p", "--path", dest="path", default='data/absolute_eta_M3_angle_bl/',
help="set path to JSON files")
parser.add_option("-o", "--output_folder", dest="output_folder", default='plots/',
help="set path to save plots")
parser.add_option("-m", "--metType", dest="metType", default='type1',
help="set MET type used in the analysis of MET-dependent variables")
parser.add_option("-c", "--category", dest="category", default='central',
help="set the category to take the fit results from (default: central)")
parser.add_option("-n", "--normalise_to_fit", dest="normalise_to_fit", action="store_true",
help="normalise the MC to fit results")
parser.add_option("-i", "--use_inputs", dest="use_inputs", action="store_true",
help="use fit inputs instead of fit results")
parser.add_option("-e", "--centre-of-mass-energy", dest="CoM", default=8, type=int,
help="set the centre of mass energy for analysis. Default = 8 [TeV]")
(options, args) = parser.parse_args()
measurement_config = XSectionConfig(options.CoM)
# caching of variables for shorter access
translate_options = measurement_config.translate_options
lumi = measurement_config.luminosity
come = measurement_config.centre_of_mass_energy
electron_histogram_title = 'CMS Preliminary, $\mathcal{L}$ = %.1f fb$^{-1}$ at $\sqrt{s}$ = %d TeV \n e+jets, $\geq$4 jets' % (lumi, come)
muon_histogram_title = 'CMS Preliminary, $\mathcal{L}$ = %.1f fb$^{-1}$ at $\sqrt{s}$ = %d TeV \n $\mu$+jets, $\geq$4 jets' % (lumi, come)
path_to_JSON = options.path + '/' + str(measurement_config.centre_of_mass_energy) + 'TeV/'
output_folder = options.output_folder
normalise_to_fit = options.normalise_to_fit
category = options.category
met_type = translate_options[options.metType]
CMS.title['fontsize'] = 40
CMS.x_axis_title['fontsize'] = 50
CMS.y_axis_title['fontsize'] = 50
CMS.axis_label_major['labelsize'] = 40
CMS.axis_label_minor['labelsize'] = 40
CMS.legend_properties['size'] = 40
histogram_files = {
'electron' : measurement_config.data_file_electron,
'muon' : measurement_config.data_file_muon,
'TTJet': measurement_config.ttbar_category_templates[category],
'V+Jets': measurement_config.VJets_category_templates[category],
'QCD': measurement_config.electron_QCD_MC_file, # this should also be category-dependent, but unimportant and not available atm
'SingleTop': measurement_config.SingleTop_category_templates[category]
}
normalisations_electron, normalisations_muon = {}, {}
# getting normalisations
if not options.use_inputs:
fit_results_electron = {
'MET':get_fit_results('MET', 'electron'),
'HT':get_fit_results('HT', 'electron'),
'ST':get_fit_results('ST', 'electron'),
'MT':get_fit_results('MT', 'electron'),
'WPT':get_fit_results('WPT', 'electron')
}
fit_results_muon = {
'MET':get_fit_results('MET', 'muon'),
'HT':get_fit_results('HT', 'muon'),
'ST':get_fit_results('ST', 'muon'),
'MT':get_fit_results('MT', 'muon'),
'WPT':get_fit_results('WPT', 'muon')
}
normalisations_electron, normalisations_muon = fit_results_electron, fit_results_muon
else:
inputs_electron = {
'MET': get_fit_inputs('TTbar_plus_X_analysis/EPlusJets/QCDConversions/Binned_MET_Analysis/patType1CorrectedPFMet_bin_%s/electron_absolute_eta_0btag', 'MET', 'electron'),
'HT': get_fit_inputs('TTbar_plus_X_analysis/EPlusJets/QCDConversions/Binned_HT_Analysis/HT_bin_%s/electron_absolute_eta_0btag', 'HT', 'electron'),
'ST': get_fit_inputs('TTbar_plus_X_analysis/EPlusJets/QCDConversions/Binned_ST_Analysis/ST_with_patType1CorrectedPFMet_bin_%s/electron_absolute_eta_0btag', 'ST', 'electron'),
'MT': get_fit_inputs('TTbar_plus_X_analysis/EPlusJets/QCDConversions/Binned_MT_Analysis/MT_with_patType1CorrectedPFMet_bin_%s/electron_absolute_eta_0btag', 'MT', 'electron'),
'WPT': get_fit_inputs('TTbar_plus_X_analysis/EPlusJets/QCDConversions/Binned_WPT_Analysis/WPT_with_patType1CorrectedPFMet_bin_%s/electron_absolute_eta_0btag', 'WPT', 'electron'),
}
inputs_muon = {
'MET': get_fit_inputs('TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/Binned_MET_Analysis/patType1CorrectedPFMet_bin_%s/muon_absolute_eta_0btag', 'MET', 'muon'),
'HT': get_fit_inputs('TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/Binned_HT_Analysis/HT_bin_%s/muon_absolute_eta_0btag', 'HT', 'muon'),
'ST': get_fit_inputs('TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/Binned_ST_Analysis/ST_with_patType1CorrectedPFMet_bin_%s/muon_absolute_eta_0btag', 'ST', 'muon'),
'MT': get_fit_inputs('TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/Binned_MT_Analysis/MT_with_patType1CorrectedPFMet_bin_%s/muon_absolute_eta_0btag', 'MT', 'muon'),
'WPT': get_fit_inputs('TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/Binned_WPT_Analysis/WPT_with_patType1CorrectedPFMet_bin_%s/muon_absolute_eta_0btag', 'WPT', 'muon'),
}
normalisations_electron, normalisations_muon = inputs_electron, inputs_muon
# electrons
histogram_title = 'CMS Preliminary, $\mathcal{L}$ = 19.6 fb$^{-1}$ at $\sqrt{s}$ = 8 TeV \n e+jets, $\geq$4 jets'
b_tag_bin = '0btag'
name_region_1, name_region_2, name_region_3 = 'conversions', 'non-isolated electrons', 'fit results'
if options.use_inputs:
name_region_3 = 'fit inputs'
do_shape_check(channel='electron',
control_region_1='TTbar_plus_X_analysis/EPlusJets/QCDConversions/MET/patType1CorrectedPFMet/MET_' + b_tag_bin,
control_region_2='TTbar_plus_X_analysis/EPlusJets/QCD non iso e+jets/MET/patType1CorrectedPFMet/MET_' + b_tag_bin,
variable='MET',
normalisation=normalisations_electron,
title=electron_histogram_title,
x_title='$E_{\mathrm{T}}^{\mathrm{miss}}$ [GeV]',
y_title='arbitrary units/(5 GeV)',
x_limits=[0, 250],
y_limits=([0, 0.18], [0, 0.65]),
name_region_1=name_region_1,
name_region_2=name_region_2,
name_region_3=name_region_3,
rebin=1)
do_shape_check(channel='electron',
control_region_1='TTbar_plus_X_analysis/EPlusJets/QCDConversions/MET/HT_' + b_tag_bin,
control_region_2='TTbar_plus_X_analysis/EPlusJets/QCD non iso e+jets/MET/HT_' + b_tag_bin,
variable='HT',
normalisation=normalisations_electron,
title=electron_histogram_title,
x_title='$H_\mathrm{T}$ [GeV]',
y_title='arbitrary units/(20 GeV)',
x_limits=[80, 1000],
y_limits=([0, 0.12], [0, 0.45]),
name_region_1=name_region_1,
name_region_2=name_region_2,
name_region_3=name_region_3,
rebin=4)
do_shape_check(channel='electron',
control_region_1='TTbar_plus_X_analysis/EPlusJets/QCDConversions/MET/patType1CorrectedPFMet/ST_' + b_tag_bin,
control_region_2='TTbar_plus_X_analysis/EPlusJets/QCD non iso e+jets/MET/patType1CorrectedPFMet/ST_' + b_tag_bin,
variable='ST',
normalisation=normalisations_electron,
title=electron_histogram_title,
x_title='$S_\mathrm{T}$ [GeV]',
y_title='arbitrary units/(20 GeV)',
x_limits=[106, 1000],
y_limits=([0, 0.12], [0, 0.65]),
name_region_1=name_region_1,
name_region_2=name_region_2,
name_region_3=name_region_3,
rebin=4)
do_shape_check(channel='electron',
control_region_1='TTbar_plus_X_analysis/EPlusJets/QCDConversions/MET/patType1CorrectedPFMet/Transverse_Mass_' + b_tag_bin,
control_region_2='TTbar_plus_X_analysis/EPlusJets/QCD non iso e+jets/MET/patType1CorrectedPFMet/Transverse_Mass_' + b_tag_bin,
variable='MT',
normalisation=normalisations_electron,
title=electron_histogram_title,
x_title='$M^\mathrm{W}_\mathrm{T}$ [GeV]',
y_title='arbitrary units/(10 GeV)',
x_limits=[0, 200],
y_limits=([0, 0.18], [0, 0.45]),
name_region_1=name_region_1,
name_region_2=name_region_2,
name_region_3=name_region_3,
rebin=10)
do_shape_check(channel='electron',
control_region_1='TTbar_plus_X_analysis/EPlusJets/QCDConversions/MET/patType1CorrectedPFMet/WPT_' + b_tag_bin,
control_region_2='TTbar_plus_X_analysis/EPlusJets/QCD non iso e+jets/MET/patType1CorrectedPFMet/WPT_' + b_tag_bin,
variable='WPT',
normalisation=normalisations_electron,
title=electron_histogram_title,
x_title='$p^\mathrm{W}_\mathrm{T}$ [GeV]',
y_title='arbitrary units/(5 GeV)',
x_limits=[0, 250],
y_limits=([0, 0.10], [0, 0.45]),
name_region_1=name_region_1,
name_region_2=name_region_2,
name_region_3=name_region_3,
rebin=5)
# muons
b_tag_bin = '0btag'
name_region_1, name_region_2, name_region_3 = 'non-isolated muons', 'non-isolated muons', 'fit results'
if options.use_inputs:
name_region_3 = 'fit inputs'
do_shape_check(channel='muon',
control_region_1='TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/MET/patType1CorrectedPFMet/MET_' + b_tag_bin,
control_region_2='TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/MET/patType1CorrectedPFMet/MET_' + b_tag_bin,
variable='MET',
normalisation=normalisations_muon,
title=muon_histogram_title,
x_title='$E_{\mathrm{T}}^{\mathrm{miss}}$ [GeV]',
y_title='arbitrary units/(5 GeV)',
x_limits=[0, 250],
y_limits=([0, 0.18], [0, 1]),
name_region_1=name_region_1,
name_region_2=name_region_2,
name_region_3=name_region_3,
rebin=1)
do_shape_check(channel='muon',
control_region_1='TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/MET/HT_' + b_tag_bin,
control_region_2='TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/MET/HT_' + b_tag_bin,
variable='HT',
normalisation=normalisations_muon,
title=muon_histogram_title,
x_title='$H_\mathrm{T}$ [GeV]',
y_title='arbitrary units/(20 GeV)',
x_limits=[80, 1000],
y_limits=([0, 0.12], [0, 1]),
name_region_1=name_region_1,
name_region_2=name_region_2,
name_region_3=name_region_3,
rebin=4)
do_shape_check(channel='muon',
control_region_1='TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/MET/patType1CorrectedPFMet/ST_' + b_tag_bin,
control_region_2='TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/MET/patType1CorrectedPFMet/ST_' + b_tag_bin,
variable='ST',
normalisation=normalisations_muon,
title=muon_histogram_title,
x_title='$S_\mathrm{T}$ [GeV]',
y_title='arbitrary units/(20 GeV)',
x_limits=[106, 1000],
y_limits=([0, 0.12], [0, 1]),
name_region_1=name_region_1,
name_region_2=name_region_2,
name_region_3=name_region_3,
rebin=4)
do_shape_check(channel='muon',
control_region_1='TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/MET/patType1CorrectedPFMet/Transverse_Mass_' + b_tag_bin,
control_region_2='TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/MET/patType1CorrectedPFMet/Transverse_Mass_' + b_tag_bin,
variable='MT',
normalisation=normalisations_muon,
title=muon_histogram_title,
x_title='$M^\mathrm{W}_\mathrm{T}$ [GeV]',
y_title='arbitrary units/(10 GeV)',
x_limits=[0, 200],
y_limits=([0, 0.18], [0, 1]),
name_region_1=name_region_1,
name_region_2=name_region_2,
name_region_3=name_region_3,
rebin=10)
do_shape_check(channel='muon',
control_region_1='TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/MET/patType1CorrectedPFMet/WPT_' + b_tag_bin,
control_region_2='TTbar_plus_X_analysis/MuPlusJets/QCD non iso mu+jets ge3j/MET/patType1CorrectedPFMet/WPT_' + b_tag_bin,
variable='WPT',
normalisation=normalisations_muon,
title=muon_histogram_title,
x_title='$p^\mathrm{W}_\mathrm{T}$ [GeV]',
y_title='arbitrary units/(5 GeV)',
x_limits=[0, 250],
y_limits=([0, 0.10], [0, 1]),
name_region_1=name_region_1,
name_region_2=name_region_2,
name_region_3=name_region_3,
rebin=5)
```
#### File: src/cross_section_measurement/make_cutflow_8TeV.py
```python
from math import sqrt
from tools.ROOT_utils import get_histograms_from_files
from config import XSectionConfig
cuts = None
cuts_electrons = [
"Preselection", #
"Event cleaning/HLT", #
"One isolated electron", #
"Muon veto", #
"Dilepton veto", #
"Conversion veto", #
"$\geq 1$ jets", #
"$\geq 2$ jets", #
"$\geq 3$ jets", #
"$\geq 4$ jets", #
"$\geq 1$ b-tagged jets", #
"$\geq 2$ b-tagged jets" #
]
cuts_muons = [
"Preselection", #
"Event cleaning/HLT", #
"One isolated muon", #
"Second muon veto", #
"Electron veto", #
"$\geq 1$ jets", #
"$\geq 2$ jets", #
"$\geq 3$ jets", #
"$\geq 4$ jets", #
"$\geq 1$ b-tagged jets", #
"$\geq 2$ b-tagged jets" #
]
def printCutFlow(histograms, selection, luminosity_scale = 1.0, outputFormat='Latex'):
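    '''
    Print the cut flow (plain text or LaTeX) for the given selection, scaling
    the MC histograms by luminosity_scale if it differs from 1.
    '''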
global cuts
header = " | Step | TTJet | W+jets | DY + Jets | single top | QCD | Sum MC | Data |"
row = " | %s | %d +- %d | %d +- %d | %d +- %d | %d +- %d | %d +- %d | %d +- %d | %d | "
if outputFormat == 'Latex':
header = "Selection step & \\ttbar + jets & W + jets & Z + jets & Single-Top & QCD & Sum MC & Data \\\\"
row = "%s & $%d \pm %d$ & $%d \pm %d$ & $%d \pm %d$ & $%d \pm %d$ & $%d \pm %d$ & $%d \pm %d$ & %d \\\\ "
print header
# scale the luminosity
if luminosity_scale != 1.0:
for sample, histogram in histograms.iteritems():
if sample == 'data':
continue
histogram[selection].Scale(luminosity_scale)
numbers, errors = getEventNumbers(histograms, selection)
for step in range(len(cuts)):
nums = numbers[step]
errs = errors[step]
sumMC = nums['TTJet'] + nums['WJets'] + nums['ZJets'] + nums['QCD'] + nums['SingleTop']
sumMC_err = sqrt(errs['TTJet'] ** 2 + errs['WJets'] ** 2 + errs['ZJets'] ** 2 + errs['SingleTop'] ** 2 + errs['QCD'] ** 2)
print row % (cuts[step], nums['TTJet'], errs['TTJet'], nums['WJets'], errs['WJets'], nums['ZJets'], errs['ZJets'],
nums['SingleTop'], errs['SingleTop'], nums['QCD'], errs['QCD'], sumMC, sumMC_err, nums['data'])
def getEventNumbers(hists, selection):
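    '''
    Return, for every cut step, a dictionary of event counts per sample and a
    dictionary of the corresponding errors, read from the cut-flow histograms.
    '''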
global cuts
eventNumbers = []
errorValues = []
for step in range(len(cuts)):
events = {}
errors = {}
for sample in hists.keys():
events[sample] = hists[sample][selection].GetBinContent(step + 1)
errors[sample] = hists[sample][selection].GetBinError(step + 1)
eventNumbers.append(events)
errorValues.append(errors)
return eventNumbers, errorValues
if __name__ == '__main__':
measurement_config = XSectionConfig(8)
path_to_files = measurement_config.path_to_files + '/central/'
suffix = ''
lumi = measurement_config.luminosity
luminosity_scale = measurement_config.luminosity_scale
data = 'SingleElectron'
pfmuon = 'PFMuon_'
histogram_files = {
'data' : path_to_files + '%s_%spb_PFElectron_%sPF2PATJets_PFMET.root' % (data, str(lumi), pfmuon),
'TTJet': path_to_files + 'TTJet_%spb_PFElectron_%sPF2PATJets_PFMET%s.root' % (str(lumi), pfmuon, suffix),
'WJets': path_to_files + 'WJets_%spb_PFElectron_%sPF2PATJets_PFMET%s.root' % (str(lumi), pfmuon, suffix),
'ZJets': path_to_files + 'DYJetsToLL_%spb_PFElectron_%sPF2PATJets_PFMET%s.root' % (str(lumi), pfmuon, suffix),
'QCD': path_to_files + 'QCD_%spb_PFElectron_%sPF2PATJets_PFMET%s.root' % (str(lumi), pfmuon, suffix),
'SingleTop': path_to_files + 'SingleTop_%spb_PFElectron_%sPF2PATJets_PFMET%s.root' % (str(lumi), pfmuon, suffix)
}
electron_selection = 'EventCount/TTbarEplusJetsRefSelection'
muon_selection = 'EventCount/TTbarMuPlusJetsRefSelection'
cuts = cuts_electrons
histograms = get_histograms_from_files([electron_selection], histogram_files)
print '='*50
printCutFlow(histograms, electron_selection, luminosity_scale)
data = 'SingleMu'
histogram_files['data'] = path_to_files + '%s_%spb_PFElectron_%sPF2PATJets_PFMET.root' % (data, str(lumi), pfmuon)
histogram_files['QCD'] = path_to_files + 'QCD_Muon_%spb_PFElectron_%sPF2PATJets_PFMET%s.root' % (str(lumi), pfmuon, suffix)
histograms = get_histograms_from_files([muon_selection], histogram_files)
cuts = cuts_muons
print '='*50
printCutFlow(histograms, muon_selection, luminosity_scale)
```
#### File: DailyPythonScripts/src/grid.py
```python
from optparse import OptionParser
#import grid utilities
from tools.grid_utilities import fetch_grid_file, delete_grid_folder, remote_copy_folder
def rm(filename, recursive = False):
pass
def copyfile(source, destination):
pass
def copy(source, destination):
'''
detect if remote or local
if remote to remote: schedule in FTS? split on more than one stream?
'''
pass
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-d", "--delete",
action="store_false", dest="delete", default=False,
help="delete the given argument")
parser.add_option("-f", "--fetch",
action="store_false", dest="fetch", default=False,
help="fetch the given argument")
(options, args) = parser.parse_args()
```
#### File: DailyPythonScripts/src/make_CRAB_configuration.py
```python
import os
import sys
from optparse import OptionParser
import commands
# NOTE: this script is deprecated
def main():
"Main function."
parser = OptionParser("Script to create CRAB configuration files. This file is used by make_ntuples_CRAB_configurations.sh and make_unfolding_CRAB_configurations.sh")
parser.add_option("-j", "--jobtype", dest="jobtype", default='cmssw',
help="specify jobtype")
parser.add_option("-g", "--scheduler", dest="scheduler", default='glidein',
help="specify scheduler; default is 'glidein'.")
parser.add_option("-u", "--use_server", dest="use_server", default='1',
help="specify use_server variable; default is '1'.")
parser.add_option("-v", "--version", dest="version", default='v10',
help="specify nTuple version in the form 'vXx'; default is 'v10'.")
parser.add_option("-P", "--datasetpath", dest="datasetpath", default=None,
help="specify datasetpath; default is 'None'.")
parser.add_option("-p", "--pset", dest="pset", default='BristolAnalysis/nTupleTools/test/makeTuples_cfg.py',
help="specify path to pset; default is 'BristolAnalysis/nTupleTools/test/makeTuples_cfg.py'.")
parser.add_option("-e", "--numberevents", dest="total_number_of_events", default=-1,
help="specify total number of events to run over; default is -1.")
parser.add_option("-l", "--numberlumis", dest="total_number_of_lumis", default=-1,
help="specify total number of lumis to run over; default is -1.")
parser.add_option("-n", "--numberjobs", dest="number_of_jobs", default=1000,
help="specify total number of jobs to be created; default is 1000.")
parser.add_option("-f", "--lumi_mask", dest="lumi_mask", default=None,
help="specify lumi_mask if running on data; default is 'None'.")
parser.add_option("-d", "--useData", dest="useData", default=0,
help="specify 0 for monte carlo or 1 for data")
parser.add_option("-t", "--dataType", dest="dataType", default='TTJets',
help="specify dataType; default is 'TTJets'.")
parser.add_option("-s", "--skim", dest="skim", default='LeptonPlus3Jets',
help="specify skim; default is 'LeptonPlus3Jets'.")
parser.add_option("-W", "--storePDFWeights", dest="storePDFWeights", default=0,
help="specify whether to store PDFWeights, default is 0.")
parser.add_option("-T", "--isTTbarMC", dest="isTTbarMC", default=0,
help="specify if sample contains ttbar events or not, default is 0.")
parser.add_option("-M", "--isMCatNLO", dest="isMCatNLO", default=0,
help="specify if sample contains ttbar MC@NLO events or not (different genParticle structure), default is 0.")
parser.add_option("-m", "--mail", dest="email", default=None,
help="specify email address if notifications are desired; default is None.")
parser.add_option("-w", "--whiteList", dest="whiteList", default=None,
help="specify sites to which you wish to submit jobs (if desired) separated by commas; default is None. If you wish to create a white list of only the sites where your data is present, enter the string '1'.")
parser.add_option("-b", "--blackList", dest="blackList", default=None,
help="specify sites to which you do not wish to submit jobs (if desired) separated by commas; default is None.")
(options, _) = parser.parse_args()
#make sure that a datasetpath has been entered.
    if options.datasetpath is None or options.datasetpath == "None":
print 'Please enter a datasetPath.'
sys.exit()
#Use das_client.py to get nFiles and nEvents for the dataset in question by making a DAS query.
dasData = commands.getstatusoutput("../tools/das_client.py --query=dataset=\"" + options.datasetpath + " | grep dataset.name, dataset.nfiles, dataset.nevents \" --verbose=1")
dasData = dasData[1].split("\n")
dasData = dasData[3].split(" ")
nFiles=dasData[1]
nEvents=dasData[2]
#set up white list and black list arrays
#whiteList
if options.whiteList == '1':
sites = commands.getstatusoutput("./das_client.py --query=\"site dataset=" + options.datasetpath + "\"")
sites = sites[1].split("\n")
sites = sites[3:]
elif options.whiteList:
sites = options.whiteList.split(",")
#blackList
if options.blackList:
blackList = options.blackList.split(",")
#Set up configuration file to write to and open it for writing.
if int(options.useData) == 1:
filepath = "data2012/" + options.version
if not os.path.exists(filepath):
os.makedirs(filepath)
if int(options.useData) == 0:
filepath = "defaultMC_Summer12/" + options.version
if not os.path.exists(filepath):
os.makedirs(filepath)
datasetPath = options.datasetpath.replace("/", "_")
filename = datasetPath[1:]
filename = filename + "_nTuple_" + options.version + "_" + options.skim + ".cfg"
configFile = open(filepath + "/" + filename, "w")
#Set up directory name (both for local and remote)
if int(options.useData) == 1:
directory = datasetPath[1:] + "_nTuple_" + options.version + "_GoldenJSON_" + options.skim + "_final"
elif int(options.useData) == 0:
directory = datasetPath[1:] + "_nTuple_" + options.version + "_" + options.skim + "_final"
else:
print "useData value entered is not 0 (monte carlo) or 1 (data). Current value: ", options.useData
sys.exit()
#Write to configuration file!
print "Starting writing configuration file " + filename
configFile.write("[CRAB]\n")
configFile.write("jobtype = " + options.jobtype + "\n")
configFile.write("scheduler = " + options.scheduler + "\n")
configFile.write("use_server = " + options.use_server + "\n\n")
configFile.write("[CMSSW]\n")
configFile.write("#nEvents = " + nEvents + "\n")
configFile.write("#nFiles = " + nFiles + "\n")
configFile.write("datasetpath = " + options.datasetpath + "\n")
configFile.write("pset = " + options.pset + "\n")
if int(options.useData) == 1:
configFile.write("total_number_of_lumis = " + str(options.total_number_of_lumis) + "\n")
elif int(options.useData) == 0:
configFile.write("total_number_of_events = " + str(options.total_number_of_events) + "\n")
configFile.write("number_of_jobs = " + str(options.number_of_jobs) + "\n")
configFile.write("get_edm_output = 1\n")
if int(options.useData) == 1:
if options.lumi_mask == None:
print "Please specify a JSON file."
else:
configFile.write("lumi_mask = BristolAnalysis/NTupleTools/data/CertifiedJSONs/" + options.lumi_mask + "\n")
configFile.write("pycfg_params = useData=" + str(options.useData) + " dataType=" + options.dataType + " skim=" + options.skim)
if int(options.storePDFWeights) == 1:
configFile.write(" storePDFWeights=" + str(options.storePDFWeights))
if int(options.isTTbarMC) == 1:
configFile.write(" isTTbarMC=" + str(options.isTTbarMC))
if int(options.isMCatNLO) == 1:
configFile.write(" isMCatNLO=" + str(options.isMCatNLO))
configFile.write("\n\n")
configFile.write("[USER]\n")
configFile.write("additional_input_files = BristolAnalysis/NTupleTools/data/PileUp/*.root\n")
configFile.write("return_data = 0\n")
configFile.write("copy_data = 1\n")
configFile.write("storage_element = T2_UK_SGrid_Bristol\n")
configFile.write("user_remote_dir = " + directory + "\n")
configFile.write("check_user_remote_dir = 0\n")
configFile.write("ui_working_dir = " + directory + "\n")
if options.email and options.email != "None":
configFile.write("email = " + options.email + "\n\n")
else:
configFile.write("\n")
configFile.write("[GRID]\n")
if options.whiteList and options.whiteList != "None":
configFile.write("se_white_list=")
for i in range(len(sites)):
configFile.write(sites[i])
if i != len(sites)-1:
configFile.write(", ")
else:
configFile.write("\n")
else:
configFile.write("#No whitelist.\n")
if options.blackList and options.blackList !="None":
configFile.write("se_black_list=")
for i in range(len(blackList)):
configFile.write(blackList[i])
if i != len(blackList)-1:
configFile.write(", ")
else:
configFile.write("\n")
else:
configFile.write("#No blacklist.")
configFile.close()
print filename, "saved.\n"
#
# main
#
if __name__ == '__main__':
main()
```
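The `[GRID]` white/black-list loops above assemble the comma-separated site lists element by element. An equivalent helper based on `", ".join` (a sketch, not part of the original script) is more compact:
```python
def write_site_list(config_file, key, sites):
    # Equivalent to the manual loops above: comma-separate the sites and end the line.
    config_file.write("%s=%s\n" % (key, ", ".join(sites)))

# usage inside main(), assuming sites/blackList were filled as above:
# write_site_list(configFile, "se_white_list", sites)
# write_site_list(configFile, "se_black_list", blackList)
```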
#### File: DailyPythonScripts/src/make_HLT_plots.py
```python
from rootpy.io import File
from rootpy import asrootpy
# Most verbose log level
import rootpy.plotting.root2matplotlib as rplt
import matplotlib.pyplot as plt
# from matplotlib.ticker import AutoMinorLocator
# import config.summations as summations
from ROOT import TEfficiency, TGraphAsymmErrors, TF1, TLegend, TLatex
from array import array
from config import CMS
from tools.ROOT_utils import set_root_defaults
import matplotlib.gridspec as gridspec
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import numpy
from numpy import frompyfunc
from pylab import plot
from matplotlib import rc
rc('text', usetex=True)
def make_efficiency_plot(pass_data, total_data, pass_mc, total_mc, trigger_under_study):
global output_folder, output_formats
efficiency_data = asrootpy(TGraphAsymmErrors())
efficiency_mc = asrootpy(TGraphAsymmErrors())
efficiency_data.Divide(pass_data, total_data, "cl=0.683 b(1,1) mode")
efficiency_mc.Divide(pass_mc, total_mc, "cl=0.683 b(1,1) mode")
scale_factor = pass_data.Clone("pass_mc")
scale_factor.Multiply(total_mc)
scale_factor.Divide(total_data)
scale_factor.Divide(pass_mc)
scale_factor.linewidth = 6
scale_factor.SetMarkerSize(3)
scale_factor.linecolor = 'green'
scale_factor.SetMarkerColor('green')
x_limits, x_title, y_title, fit_function, fit_range = get_parameters(trigger_under_study)
fit_data = TF1("fit_data", fit_function, fit_range[0], fit_range[1])
fit_mc = TF1("fit_mc", fit_function, fit_range[0], fit_range[1])
fit_SF = TF1("fit_SF", fit_function, fit_range[0], fit_range[1])
set_parameter_limits(trigger_under_study, fit_data)
set_parameter_limits(trigger_under_study, fit_mc)
set_parameter_limits(trigger_under_study, fit_SF)
efficiency_data.Fit(fit_data, 'FECQ')
efficiency_mc.Fit(fit_mc, 'FECQ')
scale_factor.Fit(fit_SF, 'FECQ')
set_plot_styles(efficiency_data, efficiency_mc)
save_as_name = trigger_under_study
save_as_name = save_as_name.replace('Jet30/', 'Jet30_')
plot_efficiencies(efficiency_data, efficiency_mc, scale_factor,
fit_data, fit_mc, fit_SF, fit_function,
x_limits, x_title, y_title, save_as_name)
def plot_efficiencies(efficiency_data, efficiency_mc, scale_factor,
fit_data, fit_mc, fit_SF, fit_function,
x_limits, x_title, y_title, save_as_name):
# plot with matplotlib
plt.figure(figsize=(16, 16), dpi=200, facecolor='white')
gs = gridspec.GridSpec(2, 1, height_ratios=[5, 1])
ax0 = plt.subplot(gs[0])
ax0.minorticks_on()
ax0.grid(True, 'major', linewidth=2)
ax0.grid(True, 'minor')
plt.tick_params(**CMS.axis_label_major)
plt.tick_params(**CMS.axis_label_minor)
ax0.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
rplt.errorbar(efficiency_data, xerr=True, emptybins=True, axes=ax0)
rplt.errorbar(efficiency_mc, xerr=False, emptybins=True, axes=ax0)
ax0.set_xlim(x_limits)
plt.ylabel(y_title, CMS.y_axis_title)
plt.title(r'e+jets, CMS Preliminary, $\mathcal{L}$ = 5.1 fb$^{-1}$ at $\sqrt{s}$ = 7 TeV', CMS.title)
plt.legend(['data', r'$\mathrm{t}\bar{\mathrm{t}}$ MC'], numpoints=1, loc='lower right', prop=CMS.legend_properties)
#add fits
x = numpy.linspace(fit_data.GetXmin(), fit_data.GetXmax(), fit_data.GetNpx())
function_data = frompyfunc(fit_data.Eval, 1, 1)
plot(x, function_data(x), axes=ax0, color = 'black', linewidth = 2)
x = numpy.linspace(fit_mc.GetXmin(), fit_mc.GetXmax(), fit_mc.GetNpx())
function_mc = frompyfunc(fit_mc.Eval, 1, 1)
plot(x, function_mc(x), axes=ax0, color = 'red', linewidth = 2)
ax1 = plt.subplot(gs[1])
#disable labels for plot 1
plt.setp(ax0.get_xticklabels(minor = True), visible=False)
plt.setp(ax0.get_xticklabels(), visible=False)
ax1.minorticks_on()
ax1.grid(True, 'major', linewidth=2)
ax1.grid(True, 'minor')
ax1.yaxis.set_major_locator(MultipleLocator(1.))
ax1.yaxis.set_minor_locator(MultipleLocator(0.5))
ax1.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax1.xaxis.set_major_formatter(FormatStrFormatter('%d'))
plt.tick_params(**CMS.axis_label_major)
plt.tick_params(**CMS.axis_label_minor)
plt.xlabel(x_title, CMS.x_axis_title)
plt.ylabel('data/MC', CMS.y_axis_title)
rplt.errorbar(scale_factor, xerr=True, emptybins=False, axes=ax1)
ax1.set_xlim(x_limits)
#add fit formulas
ax0.text(0.1, 0.15, '$\epsilon$ = ' + get_fitted_function_str(fit_data, fit_function),
verticalalignment='bottom', horizontalalignment='left',
transform=ax0.transAxes,
color='black', fontsize=60, bbox = dict(facecolor = 'white', edgecolor = 'none', alpha = 0.5))
ax0.text(0.1, 0.05, '$\epsilon$ = ' + get_fitted_function_str(fit_mc, fit_function),
verticalalignment='bottom', horizontalalignment='left',
transform=ax0.transAxes,
color='red', fontsize=60, bbox = dict(facecolor = 'white', edgecolor = 'none', alpha = 0.5))
ax1.text(0.1, 0.10, '$\epsilon$ = ' + get_fitted_function_str(fit_SF, fit_function),
verticalalignment='bottom', horizontalalignment='left',
transform=ax1.transAxes,
color='green', fontsize=60, bbox = dict(facecolor = 'white', edgecolor = 'none', alpha = 0.5))
#add scale factor fit
x = numpy.linspace(fit_SF.GetXmin(), fit_SF.GetXmax(), fit_SF.GetNpx())
function_SF = frompyfunc(fit_SF.Eval, 1, 1)
plot(x, function_SF(x), axes=ax1, color = 'green', linewidth = 2)
if 'jet_pt' in trigger_under_study:
ax1.xaxis.set_minor_formatter(FormatStrFormatter('%d'))
plt.draw()
labels = [item.get_text() for item in ax1.get_xmajorticklabels()]
minor_labels = [item.get_text() for item in ax1.get_xminorticklabels()]
new_labels, new_minor_labels = [], []
keep_labels = ['20','50','100','150','200']
for label in labels:
if not label in keep_labels:
label = ''
new_labels.append(label)
for label in minor_labels:
if not label in keep_labels:
label = ''
new_minor_labels.append(label)
ax1.set_xticklabels(new_labels)
ax1.set_xticklabels(new_minor_labels, minor = True)
plt.tight_layout()
for output_format in output_formats:
plt.savefig(output_folder + save_as_name + '_efficiency_matplot.' + output_format)
def get_parameters(trigger_under_study):
x_limits = [10, 200]
x_title = '$p_{\mathrm{T}}$(jet) [GeV]'
y_title = 'Efficiency'
fit_function = ''
fit_range = [-9999, 9999]
if 'jet_pt' in trigger_under_study:
x_limits = [10, 200]
x_title = '$p_{\mathrm{T}}$(jet) [GeV]'
fit_function = "[0]*exp([1]*exp([2]*x))"
fit_range = [20, 200]
elif 'jet_eta' in trigger_under_study:
x_limits = [-3, 3]
x_title = '$\eta$(jet)'
fit_function = '[0]*x*x + [1]*x + [2]'
fit_range = [-3, 3]
elif 'jet_phi' in trigger_under_study:
x_limits = [-4., 4.]
x_title = '$\phi$(jet)'
fit_function = '[0]'
fit_range = [-3.1, 3.1]
return x_limits, x_title, y_title, fit_function, fit_range
def set_plot_styles(data_plot, mc_plot):
mc_plot.SetMarkerColor(2)
mc_plot.SetMarkerStyle(22)
mc_plot.SetMarkerSize(3)
mc_plot.SetLineWidth(6)
mc_plot.SetLineColor(2)
data_plot.SetMarkerSize(3)
def set_parameter_limits(trigger_under_study, fit):
if 'jet_pt' in trigger_under_study:
fit.SetParLimits(0,0.0,1.0);
fit.SetParLimits(1,-100,-1);
fit.SetParLimits(2,-1,-0.01);
if 'jet_eta' in trigger_under_study:
fit.SetParLimits(0,-0.2,0.0);
fit.SetParLimits(1,-1,-1);
fit.SetParLimits(2, 0.2,1.1);
def get_binning(trigger_under_study):
bin_edges = [0, 20, 25, 35, 45, 70, 100, 200]
if 'jet_pt' in trigger_under_study:
bin_edges = [0, 20, 25, 35, 45, 70, 200]
elif 'jet_eta' in trigger_under_study:
bin_edges = [-3, -2.5, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3]
bin_edges = [-3, -2, -1, 0, 1, 2, 3]
bin_edges = [-3, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 3]
elif 'jet_phi' in trigger_under_study:
bin_edges = [-3.5, -3, -2.5, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5]
bin_edge_array = array('d', bin_edges)
return bin_edge_array
def get_fitted_function_str(fit, fit_function):
decimals = 2
function_str = fit_function
function_str = function_str.replace('x*x', 'x^{2}')
function_str = function_str.replace('[0]', str(round(fit.GetParameter(0), decimals)))
function_str = function_str.replace('[1]', str(round(fit.GetParameter(1), decimals)))
function_str = function_str.replace('[2]', str(round(fit.GetParameter(2), decimals)))
function_str = function_str.replace('[3]', str(round(fit.GetParameter(3), decimals)))
function_str = function_str.replace('[4]', str(round(fit.GetParameter(4), decimals)))
function_str = function_str.replace('+ -', '-')
function_str = function_str.replace('- -', '+')
function_str = function_str.replace('*', ' \\times ')
function_str = function_str.replace('-0.0', '0.0')
function_str = function_str.replace('0.0 \\times x^{2}', '')
function_str = function_str.replace('+ 0.0 \\times x', '')
function_str = function_str.strip()#remove whitespace
if function_str.startswith('+'):
function_str = function_str[1:]
if '+ 0.98' in function_str:
print function_str
print len(function_str)
if 'exp' in function_str:
function_str = function_str.replace('exp(', 'e^{\left(')
function_str = function_str.replace(')', '\\right)}')
function_str = '$' + function_str + '$'
return function_str
def get_input_plots(data_file, mc_file, trigger_under_study):
plot_data_total = data_file.Get(trigger_under_study % 'visited')
plot_data_passed = data_file.Get(trigger_under_study % 'fired')
mc_trigger = trigger_under_study
if 'CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT' in trigger_under_study:
#no isolated trigger available (bug!) in analysed MC, use non-iso instead.
mc_trigger = trigger_under_study.replace('CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT', 'CaloIdVT_TrkIdT')
    plot_ttbar_total = mc_file.Get(mc_trigger % 'visited')
    plot_ttbar_passed = mc_file.Get(mc_trigger % 'fired')
plot_data_passed.Sumw2()
plot_data_total.Sumw2()
plot_ttbar_passed.Sumw2()
plot_ttbar_total.Sumw2()
bin_edge_array = get_binning(trigger_under_study)
n_bins = len(bin_edge_array) - 1
plot_data_passed = asrootpy(plot_data_passed.Rebin(n_bins, 'truth', bin_edge_array))
plot_data_total = asrootpy(plot_data_total.Rebin(n_bins, 'truth', bin_edge_array))
plot_ttbar_passed = asrootpy(plot_ttbar_passed.Rebin(n_bins, 'truth', bin_edge_array))
plot_ttbar_total = asrootpy(plot_ttbar_total.Rebin(n_bins, 'truth', bin_edge_array))
return plot_data_passed, plot_data_total, plot_ttbar_passed, plot_ttbar_total
if __name__ == '__main__':
set_root_defaults()
CMS.title['fontsize'] = 40
CMS.x_axis_title['fontsize'] = 50
CMS.y_axis_title['fontsize'] = 50
CMS.axis_label_major['labelsize'] = 40
CMS.axis_label_minor['labelsize'] = 40
CMS.legend_properties['size'] = 40
output_formats = ['png', 'pdf']
output_folder = '/storage/TopQuarkGroup/results/plots/Trigger/'
triggers = [
'HLT_Ele25_CaloIdVT_TrkIdT_TriCentralJet30',
'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_TriCentralJet30',
'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_TriCentralPFJet30',
]
trigger_variables = ['jet_pt',
'jet_eta_PtGT30',
'jet_phi_PtGT30',
'jet_eta_PtGT45',
'jet_phi_PtGT45'
]
trigger_modifiers = [
'_%s_3jets',
'_%s_4orMoreJets']
hltFiles = {}
hltFiles['data'] = '/storage/TopQuarkGroup/results/histogramfiles/HLT_V1/ElectronHad_4692.36pb_PFElectron_PFMuon_PF2PATJets_PFMET.root'
hltFiles['ttbar'] = '/storage/TopQuarkGroup/results/histogramfiles/HLT_V1/TTJetsFall11_4692.36pb_PFElectron_PFMuon_PF2PATJets_PFMET.root'
triggerPlots = ['HLTStudy/' + trigger + '/' + variable + modifier
for trigger in triggers
for variable in trigger_variables
for modifier in trigger_modifiers]
data_file = File(hltFiles['data'])
ttbar_file = File(hltFiles['ttbar'])
for trigger_under_study in triggerPlots:
plot_data_passed, plot_data_total, plot_ttbar_passed, plot_ttbar_total = get_input_plots(data_file,
ttbar_file,
trigger_under_study)
make_efficiency_plot(plot_data_passed, plot_data_total, plot_ttbar_passed, plot_ttbar_total,
trigger_under_study % '')
```
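The scale factor built in `make_efficiency_plot` via `Clone`/`Multiply`/`Divide` is the per-bin ratio of the data efficiency to the MC efficiency, pass_data * total_mc / (total_data * pass_mc). A small numeric check with hypothetical bin contents makes the equivalence explicit:
```python
import numpy as np

# hypothetical per-bin counts for one trigger/variable
pass_data, total_data = np.array([80., 95.]), np.array([100., 100.])
pass_mc, total_mc = np.array([90., 98.]), np.array([100., 100.])

eff_data = pass_data / total_data
eff_mc = pass_mc / total_mc
scale_factor = eff_data / eff_mc

# same per-bin result as the histogram arithmetic used above
assert np.allclose(scale_factor, pass_data * total_mc / (total_data * pass_mc))
```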
#### File: src/unfolding_tests/create_toy_mc.py
```python
from optparse import OptionParser
from tools.toy_mc import generate_toy_MC_from_distribution,\
generate_toy_MC_from_2Ddistribution
from tools.Unfolding import get_unfold_histogram_tuple
from tools.file_utilities import make_folder_if_not_exists
from rootpy.io import File
from ROOT import TH1F
from config import XSectionConfig
from tools.ROOT_utils import set_root_defaults
def main():
set_root_defaults()
# prevent directory ownership of ROOT histograms (python does the garbage collection)
TH1F.AddDirectory( False )
parser = OptionParser()
parser.add_option( "-n", "--n_toy_mc",
dest = "n_toy_mc", default = 300,
help = "number of toy MC to create", type = int )
parser.add_option( "-o", "--output",
dest = "output_folder", default = 'data/toy_mc/',
help = "output folder for toy MC" )
parser.add_option( "-v", "--variable", dest = "variable", default = 'MET',
help = "set the variable to analyse (MET, HT, ST, MT, WPT)" )
parser.add_option( "-m", "--metType", dest = "metType", default = 'type1',
help = "set MET type for analysis of MET, ST or MT" )
parser.add_option( "-c", "--centre-of-mass-energy", dest = "CoM", default = 8,
help = "set the centre of mass energy for analysis. Default = 8 [TeV]", type = int )
parser.add_option( '-V', '--verbose', dest = "verbose", action = "store_true",
help = "Print the event number, reco and gen variable value" )
( options, _ ) = parser.parse_args()
measurement_config = XSectionConfig( options.CoM )
centre_of_mass = options.CoM
ttbar_xsection = measurement_config.ttbar_xsection
variable = options.variable
met_type = measurement_config.translate_options[options.metType]
n_toy_mc = options.n_toy_mc
make_folder_if_not_exists( options.output_folder )
# get histograms
input_file_hists = File( measurement_config.unfolding_madgraph )
# define output file
out_file_template = '%s/toy_mc_%s_N_%d_%dTeV.root'
out_file_name = out_file_template % (options.output_folder, variable, n_toy_mc, centre_of_mass)
output = File( out_file_name, 'recreate' )
for channel in ['electron', 'muon']:
# first get the weights
h_truth, h_measured, h_response, _ = get_unfold_histogram_tuple( input_file_hists,
variable,
channel,
met_type,
centre_of_mass,
ttbar_xsection,
load_fakes = False )
# create directories
directory = output.mkdir( channel )
mkdir = directory.mkdir
cd = directory.cd
cd()
# generate toy MC
for i in range( 1, n_toy_mc + 1 ):
mkdir( 'toy_%d' % i )
cd( 'toy_%d' % i )
# create histograms
# add tuples (truth, measured, response) of histograms
truth = generate_toy_MC_from_distribution(h_truth)
measured = generate_toy_MC_from_distribution(h_measured)
response = generate_toy_MC_from_2Ddistribution(h_response)
truth.SetName('truth')
measured.SetName('measured')
response.SetName('response')
truth.Write()
measured.Write()
response.Write()
output.Write()
output.Close()
if __name__ == '__main__':
main()
```
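`generate_toy_MC_from_distribution` is imported from `tools.toy_mc` and not shown here. One common way to build such toys — and only an assumption about what that helper actually does — is to fluctuate every bin of the input histogram with an independent Poisson draw:
```python
import numpy as np

def poisson_toy_from_array(bin_contents):
    # Hypothetical stand-in for generate_toy_MC_from_distribution: each bin is
    # replaced by an independent Poisson draw with mean equal to the original content.
    return np.random.poisson(np.asarray(bin_contents, dtype=float))

# e.g. toy = poisson_toy_from_array([120., 80., 35., 10.])
```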
#### File: DailyPythonScripts/test/fix_overflow.py
```python
import unittest
from rootpy.plotting import Hist, Hist2D
from tools.hist_utilities import fix_overflow
import numpy as np
N_bkg1 = 9000
N_signal = 1000
N_bkg1_obs = 10000
N_signal_obs = 2000
N_data = N_bkg1_obs + N_signal_obs
mu1, mu2, sigma1, sigma2 = 100, 140, 15, 5
x1 = mu1 + sigma1 * np.random.randn( N_bkg1 )
x2 = mu2 + sigma2 * np.random.randn( N_signal )
x1_obs = mu1 + sigma1 * np.random.randn( N_bkg1_obs )
x2_obs = mu2 + sigma2 * np.random.randn( N_signal_obs )
class Test( unittest.TestCase ):
def setUp( self ):
# create histograms
self.h1 = Hist( 60, 40, 100, title = '1D' )
self.h2 = Hist2D( 60, 40, 100, 100, 40, 140 )
# fill the histograms with our distributions
map( self.h1.Fill, x1 )
self.h2.fill_array( np.random.multivariate_normal(
mean = ( 100, 140 ),
cov = np.arange( 4 ).reshape( 2, 2 ),
size = ( int( 1E6 ), ) )
)
# map(h2.Fill, (x1, x2, 1))
def tearDown( self ):
pass
def test_overflow_1D( self ):
last_bin = self.h1.nbins()
overflow_bin = last_bin + 1
overflow = self.h1.GetBinContent( overflow_bin )
last_bin_content = self.h1.GetBinContent( last_bin )
self.assertGreater( overflow, 0, '1D hist: No overflow present, wrong setup.' )
h1 = fix_overflow( self.h1 )
self.assertEqual( h1.GetBinContent( overflow_bin ), 0., '1D hist: Overflow bin is not 0.' )
self.assertEqual( h1.GetBinContent( last_bin ), last_bin_content + overflow, '1D hist: last bin is not correct.' )
def test_overflow_2D( self ):
before_fix = check_overflow_in_2DHist(self.h2)
has_overflow_in_x = before_fix['has_overflow_in_x']
has_overflow_in_y = before_fix['has_overflow_in_y']
self.assertGreater(has_overflow_in_x, 0, '2D hist: No overflow in x present, wrong setup.')
self.assertGreater(has_overflow_in_y, 0, '2D hist: No overflow in y present, wrong setup.')
h2 = fix_overflow( self.h2 )
after_fix = check_overflow_in_2DHist(h2)
has_overflow_in_x = after_fix['has_overflow_in_x']
has_overflow_in_y = after_fix['has_overflow_in_y']
# check if overflow has been reset
self.assertEqual( has_overflow_in_x, 0, '2D hist: Overflow in x is not 0.' )
self.assertEqual( has_overflow_in_y, 0, '2D hist: Overflow in y is not 0.' )
# now check if new last bin content is equal to the old one plus overflow
overflow_x_before = before_fix['overflow_x']
overflow_y_before = before_fix['overflow_y']
last_bin_content_x_before = before_fix['last_bin_content_x']
last_bin_content_y_before = before_fix['last_bin_content_y']
last_bin_content_x_after = after_fix['last_bin_content_x']
last_bin_content_y_after = after_fix['last_bin_content_y']
check_last_bin_content_x = [overflow + last_bin_content for overflow,last_bin_content in zip(overflow_x_before, last_bin_content_x_before)]
check_last_bin_content_y = [overflow + last_bin_content for overflow,last_bin_content in zip(overflow_y_before, last_bin_content_y_before)]
# remember, the last item in each list is actually the overflow, which should be 0 and the above calculation is not correct.
self.assertTrue(check_equal_lists(check_last_bin_content_x[:-2], last_bin_content_x_after[:-2]), '2D hist: last bins in x are not correct.')
self.assertTrue(check_equal_lists(check_last_bin_content_y[:-2], last_bin_content_y_after[:-2]), '2D hist: last bins in y are not correct.')
def check_overflow_in_2DHist(hist):
last_bin_x = hist.nbins()
overflow_bin_x = last_bin_x + 1
last_bin_y = hist.nbins(axis=1)
overflow_bin_y = last_bin_y + 1
has_overflow_in_x = 0
has_overflow_in_y = 0
overflow_x = []
overflow_y = []
last_bin_content_x = []
last_bin_content_y = []
# first check the y overflows
# range(start, end) returns (start ... end -1)
for x in range(1, overflow_bin_x + 1):
overflow = hist.GetBinContent(x, overflow_bin_y)
if overflow > 0:
has_overflow_in_y += 1
overflow_y.append(overflow)
last_bin_content_y.append(hist.GetBinContent(x, last_bin_y))
for y in range(1, overflow_bin_y + 1):
overflow = hist.GetBinContent(overflow_bin_x, y)
overflow_x.append(overflow)
last_bin_content_x.append(hist.GetBinContent(last_bin_x, y))
if overflow > 0:
has_overflow_in_x += 1
result = {
'has_overflow_in_x':has_overflow_in_x,
'has_overflow_in_y':has_overflow_in_y,
'overflow_x': overflow_x,
'overflow_y': overflow_y,
'last_bin_content_x': last_bin_content_x,
'last_bin_content_y': last_bin_content_y,
}
return result
def check_equal_lists(list1, list2):
return len(list1) == len(list2) and sorted(list1) == sorted(list2)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testTemplates']
unittest.main()
```
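The assertions in `test_overflow_1D` pin down the 1D behaviour being tested: the overflow bin is emptied and its content added to the last visible bin. A minimal sketch of that behaviour (not the actual implementation in `tools.hist_utilities`) is:
```python
def fix_overflow_1d(hist):
    # Fold the overflow bin into the last visible bin and reset the overflow,
    # matching what test_overflow_1D checks (bin errors are ignored in this sketch).
    last_bin = hist.nbins()
    overflow_bin = last_bin + 1
    hist.SetBinContent(last_bin, hist.GetBinContent(last_bin) + hist.GetBinContent(overflow_bin))
    hist.SetBinContent(overflow_bin, 0.)
    return hist
```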
#### File: DailyPythonScripts/test/tools_Calculation.py
```python
from __future__ import division
import unittest
from random import random
import numpy as np
from rootpy.plotting import Hist2D
# under test
from tools.Calculation import calculate_purities
from tools.Calculation import calculate_stabilities
from tools.Calculation import decombine_result
class Test( unittest.TestCase ):
def setUp( self ):
# we only test symmetric bins for now
self.n_bins_x = 6
self.n_bins_y = 6
# only entries in diagonals, p = 1, s = 1 for all bins
self.best_case = Hist2D( self.n_bins_x, -3, 3, self.n_bins_y, 0, 6 )
for i in range( 1, self.n_bins_x + 1 ):
self.best_case.SetBinContent( i, i, random() * 1000 )
        # random ellipse
self.random_elipse = Hist2D( self.n_bins_x, -3, 3, self.n_bins_y, 0, 6 )
self.random_elipse.fill_array(
np.random.multivariate_normal(
mean = ( 0, 3 ),
cov = [[1., 1.12], [1.12, 2.25]],
size = ( 1000 )
)
)
# this creates
# [4, 0, 0, 0, 0, 1],
# [0, 0, 0, 0, 1, 0],
# [0, 0, 0, 1, 0, 0],
# [0, 0, 1, 0, 0, 0],
# [0, 1, 0, 0, 0, 0],
# [1, 0, 0, 0, 0, 3],
# this should result in a purity and stability value of 1 for all bins
# except the first and last. The first bin should have p = 1/5 and
# s = 1/4 and the last bin should have p = 1/4 and s = 1/5
self.pre_calculated = Hist2D( self.n_bins_x, -3, 3, self.n_bins_y, 0, 6 )
for i in range( 1, self.n_bins_x + 1 ):
self.pre_calculated.SetBinContent( i, i, 1 )
self.pre_calculated.SetBinContent( 1, self.n_bins_y, 4 )
self.pre_calculated.SetBinContent( self.n_bins_x, 1, 3 )
def tearDown( self ):
pass
def test_best_case_purity( self ):
purities = calculate_purities( self.best_case )
self.assertEqual( len( purities ), self.n_bins_x, 'Invalid number of purity terms' )
for p in purities:
self.assertEqual( p, 1 )
def test_best_case_stability( self ):
stabilities = calculate_stabilities( self.best_case )
self.assertEqual( len( stabilities ), self.n_bins_x, 'Invalid number of stability terms' )
for s in stabilities:
self.assertEqual( s, 1 )
def test_random_elipse_purity( self ):
purities = calculate_purities( self.random_elipse )
self.assertEqual( len( purities ), self.n_bins_x, 'Invalid number of purity terms' )
# purities should always be above 0 and below ~0.5
for p in purities:
self.assertGreater( p, 0 )
self.assertLess( p, 0.5 )
def test_random_elipse_stability( self ):
stabilities = calculate_stabilities( self.random_elipse )
self.assertEqual( len( stabilities ), self.n_bins_x, 'Invalid number of stability terms' )
# stabilities should always be above 0 and below ~0.6
for s in stabilities:
self.assertGreater( s, 0 )
self.assertLess( s, 0.6 )
def test_pre_calculated_purity( self ):
purities = calculate_purities( self.pre_calculated )
self.assertEqual( len( purities ), self.n_bins_x, 'Invalid number of purity terms' )
for p in purities[1:-1]:
self.assertEqual( p, 1 )
self.assertEqual( purities[0], 0.2 )
self.assertEqual( purities[-1], 0.25 )
def test_pre_calculated_stability( self ):
stabilities = calculate_stabilities( self.pre_calculated )
self.assertEqual( len( stabilities ), self.n_bins_x, 'Invalid number of stability terms' )
for s in stabilities[1:-1]:
self.assertEqual( s, 1 )
self.assertEqual( stabilities[0], 0.25 )
self.assertEqual( stabilities[-1], 0.2 )
def test_decombine_result_default(self):
N_signal = 100
N_background = 20
N_total = N_signal + N_background
ratio_signal_bkg = N_signal/N_background
N_total_prime = N_total * 2
N_signal_prime, N_background_prime = decombine_result((N_total_prime, 0), ratio_signal_bkg)
self.assertEqual(N_signal_prime[0], N_signal * 2)
self.assertEqual(N_background_prime[0], N_background * 2)
def test_decombine_result_background_free(self):
N_signal = 100
N_background = 0
N_total = N_signal
ratio_signal_bkg = 0
N_total_prime = N_total * 2
N_signal_prime, N_background_prime = decombine_result((N_total_prime, 0), ratio_signal_bkg)
self.assertEqual(N_signal_prime[0], N_signal * 2)
self.assertEqual(N_background_prime[0], N_background * 2)
def test_decombine_result_multiple_backgrounds(self):
N_signal = 100
N_background_1 = 20
N_background_2 = 40
N_total = N_signal + N_background_1 + N_background_2
# ratio of bkg_1 to other samples
ratio_signal_bkg_1 = (N_signal + N_background_2)/N_background_1
# ratio of bkg_2 to signal
ratio_signal_bkg_2 = N_signal/N_background_2
N_total_prime = N_total * 2
N_signal_plus_bkg_2_prime, N_background_1_prime = decombine_result((N_total_prime, 0), ratio_signal_bkg_1)
N_signal_prime, N_background_2_prime = decombine_result(N_signal_plus_bkg_2_prime, ratio_signal_bkg_2)
self.assertEqual(N_signal_prime[0], N_signal * 2)
self.assertEqual(N_background_1_prime[0], N_background_1 * 2)
self.assertEqual(N_background_2_prime[0], N_background_2 * 2)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testTemplates']
unittest.main()
```
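The expectations for `pre_calculated` fix the definitions under test: purity is the diagonal content divided by the total in the corresponding x-axis bin, and stability is the diagonal divided by the total in the corresponding y-axis bin (which axis holds generated and which reconstructed values is an assumption here, since `calculate_purities` lives in `tools.Calculation`). A numpy sketch reproducing the expected first/last-bin values:
```python
import numpy as np

def purities_and_stabilities(migration):
    # migration[i, j]: content of x-bin i, y-bin j of the 2D histogram
    m = np.asarray(migration, dtype=float)
    diagonal = np.diag(m)
    purities = diagonal / m.sum(axis=1)      # divide by the x-bin totals
    stabilities = diagonal / m.sum(axis=0)   # divide by the y-bin totals
    return purities, stabilities

# reproduces test_pre_calculated_purity/stability
m = np.identity(6)
m[0, 5] = 4   # SetBinContent(1, n_bins_y, 4)
m[5, 0] = 3   # SetBinContent(n_bins_x, 1, 3)
p, s = purities_and_stabilities(m)
assert abs(p[0] - 0.2) < 1e-12 and abs(s[0] - 0.25) < 1e-12
assert abs(p[-1] - 0.25) < 1e-12 and abs(s[-1] - 0.2) < 1e-12
```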
#### File: DailyPythonScripts/test/tools_Fitting_FitData.py
```python
import unittest
from tools.Fitting import FitData, FitDataCollection
from rootpy.plotting import Hist
import numpy as np
from tools.hist_utilities import adjust_overflow_to_limit
N_bkg1 = 9000
N_signal = 1000
N_bkg1_obs = 10000
N_signal_obs = 2000
N_data = N_bkg1_obs + N_signal_obs
mu1, mu2, sigma1, sigma2 = 100, 140, 15, 5
x1 = mu1 + sigma1 * np.random.randn( N_bkg1 )
x2 = mu2 + sigma2 * np.random.randn( N_signal )
x1_obs = mu1 + sigma1 * np.random.randn( N_bkg1_obs )
x2_obs = mu2 + sigma2 * np.random.randn( N_signal_obs )
x3 = mu2 + sigma1 * np.random.randn( N_bkg1 )
x4 = mu1 + sigma2 * np.random.randn( N_signal )
x3_obs = mu2 + sigma1 * np.random.randn( N_bkg1_obs )
x4_obs = mu1 + sigma2 * np.random.randn( N_signal_obs )
x_min = 40
x_max = 200
data_scale = 1.2
N_data = N_data * data_scale
class Test( unittest.TestCase ):
def setUp( self ):
# create histograms
h_bkg1_1 = Hist( 100, 40, 200, title = 'Background' )
h_signal_1 = h_bkg1_1.Clone( title = 'Signal' )
h_data_1 = h_bkg1_1.Clone( title = 'Data' )
h_bkg1_2 = h_bkg1_1.Clone( title = 'Background' )
h_signal_2 = h_bkg1_1.Clone( title = 'Signal' )
h_data_2 = h_bkg1_1.Clone( title = 'Data' )
# fill the histograms with our distributions
map( h_bkg1_1.Fill, x1 )
map( h_signal_1.Fill, x2 )
map( h_data_1.Fill, x1_obs )
map( h_data_1.Fill, x2_obs )
map( h_bkg1_2.Fill, x3 )
map( h_signal_2.Fill, x4 )
map( h_data_2.Fill, x3_obs )
map( h_data_2.Fill, x4_obs )
h_data_1.Scale(data_scale)
h_data_2.Scale(data_scale)
self.histograms_1 = {'signal': h_signal_1,
'bkg1': h_bkg1_1}
self.histograms_2 = {'signal': h_signal_2,
'bkg1': h_bkg1_2}
self.histograms_3 = {'var1': h_signal_1,
'bkg1': h_bkg1_1}
self.fit_data_1 = FitData( h_data_1, self.histograms_1, fit_boundaries = ( x_min, x_max ))
self.fit_data_2 = FitData( h_data_2, self.histograms_2, fit_boundaries = ( x_min, x_max ))
self.fit_data_3 = FitData( h_data_1, self.histograms_3, fit_boundaries = ( x_min, x_max ))
self.collection_1 = FitDataCollection()
self.collection_1.add( self.fit_data_1, 'signal region' )
self.collection_1.add( self.fit_data_2, 'control region' )
self.collection_1.set_normalisation_constraints({'bkg1': 0.5})
self.collection_2 = FitDataCollection()
self.collection_2.add( self.fit_data_1 )
self.collection_2.add( self.fit_data_2 )
self.collection_2.set_normalisation_constraints({'bkg1': 0.5})
self.single_collection = FitDataCollection()
self.single_collection.add( self.fit_data_1 )
self.single_collection.set_normalisation_constraints({'bkg1': 0.5})
self.non_simultaneous_fit_collection = FitDataCollection()
self.non_simultaneous_fit_collection.add( self.fit_data_1 )
self.non_simultaneous_fit_collection.add( self.fit_data_3 )
self.h_data = h_data_1
self.h_bkg1 = h_bkg1_1
self.h_signal = h_signal_1
def tearDown( self ):
pass
def test_is_valid_for_simultaneous_fit( self ):
self.assertTrue( self.collection_1.is_valid_for_simultaneous_fit(), msg = 'has_same_n_samples: ' + str(self.collection_1.has_same_n_samples) + ', has_same_n_data: ' + str(self.collection_1.has_same_n_data) )
self.assertTrue( self.collection_2.is_valid_for_simultaneous_fit(), msg = 'has_same_n_samples: ' + str(self.collection_1.has_same_n_samples) + ', has_same_n_data: ' + str(self.collection_1.has_same_n_data) )
self.assertFalse( self.non_simultaneous_fit_collection.is_valid_for_simultaneous_fit() )
def test_samples( self ):
samples = sorted( self.histograms_1.keys() )
samples_from_fit_data = sorted( self.fit_data_1.samples )
samples_from_fit_data_collection = self.collection_1.mc_samples()
self.assertEqual( samples, samples_from_fit_data )
self.assertEqual( samples, samples_from_fit_data_collection )
def test_normalisation( self ):
normalisation = {name:adjust_overflow_to_limit(histogram, x_min, x_max).Integral() for name, histogram in self.histograms_1.iteritems()}
normalisation_from_fit_data = self.fit_data_1.normalisation
normalisation_from_single_collection = self.single_collection.mc_normalisation()
normalisation_from_collection = self.collection_1.mc_normalisation( 'signal region' )
normalisation_from_collection_1 = self.collection_1.mc_normalisation()['signal region']
for sample in normalisation.keys():
self.assertEqual( normalisation[sample], normalisation_from_fit_data[sample] )
self.assertEqual( normalisation[sample], normalisation_from_single_collection[sample] )
self.assertEqual( normalisation[sample], normalisation_from_collection[sample] )
self.assertEqual( normalisation[sample], normalisation_from_collection_1[sample] )
# data normalisation
normalisation = self.h_data.integral( overflow = True )
normalisation_from_fit_data = self.fit_data_1.n_data()
normalisation_from_single_collection = self.single_collection.n_data()
normalisation_from_collection = self.collection_1.n_data( 'signal region' )
normalisation_from_collection_1 = self.collection_1.n_data()['signal region']
self.assertEqual( normalisation, normalisation_from_fit_data )
self.assertEqual( normalisation, normalisation_from_single_collection )
self.assertEqual( normalisation, normalisation_from_collection )
self.assertEqual( normalisation, normalisation_from_collection_1 )
self.assertAlmostEqual(normalisation, self.collection_1.max_n_data(), delta = 1 )
def test_real_data( self ):
real_data = self.fit_data_1.real_data_histogram()
self.assertEqual( self.h_data.integral( overflow = True ), real_data.Integral() )
def test_overwrite_warning( self ):
c = FitDataCollection()
c.add( self.fit_data_1, 'var1' )
        self.assertRaises( UserWarning, c.add, self.fit_data_1, 'var1' )
def test_vectors( self ):
h_signal = adjust_overflow_to_limit( self.h_signal, x_min, x_max )
h_signal.Scale(1/h_signal.Integral())
h_bkg1 = adjust_overflow_to_limit( self.h_bkg1, x_min, x_max )
h_bkg1.Scale(1/h_bkg1.Integral())
signal = list( h_signal.y() )
bkg1 = list( h_bkg1.y() )
v_from_fit_data = self.fit_data_1.vectors
v_from_single_collection = self.single_collection.vectors()
# v_from_collection = self.collection_1.vectors( 'signal region' )
# v_from_collection_1 = self.collection_1.vectors()['signal region']
self.assertEqual(signal, v_from_fit_data['signal'])
self.assertEqual(bkg1, v_from_fit_data['bkg1'])
self.assertEqual(signal, v_from_single_collection['signal'])
self.assertEqual(bkg1, v_from_single_collection['bkg1'])
def test_constraints(self):
constraint_from_single_collection = self.single_collection.constraints()['bkg1']
self.assertEqual(0.5, constraint_from_single_collection)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testTemplates']
unittest.main()
```
#### File: DailyPythonScripts/test/tools_Fitting_Minuit.py
```python
import unittest
from tools.Fitting import Minuit, FitData, FitDataCollection
from rootpy.plotting import Hist
from math import sqrt
import numpy as np
N_bkg1 = 9000
N_signal = 1000
N_bkg1_obs = 10000
N_signal_obs = 2000
N_data = N_bkg1_obs + N_signal_obs
mu1, mu2, sigma1, sigma2 = 100, 140, 15, 5
x1 = mu1 + sigma1 * np.random.randn( N_bkg1 )
x2 = mu2 + sigma2 * np.random.randn( N_signal )
x1_obs = mu1 + sigma1 * np.random.randn( N_bkg1_obs )
x2_obs = mu2 + sigma2 * np.random.randn( N_signal_obs )
x3 = mu2 + sigma1 * np.random.randn( N_bkg1 )
x4 = mu1 + sigma2 * np.random.randn( N_signal )
x3_obs = mu2 + sigma1 * np.random.randn( N_bkg1_obs )
x4_obs = mu1 + sigma2 * np.random.randn( N_signal_obs )
data_scale = 1.2
N_data = N_data * data_scale
class Test( unittest.TestCase ):
def setUp( self ):
# create histograms
h_bkg1_1 = Hist( 100, 40, 200, title = 'Background' )
h_signal_1 = h_bkg1_1.Clone( title = 'Signal' )
h_data_1 = h_bkg1_1.Clone( title = 'Data' )
h_bkg1_2 = h_bkg1_1.Clone( title = 'Background' )
h_signal_2 = h_bkg1_1.Clone( title = 'Signal' )
h_data_2 = h_bkg1_1.Clone( title = 'Data' )
# fill the histograms with our distributions
map( h_bkg1_1.Fill, x1 )
map( h_signal_1.Fill, x2 )
map( h_data_1.Fill, x1_obs )
map( h_data_1.Fill, x2_obs )
map( h_bkg1_2.Fill, x3 )
map( h_signal_2.Fill, x4 )
map( h_data_2.Fill, x3_obs )
map( h_data_2.Fill, x4_obs )
h_data_1.Scale( data_scale )
h_data_2.Scale( data_scale )
histograms_1 = {'signal': h_signal_1,
'bkg1': h_bkg1_1}
histograms_2 = {'signal': h_signal_2,
'bkg1': h_bkg1_2}
fit_data_1 = FitData( h_data_1, histograms_1, fit_boundaries = ( 40, 200 ) )
fit_data_2 = FitData( h_data_2, histograms_2, fit_boundaries = ( 40, 200 ) )
single_fit_collection = FitDataCollection()
single_fit_collection.add( fit_data_1 )
collection_1 = FitDataCollection()
collection_1.add( fit_data_1, 'var1' )
collection_1.add( fit_data_2, 'var2' )
collection_2 = FitDataCollection()
collection_2.add( fit_data_1, 'var1' )
collection_2.add( fit_data_2, 'var2' )
collection_2.set_normalisation_constraints( {'bkg1':0.5} )
collection_3 = FitDataCollection()
collection_3.add( fit_data_1, 'var1' )
collection_3.add( fit_data_2, 'var2' )
collection_3.set_normalisation_constraints( {'bkg1':0.001} )
self.minuit_fitter = Minuit( single_fit_collection )
self.minuit_fitter.fit()
self.simultaneous_fit = Minuit( collection_1 )
self.simultaneous_fit.fit()
self.simultaneous_fit_with_constraints = Minuit( collection_2 )
self.simultaneous_fit_with_constraints.fit()
self.simultaneous_fit_with_bad_constraints = Minuit( collection_3 )
self.simultaneous_fit_with_bad_constraints.fit()
def tearDown( self ):
pass
def test_normalisation( self ):
normalisation = self.minuit_fitter.normalisation
self.assertAlmostEqual( normalisation["data"], N_data, delta = sqrt( N_data ) )
self.assertAlmostEqual( normalisation["bkg1"], N_bkg1, delta = sqrt( N_bkg1 ) )
self.assertAlmostEqual( normalisation["signal"], N_signal, delta = sqrt( N_signal ) )
def test_result( self ):
results = self.minuit_fitter.readResults()
self.assertAlmostEqual( N_signal_obs * data_scale, results['signal'][0], delta = 2 * results['signal'][1] )
self.assertAlmostEqual( N_bkg1_obs * data_scale, results['bkg1'][0], delta = 2 * results['bkg1'][1] )
def test_result_simultaneous( self ):
results = self.simultaneous_fit.readResults()
self.assertAlmostEqual( N_signal_obs * data_scale, results['signal'][0], delta = 2 * results['signal'][1] )
self.assertAlmostEqual( N_bkg1_obs * data_scale, results['bkg1'][0], delta = 2 * results['bkg1'][1] )
def test_result_simultaneous_with_constraints( self ):
results = self.simultaneous_fit_with_constraints.readResults()
self.assertAlmostEqual( N_signal_obs * data_scale, results['signal'][0], delta = 2 * results['signal'][1] )
self.assertAlmostEqual( N_bkg1_obs * data_scale, results['bkg1'][0], delta = 2 * results['bkg1'][1] )
def test_result_simultaneous_with_bad_constraints( self ):
results = self.simultaneous_fit_with_bad_constraints.readResults()
self.assertNotAlmostEqual( N_signal_obs * data_scale, results['signal'][0], delta = results['signal'][1] )
self.assertNotAlmostEqual( N_bkg1_obs * data_scale, results['bkg1'][0], delta = results['bkg1'][1] )
def test_relative_error( self ):
results = self.minuit_fitter.readResults()
self.assertLess( results['signal'][1] / results['signal'][0], 0.1 )
self.assertLess( results['bkg1'][1] / results['bkg1'][0], 0.1 )
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testTemplates']
unittest.main()
```
#### File: DailyPythonScripts/tools/datapoint_position.py
```python
from rootpy import asrootpy
from tools.hist_utilities import rebin_asymmetric
def get_bin_centers(bin_edges):
centers = []
add_center = centers.append
for lowerEdge, upperEdge in zip(bin_edges[:-1], bin_edges[1:]):
center = (upperEdge - lowerEdge)/2 + lowerEdge
add_center(center)
return centers
def barycenters(finedbinnedhist, coarsebinnedhist):
distribution = list(finedbinnedhist.y())
distribution_binEdges = list(finedbinnedhist.xedges())
data_binEdges = list(coarsebinnedhist.xedges())
centers = []
old_centers = []
for lowerEdge, upperEdge in zip(data_binEdges[:-1], data_binEdges[1:]):
data_position = 0
mass = 0
for x,y in zip(distribution_binEdges[1:], distribution):
if x < upperEdge and x>= lowerEdge:
data_position += x*y
mass +=y
data_position /= mass
centers.append(data_position)
old_centers.append(object)
return centers
def calculate_bin_centers(hist, bins):
pass
def calculate_bin_widths(data_binEdges):
widths = []
add_width = widths.append
for lowerEdge, upperEdge in zip(data_binEdges[:-1], data_binEdges[1:]):
        # bin width must not depend on the sign of the edges
        width = abs(upperEdge - lowerEdge)
add_width(width)
return widths
def calculate_correct_x_coordinates(mc_truth, bins):
mc_temp = rebin_asymmetric(mc_truth, bins)
widths = calculate_bin_widths(bins)
x_positions = []
add_position = x_positions.append
for bin_i, width in enumerate(widths):
y = mc_temp.GetBinContent(bin_i + 1)/width
#find closest y-distance on MC hist and get the x-value
        x_low = bins[bin_i]
x_high = x_low + width
x = find_x_of_closest_approach(mc_truth, x_low, x_high, y)
add_position(x)
return x_positions
def find_x_of_closest_approach(hist, x_low, x_high, y_search):
y_values = list(hist.y())
x_edges = list(hist.xedges())
closest_x = 0
closest_distance = 99999999
centers = get_bin_centers(x_edges)
for x,y, center in zip(x_edges, y_values, centers):
if x < x_high and x>= x_low:
distance = abs(y_search - y)
if distance < closest_distance:
closest_distance = distance
closest_x = center
return closest_x
```
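`calculate_correct_x_coordinates` places each data point at the x value where the finely binned MC truth comes closest to the mean bin content (content divided by bin width), rather than at the geometric bin centre. A hypothetical usage sketch, assuming a finely binned rootpy truth histogram:
```python
import numpy as np
from rootpy.plotting import Hist
from tools.datapoint_position import calculate_correct_x_coordinates

# hypothetical, steeply falling truth distribution and a coarse binning
mc_truth = Hist(200, 0, 200)
map(mc_truth.Fill, np.random.exponential(40, 100000))
bins = [0, 25, 45, 70, 100, 150, 200]

# one x position per coarse bin, at the point of closest approach to the mean bin content
x_positions = calculate_correct_x_coordinates(mc_truth, bins)
```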
#### File: DailyPythonScripts/tools/plotting.py
```python
import matplotlib as mpl
from tools.file_utilities import make_folder_if_not_exists
mpl.use('agg')
import matplotlib.pyplot as plt
import rootpy.plotting.root2matplotlib as rplt
from rootpy.plotting import HistStack
from config import CMS
from matplotlib.patches import Rectangle
from copy import deepcopy
import matplotlib.gridspec as gridspec
from matplotlib.ticker import MultipleLocator
from itertools import cycle
from matplotlib import rc
rc('font',**CMS.font)
rc( 'text', usetex = True )
class Histogram_properties:
name = 'Test'
title = "Test"
x_axis_title = "I am the x-axis"
y_axis_title = "I am the y-axis"
x_limits = []
y_limits = []
mc_error = 0.
mc_errors_label = 'MC uncertainty'
normalise = False
legend_location = 'best'
set_log_y = False
legend_columns = 1
has_ratio = False
ratio_y_limits = []
rebin = 1
def __init__( self, dictionary = {} ):
for name, value in dictionary.iteritems():
if hasattr( self, name ):
setattr( self, name, value )
# prototype
class Control_plot:
lumi = 5050
rebin = 1
histogram_properties = Histogram_properties()
channel = 'combined'
b_tag_bin = '2orMoreBtags'
def __init__( self, control_region, qcd_control_region, histogram_files, **kwargs ):
self.control_region = control_region
self.qcd_control_region = qcd_control_region
self.histogram_files = histogram_files
self.b_tag_bin = kwargs.pop( 'b_tag_bin', self.b_tag_bin )
self.lumi = kwargs.pop( 'lumi', self.lumi )
self.rebin = kwargs.pop( 'rebin', self.rebin )
self.histogram_properties = kwargs.pop( 'histogram_properties', self.histogram_properties )
self.channel = kwargs.pop( 'channel', self.channel )
def make_data_mc_comparison_plot( histograms = [],
histogram_lables = [],
histogram_colors = [],
histogram_properties = Histogram_properties(),
data_index = 0,
save_folder = 'plots/',
save_as = ['pdf', 'png'],
normalise = False,
show_ratio = False,
show_stat_errors_on_mc = False,
draw_vertical_line = 0,
):
save_folder = check_save_folder(save_folder)
# make copies in order not to mess with existing histograms
histograms_ = deepcopy(histograms)
stack = HistStack()
add_mc = stack.Add
for index, histogram in enumerate( histograms_ ):
label = histogram_lables[index]
color = histogram_colors[index]
histogram.SetTitle( label )
if normalise:
histogram.Sumw2()
if not index == data_index:
histogram.fillstyle = 'solid'
histogram.fillcolor = color
histogram.legendstyle = 'F'
add_mc( histogram )
data = histograms_[data_index]
data.SetMarkerSize( CMS.data_marker_size )
if normalise:
n_events_data = data.Integral()
n_events_mc = stack.Integral()
data.Scale( 1 / n_events_data )
stack.Scale( 1 / n_events_mc )
# plot with matplotlib
plt.figure( figsize = CMS.figsize, dpi = CMS.dpi, facecolor = CMS.facecolor )
if show_ratio:
ratio = data.Clone( 'ratio' )
ratio.Divide( sum( stack.GetHists() ) )
ratio.SetMarkerSize( 3 )
gs = gridspec.GridSpec( 2, 1, height_ratios = [5, 1] )
axes = plt.subplot( gs[0] )
else:
axes = plt.axes()
if histogram_properties.set_log_y:
axes.set_yscale( 'log', nonposy = "clip" )
axes.set_ylim( ymin = 1e-2 )
mc_error = histogram_properties.mc_error
if mc_error > 0:
stack_lower = sum( stack.GetHists() )
stack_upper = stack_lower.Clone( 'upper' )
stack_lower.Scale( 1 - mc_error )
stack_upper.Scale( 1 + mc_error )
rplt.fill_between( stack_upper,
stack_lower, axes, facecolor = '0.75',
alpha = 0.5, hatch = '/',
zorder = len(histograms_) + 1 )
if not mc_error > 0 and show_stat_errors_on_mc:
stack_lower = sum( stack.GetHists() )
mc_errors = list( stack_lower.yerravg() )
stack_upper = stack_lower.Clone( 'upper' )
        for bin_i in range( 1, stack_lower.GetNbinsX() + 1 ):  # include the last bin
stack_lower.SetBinContent( bin_i, stack_lower.GetBinContent( bin_i ) - mc_errors[bin_i - 1] )
stack_upper.SetBinContent( bin_i, stack_upper.GetBinContent( bin_i ) + mc_errors[bin_i - 1] )
rplt.fill_between( stack_upper, stack_lower, axes, facecolor = '0.75',
alpha = 0.5, hatch = '/',
zorder = len(histograms_) + 1 )
# a comment on zorder: the MC stack should be always at the very back (z = 1),
# then the MC error (z = len(histograms_) + 1) and finally the data
# (z = len(histograms_) + 2)
rplt.hist( stack, stacked = True, axes = axes, zorder = 1 )
rplt.errorbar( data, xerr = False, emptybins = False, axes = axes,
elinewidth = 2, capsize = 10, capthick = 2,
zorder = len(histograms_) + 2 )
# put legend into the correct order (data is always first!)
handles, labels = axes.get_legend_handles_labels()
data_label_index = labels.index( 'data' )
data_handle = handles[data_label_index]
labels.remove( 'data' )
handles.remove( data_handle )
labels.insert( 0, 'data' )
handles.insert( 0, data_handle )
if mc_error > 0 or ( not mc_error > 0 and show_stat_errors_on_mc ):
p1 = Rectangle( ( 0, 0 ), 1, 1, fc = "0.75", alpha = 0.5, hatch = '/' )
handles.append( p1 )
labels.append( histogram_properties.mc_errors_label )
plt.legend( handles, labels, numpoints = 1, loc = histogram_properties.legend_location,
prop = CMS.legend_properties, ncol = histogram_properties.legend_columns ).set_zorder(102)
set_labels( plt, histogram_properties, show_x_label = not show_ratio )
x_limits = histogram_properties.x_limits
y_limits = histogram_properties.y_limits
if len( x_limits ) == 2:
axes.set_xlim( xmin = x_limits[0], xmax = x_limits[1] )
if len( y_limits ) == 2:
axes.set_ylim( ymin = y_limits[0], ymax = y_limits[1] )
else:
axes.set_ylim( ymin = 0 )
if histogram_properties.set_log_y:
if not len( y_limits ) == 2: # if not user set y-limits, set default
axes.set_ylim( ymin = 1e-1 )
#draw a red vertical line if needed:
if draw_vertical_line != 0:
plt.axvline(x = draw_vertical_line, color = 'red', linewidth = 3)
if show_ratio:
plt.setp( axes.get_xticklabels(), visible = False )
ax1 = plt.subplot( gs[1] )
ax1.minorticks_on()
ax1.grid( True, 'major', linewidth = 1 )
ax1.yaxis.set_major_locator( MultipleLocator( 1.0 ) )
ax1.yaxis.set_minor_locator( MultipleLocator( 0.5 ) )
set_labels( plt, histogram_properties, show_x_label = True, show_title = False )
plt.ylabel( 'data/MC', CMS.y_axis_title )
rplt.errorbar( ratio, xerr = True, emptybins = False, axes = ax1 )
if len( x_limits ) == 2:
ax1.set_xlim( xmin = x_limits[0], xmax = x_limits[1] )
ax1.set_ylim( ymin = 0, ymax = 2 )
if CMS.tight_layout:
plt.tight_layout()
for save in save_as:
plt.savefig( save_folder + histogram_properties.name + '.' + save )
plt.close()
def make_control_region_comparison( control_region_1, control_region_2,
name_region_1, name_region_2,
histogram_properties = Histogram_properties(),
# show_ratio = True,
save_folder = 'plots/',
save_as = ['pdf', 'png'] ):
save_folder = check_save_folder(save_folder)
# make copies in order not to mess with existing histograms
control_region_1 = deepcopy( control_region_1 )
control_region_2 = deepcopy( control_region_2 )
# normalise as we are comparing shapes
control_region_1.Scale( 1 / control_region_1.Integral() )
control_region_2.Scale( 1 / control_region_2.Integral() )
ratio = control_region_1.Clone( 'ratio' )
ratio.Divide( control_region_2 )
ratio.SetMarkerSize( 3 )
control_region_1.fillcolor = 'yellow'
control_region_2.fillcolor = 'red'
control_region_1.fillstyle = 'solid'
control_region_2.fillstyle = 'solid'
control_region_1.legendstyle = 'F'
control_region_2.legendstyle = 'F'
# plot with matplotlib
plt.figure( figsize = CMS.figsize, dpi = CMS.dpi, facecolor = CMS.facecolor )
gs = gridspec.GridSpec( 2, 1, height_ratios = [5, 1] )
ax0 = plt.subplot( gs[0] )
ax0.minorticks_on()
rplt.hist( control_region_1, axes = ax0, alpha = 0.5 )
rplt.hist( control_region_2, axes = ax0, alpha = 0.5 )
set_labels( plt, histogram_properties, show_x_label = False )
handles, labels = ax0.get_legend_handles_labels()
labels.insert( 0, name_region_1 + ' (1)' )
labels.insert( 1, name_region_2 + ' (2)' )
plt.legend( handles, labels, numpoints = 1, loc = histogram_properties.legend_location,
prop = CMS.legend_properties, ncol = histogram_properties.legend_columns ).set_zorder(102)
x_limits = histogram_properties.x_limits
y_limits = histogram_properties.y_limits
if len( x_limits ) == 2:
ax0.set_xlim( xmin = x_limits[0], xmax = x_limits[1] )
if len( y_limits ) == 2:
ax0.set_ylim( ymin = y_limits[0], ymax = y_limits[1] )
plt.setp( ax0.get_xticklabels(), visible = False )
ax1 = plt.subplot( gs[1] )
ax1.minorticks_on()
ax1.grid( True, 'major', linewidth = 1 )
ax1.yaxis.set_major_locator( MultipleLocator( 1.0 ) )
ax1.yaxis.set_minor_locator( MultipleLocator( 0.5 ) )
set_labels( plt, histogram_properties, show_x_label = True, show_title = False )
plt.ylabel( '(1)/(2)', CMS.y_axis_title )
rplt.errorbar( ratio, xerr = True, emptybins = False, axes = ax1 )
if len( x_limits ) == 2:
ax1.set_xlim( xmin = x_limits[0], xmax = x_limits[1] )
ax1.set_ylim( ymin = -0.5, ymax = 4 )
if CMS.tight_layout:
plt.tight_layout()
for save in save_as:
plt.savefig( save_folder + histogram_properties.name + '.' + save )
plt.close()
def make_shape_comparison_plot( shapes = [],
names = [],
colours = [],
histogram_properties = Histogram_properties(),
fill_area = True,
make_ratio = False,
alpha = 0.5,
save_folder = 'plots/',
save_as = ['pdf', 'png'],
normalise_ratio_to_errors = False ):
save_folder = check_save_folder(save_folder)
# make copies in order not to mess with existing histograms
shapes_ = deepcopy(shapes)
# normalise as we are comparing shapes
for shape, colour in zip(shapes_, colours):
integral = shape.Integral()
if integral > 0:
shape.Sumw2()
shape.Scale( 1 / integral )
if fill_area:
shape.fillcolor = colour
shape.fillstyle = 'solid'
shape.legendstyle = 'F'
else:
shape.linecolor = colour
shape.legendstyle = 'F'
shape.linewidth = 5
if not histogram_properties.y_limits:
histogram_properties.y_limits = [0, get_best_max_y(shapes_, False)]
# plot with matplotlib
plt.figure( figsize = CMS.figsize, dpi = CMS.dpi, facecolor = CMS.facecolor )
gs = gridspec.GridSpec( 2, 1, height_ratios = [5, 1] )
axes = None
if make_ratio:
axes = plt.subplot( gs[0] )
else:
axes = plt.axes()
axes.minorticks_on()
for shape in shapes_:
rplt.hist( shape, axes = axes, alpha = alpha )
set_labels( plt, histogram_properties, show_x_label = not make_ratio )
handles, labels = axes.get_legend_handles_labels()
for i,name in enumerate(names):
labels.insert(i, name)
plt.legend( handles, labels, numpoints = 1, loc = histogram_properties.legend_location,
prop = CMS.legend_properties, ncol = histogram_properties.legend_columns ).set_zorder(102)
#add error bars
for shape in shapes_:
rplt.errorbar( shape, axes = axes, alpha = alpha)
adjust_axis_limits(axes, histogram_properties)
if make_ratio:
plt.setp( axes.get_xticklabels(), visible = False )
ratio = shapes_[0].Clone( 'ratio' )
if normalise_ratio_to_errors:
# TODO
# this is a preliminary feature, use with care
            for bin_i in range( 1, shapes_[0].nbins() + 1 ):  # include the last bin
x_i = shapes_[0][bin_i].value
x_i_error = shapes_[0][bin_i].error
y_i = shapes_[1][bin_i].value
y_i_error = shapes_[1][bin_i].error
numerator = x_i - y_i
denominator = pow( pow( x_i_error, 2 ) + pow( y_i_error, 2 ), 0.5 )
if denominator == 0:
ratio.SetBinContent(bin_i, 0.)
ratio.SetBinError(bin_i, 0.)
else:
ratio.SetBinContent(bin_i, numerator/denominator)
ratio.SetBinError(bin_i, denominator)
else:
ratio.Divide( shapes_[1] )
ratio.SetMarkerSize( 3 )
ax1 = plt.subplot( gs[1] )
ax1.minorticks_on()
ax1.grid( True, 'major', linewidth = 1 )
set_labels( plt, histogram_properties, show_x_label = True, show_title = False )
if normalise_ratio_to_errors:
plt.ylabel( r'$\frac{1-2}{\sqrt{(\sigma_1)^2 + (\sigma_2)^2}}$', CMS.y_axis_title )
else:
plt.ylabel( '(1)/(2)', CMS.y_axis_title )
rplt.errorbar( ratio, xerr = True, emptybins = False, axes = ax1 )
if len( histogram_properties.x_limits ) == 2:
ax1.set_xlim( xmin = histogram_properties.x_limits[0],
xmax = histogram_properties.x_limits[1] )
if len( histogram_properties.ratio_y_limits ) == 2:
ax1.set_ylim( ymin = histogram_properties.ratio_y_limits[0],
ymax = histogram_properties.ratio_y_limits[1] )
# dynamic tick placement
ticks = ax1.yaxis.get_ticklocs()
tick_min, tick_max = ticks[0], ticks[-1]
        # keep the ratio panel to a few major ticks
tick_distance = abs(tick_max - tick_min)/4
ax1.yaxis.set_major_locator( MultipleLocator( tick_distance ) )
ax1.yaxis.set_minor_locator( MultipleLocator( tick_distance/2 ) )
if CMS.tight_layout:
plt.tight_layout()
for save in save_as:
plt.savefig( save_folder + histogram_properties.name + '.' + save )
plt.close()
def make_plot( histogram, histogram_label, histogram_properties = Histogram_properties(),
save_folder = 'plots/',
save_as = ['pdf', 'png'],
normalise = False,
draw_errorbar = False,
draw_legend = True
):
save_folder = check_save_folder(save_folder)
histogram.SetTitle( histogram_label )
# histogram.SetMarkerSize(CMS.data_marker_size)
# to be changed
histogram.fillcolor = '0.75'
histogram.fillstyle = 'solid'
if normalise:
histogram.Scale( 1 / histogram.Integral() )
# plot with matplotlib
plt.figure( figsize = CMS.figsize, dpi = CMS.dpi, facecolor = CMS.facecolor )
axes = plt.axes()
if draw_errorbar:
rplt.errorbar( histogram, xerr = False, emptybins = False, axes = axes, elinewidth = 2, capsize = 10, capthick = 2 )
else:
rplt.hist( histogram )
if draw_legend:
plt.legend( numpoints = 1, loc = histogram_properties.legend_location, prop = CMS.legend_properties )
adjust_axis_limits( axes, histogram_properties )
x_limits = histogram_properties.x_limits
y_limits = histogram_properties.y_limits
if len( x_limits ) == 2:
axes.set_xlim( xmin = x_limits[0], xmax = x_limits[1] )
if len( y_limits ) == 2:
axes.set_ylim( ymin = y_limits[0], ymax = y_limits[1] )
if histogram_properties.set_log_y:
axes.set_yscale( 'log', nonposy = "clip" )
if not len( histogram_properties.y_limits ) == 2: # if not user set y-limits, calculate the limits from the tuple values
            # drop empty bins first; deleting from the list while iterating over it skips entries
            value_range = sorted( value for value in histogram.y() if value != 0 )
            axes.set_ylim( ymin = min(value_range)/10, ymax = max(value_range)*10 )
set_labels( plt, histogram_properties )
if CMS.tight_layout:
plt.tight_layout()
for save in save_as:
plt.savefig( save_folder + histogram_properties.name + '.' + save )
plt.close()
def compare_measurements( models = {}, measurements = {},
show_measurement_errors = True,
histogram_properties = Histogram_properties(),
save_folder = 'plots/',
save_as = ['pdf', 'png'] ):
"""
This function takes one or more models and compares it to a set of measurements.
Models and measurements are supplied as dictionaries in the form of {'label': histogram}
    @param models: a dictionary of one or more model inputs, e.g.
    models = {'model_1' : histogram_1, 'model_2' : histogram_2}
    where histogram_1(2) is a ROOT (or rootpy/matplotlib) histogram object.
@param measurements: a dictionary of one or more measurement. Follows the same
prescription as the models parameter.
@param histogram_properties: a Histogram_properties object to describe the look of the histogram
"""
save_folder = check_save_folder(save_folder)
# plot with matplotlib
plt.figure( figsize = CMS.figsize, dpi = CMS.dpi, facecolor = CMS.facecolor )
axes = plt.axes()
# Set default color cycle to rgby
# matplotlib
# plt.rc( 'axes', color_cycle = ['r', 'g', 'b', 'y'] )
# rootpy
colors = ['green', 'red', 'blue', 'magenta']
colorcycler = cycle( colors )
# markers = ['circle', 'triangledown', 'triangleup', 'diamond', 'square', 'star']
markers = [20, 23, 22, 33, 21, 29]
markercycler = cycle( markers )
# matplotlib
# lines = ["-", "--", "-.", ":"]
# rootpy
lines = ["dashed", "solid", "dashdot", "dotted"]
linecycler = cycle( lines )
for label, histogram in models.iteritems():
if not histogram: # skip empty ones
continue
histogram.linewidth = 2
histogram.color = next( colorcycler )
histogram.linestyle = next( linecycler )
        rplt.hist( histogram, axes = axes, label = label )
for label, histogram in measurements.iteritems():
histogram.markersize = 2
histogram.markerstyle = next( markercycler )
histogram.color = next( colorcycler )
rplt.errorbar( histogram, axes = axes, label = label ,
yerr = show_measurement_errors, xerr = False )
set_labels( plt, histogram_properties )
plt.legend( numpoints = 1, loc = histogram_properties.legend_location,
prop = CMS.legend_properties )
adjust_axis_limits( axes, histogram_properties )
x_limits = histogram_properties.x_limits
y_limits = histogram_properties.y_limits
if len( x_limits ) == 2:
axes.set_xlim( xmin = x_limits[0], xmax = x_limits[1] )
if len( y_limits ) == 2:
axes.set_ylim( ymin = y_limits[0], ymax = y_limits[1] )
if histogram_properties.set_log_y:
axes.set_yscale( 'log', nonposy = "clip" )
        if not len( histogram_properties.y_limits ) == 2: # if the user did not set y-limits, calculate them from the bin values
            value_range = sorted( list( histogram.y() ) )
            # drop empty bins; deleting entries while iterating would skip values
            value_range = [value for value in value_range if value != 0]
            axes.set_ylim( ymin = min(value_range)/10, ymax = max(value_range)*10 )
if CMS.tight_layout:
plt.tight_layout()
for save in save_as:
plt.savefig( save_folder + histogram_properties.name + '.' + save )
plt.close()
def set_labels( plt, histogram_properties, show_x_label = True, show_title = True ):
if show_x_label:
plt.xlabel( histogram_properties.x_axis_title, CMS.x_axis_title )
plt.ylabel( histogram_properties.y_axis_title, CMS.y_axis_title )
plt.tick_params( **CMS.axis_label_major )
plt.tick_params( **CMS.axis_label_minor )
if show_title:
plt.title( histogram_properties.title, CMS.title )
def adjust_axis_limits( axes, histogram_properties ):
x_limits = histogram_properties.x_limits
if len( x_limits ) == 2:
axes.set_xlim( xmin = x_limits[0], xmax = x_limits[1] )
y_limits = histogram_properties.y_limits
if len( y_limits ) == 2:
axes.set_ylim( ymin = y_limits[0], ymax = y_limits[1] )
else:
axes.set_ylim( ymin = 0 )
def get_best_max_y(histograms, include_error = True):
return max([histogram.max(include_error = include_error) for histogram in histograms])
def get_best_min_y(histograms, include_error = True):
return min([histogram.min(include_error = include_error) for histogram in histograms])
def check_save_folder(save_folder):
'''
Checks and fixes (if necessary) the save folder
'''
# save_folder should end with an '/'
if not save_folder.endswith('/'):
save_folder += '/'
# save_folder should exist
make_folder_if_not_exists(save_folder)
return save_folder
```
#### File: DailyPythonScripts/tools/QCD_rate_estimation.py
```python
from __future__ import division
from math import sqrt
DEBUG = False
relative_isolation_bias = 0.0
rebin = 10
bin_width = 0.01
def estimate_with_fit_to_relative_isolation(input_histogram, function='expo',
fit_range=(0.3, 1.6), fit_ranges_for_systematics=[(0.2, 1.6), (0.4, 1.6)]):
global rebin
if DEBUG:
print '*' * 120
print "Estimating QCD using a fit to relative isolation"
print 'Histogram = ', input_histogram
print 'Fit function = ', function
print 'Fit range = ', fit_range
print 'Fit ranges for systematics = ', fit_ranges_for_systematics
print '*' * 120
input_histogram.Rebin(rebin)
result = fit_to_relative_isolation_with_systematics(input_histogram, function, fit_range=fit_range, fit_ranges_for_systematics=fit_ranges_for_systematics)
return result
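# Illustrative call (a sketch; 'rel_iso_histogram' is an assumed ROOT/rootpy TH1 of relative isolation):
# result = estimate_with_fit_to_relative_isolation(rel_iso_histogram, function='expo', fit_range=(0.3, 1.6))
# qcd_estimate, uncertainty = result['value'], result['error']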
def fit_to_relative_isolation_with_systematics(input_histogram, function, fit_range=(0.3, 1.6), fit_ranges_for_systematics=[(0.2, 1.6), (0.4, 1.6)],
apply_bias_correction=True):
central_result = fit_to_relative_isolation(input_histogram, function, fit_range=fit_range)
central_value, central_error = central_result['value'], central_result['error']
# systematic errors
systematic_relative_error_squared = 0
for current_range in fit_ranges_for_systematics:
result = fit_to_relative_isolation(input_histogram, function, fit_range=current_range)
value = result['value']
deviation = value - central_value
if not central_value == 0:
systematic_relative_error_squared += (deviation / central_value) ** 2
relative_error_from_bias_correction = 0
if apply_bias_correction:
reduction_from_bias = 1 - relative_isolation_bias
central_value = central_value * reduction_from_bias
relative_error_from_bias_correction = relative_isolation_bias
error_squared = central_error ** 2 + (systematic_relative_error_squared + relative_error_from_bias_correction) * (central_value ** 2)
central_error = sqrt(error_squared)
result = {
'value':central_value,
'error': central_error,
'fit':central_result['fit']
}
return result
def fit_to_relative_isolation(input_histogram, function, fit_range, signal_region=(0., 0.1)):
global rebin, bin_width
value, error = 0,0
relative_error_squared = 0
histogram = input_histogram.Clone('tmp')
fit = perform_fit(histogram, function, fit_range)
if fit:
value = fit.Integral(signal_region[0], signal_region[1])/(bin_width * rebin)
for n in range(0, fit.GetNumberFreeParameters()):
parameter = fit.GetParameter(n)
error = fit.GetParError(n)
if not parameter == 0:
relative_error_squared += (error / parameter) ** 2
error = sqrt(relative_error_squared)*value
result = {'value': value,
'error':error,
'fit':fit}
return result
def perform_fit(histogram, function, fit_range):
histogram.Fit(function, "Q0", "ah", fit_range[0], fit_range[1])
fit = histogram.GetFunction(function)
if fit:
return fit.Clone()
else:
return None
```
#### File: DailyPythonScripts/tools/Timer.py
```python
from time import time
class Timer():
def __init__(self):
self.start_time = time()
def elapsed_time(self):
return time() - self.start_time
def restart(self):
self.start_time = time()
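# Example usage (illustrative):
#   timer = Timer()
#   ...                           # do some work
#   print timer.elapsed_time()    # seconds since construction or the last restart()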
``` |
{
"source": "jjacobi123123/airflow-training-skeleton",
"score": 3
} |
#### File: airflow-training-skeleton/dags/execute_wait_dag.py
```python
from datetime import timedelta
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils import timezone
import datetime
import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
args = {
'owner': 'Airflow',
'start_date': timezone.datetime(2019, 12, 7),
}
def print_execution_date(**context):
print(context['execution_date'])
with DAG(
dag_id='execute_wait_dag',
default_args=args,
schedule_interval=None,
dagrun_timeout=timedelta(minutes=60),
) as dag:
print_execution_date_operator = PythonOperator(task_id='print_execution_date',
python_callable=print_execution_date,
provide_context=True)
wait_5 = BashOperator(task_id='wait_5', bash_command="sleep 5")
wait_1 = BashOperator(task_id='wait_1', bash_command="sleep 1")
wait_10 = BashOperator(task_id='wait_10', bash_command="sleep 10")
the_end = DummyOperator(task_id='the_end')
print_execution_date_operator >> wait_1 >> the_end
print_execution_date_operator >> wait_5 >> the_end
print_execution_date_operator >> wait_10 >> the_end
``` |
{
"source": "jjacob/NTupleProduction",
"score": 2
} |
#### File: NTupleProduction/python/EventFilters_cff.py
```python
def setup_eventfilters(process, cms, options, useTrackingFailureFilter=False):
print '=' * 60
print "Setting up Event Filters"
print '=' * 60
process.scrapingVeto = setup_scrapingveto(process, cms)
process.HBHENoiseFilter = setup_HBHENoiseFilter(process, cms)
process.HBHENoiseFilterResultProducer = setup_HBHENoiseFilterResultProducer(process, cms)
process.HcalLaserEventFilter = setup_HcalLaserFilter(process, cms)
process.EcalDeadCellBoundaryEnergyFilter = setup_ECALDeadCellFilter(process, cms)
process.EcalDeadCellTriggerPrimitiveFilter = setup_ECALDeadCellTriggerPrimitiveFilter(process, cms)
process.trackingFailureFilter = setup_trackingFailureFilter(process, cms)
process.eeBadScFilter = setup_eeBadScFilter(process, cms)
process.ecalLaserCorrFilter = setup_ecalLaserCorrFilter(process, cms)
#setting up tracking POG filters
setup_trackingPOGfilters(process, cms)
process.EventFilter = setup_skim(process, cms, options)
process.EventFilter.HBHENoiseFilterInput = cms.InputTag('HBHENoiseFilterResultProducer', 'HBHENoiseFilterResult')
process.EventFilter.HCALLaserFilterInput = cms.InputTag('HcalLaserEventFilter')
process.EventFilter.ECALDeadCellFilterInput = cms.InputTag('EcalDeadCellBoundaryEnergyFilter')
process.EventFilter.ECALDeadCellTriggerPrimitiveFilterInput = cms.InputTag('EcalDeadCellTriggerPrimitiveFilter')
process.EventFilter.TrackingFailureFilterInput = cms.InputTag('trackingFailureFilter')
process.EventFilter.EEBadSCFilterInput = cms.InputTag('eeBadScFilter')
process.EventFilter.ECALLaserCorrFilterInput = cms.InputTag('ecalLaserCorrFilter')
#tracking POG filters
process.EventFilter.manystripclus53XInput = cms.InputTag('manystripclus53X')
process.EventFilter.toomanystripclus53XInput = cms.InputTag('toomanystripclus53X')
process.EventFilter.logErrorTooManyClustersInput = cms.InputTag('logErrorTooManyClusters')
process.EventFilter.useTrackingPOGFilters = cms.bool(True)
process.EventFilter.useTrackingFailureFilter = cms.bool(True)
#disable optional MET filters for now
process.EventFilter.useOptionalMETFilters = cms.bool(False)
print "Creating event filter sequence (merging all previous)."
EventFilters = cms.Sequence(
process.HBHENoiseFilterResultProducer *
process.trackingFailureFilter *
process.scrapingVeto *
process.HcalLaserEventFilter *
process.EcalDeadCellBoundaryEnergyFilter *
process.EcalDeadCellTriggerPrimitiveFilter *
process.eeBadScFilter *
process.ecalLaserCorrFilter *
~process.manystripclus53X *
~process.toomanystripclus53X *
#~process.logErrorTooManyClusters *
process.EventFilter
)
return EventFilters
def setup_HBHENoiseFilter(process, cms):
print '=' * 60
print "Setting up HBHE Noise Filter"
print '=' * 60
# HB + HE noise filtering
#following https://twiki.cern.ch/twiki/bin/viewauth/CMS/MissingETOptionalFilters prescription
#this filter is applied before the PAT sequence
from CommonTools.RecoAlgos.HBHENoiseFilter_cfi import HBHENoiseFilter
return HBHENoiseFilter
def setup_HBHENoiseFilterResultProducer(process, cms):
#EDProducer to keep track of in the AnalysisTools
#values kept identical to the ones from HBHENoiseFilter
HBHENoiseFilterResultProducer = cms.EDProducer(
'HBHENoiseFilterResultProducer',
noiselabel=cms.InputTag('hcalnoise'),
minRatio=cms.double(-999),
maxRatio=cms.double(999),
minHPDHits=cms.int32(17),
minRBXHits=cms.int32(999),
minHPDNoOtherHits=cms.int32(10),
minZeros=cms.int32(10),
minHighEHitTime=cms.double(-9999.0),
maxHighEHitTime=cms.double(9999.0),
maxRBXEMF=cms.double(-999.0),
minNumIsolatedNoiseChannels = cms.int32(10),
minIsolatedNoiseSumE = cms.double(50.0),
minIsolatedNoiseSumEt = cms.double(25.0),
useTS4TS5=cms.bool(True),
IgnoreTS4TS5ifJetInLowBVRegion=cms.bool(False),
jetlabel = cms.InputTag('ak5PFJets'),
maxjetindex = cms.int32(0), # maximum jet index that will be checked for 'IgnoreTS4TS5ifJetInLowBVRegion'
maxNHF = cms.double(0.9) # maximum allowed jet->neutralHadronEnergyFraction()
)
return HBHENoiseFilterResultProducer
def setup_scrapingveto(process, cms):
print '=' * 60
print "Setting up scraping Filter"
print '=' * 60
scrapingVeto = cms.EDFilter("FilterOutScraping",
applyfilter=cms.untracked.bool(False),
debugOn=cms.untracked.bool(False),
numtrack=cms.untracked.uint32(10),
thresh=cms.untracked.double(0.25)
)
return scrapingVeto
def setup_HcalLaserFilter(process, cms):
print '=' * 60
print "Setting up HcalLaser Filter"
print '=' * 60
from RecoMET.METFilters.hcalLaserEventFilter_cfi import hcalLaserEventFilter
hcalLaserEventFilter.taggingMode = cms.bool(True)
return hcalLaserEventFilter
def setup_ECALDeadCellFilter(process, cms):
print '=' * 60
print "Setting up ECALDeadCell Filter"
print '=' * 60
#https://twiki.cern.ch/twiki/bin/viewauth/CMS/MissingETOptionalFilters#ECAL_dead_cell_filter
from RecoMET.METFilters.EcalDeadCellBoundaryEnergyFilter_cfi import EcalDeadCellBoundaryEnergyFilter
EcalDeadCellBoundaryEnergyFilter.taggingMode = cms.bool(True)
EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyDeadCellsEB = cms.untracked.double(10)
EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyDeadCellsEE = cms.untracked.double(10)
EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyGapEB = cms.untracked.double(100)
EcalDeadCellBoundaryEnergyFilter.cutBoundEnergyGapEE = cms.untracked.double(100)
EcalDeadCellBoundaryEnergyFilter.enableGap = cms.untracked.bool(False)
EcalDeadCellBoundaryEnergyFilter.limitDeadCellToChannelStatusEB = cms.vint32(12, 14)
EcalDeadCellBoundaryEnergyFilter.limitDeadCellToChannelStatusEE = cms.vint32(12, 14)
return EcalDeadCellBoundaryEnergyFilter
def setup_ECALDeadCellTriggerPrimitiveFilter(process, cms):
print '=' * 60
print "Setting up ECALDeadCell TriggerPrimitive Filter"
print '=' * 60
#https://twiki.cern.ch/twiki/bin/viewauth/CMS/MissingETOptionalFilters#ECAL_dead_cell_filter
from RecoMET.METFilters.EcalDeadCellTriggerPrimitiveFilter_cfi import EcalDeadCellTriggerPrimitiveFilter
EcalDeadCellTriggerPrimitiveFilter.taggingMode = cms.bool(True)
EcalDeadCellTriggerPrimitiveFilter.tpDigiCollection = cms.InputTag("ecalTPSkimNA")
return EcalDeadCellTriggerPrimitiveFilter
def setup_trackingFailureFilter(process, cms):
from RecoMET.METFilters.trackingFailureFilter_cfi import trackingFailureFilter
trackingFailureFilter.JetSource = cms.InputTag('ak5PFJets')
trackingFailureFilter.TrackSource = cms.InputTag('generalTracks')
trackingFailureFilter.VertexSource = cms.InputTag('goodOfflinePrimaryVertices')
trackingFailureFilter.taggingMode = cms.bool(True)
return trackingFailureFilter
def setup_eeBadScFilter(process, cms):
from RecoMET.METFilters.eeBadScFilter_cfi import eeBadScFilter
eeBadScFilter.taggingMode = cms.bool (True)
return eeBadScFilter
def setup_ecalLaserCorrFilter(process, cms):
from RecoMET.METFilters.ecalLaserCorrFilter_cfi import ecalLaserCorrFilter
ecalLaserCorrFilter.taggingMode = cms.bool (True)
ecalLaserCorrFilter.Debug = cms.bool (False)
return ecalLaserCorrFilter
def setup_trackingPOGfilters(process, cms):
from RecoMET.METFilters.trackingPOGFilters_cfi import manystripclus53X
from RecoMET.METFilters.trackingPOGFilters_cfi import toomanystripclus53X
from RecoMET.METFilters.trackingPOGFilters_cfi import logErrorTooManyClusters
manystripclus53X.taggedMode = cms.untracked.bool(True)
manystripclus53X.forcedValue = cms.untracked.bool(False)
toomanystripclus53X.taggedMode = cms.untracked.bool(True)
toomanystripclus53X.forcedValue = cms.untracked.bool(False)
logErrorTooManyClusters.taggedMode = cms.untracked.bool(True)
logErrorTooManyClusters.forcedValue = cms.untracked.bool(False)
process.manystripclus53X = manystripclus53X
process.toomanystripclus53X = toomanystripclus53X
process.logErrorTooManyClusters = logErrorTooManyClusters
def setup_skim(process, cms, options):
print '=' * 60
print "Setting up skim"
print '=' * 60
skim = options.skim
process.load("BristolAnalysis.NTupleTools.EventFilter_cfi")
from BristolAnalysis.NTupleTools.EventFilter_cfi import EventFilter
#at least one good primary vertex
EventFilter.VertexInput = cms.InputTag('goodOfflinePrimaryVertices')
#reset to 0 skim
EventFilter.minNElectrons = cms.int32(-1)
EventFilter.minNMuons = cms.int32(-1)
EventFilter.minNJets = cms.int32(-1)
EventFilter.counteitherleptontype = cms.bool(False)
skim = skim.lower()
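    # Example skim strings this parsing accepts (illustrative, derived from the checks below):
    # 'electron', 'dielectron', 'loosemuon', 'lepton', 'muon2jet' - the digit directly
    # before 'jet' sets the required jet multiplicity.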
if 'electron' in skim or 'lepton' in skim:
EventFilter.maxAbsElectronEta = cms.double(2.5)#within tracker volume
#electron multiplicity
if 'di' in skim:
EventFilter.minNElectrons = cms.int32(2)
else:
EventFilter.minNElectrons = cms.int32(1)
if 'loose' in skim:#loose Pt cut
EventFilter.minElectronPt = cms.double(20.)
EventFilter.electronInput = cms.InputTag("selectedPatElectrons")#GSF electrons
else:
EventFilter.minElectronPt = cms.double(30.)
EventFilter.electronInput = cms.InputTag("selectedPatElectronsLoosePFlow")
if 'muon' in skim or 'lepton' in skim:
#muon multiplicity
if 'di' in skim:
EventFilter.minNMuons = cms.int32(2)
else:
EventFilter.minNMuons = cms.int32(1)
if 'loose' in skim:#loose Pt cut and eta cut
EventFilter.maxAbsMuonEta = cms.double(2.5)#within tracker volume
EventFilter.minMuonPt = cms.double(10.)
EventFilter.muonInput = cms.InputTag("selectedPatMuons")
else:
EventFilter.minMuonPt = cms.double(20.)#triggers are 17GeV
EventFilter.maxAbsMuonEta = cms.double(2.1)#new triggers have this restriction anyway
EventFilter.muonInput = cms.InputTag("selectedPatMuonsLoosePFlow")
if 'lepton' in skim:
EventFilter.counteitherleptontype = cms.bool(True)
#jet skim
#unprescaled triggers are >=3/>=2 jets for electron/muon triggers
if 'jet' in skim:
find = skim.find('jet')
nJets = int(skim[find - 1])
EventFilter.jetInput = cms.InputTag("selectedPatJetsPFlow")
EventFilter.minNJets = cms.int32(nJets)
EventFilter.minJetPt = cms.double(30.)# identical (within JEC) to trigger
EventFilter.maxAbsJetEta = cms.double(2.6)# identical to trigger
if not (skim == '' or skim == 'noskim'):
print '=' * 10, 'Skim definition', '=' * 10
print 'Electron skim:'
        print '\t >=', str(EventFilter.minNElectrons), ' electron with ',
print 'p_T > ', str(EventFilter.minElectronPt),
print '|eta| < ' , str(EventFilter.maxAbsElectronEta)
print '\t input collection:', str(EventFilter.electronInput)
print
print 'Muon skim:'
        print '\t >=', str(EventFilter.minNMuons), ' muon with ',
print 'p_T > ', str(EventFilter.minMuonPt),
print '|eta| < ' , str(EventFilter.maxAbsMuonEta)
print '\t input collection:', str(EventFilter.muonInput)
print
print 'Use either lepton type:', str(EventFilter.counteitherleptontype)
print
print 'Jet skim:'
print '\t >=', str(EventFilter.minNJets), ' jet with ',
print 'p_T > ', str(EventFilter.minJetPt),
print '|eta| < ' , str(EventFilter.maxAbsJetEta)
print '\t input collection:', str(EventFilter.jetInput)
else:
print 'No skim used.'
return EventFilter
```
#### File: NTupleProduction/python/MET_Setup_cff.py
```python
def setup_MET(process, cms, options, postfix="PFlow"):
print '=' * 60
print "Setting up PFMET from PAT"
print '=' * 60
getattr(process,'patPFMet'+postfix).addGenMET = cms.bool(not options.useData)
process.patPFMet.addGenMET = cms.bool(not options.useData)
process.load("JetMETCorrections.Type1MET.pfMETCorrections_cff")
process.load("JetMETCorrections.Type1MET.pfMETsysShiftCorrections_cfi")
setup_MET_uncertainties(process, cms, options, postfix)
if options.applyType0METcorrection:
getattr(process,'patType1CorrectedPFMet'+postfix).srcType1Corrections = cms.VInputTag(
cms.InputTag("patPFJetMETtype1p2Corr"+postfix,"type1"),
cms.InputTag("patPFMETtype0Corr"+postfix)
)
getattr(process,'patType1p2CorrectedPFMet'+postfix).srcType1Corrections = cms.VInputTag(
cms.InputTag("patPFJetMETtype1p2Corr"+postfix,"type1"),
cms.InputTag("patPFMETtype0Corr"+postfix)
)
#these flags are false for '+postfix' mets by default, but true for non-postfix ones!
getattr(process,'patPFJetMETtype1p2Corr'+postfix).skipEM = cms.bool(False)
getattr(process,'patPFJetMETtype1p2Corr'+postfix).skipMuons = cms.bool(False)
if options.applySysShiftCorrection:
getattr(process,'patType1CorrectedPFMet'+postfix).srcType1Corrections.append(cms.InputTag('pfMEtSysShiftCorr'))
getattr(process,'patType1p2CorrectedPFMet'+postfix).srcType1Corrections.append(cms.InputTag('pfMEtSysShiftCorr'))
def setup_MET_manually(process, cms, options, postfix="PFlow"):
print '=' * 60
print "Setting up PFMET from PAT manually"
print '=' * 60
#PFMET setup: following by-hand recipe from https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookMetAnalysis#Type_I_II_0_with_PF2PAT
process.load("PhysicsTools.PatUtils.patPFMETCorrections_cff")
getattr(process,'patPF2PATSequence'+postfix).remove(getattr(process,'patMETs'+postfix))
from PhysicsTools.PatAlgos.tools.helpers import cloneProcessingSnippet
cloneProcessingSnippet(process, process.producePatPFMETCorrections, postfix)
getattr(process,'selectedPatJetsForMETtype1p2Corr'+postfix).src = cms.InputTag('selectedPatJets'+postfix)
getattr(process,'selectedPatJetsForMETtype2Corr'+postfix).src = cms.InputTag('selectedPatJets'+postfix)
getattr(process,'pfCandMETcorr'+postfix).src = cms.InputTag('pfNoJet'+postfix)
getattr(process,'patPFJetMETtype1p2Corr'+postfix).type1JetPtThreshold = cms.double(10.0)
if options.applyType0METcorrection:
getattr(process,'patType1CorrectedPFMet'+postfix).srcType1Corrections = cms.VInputTag(
cms.InputTag("patPFJetMETtype1p2Corr"+postfix,"type1"),
cms.InputTag("patPFMETtype0Corr"+postfix)
)
getattr(process,'patType1p2CorrectedPFMet'+postfix).srcType1Corrections = cms.VInputTag(
cms.InputTag("patPFJetMETtype1p2Corr"+postfix,"type1"),
cms.InputTag("patPFMETtype0Corr"+postfix)
)
else:
getattr(process,'patType1CorrectedPFMet'+postfix).srcType1Corrections = cms.VInputTag(
cms.InputTag("patPFJetMETtype1p2Corr"+postfix,"type1"),
)
getattr(process,'patType1p2CorrectedPFMet'+postfix).srcType1Corrections = cms.VInputTag(
cms.InputTag("patPFJetMETtype1p2Corr"+postfix,"type1"),
)
getattr(process,'patPFJetMETtype1p2Corr'+postfix).skipEM = cms.bool(False)
getattr(process,'patPFJetMETtype1p2Corr'+postfix).skipMuons = cms.bool(False)
getattr(process,'patPFJetMETtype2Corr'+postfix).skipEM = cms.bool(False)
getattr(process,'patPFJetMETtype2Corr'+postfix).skipMuons = cms.bool(False)
##for type I+II corrections, switch this to patType1p2CorrectedPFMet
getattr(process,'patMETs'+postfix).metSource = cms.InputTag('patType1CorrectedPFMet'+postfix)
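    # e.g. (illustrative alternative for type I+II corrected MET):
    # getattr(process,'patMETs'+postfix).metSource = cms.InputTag('patType1p2CorrectedPFMet'+postfix)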
getattr(process,'patDefaultSequence'+postfix).remove(getattr(process,'patMETs'+postfix))
if options.useData:
getattr(process,'patPFJetMETtype1p2Corr'+postfix).jetCorrLabel = 'L2L3Residual'
getattr(process,'patPFJetMETtype2Corr'+postfix).jetCorrLabel = 'L2L3Residual'
getattr(process,'patPFMet'+postfix).addGenMET = cms.bool(not options.useData)
process.patPFMet.addGenMET = cms.bool(not options.useData)
process.load("JetMETCorrections.Type1MET.pfMETCorrections_cff")
process.load("JetMETCorrections.Type1MET.pfMETsysShiftCorrections_cfi")
#these flags are false for '+postfix' mets by default, but true for non-postfix ones!
getattr(process,'patPFJetMETtype1p2Corr'+postfix).skipEM = cms.bool(False)
getattr(process,'patPFJetMETtype1p2Corr'+postfix).skipMuons = cms.bool(False)
setup_MET_uncertainties(process, cms, options, postfix)
def setup_MET_uncertainties(process, cms, options, postfix="PFlow"):
from PhysicsTools.PatUtils.tools.metUncertaintyTools import runMEtUncertainties
#runMEtUncertainties(process, doSmearJets=not options.useData, jetCollection='goodPatJetsPFlow', addToPatDefaultSequence=False)
if options.useData:
inputJetCorrLabelForMETuncertainties = 'L2L3Residual'
if options.centreOfMassEnergy == 8:
metSysShiftCorrParameter = process.pfMEtSysShiftCorrParameters_2012runABCDvsNvtx_data
print "using pfMEtSysShiftCorrParameters_2012runABCDvsNvtx_data"
elif options.centreOfMassEnergy == 7:
metSysShiftCorrParameter = process.pfMEtSysShiftCorrParameters_2011runAplusBvsNvtx_data
print "using pfMEtSysShiftCorrParameters_2011runAplusBvsNvtx_data"
else:
inputJetCorrLabelForMETuncertainties = 'L3Absolute'
if options.centreOfMassEnergy == 8:
metSysShiftCorrParameter = process.pfMEtSysShiftCorrParameters_2012runABCDvsNvtx_mc
print "using pfMEtSysShiftCorrParameters_2012runABCDvsNvtx_mc"
elif options.centreOfMassEnergy == 7:
metSysShiftCorrParameter = process.pfMEtSysShiftCorrParameters_2011runAplusBvsNvtx_mc
print "using pfMEtSysShiftCorrParameters_2011runAplusBvsNvtx_mc"
process.pfMEtSysShiftCorr.parameter = metSysShiftCorrParameter
runMEtUncertainties(process,
electronCollection = cms.InputTag('patElectronsPFlow'),
muonCollection = 'patMuonsPFlow',
tauCollection = 'patTausPFlow',
jetCollection = cms.InputTag('goodPatJetsPFlow'),
jetCorrLabel = inputJetCorrLabelForMETuncertainties,
doSmearJets = not options.useData,
makeType1corrPFMEt = True,
makeType1p2corrPFMEt = True,
makePFMEtByMVA = False,
makeNoPileUpPFMEt = False,
doApplyType0corr = options.applyType0METcorrection,
sysShiftCorrParameter = metSysShiftCorrParameter,
doApplySysShiftCorr = options.applySysShiftCorrection,
addToPatDefaultSequence=False
)
``` |
{
"source": "jjacobson93/aiorpc",
"score": 2
} |
#### File: aiorpc/aiorpc/server.py
```python
import asyncio
import aioamqp
import umsgpack as msgpack
import inspect
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
class Response(object):
def __init__(self, channel, envelope, properties):
self.channel = channel
self.envelope = envelope
self.properties = properties
async def send(self, exception, result):
routing_key = self.properties.reply_to
correlation_id = self.properties.correlation_id
delivery_tag = self.envelope.delivery_tag
payload = msgpack.packb((str(exception) if exception is not None else None, result))
logger.info(f'Sending response to queue {routing_key} ({correlation_id})')
await self.channel.basic_publish(
payload=payload,
exchange_name='',
routing_key=routing_key,
properties={
'correlation_id': correlation_id
}
)
await self.channel.basic_client_ack(delivery_tag=delivery_tag)
class Server(object):
def __init__(self, queue='', prefetch_count=1, prefetch_size=0, connection_global=False):
self.queue = queue
self.prefetch_count = prefetch_count
self.prefetch_size = prefetch_size
self.connection_global = connection_global
self.functions = {}
def __call__(self, func):
if not callable(func):
def decorator(f):
self.functions[func] = f
return f
return decorator
else:
self.functions[func.__name__] = func
return func
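    # Both decorator forms are supported; an illustrative sketch:
    #   @server                      # registered under the function's own name
    #   async def ping(): return 'pong'
    #   @server('echo')              # registered under an explicit name
    #   async def repeat(msg): return msg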
async def on_request(self, channel, body, envelope, properties):
correlation_id = properties.correlation_id
response = Response(channel, envelope, properties)
try:
func_name, args, kwargs = msgpack.unpackb(body)
logger.info(f'Received request for {func_name} ({correlation_id})')
except Exception as err:
logger.error(f'Could not unpack message: {err} ({correlation_id})')
await response.send(err, None)
return
func = self.functions.get(func_name)
if func is None:
logger.error(f'Function {func_name} does not exist ({correlation_id})')
await response.send(f'Unknown function {func_name}', None)
return
try:
if inspect.iscoroutinefunction(func):
result = await func(*args, **kwargs)
else:
result = func(*args, **kwargs)
except Exception as err:
logger.error(f'Exception while executing {func_name}: {err} ({correlation_id})')
await response.send(err, None)
return
await response.send(None, result)
async def connect(self, *args, **kwargs):
retry = kwargs.get('retry', 5) # retry every X second(s)
if 'retry' in kwargs:
del kwargs['retry']
host = kwargs.get('host', 'localhost')
port = kwargs.get('port')
ssl = kwargs.get('ssl', False)
if port is None:
port = 5671 if ssl else 5672
protocol = None
if retry is not False:
while protocol is None:
try:
transport, protocol = await aioamqp.connect(*args, **kwargs)
except:
logger.warn(f'Could not connect to amqp://{host}:{port}/. Trying again in {retry} second(s).')
await asyncio.sleep(retry)
else:
transport, protocol = await aioamqp.connect(*args, **kwargs)
logger.info(f'Connected to amqp://{host}:{port}/.')
channel = await protocol.channel()
await channel.queue_declare(queue_name=self.queue)
await channel.basic_qos(
prefetch_count=self.prefetch_count,
prefetch_size=self.prefetch_size,
connection_global=self.connection_global
)
await channel.basic_consume(self.on_request, queue_name=self.queue)
logger.info(f'Consuming on queue {self.queue}.')
def start(self, *args, **kwargs):
loop = asyncio.get_event_loop()
loop.run_until_complete(self.connect(*args, **kwargs))
try:
loop.run_forever()
finally:
loop.close()
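# Minimal end-to-end usage (illustrative; connection parameters are assumptions):
# server = Server(queue='rpc_queue')
# @server
# async def add(a, b):
#     return a + b
# server.start(host='localhost', retry=5)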
``` |
{
"source": "jjacobson93/javelin-web2py",
"score": 2
} |
#### File: javelin/controllers/jadmin.py
```python
__author__ = "<NAME>"
__copyright__ = "(c) 2013, Jacobson and Varni, LLC"
__date__ = "7/12/2013"
__email__ = "<EMAIL>"
__data__ = {'name' : 'jadmin', 'label' : 'Admin', 'description' : 'Only accessible to admins',
'icon' : 'briefcase', 'u-icon' : u'\uf0b1', 'color':'orange', 'required' : True}
import time
from datetime import datetime
from applications.javelin.ctr_data import ctr_enabled, get_ctr_data
from gluon.contrib import simplejson as json
from gluon.tools import Service
from gluon.storage import Storage
service = Service(globals())
DOC_TYPES = Storage(
CALLSLIP=Storage(value=0, label="Call Slips"),
ATTSHEETS=Storage(value=1, label="Attendance Sheets"),
NAMETAGS=Storage(value=2, label="Nametags")
)
@auth.requires_login()
@auth.requires_membership('admin')
def index():
"""Loads the index page for the 'Admin' controller
    :returns: a dictionary to pass to the view with the list of enabled controllers (ctr_enabled) and the active module ('jadmin')
"""
ctr_data = get_ctr_data()
users = db().select(db.auth_user.ALL)
approvals = db(db.auth_user.registration_key=='pending').select(db.auth_user.ALL)
return dict(ctr_enabled=ctr_enabled, ctr_data=ctr_data, active_module='jadmin', users=users, approvals=approvals, doctypes=DOC_TYPES)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def create_doc(doctype, data):
logger.debug("CREATE DOC CALLED")
import StringIO
from reportlab.platypus import SimpleDocTemplate, Paragraph, Table, TableStyle, Image, Spacer
from reportlab.platypus.flowables import PageBreak
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.enums import TA_CENTER, TA_LEFT
from reportlab.lib.pagesizes import letter, inch
from reportlab.lib import colors
io = StringIO.StringIO()
doc = SimpleDocTemplate(io, pagesize=letter,
rightMargin=0.18*inch, leftMargin=0.18*inch, topMargin=0.18*inch, bottomMargin=0)
elements = list()
doctype = int(doctype)
if data: data = json.loads(data)
if doctype == DOC_TYPES.CALLSLIP.value:
doc_title = "Call_Slips"
people = data['people']
message = data['message']
persons = list()
for p in people:
if p.startswith('group_'):
group = db(db.group_rec.group_id==p.replace('group_', '')).select(db.person.id,
join=db.group_rec.on(db.person.id==db.group_rec.person_id))
for g in group:
if g.id not in persons:
persons.append(g.id)
elif p.startswith('grade_'):
grade = db(db.person.grade==p.replace('grade_', '')).select(db.person.id)
for g in grade:
if g.id not in persons:
persons.append(g.id)
elif p == 'all_leaders':
leaders = db(db.person.leader==True).select(db.person.id)
for l in leaders:
if l.id not in persons:
persons.append(l.id)
elif p == 'all_people':
allpeople = db().select(db.person.id)
for a in allpeople:
if a.id not in persons:
persons.append(a.id)
else:
if p not in persons:
persons.append(p)
people = [Storage(id=pid, last_name=db(db.person.id==pid).select(db.person.last_name).first().last_name,
first_name=db(db.person.id==pid).select(db.person.first_name).first().first_name,
courses=['{}: {}'.format(c.period, c.room) for c in db().select(db.course.period, db.course.room,
join=db.course_rec.on((db.course.id==db.course_rec.course_id) & (db.course_rec.student_id==pid)),
orderby=db.course.period)]
) for pid in persons]
i = 0
centerStyle = ParagraphStyle(name='Center', alignment=TA_CENTER)
leftStyle = ParagraphStyle(name='Left', alignment=TA_LEFT)
tableStyle = TableStyle([('VALIGN',(0,0),(-1,-1),'TOP'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black)])
page = list()
for person in people:
page.append([Paragraph("<para alignment='left'><br></para>" +\
"<para alignment='center'><font face='Times-Bold' size=16>Vintage Crusher Crew</font><br><br><br></para>" +\
"<para alignment='left'><font face='Times' size=14><b>Name:</b> {} {}</font><br><br></para>".format(person.first_name, person.last_name) +\
"<para alignment='left'><font face='Times' size=12><b>Rooms:</b> {}</font><br><br></para>".format(', '.join(person.courses)) +\
"<para alignment='left'><font face='Times' size=12><b>Message:</b></font><br></para>" +\
"<para alignment='left'><font face='Times' size=12>{}</font></para>".format(message), leftStyle)])
i = (i+1)%4
if i == 0:
table = Table(page, colWidths=[8*inch], rowHeights=[2.5*inch]*len(page))
table.setStyle(tableStyle)
elements.append(table)
elements.append(PageBreak())
                page = list()
        # flush the final page if it contains fewer than four call slips
        if page:
            table = Table(page, colWidths=[8*inch], rowHeights=[2.5*inch]*len(page))
            table.setStyle(tableStyle)
            elements.append(table)
elif doctype == DOC_TYPES.ATTSHEETS.value:
pass
elif doctype == DOC_TYPES.NAMETAGS.value:
people = data['people']
event_name = data['event_name']
events = data['events']
present = data['present']
persons = list()
for p in people:
if p.startswith('group_'):
group = db(db.group_rec.group_id==p.replace('group_', '')).select(db.person.id,
join=db.group_rec.on(db.person.id==db.group_rec.person_id))
for g in group:
if g.id not in persons:
persons.append(g.id)
elif p.startswith('grade_'):
grade = db(db.person.grade==p.replace('grade_', '')).select(db.person.id)
for g in grade:
if g.id not in persons:
persons.append(g.id)
elif p == 'all_leaders':
leaders = db(db.person.leader==True).select(db.person.id)
for l in leaders:
if l.id not in persons:
persons.append(l.id)
elif p == 'all_people':
allpeople = db().select(db.person.id)
for a in allpeople:
if a.id not in persons:
persons.append(a.id)
else:
if p not in persons:
persons.append(p)
centerStyle = ParagraphStyle(name='Center', alignment=TA_CENTER)
leftStyle = ParagraphStyle(name='Left', alignment=TA_LEFT)
tableStyle = TableStyle([('VALIGN',(0,-1),(-1,-1),'TOP')])
label_num = 0
row_num = 0
labels = list()
for pid in persons:
row = db(db.person.id==pid).select(db.person.ALL).first()
label = list()
if label_num == 2:
table = Table([labels], colWidths=[4*inch,0.14*inch,4*inch], rowHeights=[2*inch]*(len(labels)/2))
table.setStyle(tableStyle)
elements.append(table)
label_num = 0
labels = list()
row_num += 1
if row_num == 5:
row_num = 0
elements.append(PageBreak())
header = Paragraph("<font face='Times-Bold' size=11>{} {}</font>".format(year, event_name), centerStyle)
label.append(header)
label.append(Spacer(1,11))
firstName = Paragraph("<font face='Times-Bold' size=18>{}</font>".format(row.first_name), centerStyle)
label.append(firstName)
label.append(Spacer(1, 11))
lastName = Paragraph("<font face='Times-Roman' size=11>{}</font>".format(row.last_name), centerStyle)
label.append(lastName)
label.append(Spacer(1,20))
# if row.crew.wefsk != '' or row.crew.wefsk != None or row.crew.wefsk != 'N/A':
# try:
# rooms = rotation(row.crew.wefsk.split('-')[0], row.crew.wefsk.split('-')[1])
# except:
# rooms = 'N/A'
# else:
# rooms = 'N/A'
label.append(Paragraph("<font face='Times-Roman' size=11>ID#: {}</font>".format(row.student_id), leftStyle))
label.append(Paragraph("<font face='Times-Roman' size=11>Crew #: {}</font>".format(row.crew), leftStyle))
# label.append(Paragraph("<font face='Times-Roman' size=11>Crew Room: {}</font>".format(row.crew.room), leftStyle))
# label.append(Paragraph("<font face='Times-Roman' size=11>W.E.F.S.K. Rotation: {}</font>".format(rooms), leftStyle))
labels.append(label)
if label_num == 0:
labels.append(Spacer(14, 144))
label_num += 1
doc_title = '_'.join(event_name.split())
doc.build(elements)
io.seek(0)
now = datetime.now().strftime('%Y-%m-%d')
filename = "{}_{}_{}.pdf".format(doc_title, now, int(time.time()))
file_id = db.file.insert(name=filename, file=db.file.file.store(io, filename))
db_file = db.file(file_id).file
return dict(filename=db_file)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def update_names(names):
names = json.loads(names)
response = []
for name in names:
r = db.module_names.update_or_insert(name=name['name'], label=name['value'])
response.append(r)
errors = list()
for i in range(len(response)):
if response[i] == 0:
errors.append(names[i])
return dict(errors=errors)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def approve_user(id):
response = db(db.auth_user.id==id).update(registration_key='')
return dict(response=response)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def disapprove_user(id):
response = db(db.auth_user.id==id).delete()
return dict(response=response)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def import_from_csv(csv_file):
"""Imports records into the database from a CSV file
    :param csv_file: the CSV text to be imported
    :returns: a dictionary with the list of per-record responses from the database
"""
response = list()
lines = csv_file.rstrip().splitlines()
if len(lines) > 0:
columns = lines.pop(0).split(',')
for i in range(len(columns)):
columns[i] = '_'.join(columns[i].lower().split())
for line in lines:
record = dict()
line = line.split(',')
for i in range(len(line)):
record[columns[i]] = line[i]
record = dict((k,v) for k,v in record.items() if k in db.person.fields)
response.append(db.person.update_or_insert(db.person.id==record['id'], **record))
return dict(response=response)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def import_from_query(csv_file, leaders):
"""Imports records into the database from a CSV file (in the form of the queries from VHS)
    :param csv_file: the CSV text to be imported
    :param leaders: 'true' if the CSV contains leader sign-ups rather than the student/course query
    :returns: a dictionary with a response flag, or a list of errors for leader imports
"""
import csv
import StringIO
leaders = True if leaders=="true" else False
def phone_format(n):
try:
return format(int(n[:-1]), ",").replace(",", "-") + n[-1]
except:
return None
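    # e.g. phone_format('7075551234') -> '707-555-1234'; malformed input returns None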
if not leaders:
file_string = StringIO.StringIO(csv_file)
lines = list(csv.reader(file_string, skipinitialspace=True))
del file_string
del csv_file
# INSERT STUDENTS
student_ids = list()
teacher_ids = list()
course_ids = list()
columns = lines.pop(0)
while len(lines) > 0:
record = dict()
line = lines.pop(0)
student_id = line[columns.index('student_id')]
teacher_id = line[columns.index('teacher_id')]
course_id = line[columns.index('course_id')]
if student_id and student_id not in student_ids:
student_ids.append(student_id)
for i in range(len(line)):
record[columns[i]] = line[i]
record = dict((k,v) for k,v in record.items() if k in db.person.fields)
if record.get('cell_phone', None):
record['cell_phone'] = phone_format(record['cell_phone'])
if record.get('home_phone', None):
record['home_phone'] = phone_format(record['home_phone'])
db.person.update_or_insert(db.person.student_id==student_id, **record)
if teacher_id and teacher_id not in teacher_ids:
teacher_ids.append(teacher_id)
db.teacher.update_or_insert(db.teacher.teacher_id==teacher_id, **{
'teacher_id':line[columns.index('teacher_id')],
'teacher_name':line[columns.index('teacher_name')]})
if course_id and teacher_id and course_id not in course_ids:
course_ids.append(course_id)
teacher = db(db.teacher.teacher_id==teacher_id).select(db.teacher.id).first()
if teacher:
db.course.update_or_insert(db.course.course_id==course_id, **{
'course_id':line[columns.index('course_id')],
'code':line[columns.index('course_code')],
'title':line[columns.index('course_title')],
'period':line[columns.index('period')],
'room':line[columns.index('room')],
'teacher_id':teacher.id})
if course_id and student_id:
course = db(db.course.course_id==course_id).select().first()
student = db(db.person.student_id==student_id).select().first()
if course and student:
db.course_rec.update_or_insert((db.course_rec.course_id==course.id) &
(db.course_rec.student_id==student.id),
course_id=course.id,
student_id=student.id)
db.commit()
del record
del line
return dict(response=True)
else:
errors = list()
lines = list(csv.reader(StringIO.StringIO(csv_file), skipinitialspace=True))
columns = lines.pop(0)
short_tasks = {
'Team Sacrifice (Must have a car and willingness to work later than others)' : 'Team Sacrifice',
"Peer Support (Must be enrolled in Mr. Ward's Psychology or Peer Support class)" : 'Peer Support',
"Tutor/Study Buddy (Academic credits are available for this option)" : 'Tutor/Study Buddy',
"Database Manager (Must know Excel, Mail merge, and other technologies)" : 'Database Manager',
"Facebook Maintenance (You are responsible for up keeping on our page. Must be a FB addict)" : "Facebook Maintenance",
"Fundraising Team" : "Fundraising Team",
"TAs (Work with freshmen and Mr. Varni, Mr. Ward, or Mrs. Housley during the school day (Academic credits are available for this option)": "TAs",
"Posters & Propaganda" : "Posters & Propaganda",
"Public Outreach (Attend Parent Night, Back-to-School, other public events)" : 'Public Outreach',
"ASB Support (Those enrolled in 4th period Leadership class should check this option, but others are welcome as well)" : "ASB Support",
"L.O.C.s (Loyal Order of the Crushers. Attend home athletic and extracurricular events)": "L.O.C.s",
"Dirty 30 (Explain various aspects of high school culture to freshmen on Orientation Day afternoon)" : "Dirty 30",
"Set-up (Room Mapping) and Clean-up (Orientation Day only)": "Set-up and Clean-up",
"Homecoming Parade (Dress up and ride on our float! Easy!)" : "Homecoming Parade",
"Security/Safety (Helps keep freshmen in line; works with Peer Support on Orientation Day)": "Security/Safety",
"Food Prep & Clean-up (Orientation Day only)": "Food Prep & Clean-up",
"Fashion (Make costumes for House Hotties and Homecoming Parade)" : "Fashion",
'Burgundy Beauties and Golden Guns (Formerly "House Hotties")' : "Burgundy Beauties and Golden Guns",
"Audio-Visual (Responsible for music and videos during Orientation)" : "Audio-Visual",
"A-Team (Alumni only)": "A-Team"
}
task_teams = [task.name for task in db().select(db.groups.name)]
for line in lines:
record = dict()
for i in range(len(line)):
if columns[i] == 'last_name' or columns[i] == 'first_name':
line[i] = line[i].capitalize()
record[columns[i]] = line[i]
record = dict((k,v) for k,v in record.items() if k in db.person.fields)
if record.get('cell_phone', None):
record['cell_phone'] = phone_format(record['cell_phone'])
try:
person = db((db.person.last_name==record['last_name']) &
(db.person.first_name==record['first_name'])).select(db.person.ALL).first()
if person:
person_id = person.id
db(db.person.id==person_id).update(**record)
db(db.person.id==person_id).update(leader=True)
aTasks = line[columns.index('a_tasks')].split(',')
bTasks = line[columns.index('b_tasks')].split(',')
cTasks = line[columns.index('c_tasks')].split(',')
tasks_to_add = list()
for task in aTasks:
if task not in task_teams and task in short_tasks.values():
task_id = db.groups.insert(name=task)
tasks_to_add.append(task_id)
task_teams.append(task)
elif task in task_teams and task in short_tasks.values():
task_row = db(db.groups.name==task).select().first()
if task_row:
task_id = task_row.id
tasks_to_add.append(task_id)
for task in bTasks:
if task not in task_teams and task in short_tasks.values():
task_id = db.groups.insert(name=task)
tasks_to_add.append(task_id)
task_teams.append(task)
elif task in task_teams and task in short_tasks.values():
task_row = db(db.groups.name==task).select().first()
if task_row:
task_id = task_row.id
tasks_to_add.append(task_id)
for task in cTasks:
if task not in task_teams and task in short_tasks.values():
task_id = db.groups.insert(name=task)
tasks_to_add.append(task_id)
task_teams.append(task)
elif task in task_teams and task in short_tasks.values():
task_row = db(db.groups.name==task).select().first()
if task_row:
task_id = task_row.id
tasks_to_add.append(task_id)
                    for task_id in tasks_to_add:
if not db((db.group_rec.group_id==task_id) & (db.group_rec.person_id==person_id)).select().first():
db.group_rec.insert(group_id=task_id, person_id=person_id)
except:
errors.append(record['last_name'] + ", " + record['first_name'])
return dict(errors=errors)
@auth.requires_login()
@auth.requires_membership('admin')
@service.json
def get_person_group_data(query=None):
if query:
qlist = query.split()
query = query.lower()
students = db(((db.person.last_name.contains(qlist, all=True)) |
(db.person.first_name.contains(qlist, all=True))) ).select(
db.person.id, db.person.last_name, db.person.first_name,
orderby=db.person.last_name|db.person.first_name).as_list()
allfields = [{'text': 'All', 'children':[d for d in [{'id':'all_people', 'last_name':'All Students', 'first_name' : ''},
{'id':'all_leaders', 'last_name':'All Leaders', 'first_name' : ''}] if query in d['last_name'].lower()]}]
allfields = [] if not allfields[0]['children'] else allfields
gradefields = [{'text': 'By Grade', 'children':[d for d in [{'id':'grade_9', 'last_name': 'Freshmen', 'first_name': ''},
{'id':'grade_10', 'last_name': 'Sophomores', 'first_name': ''},
{'id':'grade_11', 'last_name': 'Juniors', 'first_name': ''},
{'id':'grade_12', 'last_name': 'Seniors', 'first_name': ''}] if query in d['last_name'].lower()]}]
gradefields = [] if not gradefields[0]['children'] else gradefields
taskteams = [{'text': 'Task Teams', 'children': [{'id':'group_' + str(g.id),
'last_name': g.name,
'first_name':''}
for g in db(db.groups.name.contains(qlist)).select(db.groups.ALL, orderby=db.groups.name)]}]
taskteams = [] if not taskteams[0]['children'] else taskteams
students = [] if not students else [{'text': 'Students', 'children':students}]
people = allfields +\
gradefields +\
taskteams +\
students
else:
students = db().select(db.person.id, db.person.last_name, db.person.first_name,
orderby=db.person.last_name|db.person.first_name).as_list()
people = [{'text': 'All', 'children':[{'id':'all_people', 'last_name':'All Students', 'first_name' : ''},
{'id':'all_leaders', 'last_name':'All Leaders', 'first_name' : ''}]}] +\
[{'text': 'By Grade', 'children':[{'id':'grade_9', 'last_name': 'Freshmen', 'first_name': ''},
{'id':'grade_10', 'last_name': 'Sophomores', 'first_name': ''},
{'id':'grade_11', 'last_name': 'Juniors', 'first_name': ''},
{'id':'grade_12', 'last_name': 'Seniors', 'first_name': ''} ]}] +\
[{'text': 'Task Teams', 'children': [{'id':'group_' + str(g.id),
'last_name': g.name,
'first_name':''}
for g in db().select(db.groups.ALL, orderby=db.groups.name)]}] +\
[{'text': 'Students', 'children':students}]
return people
@auth.requires_login()
@auth.requires_membership('admin')
def call():
"""Call function used when calling a function from an HTTP request"""
return service()
```
#### File: javelin/controllers/messages.py
```python
__author__ = "<NAME>"
__copyright__ = "(c) 2013, Jacobson and Varni, LLC"
__date__ = "7/28/2013"
__email__ = "<EMAIL>"
__data__ = {'name' : 'messages', 'label' : 'Messages', 'description' : 'Send Email and SMS messages to people',
'icon' : 'comment', 'u-icon' : u'\uf075', 'color': 'light-blue', 'required' : True}
from applications.javelin.ctr_data import ctr_enabled, get_ctr_data
from gluon.contrib import simplejson as json
from gluon.tools import Service
service = Service(globals())
from gluon.contrib.sms_utils import sms_email
class Providers(object):
ATT = 'AT&T'
VERIZON = 'Verizon Wireless (vtext)'
SPRINT = 'Sprint PCS'
METRO_PCS = 'Metro PCS'
TMOBILE = 'T-Mobile USA (tmail)'
@staticmethod
def contains(e):
if e.lower() in ('at&t', 'at & t', 'verizon',
'metro pcs', 'metro-pcs', 'sprint pcs', 'sprint',
't-mobile', 't mobile'):
return True
else:
return False
@staticmethod
def get(e):
if e.lower() in ('at&t', 'at & t'):
return Providers.ATT
elif e.lower() in ('verizon'):
return Providers.VERIZON
elif e.lower() in ('metro pcs', 'metro-pcs'):
return Providers.METRO_PCS
elif e.lower() in ('sprint pcs', 'sprint'):
return Providers.SPRINT
elif e.lower() in ('t-mobile', 't mobile'):
return Providers.TMOBILE
else:
return None
@auth.requires_login()
@auth.requires_membership('standard')
def index():
"""Loads the index page for the 'Messages' controller
    :returns: a dictionary to pass to the view with the list of ctr_enabled and the active module ('messages')
"""
ctr_data = get_ctr_data()
return dict(ctr_enabled=ctr_enabled, ctr_data=ctr_data, active_module='messages')
@auth.requires_login()
@auth.requires_membership('standard')
@service.json
def send_sms(message, to='all_leaders'):
import re
bcc = list()
if to == 'all_leaders':
people = db((db.person.grade != 9) & (db.person.leader==True)).select(
db.person.id, db.person.student_id, db.person.cell_phone,
db.person.cell_provider, db.person.email)
for person in people:
if person.cell_phone and person.cell_provider and Providers.contains(person.cell_provider):
bcc.append(sms_email(person.cell_phone, Providers.get(person.cell_provider)))
elif person.email:
bcc.append(person.email)
elif to.startswith('task_team'):
people = db(db.group_rec.group_id==int(re.findall(r'\d+', to)[0])).select(
db.person.id, db.person.student_id, db.person.cell_phone,
db.person.cell_provider, db.person.email,
join=db.person.on(db.person.id==db.group_rec.person_id))
for person in people:
if person.cell_phone and person.cell_provider and Providers.contains(person.cell_provider):
bcc.append(sms_email(person.cell_phone, Providers.get(person.cell_provider)))
elif person.email:
bcc.append(person.email)
else:
person = db(db.person.id==to).select().first()
if person.cell_phone and person.cell_provider and Providers.contains(person.cell_provider):
bcc.append(sms_email(person.cell_phone, Providers.get(person.cell_provider)))
elif person.email:
bcc.append(person.email)
if len(bcc) < 100:
mail.send(to=mail.settings.sender, bcc=bcc, subject='', message=message)
else:
small_lists = list()
curr = list()
count = 0
while len(bcc) > 0:
if count < 50:
curr.append(bcc.pop(0))
else:
small_lists.append(curr)
curr = list()
curr.append(bcc.pop(0))
count = 0
            count += 1
        # make sure the final, partially-filled batch is also sent
        if curr:
            small_lists.append(curr)
        for l in small_lists:
mail.send(to=mail.settings.sender, bcc=l, subject='', message=message)
return dict(response='Sending messages')
@auth.requires_login()
@auth.requires_membership('standard')
@service.json
def get_recipients(query=None):
if query:
leaders = db((db.person.leader==True)
& ((db.person.last_name.contains(query)) |
(db.person.first_name.contains(query))) ).select(
db.person.id, db.person.last_name, db.person.first_name,
orderby=db.person.id).as_list()
people = [{'text': 'All', 'children':[{'id':'all_leaders', 'last_name':'All Leaders', 'first_name' : ''}]}] +\
[{'text': 'Task Teams', 'children': [{'id':'task_team_' + str(g.id),
'last_name': g.name,
'first_name':''}
for g in db(db.groups.name.contains(query)).select(db.groups.ALL, orderby=db.groups.name)]}] +\
[{'text': 'Leaders', 'children':leaders}]
else:
leaders = db((db.person.leader==True)).select(
db.person.id, db.person.last_name,
db.person.first_name, orderby=db.person.id).as_list()
people = [{'text': 'All', 'children':[{'id':'all_leaders', 'last_name':'All Leaders', 'first_name' : ''}]}] +\
[{'text': 'Task Teams', 'children': [{'id':'task_team_' + str(g.id),
'last_name': g.name,
'first_name':''}
for g in db().select(db.groups.ALL, orderby=db.groups.name)]}] +\
[{'text': 'Leaders', 'children':leaders}]
return people
@auth.requires_login()
@auth.requires_membership('standard')
def call():
"""Call function used when calling a function from an HTTP request"""
return service()
```
#### File: javelin/controllers/peersupport.py
```python
__author__ = "<NAME>"
__copyright__ = "(c) 2013, Jacobson and Varni, LLC"
__date__ = "7/12/2013"
__email__ = "<EMAIL>"
__data__ = {'name' : 'peersupport', 'label' : 'Peer Support', 'description' : 'Issue tracking system for Peer Support',
'icon' : 'heart', 'u-icon' : u'\uf004', 'color': 'pink', 'required' : True}
from applications.javelin.ctr_data import ctr_enabled, get_ctr_data
from gluon.tools import Service
from gluon.sqlhtml import FormWidget
service = Service(globals())
@auth.requires_login()
@auth.requires(auth.has_membership('peer_support') or auth.has_membership('admin'))
def index():
student = db.person.with_alias('student')
peer_support = db.person.with_alias('peer_support')
issues = db().select(db.student_issue.ALL, student.ALL, peer_support.ALL, db.crew.ALL,
join=[student.on(student.id==db.student_issue.person_id),
peer_support.on(peer_support.id==db.student_issue.ps_id)],
left=db.crew.on(db.student.crew==db.crew.id))
reports = db(db.file.name.contains("Peer_Support")).select(db.file.ALL)
return dict(issues=issues, reports=reports, ctr_enabled=ctr_enabled, ctr_data=get_ctr_data(), active_module='peersupport')
@auth.requires_login()
@auth.requires(auth.has_membership('peer_support') or auth.has_membership('admin'))
def new_issue():
form = SQLFORM(db.student_issue,
fields=['person_id', 'ps_id', 'summary', 'result', 'need_follow_up', 'refer'],
formstyle='divs')
if form.process(next=URL(a='javelin', c='peersupport', f='index')).accepted:
response.flash = 'The issue has been submitted!'
elif form.errors:
response.flash = 'There are errors in the form'
return dict(form=form, ctr_enabled=ctr_enabled, active_module='peersupport', ctr_data=get_ctr_data())
@auth.requires_login()
@auth.requires(auth.has_membership('peer_support') or auth.has_membership('admin'))
def issue():
id = int(request.vars.id[-1])
student = db.person.with_alias('student')
peer_support = db.person.with_alias('peer_support')
if id:
issue = db(db.student_issue.id==id).select(db.student_issue.ALL, student.ALL, peer_support.ALL, db.crew.ALL,
join=[student.on(student.id==db.student_issue.person_id),
peer_support.on(peer_support.id==db.student_issue.ps_id)],
left=db.crew.on(db.student.crew==db.crew.id)).first()
if issue:
return dict(issue=issue, ctr_enabled=ctr_enabled, active_module='peersupport', ctr_data=get_ctr_data())
return dict(issue=None, ctr_enabled=ctr_enabled, active_module='peersupport', ctr_data=get_ctr_data())
@auth.requires_login()
@auth.requires(auth.has_membership('peer_support') or auth.has_membership('admin'))
def follow_up():
id = int(request.vars.id[-1])
db.student_issue.id.readable = False
db.student_issue.person_id.writable = False
db.student_issue.person_id.represent = lambda row: ' '.join([row.first_name, row.last_name])
db.student_issue.ps_id.writable = False
db.student_issue.ps_id.represent = lambda row: ' '.join([row.first_name, row.last_name])
db.student_issue.result.writable = False
db.student_issue.summary.writable = False
db.student_issue.result.label = 'Result of Campus Walk'
form = SQLFORM(db.student_issue, id,
fields=['person_id', 'ps_id', 'summary', 'result', 'follow_up'],
formstyle='table2cols')
if form.process(next=URL(a='javelin', c='peersupport', f='index')).accepted:
db(db.student_issue.id==id).update(need_follow_up=False)
response.flash = 'The issue has been submitted!'
elif form.errors:
response.flash = 'There are errors in the form'
return dict(form=form, ctr_enabled=ctr_enabled, active_module='peersupport', ctr_data=get_ctr_data())
@auth.requires_login()
@auth.requires(auth.has_membership('peer_support') or auth.has_membership('admin'))
@service.json
def generate_report():
import StringIO
from reportlab.platypus import SimpleDocTemplate, Paragraph, Table, TableStyle, Image, Spacer
from reportlab.platypus.flowables import PageBreak
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.enums import TA_CENTER, TA_LEFT, TA_RIGHT
from reportlab.lib.pagesizes import letter, inch
from reportlab.lib import colors
try:
id = int(request.vars.id[-1])
except:
id = -1
io = StringIO.StringIO()
doc = SimpleDocTemplate(io, pagesize=letter,
rightMargin=0.25*inch, leftMargin=0.25*inch, topMargin=0.25*inch, bottomMargin=0)
elements = list()
centerStyle = ParagraphStyle(name='Center', alignment=TA_CENTER)
leftStyle = ParagraphStyle(name='Left', alignment=TA_LEFT)
rightStyle = ParagraphStyle(name='Right', alignment=TA_RIGHT)
tableStyle = TableStyle([
('VALIGN',(0,0),(-1,-1),'MIDDLE'),
('VALIGN',(1,8),(1,-1),'TOP'),
('GRID', (0,0), (-1,-1), 1, colors.black),
('FONT', (0, 0), (0, -1), 'Helvetica-Bold')])
if id != -1:
student = db.person.with_alias('student')
peer_support = db.person.with_alias('peer_support')
issue = db(db.student_issue.id==id).select(db.student_issue.ALL, student.ALL, peer_support.ALL, db.crew.ALL,
join=[student.on(student.id==db.student_issue.person_id),
peer_support.on(peer_support.id==db.student_issue.ps_id)],
left=db.crew.on(db.student.crew==db.crew.id)).first()
output = StringIO.StringIO()
l = list()
for field in issue:
l.append(field)
else:
student = db.person.with_alias('student')
peer_support = db.person.with_alias('peer_support')
issues = db().select(db.student_issue.ALL, student.ALL, peer_support.ALL, db.crew.ALL,
join=[student.on(student.id==db.student_issue.person_id),
peer_support.on(peer_support.id==db.student_issue.ps_id)],
left=db.crew.on(db.student.crew==db.crew.id))
numpage = len(issues)
p = 1
import calendar
from datetime import datetime, timedelta
def utc_to_local(utc_dt):
# get integer timestamp to avoid precision lost
timestamp = calendar.timegm(utc_dt.timetuple())
local_dt = datetime.fromtimestamp(timestamp)
assert utc_dt.resolution >= timedelta(microseconds=1)
return local_dt.replace(microsecond=utc_dt.microsecond)
for i in issues:
elements.append(Paragraph("<font face='Helvetica' size=11>Page {} of {}</font>".format(p, numpage), rightStyle))
elements.append(Paragraph("<font face='Helvetica-Bold' size=16>Peer Support Issue #{}</font>".format(i.student_issue.id), leftStyle))
elements.append(Spacer(1, 16))
rows = [['Date/Time', utc_to_local(i.student_issue.timestamp)],
['Student ID#', Paragraph("{}".format(i.student.student_id), leftStyle)],
['Last Name', Paragraph("{}".format(i.student.last_name), leftStyle)],
['First Name', Paragraph("{}".format(i.student.first_name), leftStyle)],
['Grade', Paragraph("{}".format(i.student.grade), leftStyle)],
['Peer Support Student', Paragraph("{}".format(i.peer_support.last_name + ', ' + i.peer_support.first_name), leftStyle)],
['Need Follow Up?', Paragraph("{}".format('Yes' if i.student_issue.need_follow_up else 'No'), leftStyle)],
['Refer to PS?', Paragraph("{}".format('Yes' if i.student_issue.refer else 'No'), leftStyle)],
['Summary of Concern', Paragraph("{}".format(i.student_issue.summary), leftStyle)],
['Result of Campus Walk', Paragraph("{}".format(i.student_issue.result), leftStyle)],
['Follow Up Notes', Paragraph("{}".format(i.student_issue.follow_up), leftStyle)]]
table = Table(rows, colWidths=[1.75*inch, 6.25*inch],
rowHeights=[.5*inch, .5*inch, .5*inch, .5*inch,
.5*inch, .5*inch, .5*inch, .5*inch,
1.8*inch, 1.8*inch, 1.8*inch])
table.setStyle(tableStyle)
elements.append(table)
elements.append(Spacer(1, 16))
elements.append(Paragraph("<font face='Helvetica' size=10>Created On: {}</font>".format(datetime.now().strftime('%Y-%m-%d')), rightStyle))
elements.append(PageBreak())
p += 1
doc.build(elements)
io.seek(0)
import time
now = datetime.now().strftime('%Y-%m-%d')
filename = "{}_{}_{}.pdf".format('Peer_Support_Report', now, int(time.time()))
file_id = db.file.insert(name=filename, file=db.file.file.store(io, filename))
db_file = db.file(file_id).file
return dict(filename=db_file)
@auth.requires_login()
@auth.requires(auth.has_membership('peer_support') or auth.has_membership('admin'))
def call():
"""Call function used when calling a function from an HTTP request"""
return service()
``` |
{
"source": "jjacobson/ETFCalc",
"score": 2
} |
#### File: etfcalc/util/holdings_calculator.py
```python
import requests_cache
from operator import attrgetter
from .webscraper import scrape_ticker, get_company_data, get_stock_news
from .holding import Holding
from .portfolio import Portfolio
def get_holdings(portfolio):
data = {}
total = _get_total(portfolio)
for ticker, shares in portfolio.get_holdings().items():
price = portfolio.get_price(ticker)
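# Scale each ETF's underlying holdings by that ETF's share of the total
# portfolio value, so the combined weights reflect the real exposure.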
ratio = (shares * price) / total
holdings = scrape_ticker(ticker)
for holding in holdings:
underlying = holding.get_ticker()
weight = float(holding.get_weight()) * ratio
if underlying not in data:
holding.set_weight(round_weight(weight))
data[underlying] = holding
else:
previous_weight = data[underlying].get_weight()
data[underlying].set_weight(
round_weight(previous_weight + weight))
holdings = list(data.values())
holdings.sort(key=attrgetter('weight'), reverse=True)
handle_stock_data(holdings)
return holdings
def handle_stock_data(holdings):
tickers = [holding.ticker for holding in holdings]
company_data = get_company_data(tickers)
news = get_stock_news(tickers[:50])
for holding in holdings:
ticker = holding.get_ticker()
if ticker in company_data:
company = company_data[holding.get_ticker()]
if company is not None:
# use iex names if available (they look better)
name = company['name']
if name is not None:
holding.set_name(name)
holding.set_sector(company['sector'])
holding.set_link(company['link'])
stock_news = news.get(holding.get_ticker())
if stock_news is not None:
holding.set_news(stock_news)
def round_weight(weight):
return round(weight, 3)
def _get_total(portfolio):
total = 0
for ticker, shares in portfolio.get_holdings().items():
price = portfolio.get_price(ticker)
total += shares * price
return total
```
#### File: etfcalc/util/webscraper.py
```python
import requests
import json
import requests_cache
import time
import logging
from datetime import date, timedelta, datetime
from pyquery import PyQuery
from pandas_datareader.nasdaq_trader import get_nasdaq_symbols
from .holding import Holding
symbols = get_nasdaq_symbols()
expire_after = timedelta(days=5)
requests_cache.install_cache('cache_data', expire_after=expire_after)
# token = next(open('./token.txt'))  # IEX Cloud API token
# Scrape name and holdings if any for a given ticker
def scrape_ticker(ticker):
holdings = []
data = get_data(ticker)
# invalid ticker
if data is None:
return holdings
if _is_etf(data):
_get_etf_data(ticker, data, holdings)
else:
_get_stock_data(ticker, data, holdings)
return holdings
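# Illustrative: for an ETF such as 'SPY' this yields one Holding per
# underlying stock with its percentage weight; for a plain stock such as
# 'AAPL' it yields a single Holding for the stock itself.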
# Get the nasdaq data for a given ticker
def get_data(ticker):
data = None
try:
data = symbols.loc[ticker]
except KeyError:
logging.info('Failed to get data for ticker %s', ticker)
return data
# Get latest price for a given ticker
def get_price(ticker):
with requests_cache.disabled():
quote = _get_iex_data([ticker], ['price'])
return _round_price(quote[ticker]['price'])
def get_company_data(tickers):
company_data = {}
data = _get_iex_data(tickers, ['company'])
for ticker, stock in data.items():
quote = stock['company']
if quote is None:
continue
company_data[ticker] = {'name' : quote['companyName'], 'sector' : quote['sector'], 'link' : quote['website']}
return company_data
def get_stock_news(tickers):
stock_news = {}
with requests_cache.disabled():
data = _get_iex_data(tickers, ['news'], ['last=5'])
for ticker, stock in data.items():
news = stock['news']
if news is None:
continue
news_items = []
for news_item in news:
news_items.append({'title' : news_item['headline'], 'description' : news_item['summary'],
'image_url' : _get_ticker_image(ticker), 'datetime' : _convert_time(news_item['datetime']),
'url' : news_item['url'], 'source' : news_item['source']})
stock_news[ticker] = news_items
return stock_news
def get_holding_data(ticker):
holding_data = {}
with requests_cache.disabled():
data = _get_iex_data([ticker], ['stats', 'quote', 'chart', 'dividends'], ['displayPercent=true', 'range=5y'])
return data
def _round_price(price):
return format(price, '.2f')
def _is_etf(data):
return data.loc['ETF']
def _convert_time(timestamp):
timestamp = timestamp.replace('T', ' ')
return datetime.fromisoformat(timestamp)
def _get_etf_data(ticker, data, holdings):
response = _get_etf_page(ticker)
if not _valid_request(response):
logging.warning('Failed to get holdings for ticker %s: %s',
ticker, response.status_code)
return
page_content = response.content
pq = PyQuery(page_content)
table = pq.find('#etfs-that-own')
# use secondary data source if none available
if not table:
_get_etf_data_backup(ticker, data, holdings)
return
for row in table('tbody tr').items():
columns = list(row('td').items())
ticker = columns[0].children("a").text()
holding_data = get_data(ticker)
if holding_data is None:
# fall back to getting name from scraped data
name = columns[1].children("a").text()
else:
# make use of official nasdaq data if available
name = holding_data.loc['Security Name']
weight = columns[2].text()
weight = weight[:-1]
holdings.append(Holding(name, ticker, weight))
def _get_etf_data_backup(ticker, data, holdings):
response = _get_etf_page_backup(ticker)
if not _valid_request(response):
logging.warning('Failed to get holdings for ticker %s', ticker)
return
page_content = response.content
title = data.loc['Security Name']
url = _get_holdings_url(page_content)
holdings_json = _make_request(url + str(0)).json()
rows = holdings_json['total']
# etfdb limits us to 15 tickers per page
for i in range(0, rows, 15):
for entry in holdings_json['rows']:
holding = _get_etf_holding(entry)
holdings.append(holding)
holdings_json = _make_request(url + str(i + 15), throttle=0.7).json()
def _get_stock_data(ticker, data, holdings):
title = data.loc['Security Name']
holding = Holding(title, ticker)
holdings.append(holding)
def _get_etf_page(ticker):
url = 'https://etfdailynews.com/etf/{0}/'.format(ticker)
return _make_request(url, redirects=False)
def _get_etf_page_backup(ticker):
url = 'https://etfdb.com/etf/{0}/'.format(ticker)
return _make_request(url, redirects=False)
def _get_ticker_image(ticker):
return 'https://storage.googleapis.com/iex/api/logos/{0}.png'.format(ticker)
def _get_iex_data(tickers, options, settings=None):
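# Query the IEX batch endpoint at most 100 symbols at a time and merge the
# per-symbol JSON payloads into a single dict keyed by ticker.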
data = {}
options = ",".join(options)
if settings:
options = options + ("&" + "&".join(settings))
for i in range(0, len(tickers), 100):
subset = ",".join(tickers[i:i+100])
url = 'https://api.iextrading.com/1.0/stock/market/batch?symbols={0}&types={1}'.format(subset, options)
#url = 'https://cloud.iexapis.com/beta/stock/market/batch?symbols={0}&types={1}&token={2}'.format(subset, options, token)
data.update(_make_request(url, redirects=False).json())
return data
def _make_request(url, redirects=True, throttle=0.0):
response = None
try:
response = requests.get(url, hooks={'response': _throttle_hook(
throttle)}, allow_redirects=redirects, timeout=3)
except requests.exceptions.RequestException as e:
raise ValueError('Request exception') from e
return response
# returns response hook function which sleeps for
# timeout if the response is not yet cached
def _throttle_hook(timeout):
def hook(response, *args, **kwargs):
if not getattr(response, 'from_cache', False):
time.sleep(timeout)
return response
return hook
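# Illustrative use: requests.get(url, hooks={'response': _throttle_hook(0.5)})
# sleeps ~0.5s after every uncached response, while cached hits return at once.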
def _valid_request(response):
return response.status_code == requests.codes.ok
def _get_holdings_url(content):
pq = PyQuery(content)
url = 'https://etfdb.com/'
sort = '&sort=weight&order=desc&limit=15&offset='
url += pq("table[data-hash='etf-holdings']").attr('data-url') + sort
return url
def _get_etf_holding(entry):
name = ticker = ''
data = entry['holding']
pq = PyQuery(data)
# handle normal cases of actual stocks
if pq('a').length:
ticker = pq('a').attr('href').split('/')[2].split(':')[0]
holding_data = get_data(ticker)
if holding_data is None:
# fall back to getting name from scraped data
name = pq('a').text().split('(')[0]
else:
# make use of official nasdaq data if available
name = holding_data.loc['Security Name']
# handle special underlyings e.g. VIX futures
elif pq('span').eq(2).length:
name = data
ticker = pq('span').eq(2).text()
# handle further special cases e.g. Cash components, Hogs, Cattle
else:
name = data
ticker = data
weight = entry['weight'][:-1]
return Holding(name, ticker, weight)
``` |
{
"source": "jjagielka/python-accessor",
"score": 4
} |
#### File: python-accessor/accessor/__init__.py
```python
from functools import wraps
import operator
ops = ['__lt__', '__le__', '__eq__', '__ne__', '__ge__', '__gt__', '__not__',
'__abs__', '__add__', '__and__', '__floordiv__', '__index__', '__inv__',
'__invert__', '__lshift__', '__mod__', '__mul__', '__matmul__',
'__neg__', '__or__', '__pos__', '__pow__', '__rshift__', '__sub__',
'__truediv__', '__xor__', '__concat__', '__contains__']
class Meta(type):
"""Bind methods from operator module to Accessor class."""
def __init__(cls, name, bases, attrs):
super(Meta, cls).__init__(name, bases, attrs)
def wrapper(func):
@wraps(func)
def inner(self, *args, **kwargs):
return lambda x: func(self(x), *args, **kwargs)
return inner
for name in ops:
setattr(cls, name, wrapper(getattr(operator, name)))
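# After this binding, comparisons and arithmetic on accessors return
# callables, e.g. (accessor.age > 18)({'age': 21}) -> True (illustrative).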
def _str_slice(s):
t = (s.start, s.stop, s.step) if s.step else (s.start, s.stop)
return ':'.join(['' if x is None else str(x) for x in t])
class Accessor(metaclass=Meta):
"""Return a callable object that fetches the given item(s) from its operand.
It is like itemgetter with recursion and object dot notation.
After f = Accessor.foo.bar, the call f(r) returns r["foo"]["bar"].
"""
__slots__ = ('_accessor', '_path')
def __init__(self, accessor=None):
self._accessor = accessor
self._path = accessor._path if accessor else ''
def __call__(self, x):
return x if self._accessor is None else self._accessor(x)
def __getattr__(self, name):
return self.__getitem__(name)
def __getitem__(self, name):
def accessor(x, resolve=True):
value = self(x) if resolve else x  # for the recursive call below
if not isinstance(name, (int, slice)) and isinstance(value, list):
return [accessor(n, False) for n in value]
try:
return getattr(value, '__getitem__', lambda a: None)(name)
except (KeyError, TypeError):
return None
accessor._path = _str_slice(name) if isinstance(name, slice) else name
return self.__class__(accessor)
def __repr__(self):
return f"<{self.__class__.__name__} '{self._path}'>"
accessor = Accessor()
def values(*getters):
"""Return a func extracting the values of multiple getters as a tuple.
After g = values(f, h), the call g(r) returns (f(r), h(r)).
"""
return lambda x: tuple(n(x) for n in getters)
def keys(*getters):
"""Return paths."""
return lambda x: tuple(n._path for n in getters)
def items(*getters, prefix=''):
return lambda x: {prefix + n._path: n(x) for n in getters}
def select(*getters, **name_getters):
"""Return a func extracting values of multiple getters as a dict.
getters: list of getters for dict entries with _path key
name_getters: list of getters with names
"""
return lambda x: dict(
{n._path: n(x) for n in getters},
**{k: f(x) for k, f in name_getters.items()})
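# Illustrative: select(accessor.a, b=accessor.c.d)({'a': 1, 'c': {'d': 2}})
# returns {'a': 1, 'b': 2}.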
def normalize(data, c_getter, select, c_select):
"""Normalize semi-structured JSON data into a flat table.
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
c_getter : accessor
Path in each object to the list of nested records; if it resolves to
nothing, a single row with only the top-level fields is produced
select : callable built with select()
Fields kept from each top-level object
c_select : callable built with select()
Fields kept from each nested record
"""
for x in [data] if isinstance(data, dict) else data:
yield from (dict(select(x), **c_select(c)) for c in c_getter(x) or [{}])
data = [{
'state': 'Florida',
'shortname': 'FL',
'info': {'governor': '<NAME>'},
'counties': [
{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': '<NAME>', 'population': 60000}
]},
{'state': 'Ohio',
'shortname': 'OH',
'info': {'governor': '<NAME>'},
'counties': [
{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}
]}
]
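# Minimal usage sketch (illustrative), flattening the sample data above in
# the spirit of pandas.json_normalize:
#
#   from accessor import accessor as _, select, normalize
#   rows = list(normalize(
#       data,
#       _.counties,
#       select(_.state, _.shortname, gov=_.info.governor),
#       select(_.name, _.population),
#   ))
#   # rows[0] -> {'state': 'Florida', 'shortname': 'FL', 'gov': '<NAME>',
#   #             'name': 'Dade', 'population': 12345}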
``` |
{
"source": "jjagodzinski/ralph",
"score": 2
} |
#### File: ralph/discovery/models_pricing.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django.db import models as db
from lck.django.choices import Choices
from lck.django.common import nested_commit_on_success
class PricingAggregate(Choices):
"""The way to aggregate values of a variable."""
_ = Choices.Choice
sum = _("Sum") << {'function': db.Sum}
average = _("Average") << {'function': db.Avg}
min = _("Minimum") << {'function': db.Min}
max = _("Maximum") << {'function': db.Max}
class PricingGroup(db.Model):
"""
A group of devices that are priced according to common rules for the
given month.
"""
name = db.CharField(max_length=64)
devices = db.ManyToManyField('discovery.Device')
date = db.DateField()
class Meta:
unique_together = ('name', 'date')
ordering = ('name', 'date')
@nested_commit_on_success
def copy_from(self, source):
"""
Copy the variables, formulas and values from the specified group.
Only call on empty groups right after creating them.
Don't call on itself.
"""
for device in source.devices.all():
self.devices.add(device)
self.save()
for formula in source.pricingformula_set.all():
PricingFormula(
group=self,
component_group=formula.component_group,
formula=formula.formula,
).save()
for variable in source.pricingvariable_set.all():
new_variable = PricingVariable(
name=variable.name,
group=self,
aggregate=variable.aggregate,
)
new_variable.save()
for value in variable.pricingvalue_set.all():
PricingValue(
variable=new_variable,
value=value.value,
device=value.device,
).save()
def __unicode__(self):
return self.name
class PricingFormula(db.Model):
"""
A formula for pricing a specific component in a specific pricing group.
"""
group = db.ForeignKey('discovery.PricingGroup')
component_group = db.ForeignKey('discovery.ComponentModelGroup')
formula = db.TextField()
class Meta:
unique_together = ('group', 'component_group')
ordering = ('group', 'component_group')
def __unicode__(self):
return self.formula
@staticmethod
def eval_formula(formula, variables):
builtins = {
'sum': sum,
'max': max,
'min': min,
}
return eval(
formula,
{'__builtins__': builtins},
variables,
)
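# Illustrative: PricingFormula.eval_formula('size * unit_price + 5',
# {'size': 3, 'unit_price': 7}) evaluates to 26; only the whitelisted
# sum/max/min builtins are available inside the expression.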
def get_value(self, **kwargs):
variables = {}
for variable in self.group.pricingvariable_set.all():
variables[variable.name] = variable.get_value()
variables.update(kwargs)
return PricingFormula.eval_formula(self.formula, variables)
def get_example(self):
try:
return self.get_value(size=1)
except Exception as e:
return unicode(e)
class PricingVariable(db.Model):
"""A variable that is used in the pricing formulas."""
name = db.CharField(max_length=64)
group = db.ForeignKey('discovery.PricingGroup')
aggregate = db.PositiveIntegerField(
choices=PricingAggregate(),
default=PricingAggregate.sum.id,
)
class Meta:
unique_together = ('group', 'name')
ordering = ('group', 'name')
def get_value(self):
function = PricingAggregate.from_id(self.aggregate).function
d = self.pricingvalue_set.aggregate(function('value'))
return d.values()[0]
def __unicode__(self):
return self.name
class PricingValue(db.Model):
"""A value of a variable that is used in the pricing formulas. """
device = db.ForeignKey('discovery.Device')
variable = db.ForeignKey('discovery.PricingVariable')
value = db.DecimalField(max_digits=8, decimal_places=2)
class Meta:
unique_together = ('device', 'variable')
ordering = ('device', 'variable')
def __unicode__(self):
return unicode(self.value)
```
#### File: discovery/plugins/hp_oa.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from urllib2 import urlopen, URLError
import httplib
from lck.django.common import nested_commit_on_success
from lck.django.common.models import MACAddressField
from lck.lang import Null, nullify
from lck.xml import etree_to_dict
import lck.xml.converters
from lxml import etree as ET
from ralph.discovery.models import (IPAddress, Device, DeviceType,
SERIAL_BLACKLIST, ComponentType, GenericComponent, ComponentModel)
from ralph.util import network, plugin, Eth
SAVE_PRIORITY = 5
def _nullify(value):
if value is not None:
raise ValueError
return Null
def hp_xmldata(hostname, timeout=10):
try:
url = urlopen("https://{}/xmldata?item=all".format(hostname),
timeout=timeout)
try:
data = url.read()
finally:
url.close()
except (URLError, httplib.InvalidURL, httplib.BadStatusLine):
return
else:
if not url.info().get('Content-Type', '').startswith('text/xml'):
return
data = data.decode('utf-8', 'replace').encode('utf-8')
rimp = ET.fromstring(data)
if rimp.tag.upper() != 'RIMP':
return
return nullify(
etree_to_dict(
rimp,
_converters=[
_nullify,
int,
float,
lck.xml.converters._datetime,
lck.xml.converters._datetime_strip_tz
],
)
)[1]
def _get_ethernets(data):
for mezz in data['PORTMAP']['MEZZ']:
name = mezz['DEVICE']['NAME']
ports = mezz['DEVICE']['PORT']
if isinstance(ports, dict):
ports = [ports]
for port in ports:
if port['TYPE'] == 'INTERCONNECT_TYPE_ETH':
try:
mac = MACAddressField.normalize(port['WWPN'])
except ValueError:
continue
yield Eth(name, mac, speed=None)
@nested_commit_on_success
def _add_hp_oa_devices(devices, device_type, parent=None):
if devices and not isinstance(devices, list):
devices = [devices]
for i, device in enumerate(devices):
bay = device['BAY']['CONNECTION2'][
'BLADESYMBOLICNUMBER'] or str(device['BAY']['CONNECTION'])
name = device['PN'].strip() or device['SPN'].strip()
if not name.startswith('HP'):
name = 'HP ' + name
firmware = str(device.get('FWRI', ''))
sn = device['SN'].strip()
if sn in SERIAL_BLACKLIST:
sn = None
if not sn:
sn = device['BSN'].strip()
if sn in SERIAL_BLACKLIST:
sn = None
try:
ip = network.validate_ip(device['MGMTIPADDR'])
except ValueError:
continue
ip_address, created = IPAddress.concurrent_get_or_create(
address=str(ip))
if created:
ip_address.hostname = network.hostname(ip_address.address)
ip_address.snmp_name = name
# no priorities for IP addresses
ip_address.save(update_last_seen=True)
if device_type == DeviceType.management:
ip_address.is_management = True
if parent and not parent.management:
parent.management = ip_address
parent.save(priority=SAVE_PRIORITY)
model, mcreated = ComponentModel.create(
ComponentType.management,
name=name,
priority=SAVE_PRIORITY,
)
component, created = GenericComponent.concurrent_get_or_create(
sn=sn,
defaults=dict(device=parent),
)
component.model = model
component.label = name
component.save(priority=SAVE_PRIORITY)
if ip:
ip_address.is_management = True
ip_address.device = parent
ip_address.save() # no priorities for IP addresses
continue
if device_type == DeviceType.switch and 'SAN' in name:
device_type = DeviceType.fibre_channel_switch
ethernets = list(_get_ethernets(device))
if not (ip and name and (sn or ethernets)):
continue
dev = None
# FIXME: isn't ip already known as not empty?
if ip and device_type in (DeviceType.switch, DeviceType.fibre_channel_switch):
# FIXME: isn't this IP address already created as `ip_address`
# above?
ip_addr, ip_created = IPAddress.concurrent_get_or_create(
address=ip)
if ip_addr.device:
dev = ip_addr.device
dev.parent = parent
if dev is None:
dev = Device.create(sn=sn, model_name=name, model_type=device_type,
ethernets=ethernets, parent=parent,
priority=SAVE_PRIORITY)
if firmware:
dev.hard_firmware = firmware
if bay:
name = '%s [%s]' % (name, bay)
if bay:
if 'A' in bay or 'B' in bay:
dev.chassis_position = int(bay[:-1])
if bay[-1] == 'A':
dev.chassis_position += 1000
elif bay[-1] == 'B':
dev.chassis_position += 2000
else:
dev.chassis_position = int(bay)
dev.position = bay
else:
dev.chassis_position = i + 1
dev.save(update_last_seen=True, priority=SAVE_PRIORITY)
ip_address.device = dev
# no priorities for IP addresses
ip_address.save(update_last_seen=True)
def make_encl(data):
encl_name = data['INFRA2']['PN'].strip()
encl_sn = data['INFRA2']['ENCL_SN'].strip()
if not encl_name.startswith('HP'):
encl_name = 'HP ' + encl_name
encl = Device.create(
sn=encl_sn,
name=encl_name,
model_type=DeviceType.blade_system,
model_name=encl_name,
priority=SAVE_PRIORITY,
)
encl.save(update_last_seen=True, priority=SAVE_PRIORITY)
return encl
@plugin.register(chain='discovery', requires=['ping', 'http'])
def hp_oa_xml(**kwargs):
snmp_name = kwargs.get('snmp_name', '').lower()
if snmp_name and "onboard administrator" not in snmp_name:
return False, "no match.", kwargs
if kwargs.get('http_family', '') not in ('Unspecified', 'RomPager', 'HP'):
return False, 'no match.', kwargs
ip = str(kwargs['ip'])
data = hp_xmldata(ip, timeout=30)
if not data:
return False, 'silent.', kwargs
# For some reason those are sometimes ints instead of strings
name = unicode(data['MP']['PN']).strip()
sn = unicode(data['MP']['SN']).strip()
rack_name = unicode(data['INFRA2']['RACK']).strip()
encl_name = unicode(data['INFRA2']['PN']).strip()
encl_sn = unicode(data['INFRA2']['ENCL_SN']).strip()
if not (name and sn and rack_name and encl_name and encl_sn):
return False, 'incompatible answer.', kwargs
encl = make_encl(data)
_add_hp_oa_devices(data['INFRA2']['MANAGERS']['MANAGER'],
DeviceType.management, parent=encl)
_add_hp_oa_devices(data['INFRA2']['SWITCHES']['SWITCH'],
DeviceType.switch, parent=encl)
_add_hp_oa_devices(data['INFRA2']['BLADES']['BLADE'],
DeviceType.blade_server, parent=encl)
return True, name, kwargs
```
#### File: discovery/plugins/http.py
```python
from lck.django.common import nested_commit_on_success
from ralph.util import plugin
from ralph.discovery.models import IPAddress
from ralph.discovery.http import get_http_family
@nested_commit_on_success
def run_http(ip):
family = get_http_family(ip)
ip_address, created = IPAddress.concurrent_get_or_create(address=ip)
ip_address.http_family = family
ip_address.save(update_last_seen=True)
return family
@plugin.register(chain='discovery', requires=['ping'], priority=201)
def http(**kwargs):
ip = str(kwargs['ip'])
try:
name = run_http(ip)
except Exception as e:
if hasattr(e, 'code') and hasattr(e, 'reason'):
message = 'Error %s: %s' % (e.code, e.reason)
else:
message = 'Error: %s' % unicode(e)
return True, message, kwargs
kwargs['http_family'] = name
return True, name, kwargs
```
#### File: discovery/plugins/mac.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from lck.django.common import nested_commit_on_success
from ralph.discovery.models import (Device, Ethernet, IPAddress)
SAVE_PRIORITY = 0
@nested_commit_on_success
def _merge_devs(dev, other_dev):
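# Copy any attributes and related *_set children that `dev` is missing from
# `other_dev`, then delete `other_dev` as a duplicate record.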
for field, value in other_dev.__dict__.iteritems():
if field.startswith('_'):
continue
if not getattr(dev, field):
setattr(dev, field, value)
dev.save(priority=SAVE_PRIORITY)
for set_field in Device.__dict__.keys():
if not set_field.endswith('_set'):
continue
if len(getattr(dev, set_field).all()):
continue
for child in getattr(other_dev, set_field).all():
child.device = dev
child.save(priority=SAVE_PRIORITY)
other_dev.delete()
def _connect_macs(dev):
macs = Ethernet.objects.filter(device=dev).values_list('mac')
count = 0
for mac, in macs:
devs = Device.objects.filter(ethernet__mac=mac)
for other_dev in devs:
if other_dev == dev:
continue
_merge_devs(dev, other_dev)
count += 1
return count
def own_mac(ip, **kwargs):
ip = str(ip)
try:
dev = IPAddress.objects.select_related().get(address=ip).device
except IPAddress.DoesNotExist:
return False, 'no device.', kwargs
if dev is None:
return False, 'no device.', kwargs
count = _connect_macs(dev)
return True, '%d own MACs connected.' % count, kwargs
def children_mac(ip, **kwargs):
ip = str(ip)
try:
dev = IPAddress.objects.select_related().get(address=ip).device
except IPAddress.DoesNotExist:
return False, 'no device.', kwargs
if dev is None:
return False, 'no device.', kwargs
count = 0
child_count = 0
for child_dev in dev.child_set.all():
count += _connect_macs(child_dev)
child_count += 1
message = '%d MACs of %d children connected.' % (count, child_count)
return True, message, kwargs
```
#### File: discovery/plugins/position.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from ralph.util import plugin
from ralph.discovery.models import (
Device,
DeviceModel,
DeviceType,
IPAddress,
Network,
)
def _make_dc(dc_no):
if dc_no is None:
return None
dev_model, created = DeviceModel.concurrent_get_or_create(
name='Data center',
defaults={
'type': DeviceType.data_center.id,
},
)
dc, created = Device.concurrent_get_or_create(
sn=dc_no,
defaults={
'model': dev_model,
},
)
if created:
dc.name = dc_no
dc.save(update_last_seen=True)
return dc
def _connect_dc(ip, dev):
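# Resolve the network containing `ip` to find its data center and first
# rack, then attach the device and all of its children to that location.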
try:
network = Network.from_ip(ip)
except IndexError:
dc_no = None
rack = None
else:
dc_no = network.data_center.name if network.data_center else None
rack = None
for rack in network.racks.all()[:1]:
break
dc = _make_dc(dc_no)
if rack:
dev.parent = rack
elif dev.parent is None:
dev.parent = rack or dc
else:
return 'Already has better info.'
stack = [dev]
while stack:
dev = stack.pop()
for child in dev.child_set.all():
stack.append(child)
if rack:
dev.rack = rack.sn if rack.sn else None
if dc_no:
dev.dc = dc_no
dev.save()
return '%s %s' % (dc_no, rack.name if rack else '?')
@plugin.register(chain='postprocess', requires=['ping'])
def position(ip, **kwargs):
ip = str(ip)
try:
ipaddr = IPAddress.objects.select_related().get(address=ip)
except IPAddress.DoesNotExist:
return False, 'no device.', kwargs
dev = ipaddr.device
if dev is None:
return False, 'no device.', kwargs
if dev.parent and (
dev.parent.model is None or
dev.parent.model.type not in (
DeviceType.data_center.id,
DeviceType.rack.id,
)
):
return False, 'has parent.', kwargs
name = _connect_dc(ip, dev)
return True, name, kwargs
```
#### File: discovery/plugins/ssh_aix.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import paramiko
import re
from django.conf import settings
from lck.django.common import nested_commit_on_success
from ralph.util import network, Eth
from ralph.util import plugin
from ralph.discovery import guessmodel
from ralph.discovery.hardware import normalize_wwn
from ralph.discovery.models import (
Device, DeviceType, DiskShareMount, DiskShare,
ComponentType, ComponentModel, Storage,
Processor, Memory, IPAddress, OperatingSystem
)
AIX_USER = settings.AIX_USER
AIX_PASSWORD = settings.AIX_PASSWORD
AIX_KEY = settings.AIX_KEY
MODELS = {
'IBM,9131-52A': 'IBM P5 520',
'IBM,8203-E4A': 'IBM P6 520',
'IBM,8233-E8B': 'IBM Power 750 Express',
}
SAVE_PRIORITY = 4
class Error(Exception):
pass
def _connect_ssh(ip):
return network.connect_ssh(ip, AIX_USER, AIX_PASSWORD, key=AIX_KEY)
def _ssh_lines(ssh, command):
stdin, stdout, stderr = ssh.exec_command(command)
for line in stdout.readlines():
yield line
@nested_commit_on_success
def run_ssh_aix(ip):
ssh = _connect_ssh(ip)
try:
ethernets = []
for model_line in _ssh_lines(ssh, 'lsattr -El sys0 | grep ^modelname'):
machine_model = model_line.split(None, 2)[1]
break
for mac_line in _ssh_lines(ssh, 'netstat -ia | grep link'):
interface, mtu, net, mac, rest = mac_line.split(None, 4)
if '.' not in mac:
continue
octets = mac.split('.')
mac = ''.join('%02x' % int(o, 16) for o in octets).upper()
ethernets.append(Eth(label=interface, mac=mac, speed=0))
disks = {}
os_storage_size = 0
for disk_line in _ssh_lines(ssh, 'lsdev -c disk'):
disk, rest = disk_line.split(None, 1)
wwn = None
model = None
for line in _ssh_lines(ssh, 'lscfg -vl %s' % disk):
if 'hdisk' in line:
match = re.search(r'\(([0-9]+) MB\)', line)
if match:
os_storage_size += int(match.group(1))
elif 'Serial Number...' in line:
label, sn = line.split('.', 1)
sn = sn.strip('. \n')
elif 'Machine Type and Model.' in line:
label, model = line.split('.', 1)
model = model.strip('. \n')
disks[disk] = (model, sn)
os_version = ''
for line in _ssh_lines(ssh, 'oslevel'):
os_version = line.strip()
break
os_memory = 0
for line in _ssh_lines(ssh, 'lsattr -El sys0 | grep ^realmem'):
match = re.search(r'[0-9]+', line)
if match:
os_memory = int(int(match.group(0)) / 1024)
break
os_corescount = 0
for line in _ssh_lines(ssh, 'lparstat -i|grep ^Active\ Phys'):
match = re.search(r'[0-9]+', line)
if match:
os_corescount += int(match.group(0))
finally:
ssh.close()
dev = Device.create(
ethernets=ethernets, model_type=DeviceType.rack_server,
model_name='%s AIX' % MODELS.get(machine_model, machine_model))
ipaddr = IPAddress.objects.get(address=ip)
ipaddr.device = dev
ipaddr.save()
wwns = []
sns = []
stors = []
for disk, (model_name, sn) in disks.iteritems():
if not sn:
continue
if model_name == 'VV':
wwns.append(sn)
else:
stors.append((disk, model_name, sn))
sns.append(sn)
for mount in dev.disksharemount_set.exclude(share__wwn__in=wwns):
mount.delete()
for stor in dev.storage_set.exclude(sn__in=sns):
stor.delete()
for wwn in wwns:
try:
share = DiskShare.objects.get(wwn=wwn)
except DiskShare.DoesNotExist:
continue
wwn = normalize_wwn(sn[-4:] + sn[:-4])
mount, created = DiskShareMount.concurrent_get_or_create(
share=share, device=dev, defaults={'is_virtual': False})
mount.volume = disk
mount.save(priority=SAVE_PRIORITY)
for disk, model_name, sn in stors:
# FIXME: storage with no size
model, c = ComponentModel.create(
ComponentType.disk,
family=model_name,
priority=SAVE_PRIORITY,
)
stor, created = Storage.concurrent_get_or_create(
device=dev,
sn=sn,
mount_point=None,
)
stor.model = model
stor.label = disk
stor.save(priority=SAVE_PRIORITY)
# FIXME: memory with no size
mem, created = Memory.concurrent_get_or_create(device=dev, index=0)
mem.label = 'Memory'
mem.model, c = ComponentModel.create(
ComponentType.memory,
family='pSeries',
priority=SAVE_PRIORITY,
)
mem.save(priority=SAVE_PRIORITY)
# FIXME: CPUs without info
cpu, created = Processor.concurrent_get_or_create(device=dev, index=0)
cpu.label = 'CPU'
cpu.model, c = ComponentModel.create(
ComponentType.processor,
family='pSeries',
name='pSeries CPU',
priority=SAVE_PRIORITY,
)
cpu.save(priority=SAVE_PRIORITY)
OperatingSystem.create(dev=dev,
os_name='AIX',
version=os_version,
family='AIX',
memory=os_memory or None,
cores_count=os_corescount or None,
storage=os_storage_size or None,
priority=SAVE_PRIORITY
)
return machine_model
@plugin.register(chain='discovery', requires=['ping'])
def ssh_aix(**kwargs):
if 'nx-os' in kwargs.get('snmp_name', '').lower():
return False, 'incompatible Nexus found.', kwargs
ip = str(kwargs['ip'])
if AIX_USER is None:
return False, 'no auth.', kwargs
kwargs['guessmodel'] = gvendor, gmodel = guessmodel.guessmodel(**kwargs)
if gvendor != 'IBM':
return False, 'no match: %s %s' % (gvendor, gmodel), kwargs
snmp_name = kwargs.get('snmp_name', '')
if snmp_name and not snmp_name.startswith('IBM PowerPC'):
return False, 'no match.', kwargs
if not network.check_tcp_port(ip, 22):
return False, 'closed.', kwargs
try:
name = run_ssh_aix(ip)
except (network.Error, Error) as e:
return False, str(e), kwargs
except paramiko.SSHException as e:
return False, str(e), kwargs
except Error as e:
return False, str(e), kwargs
return True, name, kwargs
```
#### File: discovery/plugins/ssh_onstor.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import paramiko
from django.conf import settings
from lck.django.common import nested_commit_on_success
from ralph.util import network
from ralph.util import plugin
from ralph.util import parse
from ralph.discovery.models import (DeviceType, DeviceModel, Device, IPAddress,
DiskShare, DiskShareMount)
from ralph.discovery.models_history import DiscoveryWarning
SSH_ONSTOR_USER = settings.SSH_ONSTOR_USER
SSH_ONSTOR_PASSWORD = settings.SSH_ONSTOR_PASSWORD
class Error(Exception):
pass
class SkipError(Error):
pass
def _connect_ssh(ip):
return network.connect_ssh(ip, SSH_ONSTOR_USER, SSH_ONSTOR_PASSWORD)
def _save_shares(dev, luns, mounts):
wwns = []
for lun, volume in luns.iteritems():
rest, wwn_end = lun.rsplit('_', 1)
try:
share = DiskShare.objects.get(wwn__endswith=wwn_end)
except DiskShare.DoesNotExist:
continue
wwns.append(share.wwn)
clients = mounts.get(volume, [])
for client in clients:
ipaddr, ip_created = IPAddress.concurrent_get_or_create(
address=client,
)
mount, created = DiskShareMount.concurrent_get_or_create(
device=ipaddr.device,
share=share,
defaults={
'address': ipaddr,
'server': dev,
}
)
if not created:
mount.address = ipaddr
mount.server = dev
mount.volume = volume
mount.save(update_last_seen=True)
if not clients:
mount, created = DiskShareMount.concurrent_get_or_create(
device=None,
share=share,
defaults={
'address': None,
'server': dev,
}
)
if not created:
mount.address = None
mount.server = dev
mount.volume = volume
mount.save(update_last_seen=True)
for mount in DiskShareMount.objects.filter(
server=dev
).exclude(
share__wwn__in=wwns
):
mount.delete()
@nested_commit_on_success
def _save_device(ip, name, model_name, sn, mac):
model, model_created = DeviceModel.concurrent_get_or_create(
name='Onstor %s' % model_name,
defaults={
'type': DeviceType.storage.id,
},
)
dev = Device.create(sn=sn, model=model)
dev.save()
ipaddr, ip_created = IPAddress.concurrent_get_or_create(address=ip)
ipaddr.device = dev
ipaddr.is_management = True
ipaddr.save(update_last_seen=True)
dev.management = ipaddr
dev.save(update_last_seen=True)
return dev
def _command(channel, command):
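# Drive the interactive Onstor shell: wait for a prompt, send the command,
# wait for it to be echoed back, then collect output until the next prompt
# (the echoed command and the trailing prompt are stripped).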
buffer = ''
channel.sendall('\r\n')
while not buffer.endswith('> '):
buffer += channel.recv(1024)
channel.sendall(command)
buffer = ''
while command not in buffer:
buffer += channel.recv(1024)
channel.sendall('\r\n')
buffer = ['']
while not buffer[-1].endswith('> '):
chunk = channel.recv(1024)
lines = chunk.split('\n')
buffer[-1] += lines[0]
buffer.extend(lines[1:])
return buffer[1:-1]
def _run_ssh_onstor(ip):
ssh = _connect_ssh(ip)
try:
stdin, stdout, stderr = ssh.exec_command("system show summary")
pairs = parse.pairs(lines=stdout.readlines())
name = pairs['Name']
model_name = pairs['--------']['Model number']
sn = pairs['--------']['System serial number']
mac = pairs['--------']['MAC addr'].upper().replace(':', '')
dev = _save_device(ip, name, model_name, sn, mac)
first_ip = dev.ipaddress_set.order_by('address')[0].address
if ip != first_ip:
raise SkipError('multiple addresses (will check %s).' % first_ip)
stdin, stdout, stderr = ssh.exec_command("lun show all -P1 -S10000")
in_table = False
luns = {}
for line in stdout.readlines():
if not in_table:
if line.startswith('-------------'):
in_table = True
continue
else:
(
lun_name,
lun_type,
raid,
size,
state,
cluster,
volume,
) = line.split()
luns[lun_name] = volume
stdin, stdout, stderr = ssh.exec_command("vsvr show")
in_table = False
server_list = []
for line in stdout.readlines():
if not in_table:
if line.startswith('======='):
in_table = True
continue
else:
no, state, server = line.split()
if server.startswith('VS_MGMT'):
continue
server_list.append(server)
mounts = collections.defaultdict(list)
for server in server_list:
channel = ssh.invoke_shell()
_command(channel, 'vsvr set %s' % server)
lines = _command(channel, 'nfs cache show mounts')
channel.close()
if not lines:
continue
if lines[0].startswith('No Mount information'):
continue
for line in lines:
if line.strip().endswith('>') or not line.strip():
continue
try:
CLIENT, IP, ipaddr, SHARE, PATH, path = line.split(None, 6)
except ValueError:
continue
if '/' in path:
volume = path.split('/', 1)[1]
else:
volume = path
mounts[volume].append(ipaddr)
finally:
ssh.close()
_save_shares(dev, luns, mounts)
return name
@plugin.register(chain='discovery', requires=['ping', 'http'])
def ssh_onstor(**kwargs):
if SSH_ONSTOR_USER is None or SSH_ONSTOR_PASSWORD is None:
return False, 'no credentials.', kwargs
if 'nx-os' in kwargs.get('snmp_name', '').lower():
return False, 'incompatible Nexus found.', kwargs
ip = str(kwargs['ip'])
if kwargs.get('http_family') not in ('sscccc',):
return False, 'no match.', kwargs
if not network.check_tcp_port(ip, 22):
DiscoveryWarning(
message="Port 22 closed on an Onstor device.",
plugin=__name__,
ip=ip,
).save()
return False, 'closed.', kwargs
try:
name = _run_ssh_onstor(ip)
except (network.Error, Error, paramiko.SSHException) as e:
DiscoveryWarning(
message="This is an Onstor, but: " + str(e),
plugin=__name__,
ip=ip,
).save()
return False, str(e), kwargs
return True, name, kwargs
```
#### File: discovery/tests/test_api.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from django.contrib.auth.models import User
from django.core.cache import cache
from django.test import TestCase
from ralph.account.models import BoundPerm, Profile, Perm
from ralph.business.models import Venture
from ralph.discovery.models import (
ComponentModel,
ComponentModelGroup,
ComponentType,
DeprecationKind,
DeviceType,
SplunkUsage,
)
from ralph.ui.tests.util import create_device
from ralph.ui.tests.global_utils import create_user
from tastypie.test import ResourceTestCase
class DeviceWithPricingResourceTest(ResourceTestCase):
def setUp(self):
super(DeviceWithPricingResourceTest, self).setUp()
self.resource = 'devicewithpricing'
self.user = User.objects.create_user(
'ralph',
'<EMAIL>',
'ralph'
)
self.venture = Venture(name='Infra').save()
self.deprecation_kind = DeprecationKind(
name='12 months',
months=12
).save()
srv1 = {
'sn': 'srv-1',
'model_name': 'server',
'model_type': DeviceType.virtual_server,
'venture': self.venture,
'name': 'Srv 1',
'purchase_date': datetime.datetime(2020, 1, 1, 0, 0),
'deprecation_kind': self.deprecation_kind,
}
srv1_cpu = {
'model_name': 'Intel PCU1',
'label': 'CPU 1',
'priority': 0,
'family': 'Intsels',
'price': 120,
'count': 2,
'speed': 1200,
}
srv1_memory = {
'priority': 0,
'family': 'Noname RAM',
'label': 'Memory 1GB',
'price': 100,
'speed': 1033,
'size': 512,
'count': 6,
}
srv1_storage = {
'model_name': 'Store 1TB',
'label': 'store 1TB',
'priority': 0,
'family': 'Noname Store',
'price': 180,
'count': 10,
}
self.device = create_device(
device=srv1,
cpu=srv1_cpu,
memory=srv1_memory,
storage=srv1_storage,
)
self.device.save()
name = 'Splunk Volume 100 GiB'
symbol = 'splunkvolume'
model, created = ComponentModel.create(
ComponentType.unknown,
family=symbol,
name=name,
priority=0,
)
model_group, created = ComponentModelGroup.objects.get_or_create(
name='Group Splunk',
price=128,
type=ComponentType.unknown,
)
model.group = model_group
model.save()
res, created = SplunkUsage.concurrent_get_or_create(
device=self.device,
day=datetime.date.today(),
defaults={'model': model},
)
res.size = 10
res.save()
def test_get_list_json(self):
resp = self.api_client.get(
'/api/v0.9/{0}/'.format(self.resource),
format='json',
)
self.assertValidJSONResponse(resp)
device = self.deserialize(resp)['objects'][0]
self.assertEqual(device['id'], 1)
self.assertEqual(device['name'], 'Srv 1')
self.assertEqual(device['sn'], 'srv-1')
self.assertEqual(device['total_cost'], 2640)
self.assertDictEqual(
device['splunk'],
{
'splunk_size': 10,
'splunk_monthly_cost': 128.0,
'splunk_daily_cost': 128.0
}
)
class AccessToDiscoveryApiTest(TestCase):
def setUp(self):
self.user = create_user(
'api_user',
'<EMAIL>',
'password',
is_staff=False,
is_superuser=False,
)
self.api_login = {
'format': 'json',
'username': self.user.username,
'api_key': self.user.api_key.key,
}
cache.delete("api_user_accesses")
def get_response(self, resource):
path = "/api/v0.9/%s/" % resource
response = self.client.get(
path=path,
data=self.api_login,
format='json',
)
return response
def add_perms(self, perms):
user_profile = Profile.objects.get(user=self.user)
for perm in perms:
BoundPerm(profile=user_profile, perm=perm).save()
def test_ipaddress_resource(self):
resource = 'ipaddress'
perms = [Perm.read_network_structure, ]
schema = '%s/schema' % resource
response = self.get_response(schema)
self.assertEqual(response.status_code, 200)
response = self.get_response(resource)
self.assertEqual(response.status_code, 401)
# Add perms to display resources
self.add_perms(perms=perms)
response = self.get_response(resource)
self.assertEqual(response.status_code, 200)
def test_modelgroup_resource(self):
resource = 'modelgroup'
perms = [Perm.read_dc_structure, ]
schema = '%s/schema' % resource
response = self.get_response(schema)
self.assertEqual(response.status_code, 200)
response = self.get_response(resource)
self.assertEqual(response.status_code, 401)
# Add perms to display resources
self.add_perms(perms=perms)
response = self.get_response(resource)
self.assertEqual(response.status_code, 200)
def test_model_resource(self):
resource = 'model'
perms = [Perm.read_dc_structure, ]
schema = '%s/schema' % resource
response = self.get_response(schema)
self.assertEqual(response.status_code, 200)
response = self.get_response(resource)
self.assertEqual(response.status_code, 401)
# Add perms to display resources
self.add_perms(perms=perms)
response = self.get_response(resource)
self.assertEqual(response.status_code, 200)
def test_device_resource(self):
resource = 'dev'
perms = [Perm.read_dc_structure, ]
schema = '%s/schema' % resource
response = self.get_response(schema)
self.assertEqual(response.status_code, 200)
response = self.get_response(resource)
self.assertEqual(response.status_code, 401)
# Add perms to display resources
self.add_perms(perms=perms)
response = self.get_response(resource)
self.assertEqual(response.status_code, 200)
def test_physicalserver_resource(self):
resource = 'physicalserver'
perms = [Perm.read_dc_structure, ]
schema = '%s/schema' % resource
response = self.get_response(schema)
self.assertEqual(response.status_code, 200)
response = self.get_response(resource)
self.assertEqual(response.status_code, 401)
# Add perms to display resources
self.add_perms(perms=perms)
response = self.get_response(resource)
self.assertEqual(response.status_code, 200)
def test_rackserver_resource(self):
resource = 'rackserver'
perms = [Perm.read_dc_structure, ]
schema = '%s/schema' % resource
response = self.get_response(schema)
self.assertEqual(response.status_code, 200)
response = self.get_response(resource)
self.assertEqual(response.status_code, 401)
# Add perms to display resources
self.add_perms(perms=perms)
response = self.get_response(resource)
self.assertEqual(response.status_code, 200)
def test_bladeserver_resource(self):
resource = 'bladeserver'
perms = [Perm.read_dc_structure, ]
schema = '%s/schema' % resource
response = self.get_response(schema)
self.assertEqual(response.status_code, 200)
response = self.get_response(resource)
self.assertEqual(response.status_code, 401)
# Add perms to display resources
self.add_perms(perms=perms)
response = self.get_response(resource)
self.assertEqual(response.status_code, 200)
def test_virtualserver_resource(self):
resource = 'virtualserver'
perms = [Perm.read_dc_structure, ]
schema = '%s/schema' % resource
response = self.get_response(schema)
self.assertEqual(response.status_code, 200)
response = self.get_response(resource)
self.assertEqual(response.status_code, 401)
# Add perms to display resources
self.add_perms(perms=perms)
response = self.get_response(resource)
self.assertEqual(response.status_code, 200)
```
#### File: scan/plugins/http_ibm_system_x.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import urllib2
from django.conf import settings
from xml.etree import cElementTree as ET
from ralph.discovery.http import guess_family, get_http_info
from ralph.discovery.models import DeviceType, SERIAL_BLACKLIST
from ralph.scan.errors import (
AuthError,
NoMatchError,
NotConfiguredError,
TreeError,
)
from ralph.scan.plugins import get_base_result_template
SETTINGS = settings.SCAN_PLUGINS.get(__name__, {})
GENERIC_SOAP_TEMPLATE = '''\n
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://www.w3.org/2003/05/soap-envelope"
xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing"
xmlns:wsman="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd">
<SOAP-ENV:Header>
<wsa:To>http://%(management_url)s</wsa:To>
<wsa:ReplyTo>
<wsa:Address>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:Address>
</wsa:ReplyTo>
<wsman:ResourceURI>http://www.ibm.com/iBMC/sp/%(resource)s</wsman:ResourceURI>
<wsa:Action>%(action)s</wsa:Action>
<wsa:MessageID>dt:1348650519402</wsa:MessageID>
</SOAP-ENV:Header>
<SOAP-ENV:Body>
%(body)s
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
def _send_soap(post_url, session_id, message):
opener = urllib2.build_opener()
request = urllib2.Request(
post_url, message,
headers={'session_id': session_id},
)
response = opener.open(request, timeout=10)
response_data = response.read()
return response_data
def _get_session_id(ip_address, user, password):
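# The management controller is expected to answer "ok <session-id>" on a
# successful login; anything else is treated as an authentication failure.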
login_url = "http://%s/session/create" % ip_address
login_data = "%s,%s" % (user, password)
opener = urllib2.build_opener()
request = urllib2.Request(login_url, login_data)
response = opener.open(request, timeout=15)
response_data = response.readlines()
if response_data and response_data[0][:2] == 'ok':
return response_data[0][3:]
raise AuthError('Session error.')
def _get_model_name(management_url, session_id):
message = GENERIC_SOAP_TEMPLATE % dict(
management_url=management_url,
action='http://www.ibm.com/iBMC/sp/Monitors/GetVitalProductData',
resource='Monitors',
body='''
<GetVitalProductData xmlns="http://www.ibm.com/iBMC/sp/Monitors"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
</GetVitalProductData>
''',
)
soap_result = _send_soap(management_url, session_id, message)
tree = ET.XML(soap_result)
product_name = tree.findall(
'{0}Body/GetVitalProductDataResponse/'
'GetVitalProductDataResponse/MachineLevelVPD/'
'ProductName'.format('{http://www.w3.org/2003/05/soap-envelope}'),
)
try:
return product_name[0].text
except IndexError:
raise TreeError(
"Improper response. Couldn't find model name. "
"Full response: %s" % soap_result,
)
def _get_sn(management_url, session_id):
message = GENERIC_SOAP_TEMPLATE % dict(
management_url=management_url,
action='http://www.ibm.com/iBMC/sp/iBMCControl/GetSPNameSettings',
resource='iBMCControl',
body='''
<GetSPNameSettings xmlns="http://www.ibm.com/iBMC/sp/iBMCControl"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
</GetSPNameSettings>
''',
)
soap_result = _send_soap(management_url, session_id, message)
tree = ET.XML(soap_result)
sn = tree.findall('{0}Body/GetSPNameSettingsResponse/SPName'.format(
'{http://www.w3.org/2003/05/soap-envelope}',
))
try:
return sn[0].text
except IndexError:
raise TreeError(
"Improper response. Couldn't find serial number. "
"Full response: %s" % soap_result,
)
def _get_mac_addresses(management_url, session_id):
message = GENERIC_SOAP_TEMPLATE % dict(
management_url=management_url,
action='http://www.ibm.com/iBMC/sp/Monitors/GetHostMacAddresses',
resource='Monitors',
body='''
<GetHostMacAddresses xmlns="http://www.ibm.com/iBMC/sp/Monitors"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
</GetHostMacAddresses>
''',
)
soap_result = _send_soap(management_url, session_id, message)
tree = ET.XML(soap_result)
mac_addresses = tree.findall(
'{0}Body/GetHostMacAddressesResponse/**'.format(
'{http://www.w3.org/2003/05/soap-envelope}',
),
)
return [mac.find('Address').text for mac in mac_addresses]
def _get_memory(management_url, session_id):
message = GENERIC_SOAP_TEMPLATE % dict(
management_url=management_url,
action='http://www.ibm.com/iBMC/sp/Monitors/GetMemoryInfo',
resource='Monitors',
body='''
<GetMemoryInfo xmlns="http://www.ibm.com/iBMC/sp/Monitors"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
</GetMemoryInfo>
''',
)
soap_result = _send_soap(management_url, session_id, message)
tree = ET.XML(soap_result)
memory = tree.findall('{0}Body/GetMemoryInfoResponse/Memory/*'.format(
'{http://www.w3.org/2003/05/soap-envelope}',
))
return [
dict(
label=chip.find('Description').text,
index=int(chip.find('Description').text.split()[1]),
size=int(chip.find('Size').text) * 1024,
) for chip in memory
]
def _get_processors(management_url, session_id):
message = GENERIC_SOAP_TEMPLATE % dict(
management_url=management_url,
resource='Monitors',
action='http://www.ibm.com/iBMC/sp/Monitors/GetProcessorInfo',
body='''
<GetProcessorInfo xmlns="http://www.ibm.com/iBMC/sp/Monitors"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
</GetProcessorInfo>
''',
)
soap_result = _send_soap(management_url, session_id, message)
tree = ET.XML(soap_result)
data = tree.findall('{0}Body/GetProcessorInfoResponse/Processor/*'.format(
'{http://www.w3.org/2003/05/soap-envelope}',
))
processors = []
for d in data:
dsc = d.find('Description').text
speed = d.find('Speed').text
family = d.find('Family').text
cores = d.find('Cores').text
threads = d.find('Threads').text
index = dsc.split()[1]
label = '%s CPU %s MHz, %s cores %s threads' % (
family,
speed,
cores,
threads,
)
processors.append(dict(
index=index,
label=label,
speed=speed,
cores=cores,
family=family,
))
return processors
def _http_ibm_system_x(ip_address, user, password):
session_id = _get_session_id(ip_address, user, password)
management_url = "http://%s/wsman" % ip_address
model_name = _get_model_name(management_url, session_id)
sn = _get_sn(management_url, session_id)
device = {
'type': DeviceType.rack_server.raw,
'model_name': model_name,
'management_ip_address': [ip_address],
}
if sn not in SERIAL_BLACKLIST:
device['serial_number'] = sn
macs = _get_mac_addresses(management_url, session_id)
if macs:
device['mac_addresses'] = macs
memory = _get_memory(management_url, session_id)
if memory:
device['memory'] = memory
processors = _get_processors(management_url, session_id)
if processors:
device['processors'] = processors
return device
def scan_address(ip_address, **kwargs):
user = SETTINGS.get('user')
password = SETTINGS.get('password')
messages = []
result = get_base_result_template('http_ibm_system_x', messages)
if not user or not password:
raise NotConfiguredError(
'Not configured. Set IBM_SYSTEM_X_USER and IBM_SYSTEM_X_PASSWORD '
'in your configuration file.',
)
headers, document = get_http_info(ip_address)
family = guess_family(headers, document)
if family != 'IBM System X':
raise NoMatchError('It is not IBM System X device.')
result.update({
'status': 'success',
'device': _http_ibm_system_x(ip_address, user, password),
})
return result
```
#### File: scan/plugins/ssh_cisco_asa.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import paramiko
import re
import socket
import time
from django.conf import settings
from ralph.discovery import guessmodel
from ralph.discovery.models import (
DeviceType,
MAC_PREFIX_BLACKLIST,
SERIAL_BLACKLIST
)
from ralph.scan.errors import (
AuthError,
ConsoleError,
NoMatchError,
NotConfiguredError,
)
from ralph.scan.plugins import get_base_result_template
from ralph.util import parse, network
SETTINGS = settings.SCAN_PLUGINS.get(__name__, {})
SSH_USER, SSH_PASS = SETTINGS['ssh_user'], SETTINGS['ssh_pass']
if not SSH_USER or not SSH_PASS:
raise NotConfiguredError(
"SSH not configured in plugin {}.".format(__name__),
)
class CiscoSSHClient(paramiko.SSHClient):
"""SSHClient modified for Cisco's broken SSH console."""
def __init__(self, *args, **kwargs):
super(CiscoSSHClient, self).__init__(*args, **kwargs)
self.set_log_channel('critical_only')
def _auth(
self, username, password, pkey, key_filenames, allow_agent,
look_for_keys,
):
self._transport.auth_password(username, password)
self._asa_chan = self._transport.open_session()
self._asa_chan.invoke_shell()
self._asa_chan.sendall('\r\n')
self._asa_chan.settimeout(15.0)
time.sleep(0.125)
try:
chunk = self._asa_chan.recv(1024)
except socket.timeout:
raise AuthError('Authentication failed.')
else:
if '> ' not in chunk and not chunk.strip().startswith('asa'):
raise ConsoleError('Expected system prompt, got %r.' % chunk)
def asa_command(self, command):
# XXX Work around random characters
# appearing at the beginning of the command.
self._asa_chan.sendall('\b')
time.sleep(0.125)
self._asa_chan.sendall(command)
buffer = ''
while not command.endswith(
buffer[max(0, buffer.rfind('\b')):][:len(command)].strip('\b'),
):
chunk = self._asa_chan.recv(1024)
buffer += chunk.replace('\b', '')
self._asa_chan.sendall('\r\n')
buffer = ['']
while True:
chunk = self._asa_chan.recv(1024)
lines = chunk.split('\n')
buffer[-1] += lines[0]
buffer.extend(lines[1:])
if '% Invalid input' in buffer:
raise ConsoleError('Invalid input %r.' % buffer)
if '> ' in buffer[-1]:
return buffer[1:-1]
def _connect_ssh(ip, username='root', password=''):
return network.connect_ssh(ip, SSH_USER, SSH_PASS, client=CiscoSSHClient)
def scan_address(ip_address, **kwargs):
if 'nx-os' in (kwargs.get('snmp_name', '') or '').lower():
raise NoMatchError('Incompatible Nexus found.')
kwargs['guessmodel'] = gvendor, gmodel = guessmodel.guessmodel(**kwargs)
if gvendor != 'Cisco' or gmodel not in ('',):
raise NoMatchError('It is not Cisco.')
ssh = _connect_ssh(ip_address)
try:
lines = ssh.asa_command(
"show version | grep (^Hardware|Boot microcode|^Serial|address is)"
)
finally:
ssh.close()
pairs = parse.pairs(lines=[line.strip() for line in lines])
sn = pairs.get('Serial Number', None)
model, ram, cpu = pairs['Hardware'].split(',')
boot_firmware = pairs['Boot microcode']
macs = []
for i in xrange(99):
try:
junk, label, mac = pairs['%d' % i].split(':')
except KeyError:
break
mac = mac.split(',', 1)[0]
mac = mac.replace('address is', '')
mac = mac.replace('.', '').upper().strip()
label = label.strip()
if mac.replace(':', '').upper()[:6] not in MAC_PREFIX_BLACKLIST:
macs.append(mac)
ram_size = re.search('[0-9]+', ram).group()
cpu_match = re.search('[0-9]+ MHz', cpu)
cpu_speed = cpu_match.group()[:-4]
cpu_model = cpu[:cpu_match.start()][4:].strip()
result = get_base_result_template('ssh_cisco_asa')
result.update({
'status': 'success',
'device': {
'model_name': 'Cisco ' + model,
'type': str(DeviceType.firewall),
'mac_addresses': macs,
'boot_firmware': boot_firmware,
'management_ip_addresses': [ip_address],
'memory': [{
'size': int(ram_size),
}],
'processors': [{
'model_name': cpu_model,
'speed': int(cpu_speed),
'family': cpu_model,
}],
},
})
if sn not in SERIAL_BLACKLIST:
result['device']['serial_number'] = sn
return result
```
#### File: ralph/scan/util.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ipaddr
from ralph.discovery.models import Network
from ralph.scan.models import ScanSummary
def find_network(network_spec):
"""Returns network object by network address."""
try:
address = str(ipaddr.IPNetwork(network_spec))
except ValueError:
network = Network.objects.get(name=network_spec)
else:
network = Network.objects.get(address=address)
return network
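# Usage sketch (hypothetical values): a CIDR spec is resolved by address,
# anything that does not parse as a network falls back to a lookup by name.
# find_network('10.0.0.0/24')   # -> Network with address '10.0.0.0/24'
# find_network('office-lan')    # -> Network with name 'office-lan'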
def update_scan_summary(job):
try:
scan_summary = ScanSummary.objects.get(job_id=job.id)
except ScanSummary.DoesNotExist:
return
else:
scan_summary.previous_checksum = job.meta.get(
'results_checksum',
)
scan_summary.false_positive_checksum = None
scan_summary.save()
job.meta['changed'] = False
job.save()
```
#### File: ui/forms/deployment.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cStringIO
import re
import ipaddr
from bob.forms import AutocompleteWidget
from django import forms
from django.forms import formsets
from django.db.models import Q
from lck.django.common.models import MACAddressField
from powerdns.models import Record, Domain
from ralph.business.models import Venture, VentureRole
from ralph.deployment.models import Deployment, Preboot
from ralph.deployment.util import (
clean_hostname,
hostname_exists,
ip_address_exists,
is_mac_address_known,
network_exists,
preboot_exists,
rack_exists,
venture_and_role_exists,
)
from ralph.discovery.models import Device, Network, IPAddress, DeviceType
from ralph.discovery.models_component import is_mac_valid
from ralph.dnsedit.models import DHCPEntry
from ralph.dnsedit.util import (
find_addresses_for_hostname,
get_domain,
get_revdns_records,
is_valid_hostname,
)
from ralph.ui.widgets import DeviceWidget
from ralph.util import Eth
from bob.csvutil import UnicodeReader
from ralph.ui.widgets import ReadOnlySelectWidget, ReadOnlyWidget
class DeploymentForm(forms.ModelForm):
class Meta:
model = Deployment
fields = [
'device',
'venture',
'venture_role',
'mac',
'ip',
'hostname',
'preboot',
]
widgets = {
'device': DeviceWidget,
'mac': AutocompleteWidget,
'ip': AutocompleteWidget,
}
def __init__(self, *args, **kwargs):
super(DeploymentForm, self).__init__(*args, **kwargs)
device = self.initial['device']
macs = [e.mac for e in device.ethernet_set.order_by('mac')]
self.fields['mac'].widget.choices = [(mac, mac) for mac in macs]
# all mac addresses have the same length - default sorting is enough
dhcp_entries = DHCPEntry.objects.filter(mac__in=macs).order_by('mac')
ips = [e.ip for e in dhcp_entries]
self.fields['ip'].widget.choices = [(ip, ip) for ip in ips]
proposed_mac = macs[0] if macs else ''
proposed_ip = ips[0] if ips else ''
for dhcp_entry in dhcp_entries:
if dhcp_entry.mac in macs:
proposed_mac = dhcp_entry.mac
proposed_ip = dhcp_entry.ip
break
self.initial.update({
'mac': proposed_mac,
'ip': proposed_ip,
'venture': device.venture,
'venture_role': device.venture_role,
'preboot': (device.venture_role.get_preboot() if
device.venture_role else ''),
'hostname': device.name,
})
self.fields['venture'].queryset = Venture.objects.order_by('name')
def clean_hostname(self):
hostname = self.cleaned_data['hostname']
return clean_hostname(hostname)
def clean_ip(self):
ip = self.cleaned_data.get('ip')
return str(ipaddr.IPAddress(ip))
def _validate_cols_count(expected_count, cols, row_number):
if len(cols) != expected_count:
raise forms.ValidationError(
"Incorrect number of columns (got %d, expected %d) at row %d" %
(len(cols), expected_count, row_number),
)
def _validate_cols_not_empty(cols, row_number):
for col_number, col in enumerate(cols, start=1):
value = col.strip()
if not value:
raise forms.ValidationError(
"Empty value at row %d column %d" % (
row_number, col_number
)
)
def _validate_mac(mac, parsed_macs, row_number):
if not is_mac_valid(Eth("", mac, "")):
raise forms.ValidationError(
"Row %s: Invalid MAC address." % row_number
)
if mac in parsed_macs:
raise forms.ValidationError(
"Row %s: Duplicated MAC address. "
"Please check previous rows..." % row_number
)
def _validate_management_ip(ip, row_number):
try:
ipaddr.IPAddress(ip)
except ValueError:
raise forms.ValidationError(
"Row %s: Incorrect management IP address." % row_number
)
def _validate_network_name(network_name, row_number):
if not network_exists(network_name):
raise forms.ValidationError(
"Row %s: Network doesn't exists." % row_number
)
def _validate_venture_and_role(venture_symbol, venture_role, row_number):
if not venture_and_role_exists(venture_symbol, venture_role):
raise forms.ValidationError(
"Row %s: "
"Couldn't find venture with symbol %s and role %s" % (
row_number, venture_symbol, venture_role
)
)
def _validate_preboot(preboot, row_number):
if not preboot_exists(preboot):
raise forms.ValidationError(
"Row %s: Couldn't find preboot %s" % (
row_number, preboot
)
)
def _validate_deploy_children(mac, row_number):
mac = MACAddressField.normalize(mac)
try:
device = Device.admin_objects.get(ethernet__mac=mac)
except Device.DoesNotExist:
return
if device.deleted:
return
children = device.child_set.filter(deleted=False)
if children.exists():
raise forms.ValidationError(
"Row %d: Device with MAC %s exists and has child devices "
"[%s]. Delete the child devices first." % (
row_number,
mac,
', '.join(str(d) for d in children.all()),
)
)
if device.servermount_set.filter(device__deleted=False).exists():
raise forms.ValidationError(
"Row %d: Device with MAC %s exists and exports shares." %
(row_number, mac)
)
for share in device.diskshare_set.all():
if any((
share.disksharemount_set.filter(device__deleted=False).exists(),
share.disksharemount_set.filter(server__deleted=False).exists(),
)):
raise forms.ValidationError(
"Row %d: Device with MAC %s exists and exports disks." %
(row_number, mac)
)
class PrepareMassDeploymentForm(forms.Form):
csv = forms.CharField(
label="CSV",
widget=forms.widgets.Textarea(attrs={'class': 'span12 csv-input'}),
required=False,
help_text="Template: mac ; management-ip ; network ; venture-symbol ; "
"role ; preboot"
)
def clean_csv(self):
csv_string = self.cleaned_data['csv'].strip().lower()
rows = UnicodeReader(cStringIO.StringIO(csv_string))
parsed_macs = set()
for row_number, cols in enumerate(rows, start=1):
_validate_cols_count(6, cols, row_number)
mac = cols[0].strip()
_validate_mac(mac, parsed_macs, row_number)
_validate_deploy_children(mac, row_number)
parsed_macs.add(mac)
management_ip = cols[1].strip()
_validate_management_ip(management_ip, row_number)
network_name = cols[2].strip()
if not (is_mac_address_known(mac) and network_name == ''):
# Allow empty network when the device already exists.
_validate_network_name(network_name, row_number)
venture_symbol = cols[3].strip()
venture_role = cols[4].strip()
_validate_venture_and_role(
venture_symbol, venture_role, row_number,
)
preboot = cols[5].strip()
_validate_preboot(preboot, row_number)
return csv_string
def _validate_hostname(hostname, mac, parsed_hostnames, row_number):
mac = MACAddressField.normalize(mac)
try:
dev = Device.admin_objects.get(ethernet__mac=mac)
except Device.DoesNotExist:
if hostname_exists(hostname):
raise forms.ValidationError(
"Row %s: Hostname already exists." % row_number
)
else:
ip_addresses = list(
dev.ipaddress_set.values_list('address', flat=True)
)
ip_addresses_in_dns = find_addresses_for_hostname(hostname)
for ip in ip_addresses_in_dns:
if ip not in ip_addresses:
raise forms.ValidationError(
"Row %s: Using an old device %s failed. "
"Exists A or PTR records in DNS which are not assigned "
"to device IP addresses." % (row_number, dev)
)
if Deployment.objects.filter(hostname=hostname).exists():
raise forms.ValidationError(
"Row %s: Running deployment with hostname: %s already "
"exists." % (row_number, hostname)
)
if hostname in parsed_hostnames:
raise forms.ValidationError(
"Row %s: Duplicated hostname. "
"Please check previous rows..." % row_number
)
def _validate_ip_address(ip, network, parsed_ip_addresses, row_number):
try:
ipaddr.IPAddress(ip)
except ValueError:
raise forms.ValidationError(
"Row %s: Invalid IP address." % row_number
)
if ip not in network:
raise forms.ValidationError(
"Row %s: IP address is not valid for network %s." % (
row_number, network.name
)
)
if ip in parsed_ip_addresses:
raise forms.ValidationError(
"Row %s: Duplicated IP address. "
"Please check previous rows..." % row_number
)
def _validate_ip_owner(ip, mac, row_number):
"""If the MAC is unique, make sure the IP address is not used anywhere.
If the MAC address belongs to an existing device, make sure the IP address
also belongs to that device.
"""
mac = MACAddressField.normalize(mac)
try:
dev = Device.admin_objects.get(ethernet__mac=mac)
except Device.DoesNotExist:
if ip_address_exists(ip):
raise forms.ValidationError(
"Row %s: IP address already exists." % row_number
)
else:
# Does another device have this IPAddress?
if(Device.objects.filter(
ipaddress__number=int(ipaddr.IPAddress(ip)),
).exclude(
pk=dev.id,
).exists()):
raise forms.ValidationError(
"Row %s: IP address used by another device." % row_number
)
class MassDeploymentForm(forms.Form):
csv = forms.CharField(
label="CSV",
widget=forms.widgets.Textarea(attrs={'class': 'span12 csv-input'}),
help_text="Template: hostname ; ip ; rack-sn ; mac ; management-ip ; "
"network ; venture-symbol ; role ; preboot"
)
def clean_csv(self):
csv_string = self.cleaned_data['csv'].strip().lower()
rows = UnicodeReader(cStringIO.StringIO(csv_string))
cleaned_csv = []
parsed_hostnames = set()
parsed_ip_addresses = set()
parsed_macs = set()
for row_number, cols in enumerate(rows, start=1):
_validate_cols_count(9, cols, row_number)
_validate_cols_not_empty(cols, row_number)
mac = cols[3].strip()
_validate_mac(mac, parsed_macs, row_number)
parsed_macs.add(mac)
hostname = cols[0].strip()
_validate_hostname(hostname, mac, parsed_hostnames, row_number)
if not clean_hostname(hostname):
raise forms.ValidationError("Invalid hostname")
parsed_hostnames.add(hostname)
network_name = cols[5].strip()
try:
network = Network.objects.get(name=network_name)
except Network.DoesNotExist:
raise forms.ValidationError(
"Row %s: Network '%s' doesn't exists." %
(row_number, network_name)
)
rack_sn = cols[2].strip()
if re.match(r"^[0-9]+$", rack_sn):
rack_sn = "Rack %s %s" % (
rack_sn,
network.data_center.name.upper(),
)
if not rack_exists(rack_sn):
raise forms.ValidationError(
"Row %s: Rack with serial number '%s' doesn't exists." % (
row_number, rack_sn
)
)
try:
network.racks.get(sn=rack_sn)
except Device.DoesNotExist:
raise forms.ValidationError(
"Row %s: Rack '%s' isn't connected to "
"network '%s'." % (row_number, rack_sn, network.name)
)
ip = cols[1].strip()
_validate_ip_address(ip, network, parsed_ip_addresses, row_number)
_validate_ip_owner(ip, mac, row_number)
parsed_ip_addresses.add(ip)
management_ip = cols[4].strip()
_validate_management_ip(management_ip, row_number)
try:
venture_role = VentureRole.objects.get(
venture__symbol=cols[6].strip().upper(),
name=cols[7].strip()
)
venture = venture_role.venture
except VentureRole.DoesNotExist:
raise forms.ValidationError(
"Row %s: "
"Couldn't find venture with symbol %s and role %s" % (
row_number, cols[6].strip(), cols[7].strip()
)
)
try:
preboot = Preboot.objects.get(name=cols[8].strip())
except Preboot.DoesNotExist:
raise forms.ValidationError(
"Row %s: Couldn't find preboot %s" % (
row_number, cols[8].strip()
)
)
cleaned_csv.append({
'hostname': hostname,
'ip': ip,
'mac': mac,
'rack_sn': rack_sn,
'venture': venture,
'venture_role': venture_role,
'preboot': preboot,
'management_ip': management_ip,
'network': network
})
return cleaned_csv
class ServerMoveStep1Form(forms.Form):
addresses = forms.CharField(
label="Server addresses",
widget=forms.widgets.Textarea(attrs={'class': 'span12'}),
help_text="Enter the IP addresses or hostnames to be moved, "
"separated with spaces or newlines.",
)
@staticmethod
def _get_address_candidates(address):
try:
ip_address = str(ipaddr.IPAddress(address))
except ValueError:
ip_address = None
try:
mac = MACAddressField.normalize(address)
except ValueError:
mac = None
if not mac:
hostname = address
if ip_address:
candidates = IPAddress.objects.filter(
address=ip_address,
)
elif mac:
ips = {
str(ip) for ip in
DHCPEntry.objects.filter(mac=mac).values_list('ip', flat=True)
}
candidates = IPAddress.objects.filter(address__in=ips)
else:
candidates = IPAddress.objects.filter(
Q(hostname=hostname) |
Q(address__in=find_addresses_for_hostname(hostname))
)
return candidates.filter(
device__deleted=False,
device__model__type__in={
DeviceType.rack_server,
DeviceType.blade_server,
DeviceType.virtual_server,
DeviceType.unknown,
}
)
def clean_addresses(self):
addresses = self.cleaned_data['addresses']
for address in addresses.split():
if not self._get_address_candidates(address).exists():
raise forms.ValidationError(
"No server found for %s." % address,
)
return addresses
def _check_move_address(address):
if not IPAddress.objects.filter(
device__deleted=False,
device__model__type__in={
DeviceType.rack_server,
DeviceType.blade_server,
DeviceType.virtual_server,
DeviceType.unknown,
}
).filter(address=address).exists():
raise forms.ValidationError(
"No server found for %s." % address,
)
class ServerMoveStep2Form(forms.Form):
address = forms.ChoiceField()
network = forms.ChoiceField()
def clean_address(self):
address = self.cleaned_data['address']
_check_move_address(address)
return address
def clean_network(self):
network_id = self.cleaned_data['network']
if not Network.objects.filter(id=network_id).exists():
raise forms.ValidationError("Invalid network.")
return network_id
class ServerMoveStep2FormSetBase(formsets.BaseFormSet):
def add_fields(self, form, index):
form.fields['network'].choices = [
(n.id, n.name)
for n in Network.objects.order_by('name')
]
form.fields['network'].widget.attrs = {
'class': 'span12',
}
if self.initial:
candidates = self.initial[index]['candidates']
else:
candidates = {form.data['%s-%d-address' % (self.prefix, index)]}
form.fields['address'].widget.attrs = {
'class': 'span12',
}
if len(candidates) == 1:
form.fields['address'].widget = ReadOnlySelectWidget()
form.fields['address'].choices = [(ip, ip) for ip in candidates]
return super(ServerMoveStep2FormSetBase, self).add_fields(form, index)
ServerMoveStep2FormSet = formsets.formset_factory(
form=ServerMoveStep2Form,
formset=ServerMoveStep2FormSetBase,
extra=0,
)
class ServerMoveStep3Form(forms.Form):
address = forms.CharField(widget=ReadOnlyWidget())
new_ip = forms.CharField()
new_hostname = forms.CharField()
def clean_address(self):
address = self.cleaned_data['address']
_check_move_address(address)
return address
def clean_new_ip(self):
old_ip = self.cleaned_data.get('address')
new_ip = self.cleaned_data['new_ip']
try:
new_ip = str(ipaddr.IPAddress(new_ip))
except ValueError:
raise forms.ValidationError("Malformed IP address.")
rdomain = '.'.join(
list(reversed(new_ip.split('.')))[1:]
) + '.in-addr.arpa'
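# e.g. for new_ip '10.1.2.3' this drops the last octet, reverses the rest
# and builds the /24 reverse zone name '2.1.10.in-addr.arpa'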
if not Domain.objects.filter(name=rdomain).exists():
raise forms.ValidationError("No RevDNS domain for address.")
try:
ipaddress = IPAddress.objects.get(address=new_ip)
except IPAddress.DoesNotExist:
if Record.objects.filter(content=new_ip).exists():
raise forms.ValidationError("Address already in DNS.")
if get_revdns_records(new_ip).exists():
raise forms.ValidationError("Address already in DNS.")
if DHCPEntry.objects.filter(ip=new_ip).exists():
raise forms.ValidationError("Address already in DHCP.")
else:
if ipaddress.device and not ipaddress.device.deleted:
if not old_ip:
raise forms.ValidationError("Address in use.")
device = Device.objects.get(ipaddress__address=old_ip)
if ipaddress.device.id != device.id:
raise forms.ValidationError(
"Address used by %s" % device,
)
return new_ip
def clean_new_hostname(self):
old_ip = self.cleaned_data.get('address')
new_hostname = self.cleaned_data['new_hostname']
if not is_valid_hostname(new_hostname):
raise forms.ValidationError("Invalid hostname")
try:
get_domain(new_hostname)
except Domain.DoesNotExist:
raise forms.ValidationError("Invalid domain")
try:
ipaddress = IPAddress.objects.get(hostname=new_hostname)
except IPAddress.DoesNotExist:
if find_addresses_for_hostname(new_hostname):
raise forms.ValidationError("Hostname already in DNS.")
else:
if ipaddress.device and not ipaddress.device.deleted:
if not old_ip:
raise forms.ValidationError("Hostname in use.")
device = Device.objects.get(ipaddress__address=old_ip)
if ipaddress.device.id != device.id:
raise forms.ValidationError(
"Hostname used by %s" % device,
)
elif Record.objects.filter(name=new_hostname).exists():
raise forms.ValidationError("Hostname already in DNS.")
return new_hostname
class ServerMoveStep3FormSetBase(formsets.BaseFormSet):
def clean(self):
if any(self.errors):
return
hostnames = set()
ips = set()
for i in xrange(self.total_form_count()):
form = self.forms[i]
ip = form.cleaned_data['new_ip']
if ip in ips:
form._errors['new_ip'] = form.error_class([
"Duplicate IP"
])
else:
ips.add(ip)
hostname = form.cleaned_data['new_hostname']
if hostname in hostnames:
form._errors['new_hostname'] = form.error_class([
"Duplicate hostname"
])
else:
hostnames.add(hostname)
ServerMoveStep3FormSet = formsets.formset_factory(
form=ServerMoveStep3Form,
formset=ServerMoveStep3FormSetBase,
extra=0,
)
```
#### File: ui/templatetags/icons.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django import template
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape as esc
from ralph.util import presentation
register = template.Library()
@register.filter(name="icon")
def icon_filter(name):
return mark_safe('<i class="fugue-icon %s"></i>' % esc(name))
@register.filter
def device_icon(device):
return icon_filter(presentation.get_device_icon(device))
@register.filter
def venture_icon(venture):
return icon_filter(presentation.get_venture_icon(venture))
@register.filter
def owner_icon(owner):
return icon_filter(presentation.get_owner_icon(owner))
@register.filter
def address_icon(ip):
if not ip:
return ''
if ip.is_buried:
icon_name = 'fugue-headstone'
elif ip.is_management:
icon_name = 'fugue-system-monitor-network'
else:
icon_name = 'fugue-network-ip'
return icon_filter(icon_name)
@register.filter
def field_icon(field, form):
icon_name = form.icons.get(field.name, 'fugue-property')
return icon_filter(icon_name)
@register.filter
def alert_icon(alert_type):
icon_name = {
'info': 'fugue-information',
'error': 'fugue-exclamation-red',
'warning': 'fugue-exclamation',
'success': 'fugue-tick',
}.get(alert_type, 'fugue-sticky-note')
return icon_filter(icon_name)
@register.filter
def device_model_type_icon(model_type_id):
icon_name = presentation.DEVICE_ICONS.get(
model_type_id, 'fugue-wooden-box')
return icon_filter(icon_name)
@register.filter
def component_model_type_icon(model_type_id):
icon_name = presentation.COMPONENT_ICONS.get(model_type_id, 'fugue-box')
return icon_filter(icon_name)
@register.filter
def network_icon(network):
return icon_filter(presentation.get_network_icon(network))
@register.simple_tag
def icon(icon_name):
return icon_filter(icon_name)
```
#### File: ui/views/ventures.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import datetime
import calendar
from django.contrib import messages
from django.db import models as db
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.utils import simplejson as json
from bob.menu import MenuItem
from ralph.account.models import Perm
from ralph.business.models import Venture, VentureRole, VentureExtraCost
from ralph.discovery.models import (
ComponentModel,
DataCenter,
Device,
DeviceModelGroup,
DeviceType,
HistoryCost,
ReadOnlyDevice,
SplunkUsage,
)
from ralph.ui.forms import (
DateRangeForm,
RolePropertyForm,
VentureFilterForm,
)
from ralph.ui.views.common import (
Addresses,
Asset,
Base,
BaseMixin,
Components,
Costs,
History,
Info,
Prices,
Software,
Scan,
)
from ralph.ui.views.devices import BaseDeviceList
from ralph.ui.views.reports import Reports, ReportDeviceList
from ralph.ui.reports import (
get_total_cost,
get_total_count,
get_total_cores,
get_total_virtual_cores,
)
from ralph.util import presentation
def _normalize_venture(symbol):
"""
>>> _normalize_venture('węgielek,Ziew')
u'w.gielek.ziew'
"""
return re.sub(r'[^\w]', '.', symbol).lower()
def collect_ventures(parent, ventures, items, depth=0):
for v in ventures.filter(parent=parent):
symbol = _normalize_venture(v.symbol)
indent = ' ' * depth
icon = presentation.get_venture_icon(v)
if icon == 'fugue-store':
if depth > 0:
icon = 'fugue-store-medium'
if depth > 1:
icon = 'fugue-store-small'
items.append((icon, v.name, symbol, indent, v))
collect_ventures(v, ventures, items, depth + 1)
def venture_tree_menu(ventures, details, show_all=False):
items = []
if not show_all:
ventures = ventures.filter(show_in_ralph=True)
for v in ventures.order_by('-is_infrastructure', 'name'):
symbol = _normalize_venture(v.symbol)
icon = presentation.get_venture_icon(v)
item = MenuItem(
v.name, name=symbol,
fugue_icon=icon,
view_name='ventures',
view_args=[symbol, details, ''],
indent=' ',
collapsed=True,
collapsible=True,
)
item.venture_id = v.id
item.subitems = venture_tree_menu(
v.child_set.all(), details, show_all)
for subitem in item.subitems:
subitem.parent = item
items.append(item)
return items
class SidebarVentures(object):
section = 'ventures'
def __init__(self, *args, **kwargs):
super(SidebarVentures, self).__init__(*args, **kwargs)
self.venture = None
def set_venture(self):
if self.venture is not None:
return
venture_symbol = self.kwargs.get('venture')
if venture_symbol in ('', '-'):
self.venture = ''
elif venture_symbol == '*':
self.venture = '*'
elif venture_symbol:
self.venture = get_object_or_404(Venture,
symbol__iexact=venture_symbol)
else:
self.venture = None
def get_context_data(self, **kwargs):
ret = super(SidebarVentures, self).get_context_data(**kwargs)
self.set_venture()
details = ret['details']
profile = self.request.user.get_profile()
has_perm = profile.has_perm
ventures = profile.perm_ventures(Perm.list_devices_generic)
show_all = self.request.GET.get('show_all')
ventures = ventures.order_by('-is_infrastructure', 'name')
sidebar_items = [
MenuItem(fugue_icon='fugue-prohibition', label="Unknown",
name='-', view_name='ventures',
view_args=['-', details, '']),
MenuItem(fugue_icon='fugue-asterisk', label="All ventures",
name='*', view_name='ventures',
view_args=['*', details, ''])
]
sidebar_items.extend(venture_tree_menu(
ventures.filter(parent=None), details, show_all))
if self.venture and self.venture != '*':
stack = list(sidebar_items)
while stack:
item = stack.pop()
if getattr(item, 'venture_id', None) == self.venture.id:
parent = getattr(item, 'parent', None)
while parent:
parent.kwargs['collapsed'] = False
parent = getattr(parent, 'parent', None)
break
stack.extend(getattr(item, 'subitems', []))
self.set_venture()
tab_items = ret['tab_items']
if has_perm(Perm.read_device_info_generic, self.venture if
self.venture and self.venture != '*' else None):
tab_items.append(MenuItem('Roles', fugue_icon='fugue-mask',
href='../roles/?%s' % self.request.GET.urlencode()))
if has_perm(Perm.list_devices_financial, self.venture if
self.venture and self.venture != '*' else None):
tab_items.append(MenuItem('Venture', fugue_icon='fugue-store',
href='../venture/?%s' %
self.request.GET.urlencode()))
ret.update({
'sidebar_items': sidebar_items,
'sidebar_selected': (_normalize_venture(self.venture.symbol) if
self.venture and self.venture != '*' else self.venture or '-'),
'section': 'ventures',
'subsection': (_normalize_venture(self.venture.symbol) if
self.venture and self.venture != '*' else self.venture),
'searchform': VentureFilterForm(self.request.GET),
'searchform_filter': True,
})
return ret
class Ventures(SidebarVentures, BaseMixin):
pass
class VenturesInfo(Ventures, Info):
pass
class VenturesComponents(Ventures, Components):
pass
class VenturesSoftware(Ventures, Software):
pass
class VenturesPrices(Ventures, Prices):
pass
class VenturesAddresses(Ventures, Addresses):
pass
class VenturesCosts(Ventures, Costs):
pass
class VenturesHistory(Ventures, History):
pass
class VenturesAsset(Ventures, Asset):
pass
class VenturesReports(Ventures, Reports):
pass
class VenturesScan(Ventures, Scan):
pass
class VenturesRoles(Ventures, Base):
template_name = 'ui/ventures-roles.html'
def __init__(self, *args, **kwargs):
super(VenturesRoles, self).__init__(*args, **kwargs)
self.form = None
def post(self, *args, **kwargs):
self.set_venture()
has_perm = self.request.user.get_profile().has_perm
if not has_perm(Perm.edit_ventures_roles, self.venture if
self.venture and self.venture != '*' else None):
messages.error(self.request, "No permission to edit that role.")
else:
self.form = RolePropertyForm(self.request.POST)
if self.form.is_valid():
self.form.save()
messages.success(self.request, "Property created.")
return HttpResponseRedirect(self.request.path)
else:
messages.error(self.request, "Correct the errors.")
return self.get(*args, **kwargs)
def get(self, *args, **kwargs):
self.set_venture()
role_id = self.kwargs.get('role')
if role_id:
self.role = get_object_or_404(VentureRole, id=role_id)
else:
self.role = None
if self.form is None:
if self.role:
self.form = RolePropertyForm(initial={'role': role_id})
else:
self.form = RolePropertyForm(
initial={'venture': self.venture.id})
return super(VenturesRoles, self).get(*args, **kwargs)
def get_context_data(self, **kwargs):
ret = super(VenturesRoles, self).get_context_data(**kwargs)
has_perm = self.request.user.get_profile().has_perm
ret.update({
'items': (self.venture.venturerole_set.all() if
self.venture and self.venture != '*' else []),
'role': self.role,
'venture': self.venture,
'form': self.form,
'editable': has_perm(Perm.edit_ventures_roles, self.venture if
self.venture and self.venture != '*' else None),
})
return ret
def _total_dict(name, query, start, end, url=None):
cost = get_total_cost(query, start, end)
count, count_now, devices = get_total_count(query, start, end)
if not count and not count_now:
return None
return {
'name': name,
'count': count,
'cost': cost,
'count_now': count_now,
'url': url,
}
def _get_search_url(venture, dc=None, type=(), model_group=None):
if venture == '':
venture_id = '-'
elif venture == '*':
venture_id = '*'
elif venture is None:
venture_id = ''
else:
venture_id = venture.id
params = [
('role', venture_id),
]
for t in type:
params.append(('device_type', '%d' % t))
if model_group:
params.append(('device_group', '%d' % model_group))
if dc:
params.append(('position', dc.name))
return '/ui/search/info/?%s' % '&'.join('%s=%s' % p for p in params)
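# e.g. _get_search_url(None, type=(201,)) builds
# '/ui/search/info/?role=&device_type=201'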
def _get_summaries(query, start, end, overlap=True, venture=None):
if overlap:
yield _total_dict('Servers', query.filter(
device__model__type__in=(DeviceType.rack_server.id,
DeviceType.blade_server.id,
DeviceType.virtual_server.id)), start, end,
_get_search_url(venture, type=(201, 202, 203)))
for dc in DataCenter.objects.all():
yield _total_dict(' • Servers in %s' % dc.name, query.filter(
device__model__type__in=(DeviceType.rack_server.id,
DeviceType.blade_server.id,
DeviceType.virtual_server.id)
).filter(device__dc__iexact=dc.name), start, end,
_get_search_url(venture, dc=dc, type=(201, 202, 203))
)
if overlap:
yield _total_dict(
' ∙ Rack servers in %s' % dc.name, query.filter(
device__model__type=DeviceType.rack_server.id,
).filter(device__dc__iexact=dc.name), start, end,
_get_search_url(venture, dc=dc, type=(201,))
)
for mg in DeviceModelGroup.objects.filter(
type=DeviceType.rack_server.id).order_by('name'):
yield _total_dict(
' %s in %s' % (mg, dc.name), query.filter(
device__model__group=mg,
).filter(device__dc__iexact=dc.name), start, end,
_get_search_url(venture, dc=dc, type=(201,),
model_group=mg.id)
)
yield _total_dict(
' ∙ Blade servers in %s' % dc.name, query.filter(
device__model__type=DeviceType.blade_server.id,
).filter(device__dc__iexact=dc.name), start, end,
_get_search_url(venture, dc=dc, type=(202,))
)
for mg in DeviceModelGroup.objects.filter(
type=DeviceType.blade_server.id).order_by('name'):
yield _total_dict(
' %s in %s' % (mg, dc.name), query.filter(
device__model__group=mg,
).filter(device__dc__iexact=dc.name), start, end,
_get_search_url(venture, dc=dc, type=(202,),
model_group=mg.id)
)
yield _total_dict(
' ∙ Virtual servers in %s' % dc.name, query.filter(
device__model__type=DeviceType.virtual_server.id,
).filter(device__dc__iexact=dc.name), start, end,
_get_search_url(venture, dc=dc, type=(203,))
)
if overlap:
yield _total_dict('Loadbalancers', query.filter(
device__model__type__in=(DeviceType.load_balancer.id,)
), start, end, _get_search_url(venture, type=(103,)))
for dc in DataCenter.objects.all():
yield _total_dict(' • Loadbalancers in %s' % dc.name, query.filter(
device__model__type__in=(DeviceType.load_balancer.id,)
).filter(device__dc__iexact=dc.name), start, end,
_get_search_url(venture, dc=dc, type=(103,))
)
if overlap:
yield _total_dict('Storage', query.filter(
device__model__type__in=(
DeviceType.storage.id,
DeviceType.fibre_channel_switch.id,
)), start, end,
_get_search_url(venture, type=(301,))
)
for dc in DataCenter.objects.all():
yield _total_dict(' • Storage in %s' % dc.name, query.filter(
device__model__type__in=(
DeviceType.storage.id,
DeviceType.fibre_channel_switch.id,
)
).filter(device__dc__iexact=dc.name), start, end,
_get_search_url(venture, dc=dc, type=(301,))
)
if overlap:
yield _total_dict('Network', query.filter(
device__model__type__in=(
DeviceType.switch.id,
DeviceType.router.id,
DeviceType.firewall.id,
DeviceType.smtp_gateway.id,
DeviceType.appliance.id,
)
), start, end,
_get_search_url(venture, type=(
DeviceType.switch.id,
DeviceType.router.id,
DeviceType.firewall.id,
DeviceType.smtp_gateway.id,
DeviceType.appliance.id,
))
)
for dc in DataCenter.objects.all():
yield _total_dict(' • Network in %s' % dc.name, query.filter(
device__model__type__in=(
DeviceType.switch.id,
DeviceType.router.id,
DeviceType.firewall.id,
DeviceType.smtp_gateway.id,
DeviceType.appliance.id,
)
).filter(device__dc__iexact=dc.name), start, end,
_get_search_url(venture, dc=dc, type=(
DeviceType.switch.id,
DeviceType.router.id,
DeviceType.firewall.id,
DeviceType.smtp_gateway.id,
DeviceType.appliance.id,
))
)
yield _total_dict('Cloud', query.filter(
device__model__type__in=(DeviceType.cloud_server.id,)
), start, end,
_get_search_url(venture, type=(DeviceType.cloud_server.id,))
)
if overlap:
yield _total_dict('Unknown', query.filter(
device__model__type__in=(DeviceType.unknown.id,)), start, end,
_get_search_url(venture, type=(DeviceType.unknown.id,))
)
for dc in DataCenter.objects.all():
yield _total_dict(' • Unknown in %s' % dc.name, query.filter(
device__model__type__in=(DeviceType.unknown.id,)
).filter(device__dc__iexact=dc.name), start, end,
_get_search_url(venture, dc=dc, type=(DeviceType.unknown.id,))
)
(
splunk_cost,
splunk_count,
splunk_count_now,
splunk_size,
) = SplunkUsage.get_cost(venture, start, end)
if splunk_cost:
url = None
try:
splunk_model = ComponentModel.objects.get(family='splunkvolume')
except ComponentModel.DoesNotExist:
pass
else:
if splunk_model.group_id:
url = ('/ui/search/components/'
'?component_group=%d' % splunk_model.group_id)
yield {
'name': 'Splunk usage ({:,.0f} MB)'.format(
splunk_size).replace(',', ' '),
'cost': splunk_cost,
'count': splunk_count,
'count_now': splunk_count_now,
'url': url,
}
for extra_id, in query.values_list('extra_id').distinct():
if extra_id is None:
continue
extra = VentureExtraCost.objects.get(id=extra_id)
q = query.filter(extra=extra)
cost = get_total_cost(q, start, end)
count, count_now, devices = get_total_count(q, start, end)
if count:
yield {
'name': extra.name + ' (from %s)' % extra.venture.name,
'count': 'expires %s' % extra.expire.strftime(
'%Y-%m-%d') if extra.expire else '',
'cost': cost,
'count_now': count_now,
}
if overlap:
yield _total_dict(
'Total',
query,
start,
end,
_get_search_url(venture, type=()),
)
yield _total_dict(
'Total physical',
query.exclude(
device__model__type__in=(
DeviceType.cloud_server,
DeviceType.virtual_server,
DeviceType.unknown,
DeviceType.data_center,
DeviceType.rack,
DeviceType.management,
),
).exclude(
device=None,
),
start,
end,
_get_search_url(venture, type=()),
)
def _venture_children(venture, children):
children.append(venture)
for child in venture.child_set.all():
_venture_children(child, children)
class VenturesVenture(SidebarVentures, Base):
template_name = 'ui/ventures-venture.html'
def get(self, *args, **kwargs):
if 'start' in self.request.GET:
self.form = DateRangeForm(self.request.GET)
if not self.form.is_valid():
messages.error(self.request, "Invalid date range")
else:
initial = {
'start': datetime.date.today() - datetime.timedelta(days=30),
'end': datetime.date.today(),
}
self.form = DateRangeForm(initial)
self.form.is_valid()
self.set_venture()
has_perm = self.request.user.get_profile().has_perm
if not has_perm(Perm.list_devices_financial, self.venture if
self.venture and self.venture != '*' else None):
return HttpResponseForbidden(
"You don't have permission to see this.")
return super(VenturesVenture, self).get(*args, **kwargs)
def get_context_data(self, **kwargs):
ret = super(VenturesVenture, self).get_context_data(**kwargs)
start = None
end = None
if self.venture is None or not self.form.is_valid():
items = []
cost_data = []
count_data = []
cores_data = []
vcores_data = []
else:
if self.venture == '':
query = HistoryCost.objects.filter(venture=None)
elif self.venture == '*':
query = HistoryCost.objects.exclude(venture=None)
else:
ventures = []
_venture_children(self.venture, ventures)
query = HistoryCost.objects.filter(
venture__in=ventures
)
start = self.form.cleaned_data['start']
end = self.form.cleaned_data['end']
query = query.exclude(device__deleted=True)
items = _get_summaries(query.all(), start, end, True, self.venture)
cost_data = []
count_data = []
cores_data = []
vcores_data = []
one_day = datetime.timedelta(days=1)
datapoints = set(dp for dp, in
query.values_list('start').distinct())
datapoints |= set(dp for dp, in
query.values_list('end').distinct())
datapoints |= set([start, end])
datapoints = set(min(max(start, date or start), end) for
date in datapoints)
for date in sorted(datapoints):
timestamp = calendar.timegm(date.timetuple()) * 1000
total_cost = get_total_cost(query, date, date + one_day)
total_count, now_count, devices = get_total_count(
query, date, date + one_day)
total_cores = get_total_cores(query, date, date + one_day)
total_vcores = get_total_virtual_cores(
query, date, date + one_day)
cost_data.append([timestamp, total_cost])
count_data.append([timestamp, total_count])
cores_data.append([timestamp, total_cores])
vcores_data.append([timestamp, total_vcores])
ret.update({
'items': items,
'venture': self.venture,
'cost_data': json.dumps(cost_data),
'count_data': json.dumps(count_data),
'cores_data': json.dumps(cores_data),
'vcores_data': json.dumps(vcores_data),
'form': self.form,
'start_date': start,
'end_date': end,
})
return ret
class VenturesDeviceList(SidebarVentures, BaseMixin, BaseDeviceList):
def user_allowed(self):
self.set_venture()
has_perm = self.request.user.get_profile().has_perm
return has_perm(Perm.list_devices_generic, self.venture if
self.venture and self.venture != '*' else None)
def get_queryset(self):
if self.venture is None:
queryset = ReadOnlyDevice.objects.none()
elif self.venture == '*':
queryset = Device.objects.all()
elif self.venture == '':
queryset = ReadOnlyDevice.objects.filter(
venture=None
).select_related(depth=3)
else:
queryset = ReadOnlyDevice.objects.filter(
db.Q(venture=self.venture) |
db.Q(venture__parent=self.venture) |
db.Q(venture__parent__parent=self.venture) |
db.Q(venture__parent__parent__parent=self.venture) |
db.Q(venture__parent__parent__parent__parent=self.venture) |
db.Q(
venture__parent__parent__parent__parent__parent=self.venture
)
).select_related(depth=3)
return self.sort_queryset(queryset)
def get_context_data(self, **kwargs):
ret = super(VenturesDeviceList, self).get_context_data(**kwargs)
ret.update({
'subsection': (self.venture.name if
self.venture and self.venture != '*' else self.venture),
'subsection_slug': (_normalize_venture(self.venture.symbol) if
self.venture and self.venture != '*' else self.venture),
})
return ret
class ReportVenturesDeviceList(ReportDeviceList, VenturesDeviceList):
pass
``` |
{
"source": "jjaifba/locadora-cin",
"score": 2
} |
#### File: features/steps/cadastro_dependente.py
```python
from behave import *
@given(u'estou na pagina de clientes cadastrados')
def step_impl(context):
# raise NotImplementedError(u'STEP: Given estou na pagina de clientes cadastrados')
pass
@when(u'clicar em cadastro de dependentes')
def step_impl(context):
# raise NotImplementedError(u'STEP: When clicar em cadastro de dependentes')
assert True is not False
@then(u'exibira a tela para preencher os dados do dependente')
def step_impl(context):
# raise NotImplementedError(u'STEP: Then exibira a tela para preencher os dados do dependente')
assert context.failed is False
@given(u'na tela de cadastro de dependentes')
def step_impl(context):
# raise NotImplementedError(u'STEP: Given na tela de cadastro de dependentes')
pass
@then(u'exibira a mensagem "Cadastro efetuado com sucesso!"')
def step_impl(context):
# raise NotImplementedError(u'STEP: Then exibira a mensagem "Cadastro efetuado com sucesso!"')
assert context.failed is False
``` |
{
"source": "jjak14/AWS_Lambda",
"score": 2
} |
#### File: jjak14/AWS_Lambda/Lambda_S3_SES.py
```python
import os
import json
import os.path
import boto3
import email
from botocore.exceptions import ClientError
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
#initialize components of the email to be sent
email_sender = 'sender_email_here'
email_recipient = 'recipient_email_here'
email_subject = 'New Object Added to testjakor bucket in S3'
email_body = 'Hello, The Attached file was just added to your S3 bucket'
# The HTML body of the email.
email_body_html = """\
<html>
<head></head>
<body>
<h1>Hello!</h1>
<p>The Attached file was just added to your S3 bucket.</p>
</body>
</html>
"""
def lambda_handler(event, context):
# Create an S3 and SES client
s3 = boto3.client('s3')
ses = boto3.client('ses', region_name='us-east-1')
# Bucket Name where file was uploaded
source_bucket = event["Records"][0]['s3']['bucket']['name']
# Extract object info and path and store it as a variable
key = event['Records'][0]['s3']['object']['key']
file_pathname = key.replace("+", " ")
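# The relevant slice of an S3 put-event payload looks roughly like this
# (illustrative values only); object keys are URL-encoded, so '+' stands
# for a space and is undone above:
# {"Records": [{"s3": {"bucket": {"name": "testjakor"},
#                      "object": {"key": "reports/daily+report.csv"}}}]}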
# Temporarily store the file in Lambda's /tmp directory
temp_file_name = '/tmp/' + os.path.basename(file_pathname)
# Download the file from the event (extracted above) to the tmp location
s3.download_file(source_bucket, file_pathname, temp_file_name)
# Create a multipart/mixed parent container.
msg = MIMEMultipart('mixed')
# Add subject, from and to lines.
msg['Subject'] = email_subject
msg['From'] = email_sender
msg['To'] = email_recipient
# The character encoding for the email.
CHARSET = "utf-8"
# Create a multipart/alternative child container.
msg_body = MIMEMultipart('alternative')
# Encode the text and HTML content and set the character encoding. This step is
# necessary if you're sending a message with characters outside the ASCII range.
textpart = MIMEText(email_body.encode(CHARSET), 'plain', CHARSET)
htmlpart = MIMEText(email_body_html.encode(CHARSET), 'html', CHARSET)
# Add the text and HTML parts to the child container.
msg_body.attach(textpart)
msg_body.attach(htmlpart)
# Define the attachment part and encode it using MIMEApplication.
attachment = MIMEApplication(open(temp_file_name, 'rb').read())
# Add a header to tell the email client to treat this part as an attachment,
# and to give the attachment a name.
attachment.add_header('Content-Disposition', 'attachment',
filename=os.path.basename(temp_file_name))
# Attach the multipart/alternative child container to the multipart/mixed
# parent container.
msg.attach(msg_body)
# Add the attachment to the parent container.
msg.attach(attachment)
print(msg)
try:
#Provide the contents of the email.
response = ses.send_raw_email(
Source=msg['From'],
Destinations=[
msg['To']
],
RawMessage={
'Data': msg.as_string(),
}
)
# Display an error if something goes wrong.
except ClientError as e:
print(e.response['Error']['Message'])
else:
print("Email sent! Message ID:"),
print(response['MessageId'])
``` |
{
"source": "jjakimoto/BBoptimizer",
"score": 2
} |
#### File: bboptimizer/samplers/grid.py
```python
from copy import deepcopy
from itertools import product
import numpy as np
from .core import BaseSampler
class GridSampler(BaseSampler):
"""Grid optimization sampler
Sample next location based on grid sampling
Parameters
----------
space: list(dict)
Define the search space. Each element must have the following
keys: 'name', 'type', and 'domain' ('num_grid' is optional).
init_X: array-like(float), shape=(n_samples, n_dim)
The list of parameters used to initialize the sampler
init_y: array-like(float), shape=(n_samples,)
The list of scores of init_X
num_grid: int, optional
The default number of grid points per dimension
"""
sampler_name = "grid"
def __init__(self, space, init_X=None, init_y=None, num_grid=None,
*args, **kwargs):
super(GridSampler, self).__init__(space, init_X, init_y)
self.index = 0
domains = []
indices = []
_params_conf = deepcopy(self.params_conf)
# Set default grid
for i, conf in enumerate(_params_conf):
# Set default grid value
if "num_grid" not in conf and num_grid is not None:
if len(conf["domain"]) == 2:
conf["num_grid"] = num_grid
# Configure domain
domain = conf["domain"]
if conf["type"] in ["continuous", "integer"]:
if "num_grid" in conf:
scale = conf.get("scale", None)
if scale == 'log':
domain = np.logspace(np.log10(domain[0]),
np.log10(domain[1]),
conf["num_grid"])
else:
domain = np.linspace(domain[0],
domain[1],
conf["num_grid"])
if conf["type"] == "integer":
domain = domain.astype(int)
else:
domain = tuple(domain)
elif conf["type"] == "fixed":
domain = (domain,)
else:
domain = tuple(domain)
domains.append(list(domain))
indices.append(i)
# Sample parameters from parameters stored in self.params_list
patterns = product(*domains)
self.params_list = []
for params_val in patterns:
params_dict = dict()
for i, idx in enumerate(indices):
conf = _params_conf[idx]
params_dict[conf["name"]] = params_val[i]
self.params_list.append(params_dict)
def sample(self, num_samples=1, *args, **kwargs):
"""Sample next location to evaluate based on grid.
Every time this function is called, it returns points that have not been sampled yet, cycling back to the start once the grid is exhausted.
Parameters
---------
num_samples: int
The number of samples
Returns
-------
Xs: list(dict), length is num_samples
"""
Xs = []
for i in range(num_samples):
x = self.params_list[self.index]
Xs.append(x)
self.index += 1
self.index = self.index % len(self.params_list)
return Xs
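# Usage sketch (hypothetical search space): three grid points on a log scale
# are expanded up front and returned one by one, wrapping around at the end.
# Values shown approximately.
# space = [{"name": "lr", "type": "continuous",
#           "domain": (1e-4, 1e-2), "scale": "log", "num_grid": 3}]
# sampler = GridSampler(space)
# sampler.sample(num_samples=2)  # -> [{"lr": 0.0001}, {"lr": 0.001}]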
```
#### File: BBoptimizer/examples/nn_opt.py
```python
from sklearn.preprocessing import OneHotEncoder
import random
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Dropout
from keras.layers import Activation, Reshape
from keras.optimizers import Adam, Adadelta, SGD, RMSprop
from keras.regularizers import l2
import matplotlib.pyplot as plt
from bboptimizer import Optimizer
# Fetch MNIST dataset
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
train = mnist.train
X = train.images
train_X = X
train_y = np.expand_dims(train.labels, -1)
train_y = OneHotEncoder().fit_transform(train_y)
valid = mnist.validation
X = valid.images
valid_X = X
valid_y = np.expand_dims(valid.labels, -1)
valid_y = OneHotEncoder().fit_transform(valid_y)
def get_optimzier(name, **kwargs):
if name == "rmsprop":
return RMSprop(**kwargs)
elif name == "adam":
return Adam(**kwargs)
elif name == "sgd":
return SGD(**kwargs)
elif name == "adadelta":
return Adadelta(**kwargs)
else:
raise ValueError(name)
def construct_NN(params):
model = Sequential()
model.add(Reshape((784,), input_shape=(784,)))
def update_model(_model, _params, name):
_model.add(Dropout(_params[name + "_drop_rate"]))
_model.add(Dense(units=_params[name + "_num_units"],
activation=None,
kernel_regularizer=l2(_params[name + "_w_reg"])))
if _params[name + "_is_batch"]:
_model.add(BatchNormalization())
if _params[name + "_activation"] is not None:
_model.add(Activation(_params[name + "_activation"]))
return _model
# Add input layer
model = update_model(model, params, "input")
# Add hidden layer
for i in range(params["num_hidden_layers"]):
model = update_model(model, params, "hidden")
# Add output layer
model = update_model(model, params, "output")
optimizer = get_optimzier(params["optimizer"],
lr=params["learning_rate"])
model.compile(optimizer=optimizer,
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def score_func(params):
# print("parameters", params)
model = construct_NN(params)
model.fit(train_X, train_y,
epochs=params["epochs"],
batch_size=params["batch_size"], verbose=1)
# print("###################", model.metrics_names)
score = model.evaluate(valid_X, valid_y,
batch_size=params["batch_size"])
idx = model.metrics_names.index("acc")
score = score[idx]
print(params, score)
return score
params_conf = [
{"name": "num_hidden_layers", "type": "integer",
"domain": (0, 5)},
{"name": "batch_size", "type": "integer",
"domain": (16, 128), "scale": "log"},
{"name": "learning_rate", "type": "continuous",
"domain": (1e-5, 1e-1), "scale": "log"},
{"name": "epochs", "type": "integer",
"domain": (10, 250), "scale": "log"},
{"name": "optimizer", "type": "categorical",
"domain": ("rmsprop", "sgd", "adam", "adadelta")},
{"name": "input_drop_rate", "type": "continuous",
"domain": (0, 0.5)},
{"name": "input_num_units", "type": "integer",
"domain": (32, 512), "scale": "log"},
{"name": "input_w_reg", "type": "continuous",
"domain": (1e-10, 1e-1), "scale": "log"},
{"name": "input_is_batch", "type": "categorical",
"domain": (True, False)},
{"name": "input_activation", "type": "categorical",
"domain": ("relu", "sigmoid", "tanh")},
{"name": "hidden_drop_rate", "type": "continuous",
"domain": (0, 0.75)},
{"name": "hidden_num_units", "type": "integer",
"domain": (32, 512), "scale": "log"},
{"name": "hidden_w_reg", "type": "continuous",
"domain": (1e-10, 1e-1), "scale": "log"},
{"name": "hidden_is_batch", "type": "categorical",
"domain": (True, False)},
{"name": "hidden_activation", "type": "categorical",
"domain": ("relu", "sigmoid", "tanh")},
{"name": "output_drop_rate", "type": "continuous",
"domain": (0, 0.5)},
{"name": "output_num_units", "type": "fixed",
"domain": 10},
{"name": "output_w_reg", "type": "continuous",
"domain": (1e-10, 1e-1), "scale": "log"},
{"name": "output_is_batch", "type": "categorical",
"domain": (True, False)},
{"name": "output_activation", "type": "fixed",
"domain": "softmax"},
]
if __name__ == "__main__":
np.random.seed(0)
random.seed(0)
bayes_opt = Optimizer(score_func, params_conf, sampler="bayes", r_min=10, maximize=True)
print("****************************")
print("bayes")
print(bayes_opt.search(num_iter=50))
print("****************************")
print("random")
np.random.seed(0)
random.seed(0)
random_opt = Optimizer(score_func, params_conf, sampler="random", maximize=True)
random_opt.search(num_iter=50)
# Plot results
plt.figure(figsize=(20, 10))
X = np.arange(1, len(bayes_opt.results[1]) + 1)
plt.plot(X, bayes_opt.results[1], color="b", label="bayes")
plt.plot(X, random_opt.results[1], color="g", label="random")
plt.scatter(X, bayes_opt.results[1], color="b")
plt.scatter(X, random_opt.results[1], color="g")
plt.xlabel("the number of trials", fontsize=30)
plt.ylabel("score", fontsize=30)
plt.title("Neural Network Hyperparameter Optimization", fontsize=50)
plt.ylim(0.96, 1.0)
plt.legend(fontsize=20)
plt.savefig("hyper_nn_opt.jpg")
``` |
{
"source": "jjakimoto/rl_traders.py",
"score": 3
} |
#### File: rl_traders/memories/utils.py
```python
from six.moves import xrange
from collections import deque
import random
import warnings
import numpy as np
def sample_batch_indexes(low, high, size):
"""Return a sample of (size) unique elements between low and high
Parameters
----------
low: int, The minimum value for our samples
high: int, The maximum value for our samples
size: int, The number of samples to pick
Returns
-------
A list of samples of length size, with values between low and high
"""
if high - low >= size:
# We have enough data. Draw without replacement, that is each index is unique in the
# batch. We cannot use `np.random.choice` here because it is horribly inefficient as
# the memory grows. See https://github.com/numpy/numpy/issues/2764 for a discussion.
# `random.sample` does the same thing (drawing without replacement) and is way faster.
r = xrange(low, high)
batch_idxs = random.sample(r, size)
else:
# Not enough data. Help ourselves with sampling from the range, but the same index
# can occur multiple times. This is not good and should be avoided by picking a
# large enough warm-up phase.
warnings.warn(
'Not enough entries to sample without replacement. Consider increasing your warm-up phase to avoid oversampling!')
batch_idxs = np.random.random_integers(low, high - 1, size=size)
assert len(batch_idxs) == size
return batch_idxs
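# Usage sketch (hypothetical values): draw 3 distinct indices from [0, 10).
# sample_batch_indexes(0, 10, size=3)   # e.g. [7, 2, 9]
# Asking for more samples than the range holds only warns and falls back to
# sampling with replacement instead of failing.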
class RingBuffer(object):
"""Erase the oldest memory after reaching maxlen
Parameters
----------
maxlen: int
The maximum number of elements in memory
"""
def __init__(self, maxlen):
self.maxlen = maxlen
self.data = deque(maxlen=maxlen)
def __len__(self):
return self.length()
def __getitem__(self, idx):
"""Return element of buffer at specific index"""
if idx < 0 or idx >= self.length():
raise KeyError()
return self.data[idx]
def __setitem__(self, idx, value):
"""Set element by accessing with index"""
if idx < 0 or idx >= self.length():
raise KeyError()
self.data[idx] = value
def append(self, v):
"""Append an element to the buffer
# Argument
v (object): Element to append
"""
self.data.append(v)
def length(self):
"""Return the length of Deque
# Argument
None
# Returns
The lenght of deque element
"""
return len(self.data)
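# Usage sketch: with maxlen=2 the oldest entry is silently dropped.
# buf = RingBuffer(maxlen=2)
# buf.append('a'); buf.append('b'); buf.append('c')
# len(buf)  # -> 2
# buf[0]    # -> 'b'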
def zeroed_observation(observation):
"""Return an array of zeros with same shape as given observation
Parameters
----------
observation: np.ndarray, list, or something else
Returns
-------
A np.ndarray of zeros with observation.shape
"""
if hasattr(observation, 'shape'):
return np.zeros(observation.shape)
elif hasattr(observation, '__iter__'):
out = []
for x in observation:
out.append(zeroed_observation(x))
return out
else:
return 0.
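# Usage sketch: shapes are preserved, nested lists are handled recursively.
# zeroed_observation(np.ones((2, 3)))   # -> array of zeros with shape (2, 3)
# zeroed_observation([np.ones(2), 5])   # -> [array([0., 0.]), 0.0]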
```
#### File: rl_traders.py/rl_traders/models.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_utils.models import FeedForward
class EIIEFeedForwarad(nn.Module):
def __init__(self, model_params, cash_bias):
super(EIIEFeedForwarad, self).__init__()
self.lower_model = FeedForward(model_params['lower_params'])
self.upper_model = FeedForward(model_params['upper_params'])
self.cash_bias = nn.Parameter(cash_bias)
def forward(self, states, prev_actions):
n_batch = states.shape[0]
outputs = self.lower_model(states)
# We do not use cash actions as input, prev_actions[:, 0]
prev_actions = prev_actions[:, None, None, 1:]
# Concatenation with channel dimension
outputs = torch.cat((outputs, prev_actions), dim=1)
prev_softmax = self.upper_model(outputs)
_cash_bias = self.cash_bias.repeat(n_batch, 1)
prev_softmax = torch.cat((_cash_bias, prev_softmax), dim=-1)
actions = F.softmax(prev_softmax, dim=-1)
return actions
def predict(self, state, prev_action):
states = state[None, :]
prev_actions = prev_action[None, :]
return self.forward(states, prev_actions)[0].detach().numpy()
``` |
{
"source": "jjakimoto/torch_utils",
"score": 3
} |
#### File: torch_utils/torch_utils/debug.py
```python
import numpy as np
def is_updated_model(model, old_model):
"""Check if model is updated
Parameters
----------
model: torch.nn.Module instance
Model with updated parameters
old_model: torch.nn.Module instance
Model with not updated parameters
Returns
-------
bool: If any parameters fail to update, return False
"""
updated_list = []
for old_param, param in zip(old_model.parameters(), model.parameters()):
updated_list.append(
not np.allclose(old_param.data.numpy(), param.data.numpy()))
return np.alltrue(updated_list)
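# Usage sketch (hypothetical training loop): snapshot the model with
# copy.deepcopy before an optimizer step, then verify that every parameter
# actually changed.
# old_model = copy.deepcopy(model)
# loss.backward(); optimizer.step()
# assert is_updated_model(model, old_model)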
def is_updated_model_with_names(model, old_model):
"""Check if model is updated while printing parameter names
Parameters
----------
model: torch.nn.Module instance
Model with updated parameters
old_model: torch.nn.Module instance
Model with not updated parameters
Returns
-------
bool: If any parameters fail to update, return False
"""
updated_list = []
for old_param, param in zip(old_model.named_parameters(), model.named_parameters()):
updated_list.append(not np.allclose(old_param[1].data.numpy(), param[1].data.numpy()))
print(old_param[0],
not np.allclose(old_param[1].data.numpy(), param[1].data.numpy()),
(old_param[1].data.numpy() - param[1].data.numpy()).sum())
return np.alltrue(updated_list)
def is_same_model(model1, model2):
"""Check if model1 and model 2 are the same
Parameters
----------
model1: torch.nn.Module instance
model2: torch.nn.Module instance
Returns
-------
bool: If any pairs parameters are different, return False
"""
same_list = []
for param1, param2 in zip(model1.parameters(), model2.parameters()):
same_list.append(np.allclose(param1.data.numpy(), param2.data.numpy()))
return np.alltrue(same_list)
def is_same_model_with_names(model1, model2):
"""Check if model1 and model 2 are the same while printing parameter names
Parameters
----------
model1: torch.nn.Module instance
model2: torch.nn.Module instance
Returns
-------
bool: If any pairs parameters are different, return False
"""
same_list = []
for param1, param2 in zip(model1.named_parameters(), model2.named_parameters()):
same_list.append(np.allclose(param1[1].data.numpy(), param2[1].data.numpy()))
print(param1[0],
np.allclose(param1[1].data.numpy(), param2[1].data.numpy()),
(param1[1].data.numpy() - param2[1].data.numpy()).sum())
return np.alltrue(same_list)
``` |
{
"source": "jjaldridge2009/Project_Xs",
"score": 2
} |
#### File: Project_Xs/src/player_blink_gui.py
```python
import os
os.environ["OPENCV_VIDEOIO_MSMF_ENABLE_HW_TRANSFORMS"] = "0"
import cv2
import heapq
import json
import os.path
import signal
import sys
import threading
import time
import tkinter as tk
import tkinter.filedialog as fd
from tkinter import ttk
from os import listdir
from os.path import isfile, join
from PIL import Image, ImageTk
os.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import rngtool
from xorshift import Xorshift
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.rng = None
self.previewing = False
self.monitoring = False
self.reidentifying = False
self.tidsiding = False
self.timelining = False
self.config_json = {}
self.default_config = {
"MonitorWindow": True,
"WindowPrefix": "SysDVR-Client [PID ",
"image": "./images/cave/eye.png",
"view": [0, 0, 0, 0],
"thresh": 0.9,
"white_delay": 0.0,
"advance_delay": 0,
"advance_delay_2": 0,
"npc": 0,
"timeline_npc": 0,
"pokemon_npc": 0,
"crop": [0,0,0,0],
"camera": 0,
"display_percent": 80
}
self.pack()
self.create_widgets()
signal.signal(signal.SIGINT, self.signal_handler)
def update_configs(self,event=None):
self.config_jsons = [f for f in listdir("configs") if isfile(join("configs", f))]
self.config_combobox['values'] = self.config_jsons
def create_widgets(self):
self.master.title("Player Blink")
ttk.Label(self,text="Progress:").grid(column=0,row=0)
ttk.Label(self,text="S[0-3]:").grid(column=0,row=3)
ttk.Label(self,text="S[0-1]:").grid(column=0,row=7)
ttk.Label(self,text="Advances:").grid(column=0,row=10)
ttk.Label(self,text="Timer:").grid(column=0,row=11)
ttk.Label(self,text="X to advance:").grid(column=0,row=12)
self.progress = ttk.Label(self,text="0/0")
self.progress.grid(column=1,row=0)
self.config_combobox = ttk.Combobox(self, state="readonly", values=[])
self.config_combobox.grid(column=2,row=0)
self.config_combobox.bind("<<ComboboxSelected>>", self.config_combobox_onchange)
self.config_combobox.bind("<Button-1>", self.update_configs)
self.update_configs()
self.new_config_button = ttk.Button(self,text="+",command=self.new_config,width=2)
self.new_config_button.grid(column=3,row=0,columnspan=2)
self.eye_display = ttk.Label(self)
self.eye_display.grid(column=2,row=1)
self.prefix_input = ttk.Entry(self)
self.prefix_input.grid(column=2,row=2)
ttk.Label(self,text="Camera:").grid(column=3,row=1)
self.camera_index = tk.Spinbox(self, from_= 0, to = 99, width = 5)
self.camera_index.grid(column=4,row=1)
self.monitor_window_var = tk.IntVar()
self.monitor_window = ttk.Checkbutton(self,text="Monitor Window",variable=self.monitor_window_var)
self.monitor_window.grid(column=3,row=2,columnspan=2)
self.monitor_display_buffer = ttk.Label(self)
self.monitor_display_buffer.grid(column=2,row=3,rowspan=64,columnspan=2)
self.monitor_display = ttk.Label(self)
self.monitor_display.grid(column=2,row=3,rowspan=64,columnspan=2)
self.monitor_blink_button = ttk.Button(self, text="Monitor Blinks", command=self.monitor_blinks)
self.monitor_blink_button.grid(column=5,row=0)
self.reidentify_button = ttk.Button(self, text="Reidentify", command=self.reidentify)
self.reidentify_button.grid(column=5,row=1)
self.preview_button = ttk.Button(self, text="Preview", command=self.preview)
self.preview_button.grid(column=5,row=2)
self.stop_tracking_button = ttk.Button(self, text="Stop Tracking", command=self.stop_tracking)
self.stop_tracking_button.grid(column=5,row=3)
self.timeline_button = ttk.Button(self, text="Timeline", command=self.timeline)
self.timeline_button.grid(column=5,row=4)
self.tidsid_button = ttk.Button(self, text="TID/SID", command=self.tidsid)
self.tidsid_button.grid(column=5,row=5)
x = y = w = h = 0
th = 0.9
ttk.Label(self,text="X").grid(column=6,row=1)
ttk.Label(self,text="Y").grid(column=6,row=2)
ttk.Label(self,text="W").grid(column=6,row=3)
ttk.Label(self,text="H").grid(column=6,row=4)
ttk.Label(self,text="Threshold").grid(column=6,row=5)
ttk.Label(self,text="Time Delay").grid(column=6,row=6)
ttk.Label(self,text="Advance Delay").grid(column=6,row=7)
ttk.Label(self,text="Advance Delay 2").grid(column=6,row=8)
ttk.Label(self,text="NPCs").grid(column=6,row=9)
ttk.Label(self,text="NPCs during Timeline").grid(column=6,row=10)
ttk.Label(self,text="Pokemon NPCs").grid(column=6,row=11)
self.menu_check_var = tk.IntVar()
self.menu_check = ttk.Checkbutton(self, text="+1 on menu close", variable=self.menu_check_var)
self.menu_check.grid(column=7,row=0)
self.menu_check_var.set(1)
self.reident_noisy_check_var = tk.IntVar()
self.reident_noisy_check = ttk.Checkbutton(self, text="Reident 1 PK NPC", variable=self.reident_noisy_check_var)
self.reident_noisy_check.grid(column=5,row=6)
self.reident_noisy_check_var.set(0)
self.pos_x = tk.Spinbox(self, from_= 0, to = 99999, width = 5)
self.pos_x.grid(column=7,row=1)
self.pos_y = tk.Spinbox(self, from_= 0, to = 99999, width = 5)
self.pos_y.grid(column=7,row=2)
self.pos_w = tk.Spinbox(self, from_= 0, to = 99999, width = 5)
self.pos_w.grid(column=7,row=3)
self.pos_h = tk.Spinbox(self, from_= 0, to = 99999, width = 5)
self.pos_h.grid(column=7,row=4)
self.pos_th = tk.Spinbox(self, from_= 0, to = 1, width = 5, increment=0.1)
self.pos_th.grid(column=7,row=5)
self.whi_del = tk.Spinbox(self, from_= 0, to = 999, width = 5, increment=0.1)
self.whi_del.grid(column=7,row=6)
self.adv_del = tk.Spinbox(self, from_= 0, to = 999, width = 5, increment=1)
self.adv_del.grid(column=7,row=7)
self.adv_del_2 = tk.Spinbox(self, from_= 0, to = 999, width = 5, increment=1)
self.adv_del_2.grid(column=7,row=8)
self.npc = tk.Spinbox(self, from_= 0, to = 999, width = 5, increment=1)
self.npc.grid(column=7,row=9)
self.timeline_npc = tk.Spinbox(self, from_= -1, to = 999, width = 5, increment=1)
self.timeline_npc.grid(column=7,row=10)
self.pokemon_npc = tk.Spinbox(self, from_= 0, to = 999, width = 5, increment=1)
self.pokemon_npc.grid(column=7,row=11)
self.new_eye_button = ttk.Button(self, text="Select Eye",command=self.new_eye)
self.new_eye_button.grid(column=6,row=12,columnspan=2)
self.save_button = ttk.Button(self, text="Save Config",command=self.save_config)
self.save_button.grid(column=6,row=13,columnspan=2)
self.raw_screenshot_button = ttk.Button(self, text="Raw Screenshot",command=self.save_screenshot)
self.raw_screenshot_button.grid(column=6,row=14,columnspan=2)
self.s0_1_2_3 = tk.Text(self, width=10, height=4)
self.s0_1_2_3.grid(column=1,row=2,rowspan=4)
self.s01_23 = tk.Text(self, width=20, height=2)
self.s01_23.grid(column=1,row=6,rowspan=4)
self.advances = 0
self.adv = ttk.Label(self,text=self.advances)
self.adv.grid(column=1,row=10)
self.count_down = 0
self.cd = ttk.Label(self,text=self.count_down)
self.cd.grid(column=1,row=11)
self.advances_increase = tk.Spinbox(self, from_ = 0, to = 999999)
self.advances_increase.grid(column=1,row=12)
self.advances_increase_button = ttk.Button(self, text="Advance", command=self.increase_advances)
self.advances_increase_button.grid(column=1,row=13)
ttk.Label(self,text="Display Percent").grid(column=0,row=14)
self.display_percent = tk.Spinbox(self, from_ = 0, to = 500)
self.display_percent.grid(column=1,row=14)
self.pos_x.delete(0, tk.END)
self.pos_x.insert(0, x)
self.pos_y.delete(0, tk.END)
self.pos_y.insert(0, y)
self.pos_w.delete(0, tk.END)
self.pos_w.insert(0, w)
self.pos_h.delete(0, tk.END)
self.pos_h.insert(0, h)
self.pos_th.delete(0, tk.END)
self.pos_th.insert(0, th)
self.whi_del.delete(0, tk.END)
self.whi_del.insert(0, 0.0)
self.adv_del.delete(0, tk.END)
self.adv_del.insert(0, 0)
self.adv_del_2.delete(0, tk.END)
self.adv_del_2.insert(0, 0)
self.npc.delete(0, tk.END)
self.npc.insert(0, 0)
self.timeline_npc.delete(0, tk.END)
self.timeline_npc.insert(0, 0)
self.pokemon_npc.delete(0, tk.END)
self.pokemon_npc.insert(0, 0)
self.camera_index.delete(0, tk.END)
self.camera_index.insert(0, 0)
self.advances_increase.delete(0, tk.END)
self.advances_increase.insert(0, 165)
self.display_percent.delete(0, tk.END)
self.display_percent.insert(0, 100)
self.after_task()
def increase_advances(self):
plus = int(self.advances_increase.get())
self.rng.advance(plus)
self.advances += plus
def new_config(self):
with fd.asksaveasfile(initialdir="./configs/", filetypes=[("JSON", ".json")]) as f:
json.dump(self.default_config,f,indent=4)
self.config_combobox.set(os.path.basename(f.name))
self.config_combobox_onchange()
def save_screenshot(self):
with fd.asksaveasfile(initialdir="./", filetypes=[("PNG", ".png")]) as f:
cv2.imwrite(f.name,self.raw_screenshot)
def new_eye(self):
self.config_json["image"] = "./"+os.path.relpath(fd.askopenfilename(initialdir="./images/", filetypes=[("Image", ".png")])).replace("\\","/")
self.player_eye = cv2.imread(self.config_json["image"], cv2.IMREAD_GRAYSCALE)
self.player_eye_tk = self.cv_image_to_tk(self.player_eye)
self.eye_display['image'] = self.player_eye_tk
def save_config(self):
json.dump(self.config_json,open(join("configs",self.config_combobox.get()),"w"),indent=4)
def cv_image_to_tk(self, image):
split = cv2.split(image)
if len(split) == 3:
b,g,r = split
image = cv2.merge((r,g,b))
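            # OpenCV frames are BGR; reorder channels to RGB so PIL/Tk shows correct colors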
im = Image.fromarray(image)
return ImageTk.PhotoImage(image=im)
def config_combobox_onchange(self, event=None):
self.config_json = json.load(open(join("configs",self.config_combobox.get())))
missing = set(self.default_config.keys()).difference(self.config_json.keys())
if len(missing) > 0:
print(f"Config was missing the following keys {missing}\nDefaults have been added")
for key in missing:
self.config_json[key] = self.default_config[key]
x,y,w,h = self.config_json["view"]
self.pos_x.delete(0, tk.END)
self.pos_x.insert(0, x)
self.pos_y.delete(0, tk.END)
self.pos_y.insert(0, y)
self.pos_w.delete(0, tk.END)
self.pos_w.insert(0, w)
self.pos_h.delete(0, tk.END)
self.pos_h.insert(0, h)
self.pos_th.delete(0, tk.END)
self.pos_th.insert(0, self.config_json["thresh"])
self.whi_del.delete(0, tk.END)
self.whi_del.insert(0, self.config_json["white_delay"])
self.adv_del.delete(0, tk.END)
self.adv_del.insert(0, self.config_json["advance_delay"])
self.adv_del_2.delete(0, tk.END)
self.adv_del_2.insert(0, self.config_json["advance_delay_2"])
self.npc.delete(0, tk.END)
self.npc.insert(0, self.config_json["npc"])
self.pokemon_npc.delete(0, tk.END)
self.pokemon_npc.insert(0, self.config_json["pokemon_npc"])
self.timeline_npc.delete(0, tk.END)
self.timeline_npc.insert(0, self.config_json["timeline_npc"])
self.camera_index.delete(0, tk.END)
self.camera_index.insert(0, self.config_json["camera"])
self.player_eye = cv2.imread(self.config_json["image"], cv2.IMREAD_GRAYSCALE)
self.player_eye_tk = self.cv_image_to_tk(self.player_eye)
self.eye_display['image'] = self.player_eye_tk
self.prefix_input.delete(0, tk.END)
self.prefix_input.insert(0, self.config_json["WindowPrefix"])
self.monitor_window_var.set(self.config_json["MonitorWindow"])
self.display_percent.delete(0, tk.END)
self.display_percent.insert(0, self.config_json["display_percent"])
def stop_tracking(self):
self.tracking = False
def timeline(self):
self.timelining = True
def monitor_blinks(self):
if not self.monitoring:
self.monitor_blink_button['text'] = "Stop Monitoring"
self.monitoring = True
self.monitoring_thread=threading.Thread(target=self.monitoring_work)
self.monitoring_thread.daemon = True
self.monitoring_thread.start()
else:
self.monitor_blink_button['text'] = "Monitor Blinks"
self.monitoring = False
def reidentify(self):
if not self.reidentifying:
self.reidentify_button['text'] = "Stop Reidentifying"
self.reidentifying = True
self.reidentifying_thread=threading.Thread(target=self.reidentifying_work)
self.reidentifying_thread.daemon = True
self.reidentifying_thread.start()
else:
self.reidentify_button['text'] = "Reidentify"
self.reidentifying = False
def tidsid(self):
if not self.tidsiding:
self.tidsid_button['text'] = "Stop TID/SID"
self.tidsiding = True
self.tidsiding_thread=threading.Thread(target=self.tidsiding_work)
self.tidsiding_thread.daemon = True
self.tidsiding_thread.start()
else:
self.tidsid_button['text'] = "TID/SID"
self.tidsiding = False
def monitoring_work(self):
self.tracking = False
blinks, intervals, offset_time = rngtool.tracking_blink(self.player_eye, *self.config_json["view"], MonitorWindow=self.config_json["MonitorWindow"], WindowPrefix=self.config_json["WindowPrefix"], crop=self.config_json["crop"], camera=self.config_json["camera"], tk_window=self, th=self.config_json["thresh"])
self.rng = rngtool.recov(blinks, intervals, npc=self.config_json["npc"])
self.monitor_blink_button['text'] = "Monitor Blinks"
self.monitoring = False
self.preview()
waituntil = time.perf_counter()
diff = round(waituntil-offset_time)+(1 if self.menu_check_var.get() else 0)
self.rng.getNextRandSequence(diff*(self.config_json["npc"]+1))
state = self.rng.getState()
s0 = f"{state[0]:08X}"
s1 = f"{state[1]:08X}"
s2 = f"{state[2]:08X}"
s3 = f"{state[3]:08X}"
s01 = s0+s1
s23 = s2+s3
print(s01,s23)
print(s0,s1,s2,s3)
self.s0_1_2_3.delete(1.0, tk.END)
self.s01_23.delete(1.0, tk.END)
self.s0_1_2_3.insert(1.0,s0+"\n"+s1+"\n"+s2+"\n"+s3)
self.s01_23.insert(1.0,s01+"\n"+s23)
self.advances = 0
self.tracking = True
self.count_down = None
while self.tracking:
if self.count_down is None:
if self.timelining:
self.count_down = 10
elif self.count_down != 0:
self.count_down -= 1
print(self.count_down+1)
else:
break
self.advances += self.config_json["npc"]+1
r = self.rng.getNextRandSequence(self.config_json["npc"]+1)[-1]
waituntil += 1.018
print(f"advances:{self.advances}, blinks:{hex(r&0xF)}")
            next_time = max(waituntil - time.perf_counter(), 0)
            time.sleep(next_time)
if self.timelining:
self.rng.next()
# white screen
time.sleep(self.config_json["white_delay"])
waituntil = time.perf_counter()
self.rng.advance(self.config_json["advance_delay"])
self.advances += self.config_json["advance_delay"]
print("blink timeline started")
queue = []
for _ in range(self.config_json["timeline_npc"]+1):
heapq.heappush(queue, (waituntil+1.017,0))
for _ in range(self.config_json["pokemon_npc"]):
blink_int = self.rng.rangefloat(3,12) + 0.285
heapq.heappush(queue, (waituntil+blink_int,1))
self.count_down = 10
while queue and self.tracking:
self.advances += 1
w, q = heapq.heappop(queue)
next_time = w - time.perf_counter() or 0
if next_time>0:
time.sleep(next_time)
if self.config_json["advance_delay_2"] != 0:
if self.count_down > 0:
self.count_down -= 1
print(self.count_down+1)
elif self.count_down != -1:
self.count_down -= 1
self.advances += self.config_json["advance_delay_2"]
self.rng.advance(self.config_json["advance_delay_2"])
if q==0:
r = self.rng.next()
print(f"advances:{self.advances}, blink:{hex(r&0xF)}")
heapq.heappush(queue, (w+1.017, 0))
else:
blink_int = self.rng.rangefloat(3,12) + 0.285
heapq.heappush(queue, (w+blink_int, 1))
print(f"advances:{self.advances}, interval:{blink_int}")
self.timelining = False
def tidsiding_work(self):
self.tracking = False
munchlax_intervals = rngtool.tracking_poke_blink(self.player_eye, *self.config_json["view"], MonitorWindow=self.config_json["MonitorWindow"], WindowPrefix=self.config_json["WindowPrefix"], crop=self.config_json["crop"], camera=self.config_json["camera"], tk_window=self, th=self.config_json["thresh"], size=64)
self.rng = rngtool.recovByMunchlax(munchlax_intervals)
state = self.rng.getState()
self.tidsid_button['text'] = "TID/SID"
self.tidsiding = False
self.preview()
s0 = f"{state[0]:08X}"
s1 = f"{state[1]:08X}"
s2 = f"{state[2]:08X}"
s3 = f"{state[3]:08X}"
s01 = s0+s1
s23 = s2+s3
print(s01,s23)
print(s0,s1,s2,s3)
self.s0_1_2_3.delete(1.0, tk.END)
self.s01_23.delete(1.0, tk.END)
self.s0_1_2_3.insert(1.0,s0+"\n"+s1+"\n"+s2+"\n"+s3)
self.s01_23.insert(1.0,s01+"\n"+s23)
waituntil = time.perf_counter()
ts = time.time()
print([hex(x) for x in state],ts)
self.tracking = True
while self.tracking:
self.advances += 1
interval = self.rng.rangefloat(3.0,12.0) + 0.285
waituntil += interval
print(f"advances:{self.advances}")
            next_time = max(waituntil - time.perf_counter(), 0)
            time.sleep(next_time)
def reidentifying_work(self):
self.tracking = False
state = [int(x,16) for x in self.s0_1_2_3.get(1.0,tk.END).split("\n")[:4]]
s0 = f"{state[0]:08X}"
s1 = f"{state[1]:08X}"
s2 = f"{state[2]:08X}"
s3 = f"{state[3]:08X}"
s01 = s0+s1
s23 = s2+s3
print(s01,s23)
print(s0,s1,s2,s3)
self.s0_1_2_3.delete(1.0, tk.END)
self.s01_23.delete(1.0, tk.END)
self.s0_1_2_3.insert(1.0,s0+"\n"+s1+"\n"+s2+"\n"+s3)
self.s01_23.insert(1.0,s01+"\n"+s23)
print([hex(x) for x in state])
if self.reident_noisy_check_var.get():
self.pokemon_npc.delete(0,tk.END)
self.pokemon_npc.insert(0,1)
observed_blinks, observed_intervals, offset_time = rngtool.tracking_blink(self.player_eye, *self.config_json["view"], MonitorWindow=self.config_json["MonitorWindow"], WindowPrefix=self.config_json["WindowPrefix"], crop=self.config_json["crop"], camera=self.config_json["camera"], tk_window=self, th=self.config_json["thresh"], size=20)
self.rng, adv = rngtool.reidentifyByIntervalsNoisy(Xorshift(*state), observed_intervals)
self.timelining = True
self.count_down = 0
auto_timeline = True
else:
observed_blinks, observed_intervals, offset_time = rngtool.tracking_blink(self.player_eye, *self.config_json["view"], MonitorWindow=self.config_json["MonitorWindow"], WindowPrefix=self.config_json["WindowPrefix"], crop=self.config_json["crop"], camera=self.config_json["camera"], tk_window=self, th=self.config_json["thresh"], size=7)
self.rng, adv = rngtool.reidentifyByIntervals(Xorshift(*state), observed_intervals, return_advance=True, npc=self.config_json["npc"])
auto_timeline = False
self.reidentify_button['text'] = "Reidentify"
self.reidentifying = False
self.preview()
waituntil = time.perf_counter()
diff = round(waituntil-offset_time)+(1 if self.menu_check_var.get() else 0)
self.rng.getNextRandSequence(diff*(self.config_json["npc"]+1))
state = self.rng.getState()
self.advances = adv+diff*(self.config_json["npc"]+1)
self.tracking = True
if not auto_timeline:
self.count_down = None
while self.tracking:
if self.count_down is None:
if self.timelining:
self.count_down = 10
elif self.count_down != 0:
self.count_down -= 1
print(self.count_down+1)
else:
break
self.advances += self.config_json["npc"]+1
r = self.rng.getNextRandSequence(self.config_json["npc"]+1)[-1]
waituntil += 1.018
print(f"advances:{self.advances}, blinks:{hex(r&0xF)}")
            next_time = max(waituntil - time.perf_counter(), 0)
            time.sleep(next_time)
if self.timelining:
self.rng.next()
# white screen
time.sleep(self.config_json["white_delay"])
waituntil = time.perf_counter()
self.rng.advance(self.config_json["advance_delay"])
self.advances += self.config_json["advance_delay"]
print("blink timeline started")
queue = []
for _ in range(self.config_json["timeline_npc"]+1):
heapq.heappush(queue, (waituntil+1.017,0))
for _ in range(self.config_json["pokemon_npc"]):
blink_int = self.rng.rangefloat(3,12) + 0.285
heapq.heappush(queue, (waituntil+blink_int,1))
self.count_down = 10
while queue and self.tracking:
self.advances += 1
w, q = heapq.heappop(queue)
next_time = w - time.perf_counter() or 0
if next_time>0:
time.sleep(next_time)
if self.config_json["advance_delay_2"] != 0:
if self.count_down > 0:
self.count_down -= 1
print(self.count_down+1)
elif self.count_down != -1:
self.count_down -= 1
self.advances += self.config_json["advance_delay_2"]
self.rng.advance(self.config_json["advance_delay_2"])
if q==0:
r = self.rng.next()
print(f"advances:{self.advances}, blink:{hex(r&0xF)}")
heapq.heappush(queue, (w+1.017, 0))
else:
blink_int = self.rng.rangefloat(3,12) + 0.285
heapq.heappush(queue, (w+blink_int, 1))
print(f"advances:{self.advances}, interval:{blink_int}")
self.timelining = False
def preview(self):
if not self.previewing:
self.preview_button['text'] = "Stop Preview"
self.previewing = True
self.previewing_thread=threading.Thread(target=self.previewing_work)
self.previewing_thread.daemon = True
self.previewing_thread.start()
else:
self.preview_button['text'] = "Preview"
self.previewing = False
def previewing_work(self):
last_frame_tk = None
last_camera = self.config_json["camera"]
if self.config_json["MonitorWindow"]:
from windowcapture import WindowCapture
video = WindowCapture(self.config_json["WindowPrefix"],self.config_json["crop"])
else:
if sys.platform.startswith('linux'): # all Linux
backend = cv2.CAP_V4L
elif sys.platform.startswith('win'): # MS Windows
backend = cv2.CAP_DSHOW
elif sys.platform.startswith('darwin'): # macOS
backend = cv2.CAP_ANY
else:
backend = cv2.CAP_ANY # auto-detect via OpenCV
video = cv2.VideoCapture(self.config_json["camera"],backend)
video.set(cv2.CAP_PROP_FRAME_WIDTH,1920)
video.set(cv2.CAP_PROP_FRAME_HEIGHT,1080)
video.set(cv2.CAP_PROP_BUFFERSIZE,1)
print(f"camera {self.config_json['camera']}")
while self.previewing:
if self.config_json["camera"] != last_camera:
video = cv2.VideoCapture(self.config_json["camera"],backend)
video.set(cv2.CAP_PROP_FRAME_WIDTH,1920)
video.set(cv2.CAP_PROP_FRAME_HEIGHT,1080)
video.set(cv2.CAP_PROP_BUFFERSIZE,1)
print(f"camera {self.config_json['camera']}")
last_camera = self.config_json["camera"]
eye = self.player_eye
w, h = eye.shape[::-1]
roi_x, roi_y, roi_w, roi_h = self.config_json["view"]
_, frame = video.read()
if frame is not None:
roi = cv2.cvtColor(frame[roi_y:roi_y+roi_h,roi_x:roi_x+roi_w],cv2.COLOR_RGB2GRAY)
res = cv2.matchTemplate(roi,eye,cv2.TM_CCOEFF_NORMED)
_, match, _, max_loc = cv2.minMaxLoc(res)
cv2.rectangle(frame,(roi_x,roi_y), (roi_x+roi_w,roi_y+roi_h), (0,0,255), 2)
if 0.01<match<self.config_json["thresh"]:
cv2.rectangle(frame,(roi_x,roi_y), (roi_x+roi_w,roi_y+roi_h), 255, 2)
else:
max_loc = (max_loc[0] + roi_x,max_loc[1] + roi_y)
bottom_right = (max_loc[0] + w, max_loc[1] + h)
cv2.rectangle(frame,max_loc, bottom_right, 255, 2)
self.raw_screenshot = frame
if self.config_json["display_percent"] != 100:
_, fw, fh = frame.shape[::-1]
frame = cv2.resize(frame,(round(fw*self.config_json["display_percent"]/100),round(fh*self.config_json["display_percent"]/100)))
frame_tk = self.cv_image_to_tk(frame)
self.monitor_tk_buffer = last_frame_tk
self.monitor_display_buffer['image'] = self.monitor_tk_buffer
self.monitor_tk = frame_tk
self.monitor_display['image'] = self.monitor_tk
last_frame_tk = frame_tk
self.monitor_tk_buffer = None
self.monitor_tk = None
def after_task(self):
self.config_json["view"] = [int(self.pos_x.get()),int(self.pos_y.get()),int(self.pos_w.get()),int(self.pos_h.get())]
self.config_json["thresh"] = float(self.pos_th.get())
self.config_json["WindowPrefix"] = self.prefix_input.get()
self.config_json["white_delay"] = float(self.whi_del.get())
self.config_json["advance_delay"] = int(self.adv_del.get())
self.config_json["advance_delay_2"] = int(self.adv_del_2.get())
self.config_json["npc"] = int(self.npc.get())
self.config_json["pokemon_npc"] = int(self.pokemon_npc.get())
self.config_json["timeline_npc"] = int(self.timeline_npc.get())
self.config_json["MonitorWindow"] = bool(self.monitor_window_var.get())
self.config_json["camera"] = int(self.camera_index.get())
self.config_json["display_percent"] = int(self.display_percent.get())
self.adv['text'] = self.advances
self.cd['text'] = self.count_down
self.after(100,self.after_task)
def signal_handler(self, signal, frame):
sys.exit(0)
root = tk.Tk()
app = Application(master=root)
app.mainloop()
``` |
{
"source": "jjalonsoc/coto",
"score": 3
} |
#### File: coto/clients/billing.py
```python
import json
from . import BaseClient
import os
BILLING_CONSOLE_URL = os.environ.get('BILLING_CONSOLE_URL', 'https://console.aws.amazon.com/billing/')
BILLING_REGION = os.environ.get('BILLING_REGION', 'eu-central-1')
class Client(BaseClient):
"""
    A low-level client representing Billing:
.. code-block:: python
import coto
session = coto.Session()
client = session.client('billing')
These are the available methods:
* Account:
* :py:meth:`account_status`
* :py:meth:`close_account`
* Alternate Contacts:
* :py:meth:`list_alternate_contacts`
* :py:meth:`set_alternate_contacts`
* Tax Registrations:
* :py:meth:`list_tax_registrations`
* :py:meth:`set_tax_registration`
* :py:meth:`delete_tax_registration`
"""
def __init__(self, session):
super().__init__(session)
self.__xsrf_token = None
def _xsrf_token(self):
if self.__xsrf_token is None:
self.__xsrf_token = self._get_xsrf_token()
return self.__xsrf_token
def _get_xsrf_token(self):
r = self.session()._get(
BILLING_CONSOLE_URL + 'home?region=' + BILLING_REGION + '&state=hashArgs%23'
)
if r.status_code != 200:
raise Exception("failed get billing xsrf token")
return r.headers['x-awsbc-xsrf-token']
def _get(self, api):
r = self.session()._get(
BILLING_CONSOLE_URL + "rest/v1.0/{0}?state=hashArgs%23".
format(api),
headers={'x-awsbc-xsrf-token': self._xsrf_token()})
if r.status_code != 200:
raise Exception("failed get {0}".format(api))
return r
def _put(self, api, data=None):
if data is None:
r = self.session()._put(
BILLING_CONSOLE_URL + "rest/v1.0/{0}?state=hashArgs%23".
format(api),
headers={
'x-awsbc-xsrf-token': self._xsrf_token(),
'Content-Type': 'application/json',
})
else:
r = self.session()._put(
BILLING_CONSOLE_URL + "rest/v1.0/{0}?state=hashArgs%23".
format(api),
headers={
'x-awsbc-xsrf-token': self._xsrf_token(),
'Content-Type': 'application/json',
},
data=json.dumps(data))
if r.status_code != 200:
raise Exception("failed put {}: {}".format(api, r.text))
return r
# billing api
def list_alternate_contacts(self):
"""
Lists the alternate contacts set for the account. In order to keep the
right people in the loop, you can add an alternate contact for Billing,
Operations, and Security communications.
Request Syntax:
.. code-block:: python
response = client.list_alternate_contacts()
Returns:
dict: Response Syntax
.. code-block:: python
[
{
'contactId': int,
'contactType': 'billing' | 'operations' | 'security',
'email': str,
'name': str,
'phoneNumber': str,
'title': str
},
]
"""
r = self._get('additionalcontacts')
return json.loads(r.text)
def set_alternate_contacts(self, AlternateContacts):
"""
Sets the alternate contacts set for the account. In order to keep the
right people in the loop, you can add an alternate contact for Billing,
Operations, and Security communications.
        Please note that the primary account holder will continue to receive
all email communications.
Contact Types:
``billing``:
The alternate Billing contact will receive billing-related
notifications, such as invoice availability notifications.
``operations``:
The alternate Operations contact will receive
operations-related notifications.
``security``:
The alternate Security contact will receive
security-related notifications. For additional AWS
security-related notifications, please access the Security
Bulletins RSS Feed.
Request Syntax:
.. code-block:: python
response = client.set_alternate_contacts(
AlternateContacts=[
{
'contactType': 'billing',
'email': str,
'name': str,
'phoneNumber': str,
'title': str
},
{
'contactType': 'operations',
'email': str,
'name': str,
'phoneNumber': str,
'title': str
},
{
'contactType': 'security',
'email': str,
'name': str,
'phoneNumber': str,
'title': str
},
]
)
Args:
AlternateContacts (list): List of alternate contacts.
"""
self._put('additionalcontacts', AlternateContacts)
def list_tax_registrations(self):
"""
Lists the tax registrations set for the account.
        Set your tax information so that your 1099-K or W-8BEN is generated
appropriately. Setting this information up also allows you to sell more
than 200 transactions or $20,000 in Reserved Instances.
Status:
``Verified``:
Verified
``Pending``:
Pending
``Deleted``:
Deleted
Request Syntax:
.. code-block:: python
response = client.list_tax_registrations()
Returns:
dict: Response Syntax
.. code-block:: python
[
{
'address': {
'addressLine1': str,
'addressLine2': str,
'city': str,
'countryCode': str,
'postalCode': str,
'state': str,
},
'authority': {
'country': str,
'state': str
},
'currentStatus': 'Verified' | 'Pending',
'legalName': str,
'localTaxRegistration': bool,
'registrationId': str
},
]
"""
r = self._get('taxexemption/eu/vat/information')
return json.loads(r.text)['taxRegistrationList']
def set_tax_registration(self, TaxRegistration):
"""
Set the tax registrations for the account.
        Set your tax information so that your 1099-K or W-8BEN is generated
appropriately. Setting this information up also allows you to sell more
than 200 transactions or $20,000 in Reserved Instances.
Request Syntax:
.. code-block:: python
response = client.set_tax_registration(
TaxRegistration={
'address': {
'addressLine1': str,
'addressLine2': str,
'city': str,
'countryCode': str,
'postalCode': str,
'state': str,
},
'authority': {
'country': str,
'state': str
},
'legalName': str,
'localTaxRegistration': bool,
'registrationId': str,
}
)
Args:
TaxRegistration (dict): Desired tax registration.
"""
self._put('taxexemption/eu/vat/information', TaxRegistration)
def delete_tax_registration(self, TaxRegistration):
"""
Delete the given tax registrations from the account.
Request Syntax:
.. code-block:: python
response = client.delete_tax_registration(
TaxRegistration={
'address': {
'addressLine1': str,
'addressLine2': str,
'city': str,
'countryCode': str,
'postalCode': str,
'state': str,
},
'authority': {
'country': str,
'state': str
},
'legalName': str,
'localTaxRegistration': bool,
'registrationId': str,
}
)
Args:
TaxRegistration (dict): Tax registration to delete.
"""
TaxRegistration['currentStatus'] = 'Deleted'
return self.set_tax_registration(TaxRegistration)
def account_status(self):
"""
Obtain the status of the account.
Status:
``ACTIVE``:
Active
``SUSPENDED``:
Suspended, will be deleted within 90 days
Request Syntax:
.. code-block:: python
response = client.account_status()
Returns:
string: status
"""
r = self._get('account/status')
return json.loads(r.text)
def close_account(self):
"""
Close the account. Returns True iff successful, otherwise throws
an exception.
Request Syntax:
.. code-block:: python
client.close_account()
Returns:
boolean: success
"""
self._put('account')
return True
```
#### File: coto/clients/federation.py
```python
from furl import furl
import json
import requests
from . import BaseClient
import os
FEDERATION_SIGNIN_URL = os.environ.get('FEDERATION_SIGNIN_URL', 'https://signin.aws.amazon.com/federation')
FEDERATION_DESTINATION = os.environ.get('FEDERATION_DESTINATION', 'https://console.aws.amazon.com/')
class Client(BaseClient):
REQUIRES_AUTHENTICATION = False
def __init__(self, session):
super().__init__(session)
def signin(self, boto3_session):
"""
Signin using a boto3 session.
This method uses the federation endpoint to obtain a signin token using
the credentials in your boto3 session. The signin token is then used
to signin into the AWS Management Console.
Although possible, you are not encouraged to call this method directly,
instead follow the following example.
Example:
.. code-block:: python
import boto3
import coto
session = coto.Session(
boto3_session=boto3.Session()
)
Request Syntax:
.. code-block:: python
response = client.signin(
boto3_session=boto3.session.Session,
)
Args:
boto3_session (boto3.session.Session): The boto3 session to use as
provider for AWS credentials.
Returns:
bool: Signin succeeded.
"""
r = self.session()._get(self.get_signin_url(boto3_session))
if r.status_code != 200:
raise Exception("failed session signin")
self.session().authenticated = True
return True
def get_signin_url(self, boto3_session):
"""
        Build an AWS Management Console signin URL for a boto3 session.
This method uses the federation endpoint to obtain a signin token using
the credentials in your boto3 session. The signin token is then used
to signin into the AWS Management Console.
Although possible, you are not encouraged to call this method directly,
instead follow the following example.
Example:
.. code-block:: python
import boto3
import coto
session = coto.Session(
boto3_session=boto3.Session()
)
Request Syntax:
.. code-block:: python
response = client.signin(
boto3_session=boto3.session.Session,
)
Args:
boto3_session (boto3.session.Session): The boto3 session to use as
provider for AWS credentials.
Returns:
            str: Signin URL for the AWS Management Console.
"""
url = furl(FEDERATION_SIGNIN_URL)
url.args['Action'] = "login"
url.args['Issuer'] = None
url.args['Destination'] = FEDERATION_DESTINATION
url.args['SigninToken'] = self.get_signin_token(boto3_session)
return url.url
def get_signin_token(self, boto3_session):
"""
Obtain a signin token for a boto3 session.
This method uses the federation endpoint to obtain a signin token using
the credentials in your boto3 session.
Request Syntax:
.. code-block:: python
response = client.get_signin_token(
boto3_session=boto3.session.Session,
)
Args:
boto3_session (boto3.session.Session): The boto3 session to use as
provider for AWS credentials.
Returns:
str: Signin token.
"""
credentials = boto3_session.get_credentials()
url = FEDERATION_SIGNIN_URL
response = self.session()._get(
url,
params={
"Action":
"getSigninToken",
"Session":
json.dumps({
"sessionId": credentials.access_key,
"sessionKey": credentials.secret_key,
"sessionToken": credentials.token,
})
}
)
return json.loads(response.text)["SigninToken"]
``` |
{
"source": "jjalvare/power-up",
"score": 2
} |
#### File: scripts/python/cobbler_add_distros.py
```python
import os
import xmlrpc.client
import lib.logger as logger
import lib.utilities as util
import lib.genesis as gen
OS_IMAGES_DIR = gen.get_container_os_images_path() + '/'
OS_CONFIG_DIR = OS_IMAGES_DIR + 'config/'
APACHE2_HTML_DIR = '/var/www/html/'
KICKSTARTS_DIR = '/var/lib/cobbler/kickstarts/'
SNIPPETS_DIR = '/var/lib/cobbler/snippets/'
COBBLER_USER = gen.get_cobbler_user()
COBBLER_PASS = gen.get_cobbler_pass()
def extract_iso_images(path, html_dir):
"""Extract ISO images into webserver directory
Args:
path (str): Directory path containing ISOs or path to single
ISO file
html_dir (str): Path to root http directory
Returns:
list: List of tuples ('str: Extracted image directory name',
'str: Relative path to kernel',
'str: Relative path to initrd')
"""
return_list = []
if os.path.isdir(path):
if not path.endswith('/'):
path += '/'
file_list = os.listdir(path)
    elif os.path.isfile(path):
        file_list = [os.path.basename(path)]
        path = os.path.dirname(path) + '/'
    else:
        file_list = []
# Extract ISO into web directory for access over http
for _file in file_list:
if _file.endswith('.iso'):
kernel, initrd = util.extract_iso_image(path + _file, html_dir)
name = _file[:-4]
return_list.append((name,
os.path.join(html_dir, kernel),
os.path.join(html_dir, initrd)))
return return_list
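    # Illustrative (hypothetical) return value for a single extracted Ubuntu ISO:
    #   [('ubuntu-18.04-server-ppc64el',
    #     '/var/www/html/ubuntu-18.04-server-ppc64el/install/vmlinux',
    #     '/var/www/html/ubuntu-18.04-server-ppc64el/install/initrd.gz')]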
def setup_image_config_files(path, html_dir):
"""Setup image config files
Args:
        path (str): Directory path to image config files
html_dir (str): Path to root http directory
"""
if not path.endswith('/'):
path += '/'
# Update preseed configurations with default user id
# Copy preseed & kickstart files to cobbler kickstart directory
for _file in os.listdir(path):
if _file.endswith('.ks') or _file.endswith('.seed'):
util.copy_file(path + _file, KICKSTARTS_DIR)
# Copy custom snippets to cobbler snippets directory
snippets_src_dir = path + 'snippets/'
for _file in os.listdir(snippets_src_dir):
util.copy_file(snippets_src_dir + _file, SNIPPETS_DIR)
# Copy apt source lists to web repo directory
if not os.path.isdir(html_dir + 'ubuntu_sources'):
os.makedirs(html_dir + 'ubuntu_sources')
for _file in os.listdir(path):
if _file.endswith('.list'):
util.copy_file(path + _file, html_dir + 'ubuntu_sources')
def cobbler_add_distro(name, kernel, initrd):
"""Add distro and profile to Cobbler
Args:
name (str): Name of distro/profile
kernel (str): Path to kernel
initrd (str): Path to initrd
"""
log = logger.getlogger()
name_list = [item.lower() for item in name.split('-')]
if 'ubuntu' in name_list:
breed = 'ubuntu'
for item in name_list:
if item == 'amd64':
arch = 'x86_64'
elif item == 'ppc64el':
arch = 'ppc64le'
elif item.startswith('14.04'):
os_version = 'trusty'
elif item.startswith('16.04'):
os_version = 'xenial'
elif item.startswith('18.04'):
os_version = 'bionic'
kernel_options = (
"netcfg/dhcp_timeout=1024 "
"netcfg/choose_interface=auto "
"ipv6.disable=1")
if os.path.isfile('%s%s.seed' % (KICKSTARTS_DIR, name)):
kickstart = '%s%s.seed' % (KICKSTARTS_DIR, name)
else:
kickstart = '%subuntu-default.seed' % KICKSTARTS_DIR
elif ('centos' in name_list) or ('rhel' in name_list):
breed = 'redhat'
for item in name_list:
if item == 'x86_64':
arch = 'x86_64'
elif item == 'ppc64le':
arch = 'ppc64le'
elif item.startswith('7'):
os_version = 'rhel7'
kernel_options = "text"
if os.path.isfile('%s%s.ks' % (KICKSTARTS_DIR, name)):
kickstart = '%s%s.ks' % (KICKSTARTS_DIR, name)
else:
kickstart = '%sRHEL-7-default.ks' % KICKSTARTS_DIR
else:
log.info(f'Cobbler distro {name} unrecognized and not added')
return
cobbler_server = xmlrpc.client.Server("http://127.0.0.1/cobbler_api")
token = cobbler_server.login(COBBLER_USER, COBBLER_PASS)
new_distro_create = cobbler_server.new_distro(token)
cobbler_server.modify_distro(
new_distro_create,
"name",
name,
token)
cobbler_server.modify_distro(
new_distro_create,
"arch",
arch,
token)
cobbler_server.modify_distro(
new_distro_create,
"kernel",
kernel,
token)
cobbler_server.modify_distro(
new_distro_create,
"initrd",
initrd,
token)
cobbler_server.modify_distro(
new_distro_create,
"breed",
breed,
token)
cobbler_server.modify_distro(
new_distro_create,
"os_version",
os_version,
token)
cobbler_server.modify_distro(
new_distro_create,
"kernel_options",
kernel_options,
token)
cobbler_server.save_distro(new_distro_create, token)
log.info(f"Cobbler Add Distro: name={name}")
log.debug(f"name={name} kernel={kernel} initrd{initrd}")
new_profile_create = cobbler_server.new_profile(token)
cobbler_server.modify_profile(
new_profile_create,
"name",
name,
token)
cobbler_server.modify_profile(
new_profile_create,
"distro",
name,
token)
cobbler_server.modify_profile(
new_profile_create,
"enable_menu",
"True",
token)
cobbler_server.modify_profile(
new_profile_create,
"kickstart",
kickstart,
token)
cobbler_server.save_profile(new_profile_create, token)
log.info(
"Cobbler Add Profile: name=%s, distro=%s" %
(name, name))
cobbler_server.sync(token)
log.info("Running Cobbler sync")
def cobbler_add_profile(distro, name):
log = logger.getlogger()
cobbler_server = xmlrpc.client.Server("http://127.0.0.1/cobbler_api")
token = cobbler_server.login(COBBLER_USER, COBBLER_PASS)
distro_list = cobbler_server.get_distros()
existing_distro_list = []
for existing_distro in distro_list:
existing_distro_list.append(existing_distro['name'])
if distro not in existing_distro_list:
log.warning(
"Cobbler Skipping Profile - Distro Unavailable: "
"name=%s, distro=%s" %
(name, distro))
return
new_profile_create = cobbler_server.new_profile(token)
cobbler_server.modify_profile(
new_profile_create,
"name",
name,
token)
cobbler_server.modify_profile(
new_profile_create,
"distro",
distro,
token)
cobbler_server.modify_profile(
new_profile_create,
"enable_menu",
"True",
token)
cobbler_server.modify_profile(
new_profile_create,
"kickstart",
"/var/lib/cobbler/kickstarts/%s.seed" % name,
token)
cobbler_server.save_profile(new_profile_create, token)
log.info(
"Cobbler Add Profile: name=%s, distro=%s" %
(name, distro))
cobbler_server.sync(token)
log.info("Running Cobbler sync")
if __name__ == '__main__':
logger.create()
distros = extract_iso_images(OS_IMAGES_DIR, APACHE2_HTML_DIR)
setup_image_config_files(OS_CONFIG_DIR, APACHE2_HTML_DIR)
for distro in distros:
name = distro[0]
kernel = os.path.join(APACHE2_HTML_DIR, distro[1])
initrd = os.path.join(APACHE2_HTML_DIR, distro[2])
cobbler_add_distro(name, kernel, initrd)
for _file in os.listdir(OS_CONFIG_DIR):
if _file.endswith('.seed') or _file.endswith('.ks'):
profile = _file[:-5]
distro = _file.rsplit('.', 2)[0]
if profile != distro and os.path.isdir(APACHE2_HTML_DIR + distro):
cobbler_add_profile(distro, profile)
```
#### File: scripts/python/cobbler_install.py
```python
import argparse
import os
import sys
import pwd
from shutil import copy2
import re
from random import choice
from netaddr import IPNetwork
from git import Repo
from lib.config import Config
import lib.genesis as gen
import lib.utilities as util
import lib.logger as logger
URL = 'https://github.com/cobbler/cobbler.git'
BRANCH = 'release28'
TFTPBOOT = '/tftpboot'
DNSMASQ_TEMPLATE = '/etc/cobbler/dnsmasq.template'
MODULES_CONF = '/etc/cobbler/modules.conf'
COBBLER_CONF_ORIG = '/etc/cobbler/cobbler.conf'
COBBLER_CONF = '/etc/apache2/conf-available/cobbler.conf'
COBBLER_WEB_CONF_ORIG = '/etc/cobbler/cobbler_web.conf'
COBBLER_WEB_CONF = '/etc/apache2/conf-available/cobbler_web.conf'
COBBLER_WEB_SETTINGS = '/usr/local/share/cobbler/web/settings.py'
WEBUI_SESSIONS = '/var/lib/cobbler/webui_sessions'
COBBLER_SETTINGS = '/etc/cobbler/settings'
PXEDEFAULT_TEMPLATE = '/etc/cobbler/pxe/pxedefault.template'
KICKSTART_DONE = '/var/lib/cobbler/snippets/kickstart_done'
ROOT_AUTH_KEYS = '/root/.ssh/authorized_keys'
WWW_AUTH_KEYS = '/var/www/html/authorized_keys'
NTP_CONF = '/etc/ntp.conf'
COBBLER = '/usr/local/bin/cobbler'
LOCAL_PY_DIST_PKGS = '/usr/local/lib/python2.7/dist-packages'
PY_DIST_PKGS = '/usr/lib/python2.7/dist-packages'
INITD = '/etc/init.d/'
APACHE2_CONF = '/etc/apache2/apache2.conf'
MANAGE_DNSMASQ = '/opt/cobbler/cobbler/modules/manage_dnsmasq.py'
COBBLER_DLCONTENT = '/opt/cobbler/cobbler/action_dlcontent.py'
COBBLER_SETTINGS_PY = '/opt/cobbler/cobbler/settings.py'
A2ENCONF = '/usr/sbin/a2enconf'
A2ENMOD = '/usr/sbin/a2enmod'
def cobbler_install(config_path=None):
"""Install and configure Cobbler in container.
This function must be called within the container 'pup-venv'
python virtual environment. Cobbler will be installed within
this environment.
"""
cfg = Config(config_path)
log = logger.getlogger()
# Check to see if cobbler is already installed
try:
util.bash_cmd('cobbler check')
log.info("Cobbler is already installed")
return
except util.CalledProcessError as error:
if error.returncode == 127:
log.debug("'cobbler' command not found, continuing with "
"installation")
else:
log.warning("Cobbler is installed but not working:")
log.warning(error.output)
print("\nPress enter to remove Cobbler and attempt to ")
print("re-install, or 'T' to terminate.")
resp = input("\nEnter or 'T': ")
log.debug("User response = \'{}\'".format(resp))
if resp == 'T':
sys.exit('POWER-Up stopped at user request')
# Clone cobbler github repo
cobbler_url = URL
cobbler_branch = BRANCH
install_dir = gen.get_cobbler_install_dir()
if os.path.exists(install_dir):
log.info(
"Removing Cobbler source directory \'{}\'".format(install_dir))
util.bash_cmd('rm -rf %s' % install_dir)
log.info(
"Cloning Cobbler branch \'%s\' from \'%s\'" %
(cobbler_branch, cobbler_url))
repo = Repo.clone_from(
cobbler_url, install_dir, branch=cobbler_branch, single_branch=True)
log.info(
"Cobbler branch \'%s\' cloned into \'%s\'" %
(repo.active_branch, repo.working_dir))
    # Modify the Cobbler script that writes DHCP reservations so that the
# lease time is included.
dhcp_lease_time = cfg.get_globals_dhcp_lease_time()
util.replace_regex(MANAGE_DNSMASQ, r'systxt \= systxt \+ \"\\\\n\"',
"systxt = systxt + \",{}\\\\n\"".
format(dhcp_lease_time))
# Use non-secure http to download network boot-loaders
util.replace_regex(COBBLER_DLCONTENT,
'https://cobbler.github.io',
'http://cobbler.github.io')
# Use non-secure http to download signatures
util.replace_regex(COBBLER_SETTINGS_PY,
'https://cobbler.github.io',
'http://cobbler.github.io')
# Run cobbler make install
util.bash_cmd('cd %s; make install' % install_dir)
# Backup original files
util.backup_file(DNSMASQ_TEMPLATE)
util.backup_file(MODULES_CONF)
util.backup_file(COBBLER_WEB_SETTINGS)
util.backup_file(COBBLER_CONF_ORIG)
util.backup_file(COBBLER_WEB_CONF_ORIG)
util.backup_file(COBBLER_SETTINGS)
util.backup_file(PXEDEFAULT_TEMPLATE)
util.backup_file(KICKSTART_DONE)
util.backup_file(NTP_CONF)
util.backup_file(APACHE2_CONF)
# Create tftp root directory
if not os.path.exists(TFTPBOOT):
mode = 0o755
os.mkdir(TFTPBOOT, mode)
# Set IP address range to use for unrecognized DHCP clients
dhcp_range = 'dhcp-range=%s,%s,%s # %s'
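    # A filled-in template entry looks like (illustrative values only):
    #   dhcp-range=192.168.3.101,192.168.3.254,1h # 192.168.3.0/24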
util.remove_line(DNSMASQ_TEMPLATE, 'dhcp-range')
dhcp_pool_start = gen.get_dhcp_pool_start()
for index, netw_type in enumerate(cfg.yield_depl_netw_client_type()):
depl_netw_client_ip = cfg.get_depl_netw_client_cont_ip(index)
depl_netw_client_netmask = cfg.get_depl_netw_client_netmask(index)
network = IPNetwork(depl_netw_client_ip + '/' +
depl_netw_client_netmask)
entry = dhcp_range % (str(network.network + dhcp_pool_start),
str(network.network + network.size - 1),
str(dhcp_lease_time),
str(network.cidr))
util.append_line(DNSMASQ_TEMPLATE, entry)
# Save PXE client network information for later
if netw_type == 'pxe':
cont_pxe_ipaddr = depl_netw_client_ip
cont_pxe_netmask = depl_netw_client_netmask
bridge_pxe_ipaddr = cfg.get_depl_netw_client_brg_ip(index)
# Configure dnsmasq to enable TFTP server
util.append_line(DNSMASQ_TEMPLATE, 'enable-tftp')
util.append_line(DNSMASQ_TEMPLATE, 'tftp-root=%s' % TFTPBOOT)
util.append_line(DNSMASQ_TEMPLATE, 'user=root')
# Configure dnsmasq to use deployer as gateway
if cfg.get_depl_gateway():
util.remove_line(DNSMASQ_TEMPLATE, 'dhcp-option')
util.append_line(DNSMASQ_TEMPLATE, 'dhcp-option=3,%s' % bridge_pxe_ipaddr)
# Cobbler modules configuration
util.replace_regex(MODULES_CONF, 'module = manage_bind',
'module = manage_dnsmasq')
util.replace_regex(MODULES_CONF, 'module = manage_isc',
'module = manage_dnsmasq')
# Copy cobbler.conf into apache2/conf-available
copy2(COBBLER_CONF_ORIG, COBBLER_CONF)
# Copy cobbler_web.conf into apache2/conf-available
copy2(COBBLER_WEB_CONF_ORIG, COBBLER_WEB_CONF)
# Apache2 configuration
util.bash_cmd('%s cobbler cobbler_web' % A2ENCONF)
util.bash_cmd('%s proxy' % A2ENMOD)
util.bash_cmd('%s proxy_http' % A2ENMOD)
# Set secret key in web settings
secret_key = _generate_random_characters()
util.replace_regex(COBBLER_WEB_SETTINGS, '^SECRET_KEY = .*',
'SECRET_KEY = "%s"' % secret_key)
# Remove "Order allow,deny" lines from cobbler configuration
regex = '.*Order allow,deny'
util.remove_line(COBBLER_CONF, regex)
util.remove_line(COBBLER_WEB_CONF, regex)
# Replace "Allow from all" with "Require all granted" in
regex = 'Allow from all'
replace = 'Require all granted'
util.replace_regex(COBBLER_CONF, regex, replace)
util.replace_regex(COBBLER_WEB_CONF, regex, replace)
# chown www-data WEBUI_SESSIONS
uid = pwd.getpwnam("www-data").pw_uid
gid = -1 # unchanged
os.chown(WEBUI_SESSIONS, uid, gid)
# Cobbler settings
util.replace_regex(COBBLER_SETTINGS, '127.0.0.1', cont_pxe_ipaddr)
util.replace_regex(COBBLER_SETTINGS, 'manage_dhcp: 0', 'manage_dhcp: 1')
util.replace_regex(COBBLER_SETTINGS, 'manage_dns: 0', 'manage_dns: 1')
util.replace_regex(COBBLER_SETTINGS, 'pxe_just_once: 0', 'pxe_just_once: 1')
globals_env_variables = cfg.get_globals_env_variables()
if globals_env_variables and 'http_proxy' in globals_env_variables:
util.replace_regex(COBBLER_SETTINGS, 'proxy_url_ext: ""',
'proxy_url_ext: %s' %
globals_env_variables['http_proxy'])
util.replace_regex(COBBLER_SETTINGS, 'default_password_crypted:',
'default_password_crypted: '
'$1$clusterp$/gd3ep<PASSWORD>.')
    # Create symlink to the cobbler package in the system Python dist-packages
if not os.path.exists(PY_DIST_PKGS):
util.bash_cmd('ln -s %s/cobbler %s' %
(LOCAL_PY_DIST_PKGS, PY_DIST_PKGS))
# Set PXE timeout to maximum
util.replace_regex(PXEDEFAULT_TEMPLATE, r'TIMEOUT \d+',
'TIMEOUT 35996')
util.replace_regex(PXEDEFAULT_TEMPLATE, r'TOTALTIMEOUT \d+',
'TOTALTIMEOUT 35996')
# Fix line break escape in kickstart_done snippet
util.replace_regex(KICKSTART_DONE, "\\\\nwget", "wget")
util.replace_regex(KICKSTART_DONE, r"\$saveks", "$saveks + \"; \\\\\\\"\n")
util.replace_regex(KICKSTART_DONE, r"\$runpost", "$runpost + \"; \\\\\\\"\n")
# Copy authorized_keys ssh key file to web repo directory
copy2(ROOT_AUTH_KEYS, WWW_AUTH_KEYS)
os.chmod(WWW_AUTH_KEYS, 0o444)
# Add mgmt subnet to NTP service configuration
cont_pxe_broadcast = str(
IPNetwork(cont_pxe_ipaddr + '/' + cont_pxe_netmask).broadcast)
util.append_line(NTP_CONF, 'broadcast %s' % cont_pxe_broadcast)
# Add 'required-stop' line to cobblerd init.d to avoid warning
util.replace_regex(INITD + 'cobblerd', '### END INIT INFO',
'# Required-Stop:\n### END INIT INFO')
# Set Apache2 'ServerName'
util.append_line(APACHE2_CONF, "ServerName localhost")
# Restart services
_restart_service('ntp')
_restart_service('cobblerd')
_restart_service('apache2')
# Update Cobbler boot-loader files
util.bash_cmd('%s get-loaders' % COBBLER)
# Update cobbler list of OS signatures
util.bash_cmd('%s signature update' % COBBLER)
# Run Cobbler sync
util.bash_cmd('%s sync' % COBBLER)
# Restart services (again)
_restart_service('apache2')
_restart_service('cobblerd')
_restart_service('dnsmasq')
# Set services to start on boot
_service_start_on_boot('cobblerd')
_service_start_on_boot('ntp')
def _restart_service(service):
util.bash_cmd('service %s restart' % service)
def _service_start_on_boot(service):
util.replace_regex(INITD + service,
'# Default-Start:.*',
'# Default-Start: 2 3 4 5')
util.replace_regex(INITD + service,
'# Default-Stop:.*',
'# Default-Stop: 0 1 6')
util.bash_cmd('update-rc.d %s defaults' % service)
def _generate_random_characters(length=100):
characters = "abcdefghijklmnopqrstuvwxyz0123456789^&*(-_=+)"
return re.escape("".join([choice(characters) for _ in range(length)]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config_path', default='config.yml',
help='Config file path. Absolute path or relative '
'to power-up/')
parser.add_argument('--print', '-p', dest='log_lvl_print',
help='print log level', default='info')
parser.add_argument('--file', '-f', dest='log_lvl_file',
help='file log level', default='info')
args = parser.parse_args()
logger.create(args.log_lvl_print, args.log_lvl_file)
if not os.path.isfile(args.config_path):
args.config_path = gen.GEN_PATH + args.config_path
print('Using config path: {}'.format(args.config_path))
if not os.path.isfile(args.config_path):
sys.exit('{} does not exist'.format(args.config_path))
cobbler_install(args.config_path)
```
#### File: scripts/python/configure_data_switches.py
```python
import os
import sys
import pprint
import argparse
import lib.logger as logger
from lib.config import Config
from lib.switch import SwitchFactory
from lib.switch_exception import SwitchException
from lib.genesis import GEN_PATH
# from write_switch_memory import WriteSwitchMemory
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
PP = pprint.PrettyPrinter(indent=1, width=120)
class Tree(dict):
"""Instantiates a nested dictionary which allows assignment to arbitrary
depths.
"""
def __getitem__(self, key):
if key in self:
return self.get(key)
return self.setdefault(key, Tree())
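# Minimal usage sketch for Tree (keys below are illustrative only):
#   t = Tree()
#   t['bond0']['node-type-a']['switch1'] = ['1', '2']  # intermediate levels auto-created
#   t['bond0']['node-type-a']['switch1']                # -> ['1', '2']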
def _get_port_chan_list():
"""
Args:
Returns:
Tree of switches and port channels or mlag port channels. Switches in
an MLAG are grouped in pairs.
"""
log = logger.getlogger()
ifcs = CFG.get_interfaces()
    # Gather bond definitions from the interfaces list
bond_ifcs = {}
for ifc in ifcs:
if 'bond_mode' in ifc:
for _ifc in ifcs:
if 'bond_master' in _ifc and _ifc['bond_master'] == ifc['iface']:
if ifc['label'] in bond_ifcs:
bond_ifcs[ifc['label']].append(_ifc['label'])
else:
bond_ifcs[ifc['label']] = [_ifc['label']]
elif 'BONDING_MASTER' in ifc:
for _ifc in ifcs:
if 'MASTER' in _ifc and _ifc['MASTER'] == ifc['DEVICE']:
if ifc['label'] in bond_ifcs:
bond_ifcs[ifc['label']].append(_ifc['label'])
else:
bond_ifcs[ifc['label']] = [_ifc['label']]
pretty_str = PP.pformat(bond_ifcs)
log.debug('bond_ifcs')
log.debug('\n' + pretty_str)
# Gather bond node template, switch and port information
bonds = Tree()
for bond in bond_ifcs:
for ntmpl_ind, ntmpl_label in enumerate(CFG.yield_ntmpl_label()):
ntmpl_ifcs = CFG.get_ntmpl_ifcs_all(ntmpl_ind)
if bond in ntmpl_ifcs:
for phyintf_idx in CFG.yield_ntmpl_phyintf_data_ind(ntmpl_ind):
phyintf = CFG.get_ntmpl_phyintf_data_ifc(
ntmpl_ind, phyintf_idx)
if phyintf in bond_ifcs[bond]:
switch = CFG.get_ntmpl_phyintf_data_switch(
ntmpl_ind, phyintf_idx)
ports = CFG.get_ntmpl_phyintf_data_ports(
ntmpl_ind, phyintf_idx)
ports = [str(ports[i]) for i in range(len(ports))]
bonds[bond][ntmpl_label][phyintf][switch] = ports
pretty_str = PP.pformat(bonds)
log.debug('Bonds:')
log.debug('\n' + pretty_str)
# For each bond, aggregate ports across node templates and group into port
# channel groups
ports_list = Tree()
for bond in bonds:
for ntmpl in bonds[bond]:
bond_ports_list = Tree()
for ifc in bonds[bond][ntmpl]:
for switch in bonds[bond][ntmpl][ifc]:
ports = bonds[bond][ntmpl][ifc][switch]
if switch not in bond_ports_list:
bond_ports_list[switch] = [ports]
else:
bond_ports_list[switch].append(ports)
for switch in bond_ports_list:
# group the ports into channel groups
if switch not in ports_list[bond][ntmpl]:
                    ports_list[bond][ntmpl][switch] = list(zip(*bond_ports_list[switch]))
else:
                    ports_list[bond][ntmpl][switch] += list(zip(*bond_ports_list[switch]))
pretty_str = PP.pformat(ports_list)
log.debug('ports_list:')
log.debug('\n' + pretty_str)
chan_ports = Tree()
# Aggregate port groups across switches or mlag switch pairs.
# Final data structure is a dictionary organized by bond, node template,
# switch / switch pair.
for bond in ports_list:
for ntmpl in ports_list[bond]:
for switch in ports_list[bond][ntmpl]:
peer_switch = CFG.get_sw_data_mlag_peer(switch)
mstr_switch = CFG.get_sw_data_mstr_switch([switch, peer_switch])
chan_ports[bond][ntmpl][mstr_switch][switch] = \
ports_list[bond][ntmpl][switch]
pretty_str = PP.pformat(chan_ports)
log.debug('Port channel ports:')
log.debug('\n' + pretty_str)
return chan_ports
def _get_vlan_info(ifc):
ifcs = CFG.get_interfaces()
vlan_num = None
vlan_ifc_name = ''
for _ifc in ifcs:
if _ifc['label'] == ifc:
if 'vlan_raw_device' in _ifc:
vlan_num = int(_ifc['iface'].rpartition('.')[2])
vlan_ifc_name = _ifc['vlan_raw_device']
break
elif 'VLAN' in _ifc:
vlan_num = int(_ifc['DEVICE'].rpartition('.')[2])
vlan_ifc_name = _ifc['DEVICE'].rpartition('.')[0]
break
return vlan_num, vlan_ifc_name
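# Illustrative (hypothetical) inputs: an Ubuntu-style interface with iface 'bond0.20'
# and 'vlan_raw_device: bond0' yields (20, 'bond0'); a Red Hat-style interface with
# DEVICE 'eth1.100' and VLAN set yields (100, 'eth1').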
def _get_vlan_slaves(vlan_ifc_name):
ifcs = CFG.get_interfaces()
vlan_slaves = []
for _ifc in ifcs:
if 'bond_master' in _ifc and _ifc['bond_master'] == vlan_ifc_name:
vlan_slaves.append(_ifc['label'])
elif 'MASTER' in _ifc and _ifc['MASTER'] == vlan_ifc_name:
vlan_slaves.append(_ifc['label'])
return vlan_slaves
def _get_vlan_list():
""" Aggregate vlan data.
Args:
Returns:
Tree of switches and vlan information by port
"""
log = logger.getlogger()
vlan_list = Tree()
for ntmpl_ind in CFG.yield_ntmpl_ind():
ntmpl_ifcs = CFG.get_ntmpl_ifcs_all(ntmpl_ind)
for ifc in ntmpl_ifcs:
vlan_num, vlan_ifc_name = _get_vlan_info(ifc)
if vlan_num:
vlan_slaves = _get_vlan_slaves(vlan_ifc_name)
for phyintf_idx in CFG.yield_ntmpl_phyintf_data_ind(ntmpl_ind):
phy_ifc_lbl = CFG.get_ntmpl_phyintf_data_ifc(ntmpl_ind, phyintf_idx)
if phy_ifc_lbl in vlan_slaves:
vlan_ports = CFG.get_ntmpl_phyintf_data_ports(
ntmpl_ind, phyintf_idx)
switch = CFG.get_ntmpl_phyintf_data_switch(
ntmpl_ind, phyintf_idx)
if vlan_num in vlan_list[switch]:
vlan_list[switch][vlan_num] += vlan_ports
else:
vlan_list[switch][vlan_num] = vlan_ports
pretty_str = PP.pformat(vlan_list)
log.debug('vlan list')
log.debug('\n' + pretty_str)
# Aggregate by switch and port number
port_vlans = Tree()
for switch in vlan_list:
for vlan in vlan_list[switch]:
for port in vlan_list[switch][vlan]:
if str(port) in port_vlans[switch]:
port_vlans[switch][str(port)].append(vlan)
else:
port_vlans[switch][str(port)] = [vlan]
pretty_str = PP.pformat(port_vlans)
log.debug('port_vlans')
log.debug('\n' + pretty_str)
return port_vlans
def _get_mtu_list():
""" Aggregate mtu port data.
Returns: Dictionary of {switch : {port : mtu value, ...}}
"""
log = logger.getlogger()
mtu_list = Tree()
for ntmpl_ind in CFG.yield_ntmpl_ind():
for phyintf_idx in CFG.yield_ntmpl_phyintf_data_ind(ntmpl_ind):
mtu = ''
phy_ifc = CFG.get_ntmpl_phyintf_data_ifc(ntmpl_ind, phyintf_idx)
ifc = CFG.get_interface(phy_ifc)
if 'mtu' in ifc:
mtu = ifc['mtu']
elif 'MTU' in ifc:
mtu = ifc['MTU']
if mtu:
switch = CFG.get_ntmpl_phyintf_data_switch(ntmpl_ind, phyintf_idx)
ports = CFG.get_ntmpl_phyintf_data_ports(ntmpl_ind, phyintf_idx)
if switch in mtu_list and mtu in mtu_list[switch]:
mtu_list[switch][mtu] += ports
else:
mtu_list[switch][mtu] = ports
pretty_str = PP.pformat(mtu_list)
log.debug('mtu_list')
log.debug('\n' + pretty_str)
return mtu_list
def _get_mlag_info():
""" Get mlag switches and their config info
Returns:
        dict: MLAG configuration info keyed by master switch label
"""
log = logger.getlogger()
mlag_list = Tree()
for sw_lbl in CFG.yield_sw_data_label():
peer_lbl = CFG.get_sw_data_mlag_peer(sw_lbl)
mstr_sw = CFG.get_sw_data_mstr_switch([sw_lbl, peer_lbl])
if peer_lbl and mstr_sw == sw_lbl and mstr_sw not in mlag_list:
mlag_list[mstr_sw][sw_lbl]
mlag_list[mstr_sw][peer_lbl]
for mstr_sw in mlag_list:
for sw in mlag_list[mstr_sw]:
sw_idx = CFG.get_sw_data_index_by_label(sw)
for link_idx, link in enumerate(CFG.yield_sw_data_links_target(sw_idx)):
if link in mlag_list[mstr_sw]:
mlag_list[mstr_sw][sw]['vlan'] = \
CFG.get_sw_data_links_vlan(sw_idx, link_idx)
if sw == mstr_sw:
mlag_list[mstr_sw][sw]['vip'] = None
else:
mlag_list[mstr_sw][sw]['vip'] = \
CFG.get_sw_data_links_vip(sw_idx, link_idx) + ' /' + \
str(CFG.get_depl_netw_mgmt_prefix()[0])
mlag_list[mstr_sw][sw]['ports'] = \
CFG.get_sw_data_links_port(sw_idx, link_idx)
mlag_list[mstr_sw][sw]['cidr'] = \
CFG.get_sw_data_links_ip(sw_idx, link_idx) + ' /' + \
str(CFG.get_sw_data_links_prefix(sw_idx, link_idx))
if len(mlag_list[mstr_sw]) == 2:
keys = sorted(mlag_list[mstr_sw].keys())
mlag_list[mstr_sw][keys[0]]['peer_ip'] = \
str(mlag_list[mstr_sw][keys[1]]['cidr']).split(' /')[0]
mlag_list[mstr_sw][keys[1]]['peer_ip'] = \
str(mlag_list[mstr_sw][keys[0]]['cidr']).split(' /')[0]
break
pretty_str = PP.pformat(mlag_list)
log.debug('mlag_list')
log.debug('\n' + pretty_str)
return mlag_list
def _is_port_in_a_port_channel(switch, port, chan_ports):
""" Returns True if port in a port channel, else returns False.
Args:
switch (str): switch label
port (int or str): port number
"""
for sw in chan_ports:
for _sw in chan_ports[sw]:
if switch == _sw:
for port_group in chan_ports[sw][_sw]:
if port in port_group:
return True
break
return False
def _get_port_vlans(switch, port, port_vlans):
if port in port_vlans[switch]:
return port_vlans[switch][port]
def _get_port_mtu(switch, port, mtu_list):
for mtu in mtu_list[switch]:
if port in mtu_list[switch][mtu]:
return mtu
def _get_channel_num(port_grp):
""" Return a channel number given a port group. The lowest value
    port number in the group is returned. No checks are made to ensure
that all ports are in the same chassis.
Args:
        port_grp (tuple or list of str): port numbers of the form 'n',
            'm/n', 'ethm/n' or similar
"""
return min([int(port_grp[i].rpartition('/')[-1])
for i in range(len(port_grp))])
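# e.g. (illustrative) _get_channel_num(('Eth1/7', 'Eth1/5')) returns 5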
def configure_data_switch(config_path):
""" Configures data (access) switches. Configuration is driven by the
config.yml file.
    Args:
        config_path (str): Path to the config.yml file
Returns:
"""
log = logger.getlogger()
global CFG
CFG = Config(config_path)
port_vlans = _get_vlan_list()
mtu_list = _get_mtu_list()
chan_ports = _get_port_chan_list()
mlag_list = _get_mlag_info()
# Create switch class instances for each switch
sw_dict = {}
# create dictionaries to hold enumerations for each switch
port_mode = {}
allow_op = {}
for sw_ai in CFG.yield_sw_data_access_info():
label = sw_ai[0]
sw_dict[label] = SwitchFactory.factory(*sw_ai[1:])
port_mode[label], allow_op[label] = sw_dict[label].get_enums()
# Program switch vlans
for switch in port_vlans:
vlans = []
for port in port_vlans[switch]:
print('.', end="")
sys.stdout.flush()
for vlan in port_vlans[switch][port]:
if vlan not in vlans:
vlans.append(vlan)
sw_dict[switch].create_vlan(vlan)
log.debug('Creating vlan {} on switch {}'.format(vlan, switch))
try:
sw_dict[switch].set_switchport_mode(port, port_mode[switch].TRUNK)
except SwitchException as exc:
log.warning('Switch: {}. Failed setting port {} to trunk mode'.
format(switch, port))
log.warning(str(exc))
try:
sw_dict[switch].allowed_vlans_port(port, allow_op[switch].ADD,
port_vlans[switch][port])
except SwitchException as exc:
log.warning('Switch: {}. Failed adding vlans {} to port {}'.
format(switch, port_vlans[switch][port], port))
log.warning(str(exc))
log.debug('switch: {} port: {} vlans: {}'.format(
switch, port, port_vlans[switch][port]))
# Program switch mtu
for switch in mtu_list:
for mtu in mtu_list[switch]:
for port in mtu_list[switch][mtu]:
sw_dict[switch].set_mtu_for_port(port, mtu)
log.debug('port: {} set mtu: {}'.format(port, mtu))
# Configure MLAG
for mstr_sw in mlag_list:
log.debug('Configuring MLAG. mlag switch mstr: ' + mstr_sw)
for sw in mlag_list[mstr_sw]:
is_mlag = sw_dict[sw].is_mlag_configured()
log.debug('vPC/MLAG configured on switch: {}, {}'.format(sw, is_mlag))
if not is_mlag:
print('.', end="")
sys.stdout.flush()
log.debug('Configuring MLAG on switch {}'.format(sw))
sw_dict[sw].configure_mlag(
mlag_list[mstr_sw][sw]['vlan'],
min(mlag_list[mstr_sw][mstr_sw]['ports']),
mlag_list[mstr_sw][sw]['cidr'],
mlag_list[mstr_sw][sw]['peer_ip'],
mlag_list[mstr_sw][sw]['vip'],
mlag_list[mstr_sw][sw]['ports'])
else:
log.debug('MLAG already configured. Skipping'
' MLAG configuration on switch {}.'.format(sw))
for sw in mlag_list[mstr_sw]:
if sw_dict[sw].is_mlag_configured():
sw_dict[sw].enable_mlag()
# Configure port channels and MLAG port channels
for bond in chan_ports:
for ntmpl in chan_ports[bond]:
for mstr_sw in chan_ports[bond][ntmpl]:
if len(chan_ports[bond][ntmpl][mstr_sw]) == 2:
# MLAG
for sw in chan_ports[bond][ntmpl][mstr_sw]:
for idx, port_grp in enumerate(
chan_ports[bond][ntmpl][mstr_sw][sw]):
chan_num = _get_channel_num(port_grp)
log.debug('create mlag interface {} on switch {}'.
format(chan_num, sw))
sw_dict[sw].remove_mlag_interface(chan_num)
sw_dict[sw].create_mlag_interface(chan_num)
print('.', end="")
sys.stdout.flush()
# All ports in a port group should have the same vlans
# So use any one for setting the MLAG port channel vlans
vlan_port = chan_ports[bond][ntmpl][mstr_sw][sw][idx][0]
vlans = _get_port_vlans(sw, vlan_port, port_vlans)
_port_mode = port_mode[sw].TRUNK if vlans \
else port_mode[sw].ACCESS
sw_dict[sw].set_mlag_port_channel_mode(chan_num, _port_mode)
mtu = _get_port_mtu(sw, chan_num, mtu_list)
if vlans:
log.debug('Switch {}, add vlans {} to mlag port '
'channel {}.'.format(sw, vlans, chan_num))
sw_dict[sw].allowed_vlans_mlag_port_channel(
chan_num, allow_op[sw].NONE)
sw_dict[sw].allowed_vlans_mlag_port_channel(
chan_num, allow_op[sw].ADD, vlans)
if mtu:
log.debug('set_mtu_for_mlag_port_channel: {}'.
format(mtu))
sw_dict[sw].set_mtu_for_lag_port_channel(
chan_num, mtu)
log.debug('Switch {}, adding ports {} to mlag chan '
'num: {}'.format(sw, port_grp, chan_num))
try:
sw_dict[sw].bind_ports_to_mlag_interface(
port_grp, chan_num)
except SwitchException as exc:
log.warning('Failure configuring port in switch:'
' {}.\n{}'.format(sw, str(exc)))
else:
# Configure LAG
for sw in chan_ports[bond][ntmpl][mstr_sw]:
for port_grp in chan_ports[bond][ntmpl][mstr_sw][sw]:
chan_num = _get_channel_num(port_grp)
print('.', end="")
sys.stdout.flush()
log.debug('Lag channel group: {} on switch: {}'.format(
chan_num, sw))
sw_dict[sw].create_port_channel_ifc(chan_num)
vlans = _get_port_vlans(sw, port_grp[0], port_vlans)
_port_mode = port_mode[sw].TRUNK if vlans else \
port_mode[sw].ACCESS
sw_dict[sw].set_port_channel_mode(chan_num, _port_mode)
mtu = _get_port_mtu(sw, chan_num, mtu_list)
if vlans:
log.debug('switch {}, add vlans {} to lag port '
'channel {}'.format(sw, vlans, chan_num))
sw_dict[sw].allowed_vlans_port_channel(
chan_num, allow_op[sw].NONE)
sw_dict[sw].allowed_vlans_port_channel(
chan_num, allow_op[sw].ADD, vlans)
if mtu:
log.debug('set mtu for port channel: {}'.format(mtu))
sw_dict[sw].set_mtu_for_port_channel(chan_num, mtu)
log.debug('Switch: {}, adding port(s) {} to lag chan'
' num: {}'.format(sw, port_grp, chan_num))
try:
sw_dict[sw].remove_ports_from_port_channel_ifc(
port_grp)
sw_dict[sw].add_ports_to_port_channel_ifc(
port_grp, chan_num)
except SwitchException as exc:
log.warning('Failure configuring port in switch:'
'{}.\n {}'.format(sw, str(exc)))
def deconfigure_data_switch(config_path):
""" Deconfigures data (access) switches. Deconfiguration is driven by the
config.yml file. Generally deconfiguration is done in reverse order of
configuration.
Args:
    config_path (str): Path to the config file (config.yml).
"""
log = logger.getlogger()
global CFG
CFG = Config(config_path)
port_vlans = _get_vlan_list()
mtu_list = _get_mtu_list()
chan_ports = _get_port_chan_list()
mlag_list = _get_mlag_info()
# Create switch class instances for each switch
sw_dict = {}
port_mode = {}
allow_op = {}
for sw_ai in CFG.yield_sw_data_access_info():
label = sw_ai[0]
sw_dict[label] = SwitchFactory.factory(*sw_ai[1:])
port_mode[label], allow_op[label] = sw_dict[label].get_enums()
# Deconfigure channel ports and MLAG channel ports
for bond in chan_ports:
for ntmpl in chan_ports[bond]:
for mstr_sw in chan_ports[bond][ntmpl]:
if len(chan_ports[bond][ntmpl][mstr_sw]) == 2:
# Deconfigure mlag channel ports
for sw in chan_ports[bond][ntmpl][mstr_sw]:
if sw_dict[sw].is_mlag_configured():
for idx, port_grp in enumerate(chan_ports[bond][ntmpl]
[mstr_sw][sw]):
chan_num = _get_channel_num(port_grp)
log.info('Deleting mlag interface: {} on'
' switch: {}'.format(chan_num, sw))
sw_dict[sw].remove_mlag_interface(chan_num)
else:
# deconfigure LAG channel ports
for sw in chan_ports[bond][ntmpl][mstr_sw]:
for port_grp in chan_ports[bond][ntmpl][mstr_sw][sw]:
chan_num = _get_channel_num(port_grp)
log.info('Deleting Lag interface {} on switch: {}'.format(
chan_num, sw))
sw_dict[sw].remove_port_channel_ifc(chan_num)
# Deconfigure MLAG
for mstr_sw in mlag_list:
for sw in mlag_list[mstr_sw]:
is_mlag = sw_dict[sw].is_mlag_configured()
log.info('vPC/MLAG configured on sw {}: {}'.format(sw, is_mlag))
if is_mlag:
print('\n\nDo you wish to deconfigure MLAG on switch {}?'.format(sw))
print('This will stop all MLAG communication on all switch ports')
print('OK to deconfigure MLAG?')
resp = input("Enter (Y/yes/n): ")
if resp in ['Y', 'yes']:
log.info('Deconfiguring MLAG on switch: {}'.format(sw))
sw_dict[sw].deconfigure_mlag()
else:
log.debug('\nMLAG not configured on switch: {}'.format(sw))
# Deconfigure switch vlans - first remove from ports
for switch in port_vlans:
for port in port_vlans[switch]:
log.info('switch: {}, port: {}, removing vlans: {}'.format(
switch, port, port_vlans[switch][port]))
sw_dict[switch].allowed_vlans_port(
port, allow_op[switch].REMOVE, port_vlans[switch][port])
log.info('Switch {}, setting port: {} to access mode'.format(
switch, port))
sw_dict[switch].set_switchport_mode(port, port_mode[switch].ACCESS)
# Delete the vlans
for switch in port_vlans:
vlans = []
for port in port_vlans[switch]:
for vlan in port_vlans[switch][port]:
if vlan not in vlans:
vlans.append(vlan)
sw_dict[switch].delete_vlan(vlan)
log.info('Switch: {}, deleting vlan: {}'.format(switch, vlan))
# Deconfigure switch mtu
for switch in mtu_list:
for mtu in mtu_list[switch]:
for port in mtu_list[switch][mtu]:
sw_dict[switch].set_mtu_for_port(port, 0)
log.info('switch: {}, port: {}, setting mtu: {}'.format(
switch, port, 'default mtu'))
def gather_and_display(config_path):
global CFG
CFG = Config(config_path)
port_vlans = _get_vlan_list()
mtu_list = _get_mtu_list()
chan_ports = _get_port_chan_list()
mlag_list = _get_mlag_info()
print('\n\nport_vlans:')
PP.pprint(port_vlans)
print('\nmtu_list:')
PP.pprint(mtu_list)
print('\nmlag_list:')
PP.pprint(mlag_list)
print('\nchan_ports:')
PP.pprint(chan_ports)
# if self.cfg.is_write_switch_memory():
# switch = WriteSwitchMemory(LOG, INV_FILE)
# switch.write_data_switch_memory()
if __name__ == '__main__':
""" Configures or deconfigures data switches.
Args: config file path plus optional flags (--display, --deconfig) and log level options
"""
parser = argparse.ArgumentParser()
parser.add_argument('config_path', nargs='?',
help='path to config file',
default='config.yml')
parser.add_argument('--display', action='store_true',
help='display gathered switch info')
parser.add_argument('--deconfig', action='store_true',
help='deconfigure switch')
parser.add_argument('--print', '-p', dest='log_lvl_print',
help='print log level', default='info')
parser.add_argument('--file', '-f', dest='log_lvl_file',
help='file log level', default='info')
args = parser.parse_args()
logger.create(args.log_lvl_print, args.log_lvl_file)
if not os.path.isfile(args.config_path):
args.config_path = GEN_PATH + args.config_path
print('Using config path: {}'.format(args.config_path))
if not os.path.isfile(args.config_path):
sys.exit('{} does not exist'.format(args.config_path))
if args.display:
gather_and_display(args.config_path)
sys.exit()
if args.deconfig:
deconfigure_data_switch(args.config_path)
sys.exit()
configure_data_switch(args.config_path)
```
#### File: scripts/python/get_dhcp_lease_info.py
```python
import os.path
import re
from orderedattrdict import AttrDict
import lib.logger as logger
from lib.exception import UserException
class GetDhcpLeases(object):
def __init__(self, dhcp_leases_file):
dhcp_leases_file = os.path.abspath(
os.path.dirname(os.path.abspath(dhcp_leases_file)) +
os.path.sep +
os.path.basename(dhcp_leases_file))
log = logger.getlogger()
try:
fds = open(dhcp_leases_file, 'r')
except:
msg = 'DHCP leases file not found: %s'
log.error(msg % (dhcp_leases_file))
raise UserException(msg % dhcp_leases_file)
self.mac_ip = AttrDict()
for line in fds:
match = re.search(
r'^\S+\s+(\S+)\s+(\S+)',
line)
mac = match.group(1)
ipaddr = match.group(2)
self.mac_ip[mac] = ipaddr
log.debug('Lease found - MAC: %s - IP: %s' % (mac, ipaddr))
def get_mac_ip(self):
return self.mac_ip
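# Parsing sketch (hypothetical dnsmasq-style lease line, not from the source):
#     1609459200 52:54:00:12:34:56 192.168.12.101 node-1 *
# The first field (lease expiry) is skipped and the next two fields are kept,
# so mac_ip['52:54:00:12:34:56'] == '192.168.12.101'.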
```
#### File: scripts/python/get_switch_model_info.py
```python
import sys
import re
from orderedattrdict import AttrDict
from lib.inventory import Inventory
from lib.logger import Logger
from lib.ssh import SSH
class GetSwitchInfoAssignClass(object):
"""Get switch model information and assign class.
This class is responsible for collecting switch model information
and assigning the corresponding switch class.
"""
supported_mgmt_switches = (
('G8052', 'Lenovo'),)
supported_data_switches = (
('MLNX-OS', 'Mellanox'),)
MGMT_SWITCH_TYPE = 'mgmt'
DATA_SWITCH_TYPE = 'data'
ENABLE_REMOTE_CONFIG_MGMT = 'enable;configure terminal; %s'
SHOW_VERSION_MTM = 'show version | include ^MTM'
MODEL = 'Model'
MTM_VALUE = 'MTM Value'
ENABLE_REMOTE_CONFIG_DATA = 'cli enable "configure terminal" "%s"'
SHOW_VERSION_PRODUCT = 'show version | include ^Product'
PRODUCT_NAME = 'Product name'
SHOW_INVENTORY_CHASSIS = 'show inventory | include ^CHASSIS'
CHASSIS = 'CHASSIS'
def __init__(self, log, inv_file):
self.info_list = []
self.class_list = []
self.info_dict = AttrDict()
self.class_dict = AttrDict()
self.ipv4 = None
self.userid = None
self.password = None
self.enable_remote = None
self.inv = Inventory(log, inv_file)
self.log = log
def update_mgmt_switch_info(self):
"""Update management switch model information and assign class."""
self.enable_remote = self.ENABLE_REMOTE_CONFIG_MGMT
self.info_list = []
self.class_list = []
for switch in self.inv.yield_switches(self.inv.SwitchType.MGMT):
self.info_dict = AttrDict()
self.class_dict = AttrDict()
self.ipv4 = switch.ip_addr
self.userid = switch.userid
self.password = switch.password
switch_valid = False
output = self._send_cmd(self.SHOW_VERSION_MTM, 'Query MTM', False)
switch_valid |= self._set_switch_info_class(
r'\s+(\S+)\(config\)#',
self.MODEL,
output,
self.supported_mgmt_switches)
switch_valid |= self._set_switch_info_class(
r'%s:\s+(\S+)\s+' % self.MTM_VALUE,
self.MTM_VALUE,
output,
self.supported_mgmt_switches)
if not switch_valid:
if self.info_list:
self.log.error(
'Unsupported management switch: %s' %
self.info_dict)
else:
self.log.error('Management switch could not be identified')
sys.exit(1)
if self.info_list:
self.inv.update_switch_model_info(
self.inv.SwitchType.MGMT, self.info_list)
self.inv.update_switch_class(
self.inv.SwitchType.MGMT, self.class_list)
def update_data_switch_info(self):
"""Update data switch model information and assign class."""
self.enable_remote = self.ENABLE_REMOTE_CONFIG_DATA
self.info_list = []
self.class_list = []
for switch in self.inv.yield_switches(self.inv.SwitchType.DATA):
self.info_dict = AttrDict()
self.class_dict = AttrDict()
self.ipv4 = switch.ip_addr
self.userid = switch.userid
self.password = switch.password
switch_valid = False
output = self._send_cmd(
self.SHOW_VERSION_PRODUCT, 'Query Product Name', False)
switch_valid |= self._set_switch_info_class(
r'%s:\s+(\S+)\s+' % self.PRODUCT_NAME,
self.PRODUCT_NAME,
output,
self.supported_data_switches)
output = self._send_cmd(
self.SHOW_INVENTORY_CHASSIS, 'Query CHASSIS', False)
switch_valid |= self._set_switch_info_class(
r'%s\s+(\S+)\s+' % self.CHASSIS,
self.CHASSIS,
output,
self.supported_data_switches)
if not switch_valid:
if self.info_list:
self.log.error(
'Unsupported data switch: %s' %
self.info_dict)
else:
self.log.error('Data switch could not be identified')
sys.exit(1)
if self.info_list:
self.inv.update_switch_model_info(
self.inv.SwitchType.DATA, self.info_list)
self.inv.update_switch_class(
self.inv.SwitchType.DATA, self.class_list)
def _set_switch_info_class(
self, pattern, attr, output, supported_switches):
"""Add model and class information to switch structure.
Check whether switch is supported.
Args:
pattern (string): Command response pattern.
attr (string): Attribute key.
output (string): Command output.
supported_switches (tuple of tuples): Supported switches.
Returns:
(boolean): Whether switch is supported based on given attribute.
"""
pat = re.compile(
pattern, re.MULTILINE)
match = pat.search(output)
if match:
switch_attr = match.group(1)
self.info_dict[attr] = switch_attr
self.info_list.append(self.info_dict)
attr_list = [sublist[0] for sublist in supported_switches]
class_list = [sublist[1] for sublist in supported_switches]
self.log.info(attr + ': ' + switch_attr + ' on ' + self.ipv4)
if switch_attr in attr_list:
index = attr_list.index(switch_attr)
self.class_dict = class_list[index]
self.class_list.append(self.class_dict)
return True
return False
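# Illustration (assumed output): with attr 'Product name' and switch output
# containing 'Product name: MLNX-OS ', the captured value 'MLNX-OS' matches
# supported_data_switches and the associated class 'Mellanox' is recorded.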
def _send_cmd(self, cmd, msg, status_check=True):
"""Send command to switch.
Args:
cmd (string): Switch command.
msg (string): Description for log file.
status_check (boolean): Whether to check for SSH error.
Returns:
(string): Command output from switch.
"""
ssh = SSH(self.log)
self.log.debug(cmd + ' on ' + self.ipv4)
status, stdout_, _ = ssh.exec_cmd(
self.ipv4,
self.userid,
self.password,
self.enable_remote % cmd)
if status:
if status_check:
self.log.error(
'Failed: ' + msg + ' on ' + self.ipv4 +
' - Error: ' +
stdout_.replace('\n', ' ').replace('\r', ''))
sys.exit(1)
else:
self.log.info(
msg + ' on ' + self.ipv4 +
' - Error: ' +
stdout_.replace('\n', ' ').replace('\r', ''))
else:
self.log.info(msg + ' on ' + self.ipv4)
return stdout_
if __name__ == '__main__':
"""Get switch model information and assign class.
Args:
INV_FILE (string): Inventory file.
SWITCH_TYPE (string): Switch type ('mgmt' or 'data').
LOG_LEVEL (string): Log level.
Raises:
Exception: If parameter count is invalid.
"""
LOG = Logger(__file__)
if len(sys.argv) != 4:
try:
raise Exception()
except:
LOG.error('Invalid argument count')
sys.exit(1)
INV_FILE = sys.argv[1]
SWITCH_TYPE = sys.argv[2]
LOG.set_level(sys.argv[3])
SWITCH = GetSwitchInfoAssignClass(LOG, INV_FILE)
if SWITCH_TYPE == SWITCH.MGMT_SWITCH_TYPE:
SWITCH.update_mgmt_switch_info()
elif SWITCH_TYPE == SWITCH.DATA_SWITCH_TYPE:
SWITCH.update_data_switch_info()
else:
try:
raise Exception()
except:
LOG.error(
"Invalid switch type '%s', expecting '%s' or '%s'" % (
SWITCH_TYPE,
SWITCH.MGMT_SWITCH_TYPE,
SWITCH.DATA_SWITCH_TYPE))
sys.exit(1)
```
#### File: scripts/python/inv_reserve_ipmi_pxe_ips.py
```python
import argparse
import os.path
import sys
import xmlrpc.client
from netaddr import IPNetwork
from time import time, sleep
from lib.config import Config
from lib.inventory import Inventory
import lib.genesis as gen
import lib.utilities as util
from set_power_clients import set_power_clients
from lib.exception import UserException
import lib.logger as logger
import lib.bmc as _bmc
DNSMASQ_TEMPLATE = '/etc/cobbler/dnsmasq.template'
COBBLER_USER = gen.get_cobbler_user()
COBBLER_PASS = gen.get_cobbler_pass()
WAIT_TIME = 1200
POWER_WAIT = gen.get_power_wait()
SLEEP_TIME = gen.get_power_sleep_time()
class IPManager(object):
"""Manage IP address assignments from a given network
Args:
network (IPNetwork): netaddr IPNetwork object
start_offset (int): Starting IP address offset
"""
def __init__(self, network, start_offset):
self.log = logger.getlogger()
self.network = network
self.next_offset = start_offset
self.next_ip = network.network + self.next_offset
def get_next_ip(self, reserve=True):
"""Get next available sequential IP address
Args:
reserve (bool): If true the IP will be considered reserved
Returns:
ip_address (str): Next IP address
Raises:
UserException: No more IP addresses available
"""
if self.next_ip == self.network.network + self.network.size:
raise UserException('Not enough IP addresses in network \'%s\'' %
str(self.network.cidr))
ip_address = str(self.next_ip)
if reserve:
self.next_ip += 1
return ip_address
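# Minimal usage sketch (values hypothetical, not from the original file):
#     pool = IPManager(IPNetwork('192.168.12.0/24'), 20)
#     pool.get_next_ip()               # '192.168.12.20' (reserved)
#     pool.get_next_ip(reserve=False)  # '192.168.12.21' (peek only)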
def inv_set_ipmi_pxe_ip(config_path):
"""Configure DHCP IP reservations for IPMI and PXE interfaces
IP addresses are assigned sequentially within the appropriate
client networks starting with the DHCP pool start offset defined
in 'lib.genesis'.
Raises:
UserException: - No IPMI or PXE client networks defined within
the 'config.yml'
- Unable to connect to BMC at new IPMI IP address
"""
log = logger.getlogger()
cfg = Config(config_path)
inv = Inventory(cfg_file=config_path)
ipmiNetwork = None
pxeNetwork = None
nodes_list = []
# All nodes should be powered off before starting
set_power_clients('off', config_path, wait=POWER_WAIT)
# Create IPManager object for IPMI and/or PXE networks
start_offset = gen.get_dhcp_pool_start()
for index, netw_type in enumerate(cfg.yield_depl_netw_client_type()):
ip = cfg.get_depl_netw_client_cont_ip(index)
netmask = cfg.get_depl_netw_client_netmask(index)
if netw_type == 'ipmi':
ipmiNetwork = IPManager(IPNetwork(ip + '/' + netmask), start_offset)
elif netw_type == 'pxe':
pxeNetwork = IPManager(IPNetwork(ip + '/' + netmask), start_offset)
# If only one network is defined use the same IPManager for both
if ipmiNetwork is None and pxeNetwork is not None:
ipmiNetwork = pxeNetwork
elif ipmiNetwork is not None and pxeNetwork is None:
pxeNetwork = ipmiNetwork
elif ipmiNetwork is None and pxeNetwork is None:
raise UserException('No IPMI or PXE client network found')
# Modify IP addresses for each node
dhcp_lease_time = cfg.get_globals_dhcp_lease_time()
for index, hostname in enumerate(inv.yield_nodes_hostname()):
# IPMI reservations are written directly to the dnsmasq template
ipmi_ipaddr = inv.get_nodes_ipmi_ipaddr(0, index)
ipmi_mac = inv.get_nodes_ipmi_mac(0, index)
ipmi_new_ipaddr = ipmiNetwork.get_next_ip()
util.remove_line(DNSMASQ_TEMPLATE, "^dhcp-host=" + ipmi_mac + ".*")
util.append_line(DNSMASQ_TEMPLATE, 'dhcp-host=%s,%s-bmc,%s,%s\n' %
(ipmi_mac, hostname, ipmi_new_ipaddr,
dhcp_lease_time))
_adjust_dhcp_pool(ipmiNetwork.network,
ipmiNetwork.get_next_ip(reserve=False),
dhcp_lease_time)
# PXE reservations are handled by Cobbler
pxe_ipaddr = inv.get_nodes_pxe_ipaddr(0, index)
pxe_mac = inv.get_nodes_pxe_mac(0, index)
pxe_new_ipaddr = pxeNetwork.get_next_ip()
log.info('Modifying Inventory PXE IP - Node: %s MAC: %s '
'Original IP: %s New IP: %s' %
(hostname, pxe_mac, pxe_ipaddr, pxe_new_ipaddr))
inv.set_nodes_pxe_ipaddr(0, index, pxe_new_ipaddr)
_adjust_dhcp_pool(pxeNetwork.network,
pxeNetwork.get_next_ip(reserve=False),
dhcp_lease_time)
# Run Cobbler sync to process DNSMASQ template
cobbler_server = xmlrpc.client.Server("http://127.0.0.1/cobbler_api")
token = cobbler_server.login(COBBLER_USER, COBBLER_PASS)
cobbler_server.sync(token)
log.debug("Running Cobbler sync")
# Save info to verify connection comes back up
ipmi_userid = inv.get_nodes_ipmi_userid(index)
ipmi_password = inv.get_nodes_ipmi_password(index)
bmc_type = inv.get_nodes_bmc_type(index)
# No need to reset and check if the IP does not change
if ipmi_new_ipaddr != ipmi_ipaddr:
nodes_list.append({'hostname': hostname,
'index': index,
'ipmi_userid': ipmi_userid,
'ipmi_password': ipmi_password,
'ipmi_new_ipaddr': ipmi_new_ipaddr,
'ipmi_ipaddr': ipmi_ipaddr,
'ipmi_mac': ipmi_mac,
'bmc_type': bmc_type})
# Issue MC cold reset to force refresh of IPMI interfaces
for node in nodes_list:
ipmi_userid = node['ipmi_userid']
ipmi_password = node['ipmi_password']
ipmi_ipaddr = node['ipmi_ipaddr']
bmc_type = node['bmc_type']
bmc = _bmc.Bmc(ipmi_ipaddr, ipmi_userid, ipmi_password, bmc_type)
if bmc.is_connected():
log.debug(f'Issuing BMC Cold Reset - Node: {node["hostname"]} '
f'- IP: {ipmi_ipaddr}')
if not bmc.bmc_reset('cold'):
log.error(f'Failed attempting BMC reset on {node["ipmi_ipaddr"]}')
bmc.logout()
log.info('Pausing 1 minute for BMCs to begin reset')
sleep(60)
# Check connections for set amount of time
end_time = time() + WAIT_TIME
while time() < end_time and len(nodes_list) > 0:
print(f'\rTimeout count down: {int(end_time - time())} ', end='')
sys.stdout.flush()
success_list = []
sleep(2)
for list_index, node in enumerate(nodes_list):
hostname = node['hostname']
index = node['index']
ipmi_userid = node['ipmi_userid']
ipmi_password = node['ipmi_password']
ipmi_new_ipaddr = node['ipmi_new_ipaddr']
ipmi_ipaddr = node['ipmi_ipaddr']
ipmi_mac = node['ipmi_mac']
bmc_type = node['bmc_type']
# Attempt to connect to new IPMI IP address
bmc = _bmc.Bmc(ipmi_new_ipaddr, ipmi_userid, ipmi_password, bmc_type)
if bmc.is_connected():
if bmc.chassis_power('status') in ('on', 'off'):
log.debug(f'BMC connection success - Node: {hostname} '
f'IP: {ipmi_ipaddr}')
else:
log.debug(f'BMC communication failed - Node: {hostname} '
f'IP: {ipmi_ipaddr}')
continue
log.info(f'Modifying Inventory IPMI IP - Node: {hostname} MAC: '
f'{ipmi_mac} Original IP: {ipmi_ipaddr} New IP: '
f'{ipmi_new_ipaddr}')
inv.set_nodes_ipmi_ipaddr(0, index, ipmi_new_ipaddr)
success_list.append(list_index)
else:
log.debug(f'BMC connection failed - Node: {hostname} '
f'IP: {ipmi_ipaddr}')
continue
# Remove nodes that connected successfully
for remove_index in sorted(success_list, reverse=True):
del nodes_list[remove_index]
for node in nodes_list:
    log.error('Unable to connect to BMC at new IPMI IP address- Node: %s '
              'MAC: %s Original IP: %s New IP: %s' %
              (node['hostname'], node['ipmi_mac'],
               node['ipmi_ipaddr'], node['ipmi_new_ipaddr']))
if len(nodes_list) > 0:
raise UserException('%d BMC(s) not responding after IP modification' %
len(nodes_list))
def _adjust_dhcp_pool(network, dhcp_pool_start, dhcp_lease_time):
dhcp_range = 'dhcp-range=%s,%s,%s # %s'
new_entry = dhcp_range % (dhcp_pool_start,
str(network.network + network.size - 1),
str(dhcp_lease_time),
str(network.cidr))
entry = "^dhcp-range=.* # " + str(network.cidr)
util.replace_regex(DNSMASQ_TEMPLATE, entry, new_entry)
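# Resulting template line (sketch with hypothetical values): for the network
# 192.168.12.0/24, a pool start of 192.168.12.21 and a lease time of 1h this
# writes
#     dhcp-range=192.168.12.21,192.168.12.255,1h # 192.168.12.0/24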
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config_path', default='config.yml',
help='Config file path. Absolute path or relative '
'to power-up/')
parser.add_argument('--print', '-p', dest='log_lvl_print',
help='print log level', default='info')
parser.add_argument('--file', '-f', dest='log_lvl_file',
help='file log level', default='info')
args = parser.parse_args()
if not os.path.isfile(args.config_path):
args.config_path = gen.GEN_PATH + args.config_path
print('Using config path: {}'.format(args.config_path))
if not os.path.isfile(args.config_path):
sys.exit('{} does not exist'.format(args.config_path))
logger.create(args.log_lvl_print, args.log_lvl_file)
inv_set_ipmi_pxe_ip(args.config_path)
```
#### File: python/lib/lenovo.py
```python
import re
import os.path
import datetime
import lib.logger as logger
from lib.switch_exception import SwitchException
from lib.switch_common import SwitchCommon
from lib.genesis import GEN_PASSIVE_PATH, GEN_PATH
from lib.utilities import get_col_pos
class Lenovo(SwitchCommon):
"""Class for configuring and retrieving information for a Lenovo
switch. This class works with the Lenovo G8052. Similar
Lenovo switches may work or may need some methods overridden.
This class can be instantiated in 'active' mode, in which case the
switch will be configured or in 'passive' mode in which case the
commands needed to configure the switch are written to a file.
When in passive mode, information requests will return 'None'.
In passive mode, a filename can be generated which
will contain the active mode switch commands used for switch
configuration. This outfile will be written to the
'power-up/passive' directory if it exists or to the
'power-up' directory if the passive directory does not
exist. If no outfile name is provided a default name is used.
In active mode, the 'host', 'userid' and 'password' named variables
are required. If 'mode' is not provided, it is defaulted to 'passive'.
Args:
host (string): Management switch management interface IP address
or hostname or if in passive mode, a fully qualified filename of the
acquired mac address table for the switch.
userid (string): Switch management interface login user ID.
password (string): Switch management interface login password.
mode (string): Set to 'passive' to run in passive switch mode.
Defaults to 'active'
outfile (string): Name of file to direct switch output to when
in passive mode.
access_list (list of str): Optional list containing host, userid
and password.
"""
ENABLE_REMOTE_CONFIG = 'en ; configure terminal ; {} '
SEP = ';'
IFC_ETH_CFG = 'no prompting ; interface port {}'
SHOW_PORT = 'show interface trunk'
PORT_PREFIX = ''
CLEAR_MAC_ADDRESS_TABLE = 'clear mac-address-table'
SHOW_MAC_ADDRESS_TABLE = 'show mac-address-table'
MGMT_INTERFACE_CONFIG = 'interface ip {}'
SET_INTERFACE_IPADDR = ';ip address {}'
SET_INTERFACE_MASK = ';ip netmask {}'
SET_VLAN = ';vlan {}'
ENABLE_INTERFACE = ';enable'
CREATE_INTERFACE = MGMT_INTERFACE_CONFIG + SET_INTERFACE_IPADDR +\
SET_INTERFACE_MASK + SET_VLAN + ENABLE_INTERFACE
REMOVE_IFC = 'no interface ip {}'
SHOW_INTERFACE = 'show interface ip {}'
UP_STATE_IFC = 'up'
MAX_INTF = 128
def __init__(self, host=None, userid=None, password=None, mode=None,
outfile=None):
self.log = logger.getlogger()
self.mode = mode
self.host = host
if self.mode == 'active':
self.userid = userid
self.password = password
elif self.mode == 'passive':
if os.path.isdir(GEN_PASSIVE_PATH):
self.outfile = GEN_PASSIVE_PATH + '/' + outfile
else:
self.outfile = GEN_PATH + '/' + outfile
f = open(self.outfile, 'a+')
f.write(str(datetime.datetime.now()) + '\n')
f.close()
super(Lenovo, self).__init__(host, userid, password, mode, outfile)
def show_ports(self, format='raw'):
def _get_avlans(line):
avlans = ''
line = line.split(' ')
for item in line:
if '-' in item:
item = item.split('-')
n = int(item[0])
while n <= int(item[1]):
avlans = avlans + ', ' + str(n)
n += 1
else:
avlans = avlans + ', ' + item
return avlans[2:]
if self.mode == 'passive':
return None
ports = {}
port_info = self.send_cmd(self.SHOW_PORT)
if format == 'raw' or format is None:
return port_info
elif format == 'std':
indcs = get_col_pos(port_info, ('Port', 'Tag', 'PVID', r'VLAN\(s'))
port_info = port_info.splitlines()
for line in port_info:
line = self.sanitize_line(line)
# pad to 86 chars
line = f'{line:<86}'
# look for rows (look for first few fields)
match = re.search(r'^\s*\w+\s+\d+\s+(y|n)', line)
if match:
port = str(int(line[indcs['Port'][0]:indcs['Port'][1]]))
mode = line[indcs['Tag'][0]:indcs['Tag'][1]]
mode = 'access' if 'n' in mode else 'trunk'
pvid = str(int(line[indcs['PVID'][0]:indcs['PVID'][1]]))
avlans = line[indcs[r'VLAN\(s'][0]:indcs[r'VLAN\(s'][1]].strip(' ')
avlans = _get_avlans(avlans)
ports[port] = {'mode': mode, 'nvlan': pvid, 'avlans': avlans}
# look for avlan continuation lines
# look for leading spaces (10 is arbitrary)
if f"{' ':<10}" == line[:10]:
avlans = line[indcs[r'VLAN\(s'][0]:indcs[r'VLAN\(s'][1]].strip(' ')
match = re.search(r'^(\d+ |(\d+-\d+ ))+\d+', avlans)
if match:
avlans = _get_avlans(match.group(0))
ports[port]['avlans'] += f', {avlans}'
return ports
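# Illustrative 'std' return shape (assumed): ports keyed by port number, e.g.
#     {'18': {'mode': 'trunk', 'nvlan': '1', 'avlans': '1, 20, 21'}}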
def remove_interface(self, vlan, host, netmask):
"""Removes an in-band management interface.
Args:
host (string): hostname or ipv4 address in dot decimal notation
netmask (string): netmask in dot decimal notation
vlan (int or string): value between 1 and 4094.
raises:
SwitchException if unable to remove interface
"""
vlan = str(vlan)
interfaces = self.show_interfaces(vlan, host, netmask, format='std')
if interfaces[-1][0]['configured']:
self.send_cmd(self.REMOVE_IFC.format(interfaces[-1][0]['found ifc']))
interfaces = self.show_interfaces(vlan, host, netmask, format='std')
if interfaces[-1][0]['configured']:
self.log.debug('Failed to remove interface Vlan {}.'.format(vlan))
raise SwitchException('Failed to remove interface Vlan {}.'.format(vlan))
else:
self.log.debug('Interface vlan {} does not exist.'.format(vlan))
return
def show_interfaces(self, vlan='', host=None, netmask=None, format=None):
"""Gets from the switch a list of programmed in-band interfaces. The
standard format consists of a list of lists. Each list entry contains
the vlan number, the ip address, netmask and the number of the interface.
For switches which do not number the in-band interfaces, the last item in each list
is set to '-'. When vlan, host and netmask are specified, the last list
item contains 'True' or 'False' indicating whether an interface already
exists with the specified vlan, host and netmask. For switches which do
number the interfaces, (ie Lenovo) the last list item also contains the
next available interface number and the number of the found interface.
Args:
vlan (string): String representation of integer between
1 and 4094. If none specified, usually the default vlan is used.
host (string): hostname or ipv4 address in dot decimal notation
netmask (string): netmask in dot decimal notation
format (string): 'std' If format is not specified, The native (raw)
format is returned. If format is set to 'std', a 'standard' format
is returned.
Returns:
If format is unspecified, returns a raw string of data as it
comes from the switch. If format == 'std' a standard format is returned.
Standard format consists of a list of lists. Each list entry contains
the vlan number, the ip address, netmask and the number of the interface.
For switches which do not number the in-band interfaces, the last item
in each list is set to '-'. When vlan, host and netmask are specified,
the last list item contains a dictionary. The dictionary has three entries;
'configured' : set to True or False indicating whether an
interface already exists with the specified vlan, host and netmask.
'avail ifc' : For switches which do number the interfaces, (ie Lenovo)
this dictionary entry contains the next available interface number.
'found ifc' : For switches which do number the interfaces, this entry
contains the number of the found interface.
"""
if self.mode == 'passive':
return None
ifcs = []
vlan = str(vlan)
found = False
ava_ifc, fnd_ifc, cnt = 0, 0, 0
ifc_info = self.send_cmd(self.SHOW_INTERFACE.format(''))
if format is None:
return ifc_info
ifc_info = ifc_info.splitlines()
for line in ifc_info:
match = re.search(r'^(\d+):\s+IP4\s+(\w+.\w+.\w+.\w+)\s+(\w+.\w+.\w+.\w+)'
r'\s+\w+.\w+.\w+.\w+,\s+vlan\s(\d+),', line)
if match:
cnt += 1
ifcs.append(
[match.group(4), match.group(2), match.group(3), match.group(1)])
if [vlan, host, netmask, match.group(1)] in ifcs:
fnd_ifc = match.group(1)
found = True
if cnt != int(match.group(1)) and ava_ifc == 0:
ava_ifc = cnt
ifcs.append(
[{'configured': found,
'avail ifc': str(ava_ifc),
'found ifc': str(fnd_ifc)}])
return ifcs
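# Illustrative 'std' return shape (assumed): per-interface entries of
# [vlan, ip, netmask, ifc number] followed by a status dict, roughly
#     [['10', '192.168.5.10', '255.255.255.0', '1'],
#      [{'configured': True, 'avail ifc': ..., 'found ifc': '1'}]]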
def configure_interface(self, host, netmask, vlan=1, intf=None):
"""Configures a management interface. This implementation checks
if the host ip is already in use. If it is, a check is made to
see if it is configured as specified. If not, an exception is raised.
Lenovo numbers interfaces. If no interface number is specified,
the next available unconfigured interface is used. The specified
vlan will be created if it does not already exist.
When implementing this method for a new switch, minimally this method
should configure (overwrite if necessary) the specified interface.
Args:
host (string): hostname or ipv4 address in dot decimal notation
netmask (string): netmask in dot decimal notation
vlan (string): Optional. string representation of integer between
1 and 4094. If none specified, usually the default vlan is used.
intf (string): optional. String representation of integer between
1 and 128.
raises:
SwitchException if unable to program interface
"""
interfaces = self.show_interfaces(vlan, host, netmask, format='std')
if self.mode == 'active' and interfaces[-1][0]['configured']:
self.log.debug(
'Switch interface {} already configured'.format(
interfaces[-1][0]['found ifc']))
return
if vlan is not None:
self.create_vlan(vlan)
if self.mode == 'active' and intf is None:
intf = interfaces[-1][0]['avail ifc']
self.send_cmd(self.CREATE_INTERFACE.format(intf, host, netmask, vlan))
interfaces = self.show_interfaces(vlan, host, netmask, format='std')
if self.mode == 'active' and not interfaces[-1][0]['configured']:
self.log.error(
'Failed configuring management interface ip {}'.format(intf))
raise SwitchException(
'Failed configuring management interface ip {}'.format(intf))
return
@staticmethod
def sanitize_line(line):
# remove "Press q to quit, any other key to continue" line
line = re.sub('\\x1b.*\\x08', '', line)
return line
class switch(object):
@staticmethod
def factory(host=None, userid=None, password=None, mode=None,
outfile=None):
return Lenovo(host, userid, password, mode, outfile)
```
#### File: python/lib/menu.py
```python
import curses
KEY_ESC = 27
class Menu(object):
"""Curses menu class.
Curses menu consisting of a title, subtitle (optional), and a list of
selectable items. Items can be a submenu (:obj:`MenuItemSubmenu`), function
call (:obj:`MenuItemFunction`), or non-selectable text (:obj:`MenuItem`).
Args:
log (:obj:`Logger`): Log file object.
stdscr (:obj:`WindowObject`): `Python curses module`_ window object
representing the whole screen.
title (str): Title displayed at top of menu.
subtitle (str, optional): Subtitle displayed underneath title.
items (list of :obj:`MenuItem`): Selectable menu items.
Attributes:
log (:obj:`Logger`): Log file object.
stdscr (:obj:`WindowObject`): `Python curses module`_ window object
representing the whole screen.
title (str): Title displayed at top of menu.
subtitle (str): Subtitle displayed underneath title.
items (list of :obj:`MenuItem`): Selectable menu items.
num_selects (list of int): List of integers representing the Unicode
code points of the characters ``0``, ``1``, and so on up to the
total number of menu selections that can fit on a single page
(currently limited to 10).
enter_selects (list of int): List of integers representing the Unicode
points of ``return`` characters used to detect a menu item
selection.
cursor_pos (int): Current menu item cursor position.
.. _`Python curses module`:
https://docs.python.org/2/library/curses.html
.. _`Curses Programming with Python`:
https://docs.python.org/2/howto/curses.html
"""
def __init__(self, log, stdscr, title, subtitle=None, items=[]):
self.log = log
self.stdscr = stdscr
self.title = title
self.subtitle = subtitle
self.items = items
self.num_selects = (
[ord(str(n)) for n in range(len(self.items)) if n < 10])
self.enter_selects = [curses.KEY_ENTER, ord('\n')]
curses.curs_set(0)
self.cursor_pos = 0
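# Hedged construction sketch (illustrative only): inside a curses.wrapper()
# callback one might build and display a menu roughly as
#     menu = Menu(log, stdscr, 'Main menu', 'Pick an item', [MenuItemExit('Exit')])
#     menu.show_menu()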
def show_menu(self):
"""Print menu to screen.
Note:
There are no arguments or returns. All of the information needed to
build and display the menu is already stored within attributes.
"""
while True:
self.stdscr.clear()
curses.doupdate()
self.stdscr.addstr(1, 1, self.title)
self.stdscr.addstr(3, 3, self.subtitle)
for index, item in enumerate(self.items):
if index == self.cursor_pos:
highlight = curses.A_REVERSE
else:
highlight = curses.A_NORMAL
self.stdscr.addstr(
index + 4, 2,
"%2d) %s" % (index, item.name),
highlight)
key = self.stdscr.getch()
if key in [curses.KEY_DOWN, ord('j'), curses.KEY_NPAGE]:
if self.cursor_pos + 1 == len(self.items):
self.cursor_pos = 0
else:
self.cursor_pos += 1
elif key in [curses.KEY_UP, ord('k'), curses.KEY_PPAGE]:
if self.cursor_pos == 0:
self.cursor_pos = len(self.items) - 1
else:
self.cursor_pos += -1
elif key in [curses.KEY_HOME]:
self.cursor_pos = 0
elif key in [curses.KEY_END]:
self.cursor_pos = len(self.items) - 1
elif key in [KEY_ESC]:
break
elif key in self.enter_selects + self.num_selects:
if key in self.enter_selects:
selection = self.items[self.cursor_pos]
elif key in self.num_selects:
selection = self.items[int(chr(key))]
if selection.item_type == 'simple':
if selection.exit:
break
elif selection.item_type == 'submenu':
selection.menu.show_menu()
self.refresh_titles()
elif selection.item_type == 'function':
getattr(selection, selection.function)(selection.args)
if selection.exit:
break
def refresh_titles(self):
"""Refresh menu titles.
Note:
This method provides a way to update objects upon a menu display
refresh.
"""
for item in self.items:
item.refresh_title()
class MenuItem(object):
"""Curses menu item class.
Menus consist of a list of selectable `MenuItem` objects. Items can be a
submenu (:obj:`MenuItemSubmenu`), function call (:obj:`MenuItemFunction`),
or non-selectable text (:obj:`MenuItem`).
Args:
name (str): Item name string to be displayed as menu selection.
item_type (str, optional): Type of menu item. Defined types are
``simple`` (not selectable), ``submenu`` (nested menu), or
``function`` (function call).
exit (bool, optional): When ``True`` exit menu after selection.
Defaults to ``False``.
Attributes:
name (str): Item name string to be displayed as menu selection.
item_type (str): Type of menu item.
exit (bool): When ``True`` exit menu after selection.
"""
def __init__(self, name, item_type='simple', exit=False):
self.name = name
self.item_type = item_type
self.exit = exit
def refresh_title(self):
"""Refresh title.
Note:
This method provides a way to update objects upon a menu display
refresh.
"""
pass
class MenuItemSubmenu(MenuItem):
"""Curses menu item 'submenu' type class.
Menu item to select a nested menu.
Args:
name (str): Item name string to be displayed as menu selection.
Attributes:
name (str): Item name string to be displayed as menu selection.
menu (:obj:`Menu`): Submenu object.
item_type (str): ``submenu``.
exit (bool): ``False``.
"""
def __init__(self, name, menu, item_type='submenu'):
self.menu = menu
super(MenuItemSubmenu, self).__init__(name, item_type)
class MenuItemFunction(MenuItem):
"""Curses menu item 'function' type class.
Menu item to select a function call (with provided arguments). It is
assumed that the function is defined and accessible.
Args:
name (str): Item name string to be displayed as menu selection.
function (str): Name of function.
args (*, optional): Arguments to be passed to function. This is passed
directly and thus can be any type supported by the particular
function.
Attributes:
name (str): Item name string to be displayed as menu selection.
function (str): Name of function.
args (*): Arguments to be passed to function.
item_type (str): ``function``.
exit (bool): ``False``.
"""
def __init__(
self, name, function, args=None, item_type='function', exit=False):
self.function = function
self.args = args
super(MenuItemFunction, self).__init__(name, item_type, exit)
class MenuItemExit(MenuItem):
"""Curses menu item 'exit' type class.
Menu item to exit current menu. If the current menu is nested it will exit
into its parent menu. If the current menu is not nested it will exit the
curses menu entirely.
Args:
name (str): Item name string to be displayed as menu selection.
Attributes:
name (str): Item name string to be displayed as menu selection.
item_type (str): ``simple``.
exit (bool): ``True``.
"""
def __init__(self, name):
super(MenuItemExit, self).__init__(name, exit=True)
```
#### File: scripts/python/str2dict.py
```python
import argparse
import os
import re
import lib.genesis as gen
import yaml
# import code
import lib.logger as logger
def ipmi_fru2dict(fru_str):
"""Convert the ipmitool fru output to a dictionary. The function first
converts the input string to yaml, then yaml load is used to create a
dictionary.
Args:
fru_str (str): Result of running 'ipmitool fru'
Returns: A dictionary whose keys are the FRUs
"""
yaml_data = []
lines = fru_str.splitlines()
for i, line in enumerate(lines):
# Strip out any excess white space (including tabs) around the ':'
line = re.sub(r'\s*:\s*', ': ', line)
# Check for blank lines
if re.search(r'^\s*$', line):
yaml_data.append(line)
continue
if i < len(lines) - 1:
# If indentation is increasing on the following line, then convert
# the current line to a dictionary key.
indent = re.search(r'[ \t]*', line).span()[1]
next_indent = re.search(r'[ \t]*', lines[i + 1]).span()[1]
if next_indent > indent:
line = re.sub(r'\s*:\s*', ':', line)
# if ':' in middle of line take the second half, else
# take the beginning
if line.split(':')[1]:
line = line.split(':')[1]
else:
line = line.split(':')[0]
yaml_data.append(line + ':')
else:
split = line.split(':', 1)
# Add quotes around the value to handle non alphanumerics
line = split[0] + ': "' + split[1] + '"'
yaml_data.append(line)
yaml_data = '\n'.join(yaml_data)
return yaml.full_load(yaml_data)
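# Worked sketch (hypothetical 'ipmitool fru' fragment, not from a real system):
#     FRU Device Description : SYS FRU (ID 0)
#       Chassis Part Number  : 00E1234
#       Chassis Serial       : ABC1234
# yields roughly {'SYS FRU (ID 0)': {'Chassis Part Number': '00E1234',
#                                    'Chassis Serial': 'ABC1234'}}
# (string values may keep a leading space until stripped by the callers below).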
def _get_system_sn_pn(ipmi_fru_str):
fru_item = _get_system_info(ipmi_fru_str)
fru_item = fru_item[list(fru_item.keys())[0]]
return (fru_item['Chassis Serial'].strip(),
fru_item['Chassis Part Number'].strip())
def _get_system_info(ipmi_fru_str):
yaml_dict = ipmi_fru2dict(ipmi_fru_str)
fru_item = ''
for item in yaml_dict:
for srch_item in ['NODE', 'SYS', 'Backplane', 'MP', 'Mainboard']:
# code.interact(banner='There', local=dict(globals(), **locals()))
if srch_item in item:
fru_item = yaml_dict[item]
break
if fru_item:
fru_item = {item: fru_item}
break
if not fru_item:
fru_item = yaml_dict
# fru_item = yaml_dict[list(yaml_dict.keys())[0]]
return fru_item
def main(string):
sys_info = _get_system_info(string)
# print(sys_info)
# print()
for item in sys_info:
print(item)
for thing in sys_info[item]:
print(f'{thing}: {sys_info[item][thing]}')
print()
sn, pn = _get_system_sn_pn(string)
print(sn, pn)
if __name__ == '__main__':
"""Simple python template
"""
parser = argparse.ArgumentParser()
parser.add_argument('arg1', help='Path to a file containing ipmitool fru output')
# parser.add_argument('arg2', choices=['apple', 'banana', 'peach'],
# help='Pick a fruit')
parser.add_argument('--print', '-p', dest='log_lvl_print',
help='print log level', default='info')
parser.add_argument('--file', '-f', dest='log_lvl_file',
help='file log level', default='info')
args = parser.parse_args()
logger.create('nolog', 'info')
log = logger.getlogger()
if args.log_lvl_print == 'debug':
print(args)
path = os.path.join(gen.GEN_PATH, args.arg1)
with open(path, 'r') as f:
string = f.read()
main(string)
```
#### File: scripts/python/write_switch_memory.py
```python
import sys
from lib.inventory import Inventory
from lib.logger import Logger
from lib.ssh import SSH
class WriteSwitchMemory(object):
"""Write switch memory."""
ENABLE_REMOTE_CONFIG_MGMT = 'enable\nconfigure terminal\n%s'
ENABLE_REMOTE_CONFIG_DATA = 'cli enable "configure terminal" "%s"'
WRITE_MEMORY = 'write memory'
def __init__(self, log, inv_file):
self.inv = Inventory(log, inv_file)
self.log = log
self.enable_remote = None
self.userid = None
self.password = None
self.ipv4 = None
def write_mgmt_switch_memory(self):
self.enable_remote = self.ENABLE_REMOTE_CONFIG_MGMT
for self.ipv4 in self.inv.yield_mgmt_switch_ip():
pass
self.userid = self.inv.get_userid_mgmt_switch()
self.password = self.inv.get_password_mgmt_switch()
self._send_cmd(self.WRITE_MEMORY, 'Write memory', False)
def write_data_switch_memory(self):
self.enable_remote = self.ENABLE_REMOTE_CONFIG_DATA
for self.ipv4, value in self.inv.get_data_switches().items():
self.userid = value['user']
self.password = value['password']
self._send_cmd(self.WRITE_MEMORY, 'Write memory')
def _send_cmd(self, cmd, msg, status_check=True):
ssh = SSH(self.log)
self.log.debug('Switch cmd: ' + repr(cmd))
status, stdout_, _ = ssh.exec_cmd(
self.ipv4,
self.userid,
self.password,
self.enable_remote % cmd)
if status:
if status_check:
self.log.error(
'Failed: ' + msg + ' on ' + self.ipv4 +
' - Error: ' +
stdout_.replace('\n', ' ').replace('\r', ''))
sys.exit(1)
else:
if msg:
self.log.info(
msg + ' on ' + self.ipv4)
else:
if msg:
self.log.info(msg + ' on ' + self.ipv4)
return stdout_
if __name__ == '__main__':
"""Write switch memory.
Args:
INV_FILE (string): Inventory file.
LOG_LEVEL (string): Log level.
Raises:
Exception: If parameter count is invalid.
"""
LOG = Logger(__file__)
ARGV_MAX = 3
ARGV_COUNT = len(sys.argv)
if ARGV_COUNT > ARGV_MAX:
try:
raise Exception()
except Exception:
LOG.error('Invalid argument count')
sys.exit(1)
INV_FILE = sys.argv[1]
LOG.set_level(sys.argv[2])
SWITCHES = WriteSwitchMemory(LOG, INV_FILE)
SWITCHES.write_mgmt_switch_memory()
SWITCHES.write_data_switch_memory()
```
#### File: tests/unit/test_archive.py
```python
import unittest
from mock import patch as patch
from tests.unit import (TOP_DIR, SCRIPT_DIR)
import lib.logger as logger
import tarfile as t
import os
from archive.bundle import bundle_extract, archive_this, unarchive_this
import tempfile
COMPRESS_FORMAT = "gz"
COMPRESS_DIR = [TOP_DIR + '/' "scripts/", TOP_DIR + '/' "docs/"]
class TestScript(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestScript, self).__init__(*args, **kwargs)
def setUp(self):
super(TestScript, self).setUp()
self.root_p = patch('os.geteuid')
self.root = self.root_p.start()
# Pass future root checks
self.root.return_value = 0
def tearDown(self):
self.root_p.stop()
def test_tar_files(self):
logger.create('nolog', 'info')
LOG = logger.getlogger()
exclude = ['scripts/python/lib/db.py',
"scripts/python/lib/lenovo.py"]
# Good path
fileobj = tempfile.NamedTemporaryFile(delete=False)
try:
LOG.info(fileobj.name)
fileobj = archive_this(SCRIPT_DIR, fileObj=fileobj,
exclude=exclude, compress=True)
LOG.info("Archived " + fileobj.name)
with tempfile.TemporaryDirectory() as tmpdirname:
# make sure excluded files do not exist in the archive
with t.open(fileobj.name, "r:gz") as tar:
assert all(m.name not in exclude for m in tar.getmembers())
try:
LOG.info("Unarchiving " + fileobj.name)
unarchive_this(fileobj.name, tmpdirname)
except Exception as e:
LOG.error("Uncaught exception as e {0}".format(e))
raise e
except Exception as e:
LOG.error("Uncaught exception: {0}".format(e))
raise e
finally:
if fileobj is not None:
fileobj.close()
os.unlink(fileobj.name)
fileobj = tempfile.NamedTemporaryFile(delete=False)
try:
fileobj = archive_this(SCRIPT_DIR, fileObj=fileobj,
exclude=exclude, compress=True)
LOG.info("Archived " + fileobj.name)
with tempfile.TemporaryDirectory() as tmpdirname:
try:
LOG.info("Unarchiving " + fileobj.name)
bundle_extract(str(fileobj.name), tmpdirname)
except Exception as e:
LOG.error("Uncaught exception as e {0}".format(e))
raise e
except Exception as e:
LOG.error("Uncaught exception: {0}".format(e))
raise e
finally:
if fileobj is not None:
fileobj.close()
os.unlink(fileobj.name)
# Bad path
``` |
{
"source": "jjalvare/subarulink",
"score": 2
} |
#### File: subarulink/app/cli.py
```python
import argparse
import asyncio
from datetime import datetime
import logging
import os.path
from pprint import pprint
import shelve
import shlex
import sys
from aiohttp import ClientSession
import stdiomask
from subarulink import Controller, SubaruException
import subarulink.const as sc
CONFIG_FILE = ".subarulink.cfg"
LOGGER = logging.getLogger("subarulink")
STREAMHANDLER = logging.StreamHandler()
STREAMHANDLER.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
LOGGER.addHandler(STREAMHANDLER)
LOOP = asyncio.get_event_loop()
class CLI: # pylint: disable=too-few-public-methods
"""A basic shell for interacting with Subaru's Remote Services API."""
def __init__(self, config_file):
"""Initialize CLI class for subarulink controller."""
self._config = {}
self._get_config(config_file)
self._ctrl = None
self._current_vin = None
self._current_api_gen = None
self._current_hasEV = None
self._current_hasRES = None
self._current_hasRemote = None
self._session = None
self._car_data = None
self._cars = None
self._hvac_mode = None
self._hvac_speed = None
self._hvac_temp = None
def _get_config(self, config_file):
"""Read config file, or create one with user input."""
if not os.path.isfile(config_file):
LOGGER.info("Creating config file: %s", config_file)
else:
LOGGER.info("Opening config file: %s", config_file)
with shelve.open(config_file) as shelf:
if "username" not in shelf:
username = input("Enter Subaru Starlink username: ")
else:
username = shelf["username"]
if "password" not in shelf:
password = stdiomask.getpass("Enter Subaru Starlink password: ")
else:
password = shelf["password"]
if "pin" not in shelf:
pin = stdiomask.getpass("Enter Subaru Starlink PIN: ")
else:
pin = shelf["pin"]
if "device_name" not in shelf:
device_name = "subarulink"
shelf["device_name"] = device_name
if "device_id" not in shelf:
device_id = int(datetime.now().timestamp())
shelf["device_id"] = device_id
if "save_creds" not in shelf or shelf.get("save_creds") == "N":
while True:
save_creds = input(
"Remember these credentials? [Y]es, [N]o, [D]on't ask again > "
)
if save_creds in ["N", "n", "D", "d"]:
shelf["save_creds"] = save_creds
break
if save_creds in ["Y", "y"]:
shelf["save_creds"] = save_creds
shelf["username"] = username
shelf["password"] = password
shelf["pin"] = pin
break
self._config["username"] = username
self._config["password"] = password
self._config["pin"] = pin
self._config["device_name"] = shelf["device_name"]
self._config["device_id"] = shelf["device_id"]
os.chmod(config_file, 0o600)
@property
def _current_name(self):
return self._ctrl.vin_to_name(self._current_vin)
async def _quit(self):
await self._session.close()
sys.exit(0)
async def _vehicle_select(self):
while True:
print("\nAvailable Vehicles:")
for i in range(len(self._cars)):
print(
"[%d] %s (%s)"
% (i + 1, self._ctrl.vin_to_name(self._cars[i]), self._cars[i])
)
if len(self._cars) == 1:
selected = 0
else:
selected = input("\nSelect Vehicle> ")
if not selected.isnumeric():
continue
selected = int(selected) - 1
if selected in range(len(self._cars)):
self._current_vin = self._cars[selected]
self._current_hasEV = self._ctrl.get_ev_status(self._current_vin)
# self._current_hasRES = self._ctrl.get_res(self._current_vin)
self._current_api_gen = self._ctrl.get_api_gen(self._current_vin)
if self._current_api_gen == "g2":
await self._fetch()
return
def _set_hvac_params(self):
modes = [
sc.MODE_AUTO,
sc.MODE_FACE,
sc.MODE_FEET,
sc.MODE_SPLIT,
sc.MODE_FEET_DEFROST,
sc.MODE_DEFROST,
]
speeds = [
sc.FAN_SPEED_AUTO,
sc.FAN_SPEED_LOW,
sc.FAN_SPEED_MED,
sc.FAN_SPEED_HI,
]
while True:
print("Enter temperature (%d-%d):" % (sc.TEMP_MIN, sc.TEMP_MAX))
self._hvac_temp = input("> ")
if self._hvac_temp.isnumeric():
self._hvac_temp = int(self._hvac_temp)
if sc.TEMP_MIN < self._hvac_temp < sc.TEMP_MAX:
break
self._hvac_mode = _select_from_list("Select mode:", modes)
self._hvac_speed = _select_from_list("Select fan speed:", speeds)
async def _hvac(self, args):
if len(args) == 0:
print("hvac <set|start|stop>")
elif args[0] == "set":
self._set_hvac_params()
elif args[0] == "stop":
await self._ctrl.remote_stop(self._current_vin)
elif args[0] == "start":
if None in [self._hvac_mode, self._hvac_speed, self._hvac_temp]:
print("Specify settings with 'hvac set' first.")
await self._ctrl.remote_start(
self._current_vin,
self._hvac_temp,
self._hvac_mode,
sc.HEAT_SEAT_OFF,
sc.HEAT_SEAT_OFF,
sc.REAR_DEFROST_OFF,
self._hvac_speed,
sc.RECIRCULATE_ON,
sc.REAR_AC_OFF,
)
else:
print("hvac: invalid arg: %s" % args[0])
def _show(self, args):
if len(args) != 1:
print("show <summary|all>")
elif args[0] == "all":
pprint(self._car_data)
elif args[0] == "summary":
timediff = datetime.now() - datetime.fromtimestamp(
self._car_data["status"][sc.TIMESTAMP]
)
print(
"\nVehicle last reported data %d days, %d hours, %d minutes ago \n"
% (
timediff.days,
timediff.seconds // 3600,
(timediff.seconds) // 60 % 60,
)
)
if self._current_hasEV:
print(
"EV Charge: %s%%"
% self._car_data["status"][sc.EV_STATE_OF_CHARGE_PERCENT],
end="",
)
print(
"\tAux Battery: %sV" % self._car_data["status"][sc.BATTERY_VOLTAGE]
)
print(
"EV Plug Status: %s"
% self._car_data["status"][sc.EV_IS_PLUGGED_IN],
end="",
)
print(
"EV Distance to Empty: %s miles"
% self._car_data["status"][sc.EV_DISTANCE_TO_EMPTY]
)
print(
"Odometer: %0.1f miles"
% _meters_to_miles(self._car_data["status"][sc.ODOMETER])
)
print(
"External Temp: %0.1f °F"
% _c_to_f(self._car_data["status"][sc.EXTERNAL_TEMP])
)
else:
print("show: invalid arg: %s" % args[0])
async def _fetch(self):
print(
"\nFetching data for %s..." % self._ctrl.vin_to_name(self._current_vin),
end="",
flush=True,
)
self._car_data = await self._ctrl.get_data(self._current_vin)
print("Completed")
async def _connect(self):
print("Connecting to Subaru Remote Services API...", end="", flush=True)
try:
if await self._ctrl.connect():
print("Successfully connected")
self._cars = self._ctrl.get_vehicles()
await self._vehicle_select()
if self._current_api_gen == "g2":
self._show(["summary"])
elif self._current_api_gen == "g1":
print(
"%s is a Generation 1 telematics vehicle which has not been tested."
% self._current_name
)
else:
print("Unknown telematics version: %s" % self._current_api_gen)
except SubaruException:
print("Unable to connect. Check Username/Password.")
await self._session.close()
return False
return True
async def _cli_loop(self):
print("\nEnter a command. For a list of commands, enter '?'.")
while True:
print("%s" % self._current_name, end="")
try:
cmd, *args = shlex.split(input("> "))
except ValueError:
continue
try:
if cmd == "quit":
await self._quit()
elif cmd in ["help", "?"]:
print("\nCommands:")
print(" help - display this help")
print(" vehicle - change vehicle")
print(" lock - lock vehicle doors")
print(" unlock - unlock vehicle doors")
print(" lights - turn on lights")
print(" horn - sound horn")
if self._current_api_gen == "g2":
print(" show - show vehicle information")
print(" update - request update from vehicle")
print(" fetch - fetch most recent update")
print(" charge - start EV charging")
print(" hvac - remote HVAC control")
print(" quit\n")
elif cmd == "vehicle":
await self._vehicle_select()
elif cmd == "lock":
await self._ctrl.lock(self._current_vin)
elif cmd == "unlock":
await self._ctrl.unlock(self._current_vin)
elif cmd == "lights":
await self._ctrl.lights(self._current_vin)
elif cmd == "horn":
await self._ctrl.horn(self._current_vin)
elif cmd == "show" and self._current_api_gen == "g2":
self._show(args)
elif cmd == "update" and self._current_api_gen == "g2":
await self._ctrl.update(self._current_vin)
elif cmd == "fetch" and self._current_api_gen == "g2":
await self._ctrl.fetch(self._current_vin)
elif cmd == "charge" and self._current_api_gen == "g2":
await self._ctrl.charge_start(self._current_vin)
elif cmd == "hvac" and self._current_api_gen == "g2":
await self._hvac(args)
else:
print("invalid command: {}".format(cmd))
except SubaruException as exc:
print(exc.message)
async def run(self):
"""Initialize connection and start CLI loop."""
self._session = ClientSession()
self._ctrl = Controller(
self._session,
self._config["username"],
self._config["password"],
self._config["device_id"],
self._config["pin"],
self._config["device_name"],
)
try:
if await self._connect():
await self._cli_loop()
except (KeyboardInterrupt, EOFError):
await self._quit()
def _meters_to_miles(meters):
return float(meters) * 0.00062137119
def _c_to_f(temp_c):
return float(temp_c) * 1.8 + 32.0
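# Quick sanity checks (sketch): _meters_to_miles(1609.34) is about 1.0 and
# _c_to_f(20) == 68.0.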
def _select_from_list(msg, items):
while True:
print(msg)
for i, val in enumerate(items):
print(" [%d] %s" % (i + 1, val))
choice = input("> ")
if choice.isnumeric():
choice = int(choice) - 1
if choice in range(len(items)):
return items[choice]
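# Hedged usage sketch: _select_from_list('Select fan speed:', ['AUTO', 'LOW'])
# prints a numbered list and returns the chosen item; entering '2' at the
# prompt would return 'LOW'.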
def main():
"""Run a basic CLI that uses the subarulink package."""
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--verbosity",
type=int,
choices=[0, 1, 2],
default=0,
help="Verbosity Level: 0=Error[default] 1=Info 2=Debug",
)
parser.add_argument(
"-r", "--reset", help="Reset saved account information", action="store_true"
)
args = parser.parse_args()
if args.verbosity == 1:
LOGGER.setLevel(logging.INFO)
elif args.verbosity == 2:
LOGGER.setLevel(logging.DEBUG)
else:
LOGGER.setLevel(logging.ERROR)
home_dir = os.path.expanduser("~")
config_file = os.path.join(home_dir, CONFIG_FILE)
if args.reset:
if os.path.isfile(config_file):
os.remove(config_file)
print("Deleted %s" % config_file)
else:
print("Config file %s not found." % config_file)
sys.exit(0)
cli = CLI(config_file)
LOOP.run_until_complete(cli.run())
```
#### File: subarulink/subarulink/connection.py
```python
import asyncio
import logging
import pprint
import time
import aiohttp
from yarl import URL
from subarulink.exceptions import IncompleteCredentials, SubaruException
_LOGGER = logging.getLogger(__name__)
class Connection:
"""Connection to Subaru Starlink API."""
def __init__(
self,
websession: aiohttp.ClientSession,
username,
password,
device_id,
device_name,
) -> None:
"""Initialize connection object."""
self.username = username
self.password = password
self.device_id = device_id
self.lock = asyncio.Lock()
self.device_name = device_name
self.vehicles = []
self.vehicle_key = None
self.default_vin = None
self.baseurl = "https://mobileapi.prod.subarucs.com/g2v15"
self.head = {}
self.head[
"User-Agent"
] = "Mozilla/5.0 (Linux; Android 10; Android SDK built for x86 Build/QSR1.191030.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.185 Mobile Safari/537.36"
self.head["Origin"] = "file://"
self.head["X-Requested-With"] = "com.subaru.telematics.app.remote"
self.head["Accept-Language"] = "en-US,en;q=0.9"
self.head["Accept-Encoding"] = "gzip, deflate"
self.head["Accept"] = "*/*"
self.websession = websession
self.authenticated = False
self.authorized = False
self.current_vin = None
async def connect(self):
"""Connect to and establish session with Subaru Remote Services API."""
if await self._authenticate():
await self._refresh_vehicles()
if self.authorized:
return self.vehicles
if await self._authorize_device():
return self.vehicles
return None
async def validate_session(self, vin):
"""Validate if current session cookie is still valid with Subaru Remote Services API and vehicle context is correct."""
result = False
resp = await self.__open("/validateSession.json", "get")
js_resp = await resp.json()
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
if vin != self.current_vin:
# API call for VIN that is not the current remote context.
_LOGGER.debug("Switching Subaru API vehicle context to: %s", vin)
if await self._select_vehicle(vin):
result = True
else:
result = True
elif await self._authenticate(vin):
# New session cookie. Must call selectVehicle.json before any other API call.
if await self._select_vehicle(vin):
result = True
else:
self.authenticated = False
return result
async def get(self, command, params=None, data=None, json=None):
"""Send HTTPS GET request to Subaru Remote Services API."""
if self.authenticated:
resp = await self.__open(
f"{command}",
method="get",
headers=self.head,
params=params,
data=data,
json=json,
)
js_resp = await resp.json()
return js_resp
async def post(self, command, params=None, data=None, json=None):
"""Send HTTPS POST request to Subaru Remote Services API."""
if self.authenticated:
resp = await self.__open(
f"{command}",
method="post",
headers=self.head,
params=params,
data=data,
json=json,
)
js_resp = await resp.json()
return js_resp
async def _authenticate(self, vin=None) -> bool:
"""Authenticate to Subaru Remote Services API."""
if self.username and self.password and self.device_id:
post_data = {
"env": "cloudprod",
"loginUsername": self.username,
"password": <PASSWORD>,
"deviceId": self.device_id,
"passwordToken": None,
"selectedVin": vin,
"pushToken": None,
"deviceType": "android",
}
resp = await self.__open(
"/login.json", "post", data=post_data, headers=self.head
)
resp = await resp.json()
if resp["success"]:
_LOGGER.debug("Client authentication successful")
_LOGGER.debug(pprint.pformat(resp))
self.authenticated = True
self.authorized = resp["data"]["deviceRegistered"]
i = resp["data"]["currentVehicleIndex"]
self.current_vin = resp["data"]["vehicles"][i]["vin"]
return True
if resp["errorCode"]:
_LOGGER.error("Client authentication failed")
raise SubaruException(resp["errorCode"])
_LOGGER.error("Unknown failure")
raise SubaruException(resp)
raise IncompleteCredentials(
"Connection requires email and password and device id."
)
async def _select_vehicle(self, vin):
"""Select active vehicle for accounts with multiple VINs."""
params = {}
params["vin"] = vin
params["_"] = int(time.time())
js_resp = await self.get("/selectVehicle.json", params=params)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
self.current_vin = vin
_LOGGER.debug("Current vehicle: vin=%s", js_resp["data"]["vin"])
return js_resp["data"]
self.current_vin = None
return None
async def _refresh_vehicles(self):
resp = await self.__open(
"/refreshVehicles.json", "get", params={"_": int(time.time())}
)
js_resp = await resp.json()
_LOGGER.debug(pprint.pformat(js_resp))
vehicles = js_resp["data"]["vehicles"]
if len(vehicles) > 1:
vehicles = await self._refresh_multi_vehicle(vehicles)
for vehicle in vehicles:
car = {}
car["vin"] = vehicle["vin"]
car["id"] = vehicle["vehicleKey"]
car["display_name"] = vehicle["vehicleName"]
if "g2" in vehicle["features"]:
car["api_gen"] = "g2"
elif "g1" in vehicle["features"]:
car["api_gen"] = "g1"
else:
car["api_gen"] = "g0"
if "PHEV" in vehicle["features"]:
car["hasEV"] = True
else:
car["hasEV"] = False
if "RES" in vehicle["features"]:
car["hasRES"] = True
else:
car["hasRES"] = False
if "REMOTE" in vehicle["subscriptionFeatures"]:
car["hasRemote"] = True
else:
car["hasRemote"] = False
self.vehicles.append(car)
async def _refresh_multi_vehicle(self, vehicles):
# refreshVehicles.json returns unreliable data if multiple cars on account
# use selectVehicle.json to get each car's info
result = []
for vehicle in vehicles:
vin = vehicle["vin"]
result.append(await self._select_vehicle(vin))
return result
async def _authorize_device(self):
_LOGGER.debug("Authorizing device via web API")
web_baseurl = "https://www.mysubaru.com"
if self.username and self.password and self.device_id:
post_data = {
"username": self.username,
"password": <PASSWORD>,
"deviceId": self.device_id,
}
resp = await self.__open(
"/login", "post", data=post_data, baseurl=web_baseurl
)
resp = await self.__open(
"/profile/updateDeviceEntry.json",
"get",
params={"deviceId": self.device_id},
baseurl=web_baseurl,
)
if await resp.json():
_LOGGER.debug("Device successfully authorized")
return await self._set_device_name()
return False
async def _set_device_name(self):
_LOGGER.debug("Setting Device Name to %s", self.device_name)
web_baseurl = "https://www.mysubaru.com"
resp = await self.__open(
"/profile/addDeviceName.json",
"get",
params={"deviceId": self.device_id, "deviceName": self.device_name},
baseurl=web_baseurl,
)
js_resp = await resp.json()
if js_resp:
_LOGGER.debug("Set Device Name Successful")
return True
_LOGGER.debug("Unknown Error during Set Device Name")
return False
async def __open(
self,
url,
method="get",
headers=None,
data=None,
json=None,
params=None,
baseurl="",
    ) -> aiohttp.ClientResponse:
"""Open url."""
if not baseurl:
baseurl = self.baseurl
url: URL = URL(baseurl + url)
_LOGGER.debug("%s: %s", method.upper(), url)
        async with self.lock:
try:
resp = await getattr(self.websession, method)(
url, headers=headers, params=params, data=data, json=json
)
if resp.status > 299:
_LOGGER.debug(pprint.pformat(resp.request_info))
js_resp = await resp.json()
_LOGGER.debug(pprint.pformat(js_resp))
raise SubaruException(resp.status)
except aiohttp.ClientResponseError as exception:
raise SubaruException(exception.status)
except aiohttp.ClientConnectionError:
raise SubaruException("aiohttp.ClientConnectionError")
return resp
```
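A minimal, hypothetical usage sketch of the `Connection` class above (placeholder credentials; import path assumed from the package layout, not confirmed by the source):
```python
import asyncio
from aiohttp import ClientSession
from subarulink.connection import Connection
async def demo():
    async with ClientSession() as session:
        conn = Connection(session, "user@example.com", "password", "device-id", "device-name")
        vehicles = await conn.connect()  # list of vehicle dicts, or None on failure
        if vehicles:
            print([car["vin"] for car in vehicles])
asyncio.run(demo())
```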
#### File: subarulink/subarulink/controller.py
```python
import asyncio
from datetime import datetime
import logging
import pprint
import time
from subarulink.connection import Connection
import subarulink.const as sc
from subarulink.exceptions import InvalidPIN, SubaruException
_LOGGER = logging.getLogger(__name__)
class Controller:
"""Controller for connections to Subaru Starlink API."""
def __init__(
self,
websession,
username,
password,
device_id,
pin,
device_name,
update_interval=7200,
fetch_interval=300,
):
"""Initialize controller.
Args:
websession (aiohttp.ClientSession): Session
username (Text): Username
password (Text): Password
device_id (Text): Alphanumeric designator that Subaru API uses to determine if a device is authorized to send remote requests
pin (Text): 4 digit pin number string required to submit Subaru Remote requests
device_name (Text): Human friendly name that is associated with device_id (shows on mysubaru.com profile "devices")
update_interval (int, optional): Seconds between requests for vehicle send update
fetch_interval (int, optional): Seconds between fetches of Subaru's cached vehicle information
"""
self._connection = Connection(
websession, username, password, device_id, device_name
)
self._update_interval = update_interval
self._fetch_interval = fetch_interval
self._car_data = {}
self._update = {}
self._pin = pin
self._vin_id_map = {}
self._vin_name_map = {}
self._api_gen = {}
self._lock = {}
self._hasEV = {}
self._hasRemote = {}
self._hasRES = {}
self._controller_lock = asyncio.Lock()
self._last_update_time = {}
self._last_fetch_time = {}
self._cars = []
async def connect(self, test_login=False) -> bool:
"""
Connect to Subaru Remote Services API.
Args:
test_login (Bool, optional): Only check for authorization
"""
if test_login:
response = await self._connection.connect()
if response:
return True
return False
_LOGGER.debug("Connecting controller to Subaru Remote Services")
cars = await self._connection.connect()
if cars is None:
raise SubaruException("Connection to Subaru API failed")
for car in cars:
vin = car["vin"]
self._cars.append(vin)
self._vin_name_map[vin] = car["display_name"]
self._vin_id_map[vin] = car["id"]
self._api_gen[vin] = car["api_gen"]
self._hasEV[vin] = car["hasEV"]
self._hasRES[vin] = car["hasRES"]
self._hasRemote[vin] = car["hasRemote"]
self._lock[vin] = asyncio.Lock()
self._last_update_time[vin] = 0
self._last_fetch_time[vin] = 0
self._car_data[vin] = {}
self._update[vin] = True
_LOGGER.debug("Subaru Remote Services Ready!")
return True
def get_vehicles(self):
"""Return list of VINs available to user on Subaru Remote Services API."""
return self._cars
def get_ev_status(self, vin):
"""Get if EV."""
_LOGGER.debug("Getting EV Status %s:%s", vin, str(self._hasEV[vin]))
return self._hasEV.get(vin)
def get_remote_status(self, vin):
"""Get if remote services available."""
_LOGGER.debug("Getting remote Status %s:%s", vin, str(self._hasRemote[vin]))
return self._hasRemote.get(vin)
def get_res_status(self, vin):
"""Get if remote engine start is available."""
_LOGGER.debug("Getting RES Status %s:%s", vin, str(self._hasRES[vin]))
return self._hasRES.get(vin)
def get_api_gen(self, vin):
"""Get API version (g1 or g2) for vehicle."""
return self._api_gen.get(vin)
def vin_to_name(self, vin):
"""Return display name for a given VIN."""
return self._vin_name_map.get(vin)
async def get_data(self, vin):
"""Get locally cached vehicle data. Fetch if not present."""
if len(self._car_data[vin]) == 0:
await self.fetch(vin)
return self._car_data[vin]
async def fetch(self, vin, force=False):
"""Fetch latest data from Subaru. Does not invoke a remote request."""
cur_time = time.time()
async with self._controller_lock:
last_fetch = self._last_fetch_time[vin]
if force or cur_time - last_fetch > self._fetch_interval:
await self._fetch_status(vin)
self._last_fetch_time[vin] = cur_time
async def update(self, vin, force=False):
"""Request Subaru send remote command to update vehicle data."""
cur_time = time.time()
async with self._controller_lock:
last_update = self._last_update_time[vin]
if force or cur_time - last_update > self._update_interval:
await self._locate(vin)
await self._fetch_status(vin)
self._last_update_time[vin] = cur_time
def get_update_interval(self):
"""Get current update interval."""
return self._update_interval
def set_update_interval(self, value):
"""Set new update interval."""
old_interval = self._update_interval
if value > 300:
self._update_interval = value
_LOGGER.debug("Update interval changed from %s to %s", old_interval, value)
else:
_LOGGER.error(
"Invalid update interval %s. Keeping old value: %s", value, old_interval
)
def get_fetch_interval(self):
"""Get current fetch interval."""
return self._fetch_interval
def set_fetch_interval(self, value):
"""Set new fetch interval."""
old_interval = self._fetch_interval
if value > 60:
self._fetch_interval = value
_LOGGER.debug("Fetch interval changed from %s to %s", old_interval, value)
else:
_LOGGER.error(
"Invalid fetch interval %s. Keeping old value: %s", value, old_interval
)
def get_last_update_time(self, vin):
"""Get last time update() remote command was used."""
return self._last_update_time[vin]
async def charge_start(self, vin):
"""Start EV charging."""
return await self._remote_command(vin, "phevChargeNow")
async def lock(self, vin):
"""Send lock command."""
form_data = {"forceKeyInCar": False}
resp = await self._actuate(vin, "lock", data=form_data)
if resp and resp["data"]["success"]:
return True
async def unlock(self, vin, only_driver=True):
"""Send unlock command."""
door = sc.ALL_DOORS
if only_driver:
door = sc.DRIVERS_DOOR
form_data = {sc.WHICH_DOOR: door}
resp = await self._actuate(vin, "unlock", data=form_data)
if resp and resp["data"]["success"]:
return True
async def lights(self, vin):
"""Send lights command."""
await self._actuate(vin, "lightsOnly")
async def horn(self, vin):
"""Send horn command."""
await self._actuate(vin, "hornLights")
async def remote_stop(self, vin):
"""Send remote stop command."""
await self._actuate(vin, "engineStop")
async def remote_start(
self,
vin,
temp,
mode,
heat_left_seat,
heat_right_seat,
rear_defrost,
fan_speed,
recirculate,
rear_ac,
):
"""Send remote start command."""
form_data = {
sc.TEMP: temp,
sc.CLIMATE: sc.CLIMATE_DEFAULT,
sc.RUNTIME: sc.RUNTIME_DEFAULT,
sc.MODE: mode,
sc.HEAT_SEAT_LEFT: heat_left_seat,
sc.HEAT_SEAT_RIGHT: heat_right_seat,
sc.REAR_DEFROST: rear_defrost,
sc.FAN_SPEED: fan_speed,
sc.RECIRCULATE: recirculate,
sc.REAR_AC: rear_ac,
sc.START_CONFIG: sc.START_CONFIG_DEFAULT,
}
if _validate_remote_start_params(form_data):
await self._actuate(vin, "engineStart", data=form_data)
else:
return None
def get_updates(self, vin):
"""Get updates dictionary.
Parameters
----------
vin : string
VIN for the vehicle.
Returns
-------
bool or None
If vin exists, a bool indicating whether updates should be
            processed. Otherwise, returns None.
"""
if vin in self._update:
return self._update[vin]
return None
def set_updates(self, vin, setting):
"""Change update setting for vehicle."""
self._update[vin] = setting
async def _get(self, cmd, params=None, data=None, json=None):
return await self._connection.get("/%s" % cmd, params, data, json)
async def _post(self, cmd, params=None, data=None, json=None):
return await self._connection.post("/%s" % cmd, params, data, json)
async def _remote_query(self, vin, cmd, data=None):
await self._connection.validate_session(vin)
api_gen = self._api_gen[vin]
async with self._lock[vin]:
js_resp = await self._get(
"service/%s/%s/execute.json" % (api_gen, cmd), json=data
)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
return js_resp
raise SubaruException("Remote query failed. Response: %s " % js_resp)
async def _remote_command(
self, vin, cmd, data=None, poll_url="/service/api_gen/remoteService/status.json"
):
await self._connection.validate_session(vin)
api_gen = self._api_gen[vin]
form_data = {"pin": self._pin}
if data:
form_data.update(data)
req_id = ""
async with self._lock[vin]:
js_resp = await self._post(
"service/%s/%s/execute.json" % (api_gen, cmd), json=form_data
)
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["success"]:
req_id = js_resp["data"][sc.SERVICE_REQ_ID]
js_resp = await self._wait_request_status(req_id, api_gen, poll_url)
return js_resp
if js_resp["errorCode"] == "InvalidCredentials":
raise InvalidPIN(js_resp["data"]["errorDescription"])
raise SubaruException("Remote command failed. Response: %s " % js_resp)
async def _actuate(self, vin, cmd, data=None):
form_data = {"delay": 0, "vin": vin}
if data:
form_data.update(data)
return await self._remote_command(vin, cmd, data=form_data)
async def _fetch_status(self, vin):
_LOGGER.debug("Fetching vehicle status from Subaru")
js_resp = await self._remote_query(vin, "condition")
if js_resp:
status = {}
try:
# Annoying key/value pair format [{"key": key, "value": value}, ...]
status = {
i["key"]: i["value"]
for i in js_resp["data"]["result"]["vehicleStatus"]
}
except KeyError:
# Once in a while a 'value' key is missing
pass
status[sc.ODOMETER] = js_resp["data"]["result"]["odometer"]
status[sc.TIMESTAMP] = datetime.strptime(
js_resp["data"]["result"]["lastUpdatedTime"], sc.TIMESTAMP_FMT
).timestamp()
status[sc.POSITION_TIMESTAMP] = datetime.strptime(
status[sc.POSITION_TIMESTAMP], sc.POSITION_TIMESTAMP_FMT
).timestamp()
try:
self._car_data[vin]["status"] = self._validate_status(vin, status)
except TypeError:
_LOGGER.error("Unexpected data type in Subaru data")
_LOGGER.error(pprint.pformat(status))
async def _locate(self, vin):
js_resp = await self._remote_command(
vin,
"vehicleStatus",
poll_url="/service/api_gen/vehicleStatus/locationStatus.json",
)
if js_resp:
self._car_data[vin]["location"] = js_resp["data"]["result"]
async def _wait_request_status(self, req_id, api_gen, poll_url, attempts=20):
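        # Poll the remote-service status endpoint every 2 seconds, up to
        # `attempts` times; return the JSON response on success, or False if
        # the request never reports completion.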
success = False
params = {sc.SERVICE_REQ_ID: req_id}
attempt = 0
_LOGGER.debug(
"Polling for remote service request completion: serviceRequestId=%s", req_id
)
while not success and attempt < attempts:
js_resp = await self._connection.get(
poll_url.replace("api_gen", api_gen), params=params
)
# TODO: Parse errorCode
_LOGGER.debug(pprint.pformat(js_resp))
if js_resp["data"]["success"]:
success = True
_LOGGER.debug(
"Remote service request completed: serviceRequestId=%s", req_id
)
return js_resp
attempt += 1
await asyncio.sleep(2)
_LOGGER.error("Remote service request completion message not received")
return False
def _validate_status(self, vin, new_status):
old_status = self._car_data[vin].get("status")
# If Subaru gives us crap data, then keep old value (if we have one)
if old_status:
# Only valid right after driving
if new_status[sc.TIRE_PRESSURE_FL] == sc.BAD_TIRE_PRESSURE:
new_status[sc.TIRE_PRESSURE_FL] = old_status[sc.TIRE_PRESSURE_FL]
            if new_status[sc.TIRE_PRESSURE_FR] == sc.BAD_TIRE_PRESSURE:
                new_status[sc.TIRE_PRESSURE_FR] = old_status[sc.TIRE_PRESSURE_FR]
            if new_status[sc.TIRE_PRESSURE_RL] == sc.BAD_TIRE_PRESSURE:
                new_status[sc.TIRE_PRESSURE_RL] = old_status[sc.TIRE_PRESSURE_RL]
            if new_status[sc.TIRE_PRESSURE_RR] == sc.BAD_TIRE_PRESSURE:
                new_status[sc.TIRE_PRESSURE_RR] = old_status[sc.TIRE_PRESSURE_RR]
if new_status[sc.DIST_TO_EMPTY] == sc.BAD_DISTANCE_TO_EMPTY_FUEL:
new_status[sc.DIST_TO_EMPTY] = old_status[sc.DIST_TO_EMPTY]
if self._hasEV[vin]:
# Usually excessively high after driving ... also, sometimes None
if new_status[sc.EV_DISTANCE_TO_EMPTY]:
if int(new_status[sc.EV_DISTANCE_TO_EMPTY]) > 20:
new_status[sc.EV_DISTANCE_TO_EMPTY] = old_status[
sc.EV_DISTANCE_TO_EMPTY
]
else:
new_status[sc.EV_DISTANCE_TO_EMPTY] = old_status[
sc.EV_DISTANCE_TO_EMPTY
]
# Not valid when not charging
if (
new_status[sc.EV_TIME_TO_FULLY_CHARGED]
== sc.BAD_EV_TIME_TO_FULLY_CHARGED
):
new_status[sc.EV_TIME_TO_FULLY_CHARGED] = old_status[
sc.EV_TIME_TO_FULLY_CHARGED
]
# Sometimes invalid
if new_status[sc.AVG_FUEL_CONSUMPTION] == sc.BAD_AVG_FUEL_CONSUMPTION:
new_status[sc.AVG_FUEL_CONSUMPTION] = old_status[
sc.AVG_FUEL_CONSUMPTION
]
if new_status[sc.ODOMETER] == sc.BAD_ODOMETER:
new_status[sc.ODOMETER] = old_status[sc.ODOMETER]
return new_status
def _validate_remote_start_params(form_data):
temp = int(form_data[sc.TEMP])
is_valid = True
if temp > sc.TEMP_MAX or temp < sc.TEMP_MIN:
is_valid = False
if form_data[sc.MODE] not in [
sc.MODE_AUTO,
sc.MODE_DEFROST,
sc.MODE_FACE,
sc.MODE_FEET,
sc.MODE_FEET_DEFROST,
sc.MODE_SPLIT,
]:
is_valid = False
if form_data[sc.HEAT_SEAT_LEFT] not in [sc.HEAT_SEAT_OFF, sc.HEAT_SEAT_ON]:
is_valid = False
if form_data[sc.HEAT_SEAT_RIGHT] not in [sc.HEAT_SEAT_OFF, sc.HEAT_SEAT_ON]:
is_valid = False
if form_data[sc.REAR_DEFROST] not in [sc.REAR_DEFROST_OFF, sc.REAR_DEFROST_ON]:
is_valid = False
if form_data[sc.FAN_SPEED] not in [
sc.FAN_SPEED_AUTO,
sc.FAN_SPEED_HI,
sc.FAN_SPEED_LOW,
sc.FAN_SPEED_MED,
]:
is_valid = False
if form_data[sc.RECIRCULATE] not in [sc.RECIRCULATE_OFF, sc.RECIRCULATE_ON]:
is_valid = False
if form_data[sc.REAR_AC] not in [sc.REAR_AC_OFF, sc.REAR_AC_ON]:
is_valid = False
return is_valid
``` |
{
"source": "jjames1011/NWMM-Project2",
"score": 3
} |
#### File: app/forms/donation.py
```python
from flask.ext.wtf import Form
from wtforms import TextField, HiddenField, DateField, SelectField
from wtforms.validators import (Required, Length, Email, ValidationError,
EqualTo)
from app.models import Donation
class Unique(object):
'''
    Custom validator to check that an object's attribute
    is unique. For example, users should not be able
    to create an account if the account's email
    address is already in the database. This class
    assumes you are using SQLAlchemy to query the
    database.
'''
def __init__(self, model, field, message):
self.model = model
self.field = field
self.message = message
def __call__(self, form, field):
check = self.model.query.filter(self.field == field.data).first()
if check:
raise ValidationError(self.message)
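# Hypothetical example of attaching the Unique validator to a form field
# (assumes a SQLAlchemy `User` model with an `email` column; not part of this module):
#   email = TextField('Email', validators=[Required(), Email(),
#       Unique(User, User.email, 'This email is already registered')])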
class Donate(Form):
DROP_SITES = [('Portland (Southeast) - Adventist Medical Center','Portland (Southeast) - Adventist Medical Center'), ('Portland (North) - Legacy Emanuel Medical Center','Portland (North) - Legacy Emanuel Medical Center'), ('Portland (Southwest) - OHSU Family Medicine at Gabriel Park', 'Portland (Southwest) - OHSU Family Medicine at Gabriel Park')]
''' Donate milk form '''
location = SelectField('Drop Site', validators=[Required()],
choices=DROP_SITES, description="test")
date = DateField(validators=[Required()],
description='Date', format='%m/%d/%Y')
amount= TextField(validators=[Required()],
description='Amount (oz)')
user_email= HiddenField(validators=[Required()],
description='UserEmail')
``` |
{
"source": "jjamor/gmail-mbox-to-imap",
"score": 3
} |
#### File: jjamor/gmail-mbox-to-imap/gmail-mbox-to-imap.py
```python
import codecs
import email
import email.header
import getpass
import imaplib
import locale
import mailbox
import math
import optparse
import re
import socket
import sys
import time
import unicodedata
import urllib
from optparse import OptionParser
from urlparse import urlparse
__version__ = "1.2"
if sys.version_info < (2, 5):
print >>sys.stderr, "IMAP Upload requires Python 2.5 or later."
sys.exit(1)
class MyOptionParser(OptionParser):
def __init__(self):
usage = "usage: python %prog [options] MBOX [DEST]\n"\
" MBOX UNIX style mbox file.\n"\
" DEST is imap[s]://[USER[:PASSWORD]@]HOST[:PORT]\n"\
" DEST has a priority over the options."
OptionParser.__init__(self, usage,
version="IMAP Upload " + __version__)
self.add_option("--host",
help="destination hostname [default: %default]")
self.add_option("--port", type="int",
help="destination port number [default: 143, 993 for SSL]")
self.add_option("--ssl", action="store_true",
help="use SSL connection")
self.add_option("--user", help="login name [default: empty]")
self.add_option("--password", help="login password")
self.add_option("--retry", type="int", metavar="COUNT",
help="retry COUNT times on connection abort. "
"0 disables [default: %default]")
self.add_option("--error", metavar="ERR_MBOX",
help="append failured messages to the file ERR_MBOX")
self.add_option("--time-fields", metavar="LIST", type="string", nargs=1,
action="callback", callback=self.set_time_fields,
help="try to get delivery time of message from "
"the fields in the LIST. "
'Specify any of "from", "received" and '
'"date" separated with comma in order of '
'priority (e.g. "date,received"). '
'"from" is From_ line of mbox format. '
'"received" is "Received:" field and "date" '
'is "Date:" field in RFC 2822. '
'[default: from,received,date]')
self.set_defaults(host="localhost",
ssl=False,
user="",
password="",
retry=0,
error=None,
time_fields=["from", "received", "date"])
def set_time_fields(self, option, opt_str, value, parser):
fields = []
if value != "":
fields = value.split(",")
# Assert that list contains only valid fields
if set(fields) - set(["from", "received", "date"]):
self.error("Invalid value '%s' for --time-fields" % value)
self.values.time_fields = fields
def parse_args(self, args):
(options, args) = OptionParser.parse_args(self, args)
if len(args) < 1:
self.error("Missing MBOX")
if len(args) > 2:
self.error("Extra argugment")
if len(args) > 1:
dest = self.parse_dest(args[1])
for (k, v) in dest.__dict__.iteritems():
setattr(options, k, v)
if options.port is None:
options.port = [143, 993][options.ssl]
options.src = args[0]
return options
def parse_dest(self, dest):
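        # e.g. a hypothetical "imaps://bob%40example.com:secret@imap.example.org"
        # is meant to parse to ssl=True, host="imap.example.org", port=993,
        # user="bob@example.com", password="secret".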
try:
dest, ssl = re.subn("^imaps:", "imap:", dest)
dest = urlparse(dest)
options = optparse.Values()
options.ssl = bool(ssl)
options.host = dest.hostname
options.port = [143, 993][options.ssl]
if dest.port:
options.port = dest.port
if dest.username:
options.user = urllib.unquote(dest.username)
if dest.password:
options.password = urllib.unquote(dest.password)
return options
except:
self.error("Invalid DEST")
def error(self, msg):
raise optparse.OptParseError(self.get_usage() + "\n" + msg)
def si_prefix(n, prefixes=("", "k", "M", "G", "T", "P", "E", "Z", "Y"),
block=1024, threshold=1):
"""Get SI prefix and reduced number."""
if (n < block * threshold or len(prefixes) == 1):
return (n, prefixes[0])
return si_prefix(n / block, prefixes[1:])
def str_width(s):
"""Get string width."""
w = 0
for c in unicode(s):
w += 1 + (unicodedata.east_asian_width(c) in "FWA")
return w
def trim_width(s, width):
"""Get truncated string with specified width."""
trimed = []
for c in unicode(s):
width -= str_width(c)
if width <= 0:
break
trimed.append(c)
return "".join(trimed)
def left_fit_width(s, width, fill=' '):
"""Make a string fixed width by padding or truncating.
Note: fill can't be full width character.
"""
s = trim_width(s, width)
s += fill * (width - str_width(s))
return s
class Progress():
"""Store and output progress information."""
def __init__(self, total_count):
self.total_count = total_count
self.ok_count = 0
self.count = 0
self.format = "%" + str(len(str(total_count))) + "d/" + \
str(total_count) + " %5.1f %-2s %s "
def begin(self, msg):
"""Called when start proccessing of a new message."""
self.time_began = time.time()
size, prefix = si_prefix(float(len(msg.as_string())), threshold=0.8)
sbj = self.decode_subject(msg["subject"] or "")
label = self.decode_subject(msg["x-gmail-labels"] or "")
label = re.sub(r"&", "y", label)
# label = re.sub(r"Recibidos", "INBOX", label)
# label = re.sub(r"Enviados", "Sent", label)
label = re.sub(r" ", "_", label)
label = re.sub(r"__+", "_", label)
label = re.sub(r"\"", "", label)
labels = label.split(",")
if labels.count(u'INBOX') > 0:
labels.remove(u'INBOX')
flags = []
if labels.count(u'Non_lus') > 0:
labels.remove(u'Non_lus')
else:
flags.append('\Seen')
if labels.count('Important') > 0:
flags.append('\Flagged')
labels.remove(u'Important')
if len(labels):
msg.flags = " ".join(flags)
else:
msg.flags = []
msg.box = []
if len(labels) != 0:
if labels.count('Drafts'):
msg.box = ['Drafts']
else:
if labels.count('Spam'):
msg.box = ['Junk']
else:
box = re.sub(r"\?", "", labels.pop(0))
msg.box = box.split("/")
if len(msg.box) == 0:
msg.box = ["INBOX"]
print >>sys.stderr, self.format % \
(self.count + 1, size, prefix + "B", left_fit_width(sbj, 30)),
def decode_subject(self, sbj):
decoded = []
try:
parts = email.header.decode_header(sbj)
for s, codec in parts:
decoded.append(s.decode(codec or "ascii"))
except Exception, e:
pass
return "".join(decoded)
def endOk(self):
"""Called when a message was processed successfully."""
self.count += 1
self.ok_count += 1
print >>sys.stderr, "OK (%d sec)" % \
math.ceil(time.time() - self.time_began)
def endNg(self, err):
"""Called when an error has occurred while processing a message."""
print >>sys.stderr, "NG (%s)" % err
def endAll(self):
"""Called when all message was processed."""
print >>sys.stderr, "Done. (OK: %d, NG: %d)" % \
(self.ok_count, self.total_count - self.ok_count)
def upload(imap, src, err, time_fields):
print >>sys.stderr, \
"Counting the mailbox (it could take a while for the large one)."
p = Progress(len(src))
for i, msg in src.iteritems():
try:
p.begin(msg)
r, r2 = imap.upload(msg.get_delivery_time(time_fields),
msg.as_string(), msg.box, msg.flags, 3)
if r != "OK":
raise Exception(r2[0]) # FIXME: Should use custom class
p.endOk()
continue
except socket.error, e:
p.endNg("Socket error: " + str(e))
except Exception, e:
p.endNg(e)
if err is not None:
err.add(msg)
p.endAll()
def get_delivery_time(self, fields):
"""Extract delivery time from message.
Try to extract the time data from given fields of message.
The fields is a list and can consist of any of the following:
* "from" From_ line of mbox format.
* "received" The first "Received:" field in RFC 2822.
* "date" "Date:" field in RFC 2822.
Return the current time if the fields is empty or no field
had valid value.
"""
def get_from_time(self):
"""Extract the time from From_ line."""
time_str = self.get_from().split(" ", 1)[1]
t = time_str.replace(",", " ").lower()
t = re.sub(" (sun|mon|tue|wed|thu|fri|sat) ", " ",
" " + t + " ")
if t.find(":") == -1:
t += " 00:00:00"
return t
def get_received_time(self):
"""Extract the time from the first "Received:" field."""
t = self["received"]
t = t.split(";", 1)[1]
t = t.lstrip()
return t
def get_date_time(self):
"""Extract the time from "Date:" field."""
return self["date"]
for field in fields:
try:
t = vars()["get_" + field + "_time"](self)
t = email.utils.parsedate_tz(t)
t = email.utils.mktime_tz(t)
# Do not allow the time before 1970-01-01 because
# some IMAP server (i.e. Gmail) ignore it, and
# some MUA (Outlook Express?) set From_ date to
# 1965-01-01 for all messages.
if t < 0:
continue
return t
except:
pass
# All failed. Return current time.
return time.time()
# Directly attach get_delivery_time() to the mailbox.mboxMessage
# as a method.
# I want to use the factory parameter of mailbox.mbox()
# but it seems not to work in Python 2.5.4.
mailbox.mboxMessage.get_delivery_time = get_delivery_time
class IMAPUploader:
def __init__(self, host, port, ssl, user, password, retry):
self.imap = None
self.host = host
self.port = port
self.ssl = ssl
self.user = user
self.password = password
self.retry = retry
def upload(self, delivery_time, message, box, flags, retry = None):
if retry is None:
retry = self.retry
try:
self.open()
try:
self.create_folder(box)
box = "/".join(box)
res = self.imap.append(box, flags, delivery_time, message)
except:
box = "/".join(box)
res = self.imap.append(box, flags, delivery_time, message)
return res
except (imaplib.IMAP4.abort, socket.error):
self.close()
if retry == 0:
raise
print >>sys.stderr, "(Reconnect)",
time.sleep(5)
return self.upload(delivery_time, message, box, flags, retry - 1)
def create_folder(self, box):
i = 1
while i <= len(box):
b = "/".join(box[0:i])
if b != "INBOX":
try:
self.imap.create(b)
except:
print >>sys.stderr, "Cannot create box %s" % b
i += 1
def open(self):
if self.imap:
return
        imap_class = [imaplib.IMAP4, imaplib.IMAP4_SSL][self.ssl]
self.imap = imap_class(self.host, self.port)
self.imap.socket().settimeout(60)
self.imap.login(self.user, self.password)
def close(self):
if not self.imap:
return
self.imap.shutdown()
self.imap = None
def main(args=None):
try:
# Setup locale
# Set LC_TIME to "C" so that imaplib.Time2Internaldate()
# uses English month name.
locale.setlocale(locale.LC_ALL, "")
locale.setlocale(locale.LC_TIME, "C")
# Encoding of the sys.stderr
enc = locale.getlocale()[1] or "utf_8"
sys.stderr = codecs.lookup(enc)[-1](sys.stderr, errors="ignore")
# Parse arguments
if args is None:
args = sys.argv[1:]
parser = MyOptionParser()
options = parser.parse_args(args)
if len(str(options.user)) == 0:
print "User name: ",
options.user = sys.stdin.readline().rstrip("\n")
if len(str(options.password)) == 0:
            options.password = getpass.getpass()
options = options.__dict__
src = options.pop("src")
err = options.pop("error")
time_fields = options.pop("time_fields")
# Connect to the server and login
print >>sys.stderr, \
"Connecting to %s:%s." % (options["host"], options["port"])
uploader = IMAPUploader(**options)
uploader.open()
# Prepare source and error mbox
src = mailbox.mbox(src, create=False)
if err:
err = mailbox.mbox(err)
# Upload
print >>sys.stderr, "Uploading..."
upload(uploader, src, err, time_fields)
return 0
except optparse.OptParseError, e:
print >>sys.stderr, e
return 2
except mailbox.NoSuchMailboxError, e:
print >>sys.stderr, "No such mailbox:", e
return 1
except socket.timeout, e:
print >>sys.stderr, "Timed out"
return 1
except imaplib.IMAP4.error, e:
print >>sys.stderr, "IMAP4 error:", e
return 1
except KeyboardInterrupt, e:
print >>sys.stderr, "Interrupted"
return 130
except Exception, e:
print >>sys.stderr, "An unknown error has occurred: ", e
return 1
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jjanczyszyn/jj-httpbin",
"score": 3
} |
#### File: tests/unit/test_console_module.py
```python
from jj_httpbin.console import ansi_red
def test_ansi_red():
("jj_httpbin.console.ansi_red() should wrap the "
"given string with ansi the escape code for bold red")
# Given that I have a string to be colorized in the terminal
my_string = 'This is my string'
# When I colorize it
result = ansi_red(my_string)
# Then it should be wrapped with the correct ansi color code
result.should.equal('\033[1;31mThis is my string\033[0m')
``` |
{
"source": "jjandnn/AlphaPose",
"score": 2
} |
#### File: jjandnn/AlphaPose/setup.py
```python
import os
import platform
import subprocess
import time
import numpy as np
from Cython.Build import cythonize
from setuptools import Extension, find_packages, setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
MAJOR = 0
MINOR = 3
PATCH = 0
SUFFIX = ''
SHORT_VERSION = '{}.{}.{}{}'.format(MAJOR, MINOR, PATCH, SUFFIX)
version_file = 'alphapose/version.py'
def readme():
with open('README.md') as f:
content = f.read()
return content
def get_git_hash():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
sha = out.strip().decode('ascii')
except OSError:
sha = 'unknown'
return sha
def get_hash():
if os.path.exists('.git'):
sha = get_git_hash()[:7]
elif os.path.exists(version_file):
try:
from alphapose.version import __version__
sha = __version__.split('+')[-1]
except ImportError:
raise ImportError('Unable to get git version')
else:
sha = 'unknown'
return sha
def write_version_py():
content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
short_version = '{}'
"""
sha = get_hash()
VERSION = SHORT_VERSION + '+' + sha
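    # e.g. "0.3.0+abc1234" from a git checkout, or "0.3.0+unknown" otherwise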
with open(version_file, 'w') as f:
f.write(content.format(time.asctime(), VERSION, SHORT_VERSION))
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def make_cython_ext(name, module, sources):
extra_compile_args = None
if platform.system() != 'Windows':
extra_compile_args = {
'cxx': ['-Wno-unused-function', '-Wno-write-strings']
}
extension = Extension(
'{}.{}'.format(module, name),
[os.path.join(*module.split('.'), p) for p in sources],
include_dirs=[np.get_include()],
language='c++',
extra_compile_args=extra_compile_args)
extension, = cythonize(extension)
return extension
def make_cuda_ext(name, module, sources):
return CUDAExtension(
name='{}.{}'.format(module, name),
sources=[os.path.join(*module.split('.'), p) for p in sources],
extra_compile_args={
'cxx': [],
'nvcc': [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
})
def get_ext_modules():
ext_modules = []
    # Only Windows with Visual Studio 2013+ supports compiling the C/CUDA extensions.
    # If you force extension compilation on Windows and have an appropriate
    # Visual Studio installed, you can try to use these ext_modules.
force_compile = False
if platform.system() != 'Windows' or force_compile:
ext_modules = [
make_cython_ext(
name='soft_nms_cpu',
module='detector.nms',
sources=['src/soft_nms_cpu.pyx']),
make_cuda_ext(
name='nms_cpu',
module='detector.nms',
sources=['src/nms_cpu.cpp']),
make_cuda_ext(
name='nms_cuda',
module='detector.nms',
sources=['src/nms_cuda.cpp', 'src/nms_kernel.cu']),
make_cuda_ext(
name='roi_align_cuda',
module='alphapose.utils.roi_align',
sources=['src/roi_align_cuda.cpp', 'src/roi_align_kernel.cu']),
make_cuda_ext(
name='deform_conv_cuda',
module='alphapose.models.layers.dcn',
sources=[
'src/deform_conv_cuda.cpp',
'src/deform_conv_cuda_kernel.cu'
]),
make_cuda_ext(
name='deform_pool_cuda',
module='alphapose.models.layers.dcn',
sources=[
'src/deform_pool_cuda.cpp',
'src/deform_pool_cuda_kernel.cu'
]),
]
return ext_modules
def get_install_requires():
install_requires = [
'six', 'terminaltables', 'scipy==1.1.0',
'opencv-python', 'matplotlib', 'visdom',
'tqdm', 'tensorboardx', 'easydict',
'pyyaml', 'halpecocotools',
'torch>=1.1.0', 'torchvision>=0.3.0',
'munkres', 'timm==0.1.20', 'natsort'
]
    # official pycocotools doesn't support Windows; we will install it from a third-party git repository later
if platform.system() != 'Windows':
install_requires.append('pycocotools')
return install_requires
def is_installed(package_name):
from pip._internal.utils.misc import get_installed_distributions
for p in get_installed_distributions():
if package_name in p.egg_name():
return True
return False
if __name__ == '__main__':
write_version_py()
setup(
name='alphapose',
version=get_version(),
description='Code for AlphaPose',
long_description=readme(),
keywords='computer vision, human pose estimation',
url='https://github.com/MVIG-SJTU/AlphaPose',
packages=find_packages(exclude=('data', 'exp',)),
package_data={'': ['*.json', '*.txt']},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
license='GPLv3',
python_requires=">=3",
setup_requires=['pytest-runner', 'numpy', 'cython'],
tests_require=['pytest'],
install_requires=get_install_requires(),
ext_modules=get_ext_modules(),
cmdclass={'build_ext': BuildExtension},
zip_safe=False)
# Windows need pycocotools here: https://github.com/philferriere/cocoapi#subdirectory=PythonAPI
if platform.system() == 'Windows' and not is_installed('pycocotools'):
print("\nInstall third-party pycocotools for Windows...")
cmd = 'python -m pip install git+https://github.com/philferriere/cocoapi.git#subdirectory=PythonAPI'
os.system(cmd)
if not is_installed('cython_bbox'):
print("\nInstall `cython_bbox`...")
cmd = 'python -m pip install git+https://github.com/yanfengliu/cython_bbox.git'
os.system(cmd)
``` |
{
"source": "jjandnn/auto-editor",
"score": 3
} |
#### File: auto_editor/analyze/motion.py
```python
from typing import Tuple
import av
import numpy as np
from numpy.typing import NDArray
from PIL import ImageChops, ImageFilter, ImageOps
from auto_editor.utils.progressbar import ProgressBar
def new_size(size: Tuple[int, int], width: int) -> Tuple[int, int]:
h, w = size
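    # e.g. size=(1080, 1920) as (height, width) and width=400 -> (400, 225)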
return width, int(h * (width / w))
def motion_detection(
path: str, fps: float, progress: ProgressBar, width: int, blur: int
) -> NDArray[np.float_]:
container = av.open(path, "r")
stream = container.streams.video[0]
stream.thread_type = "AUTO"
inaccurate_dur = int(stream.duration * stream.time_base * stream.rate)
progress.start(inaccurate_dur, "Analyzing motion")
prev_image = None
image = None
total_pixels = None
index = 0
threshold_list = np.zeros((1024), dtype=np.float_)
for frame in container.decode(stream):
if image is None:
prev_image = None
else:
prev_image = image
index = int(frame.time * fps)
progress.tick(index)
if index > len(threshold_list) - 1:
threshold_list = np.concatenate(
(threshold_list, np.zeros((len(threshold_list)), dtype=np.float_)),
axis=0,
)
image = frame.to_image()
if total_pixels is None:
total_pixels = image.size[0] * image.size[1]
image.thumbnail(new_size(image.size, width))
image = ImageOps.grayscale(image)
if blur > 0:
image = image.filter(ImageFilter.GaussianBlur(radius=blur))
if prev_image is not None:
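            # Motion score: fraction of pixels that changed between consecutive
            # downscaled, blurred, grayscale frames.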
count = np.count_nonzero(ImageChops.difference(prev_image, image))
threshold_list[index] = count / total_pixels
progress.end()
return threshold_list[:index]
```
#### File: auto-editor/auto_editor/preview.py
```python
from statistics import fmean, median
from typing import List, Tuple
from auto_editor.timeline import Timeline
from auto_editor.utils.func import to_timecode
from auto_editor.utils.log import Log
def display(secs: float) -> str:
return to_timecode(round(secs), "rass")
def time_frame(title: str, frames: float, fps: float) -> None:
tc = to_timecode(frames / fps, "ass")
preci = 0 if int(frames) == frames else 2
print(f" - {f'{title}:':<10} {tc:<12} ({frames:.{preci}f})")
def preview(timeline: Timeline, log: Log) -> None:
log.conwrite("")
fps = timeline.fps
in_len = sum([inp.fdur for inp in timeline.inputs])
out_len: float = 0
for vclips in timeline.v:
dur: float = 0
for vclip in vclips:
dur += vclip.dur / vclip.speed
out_len = max(out_len, dur / fps)
for aclips in timeline.a:
dur = 0
for aclip in aclips:
dur += aclip.dur / aclip.speed
out_len = max(out_len, dur / fps)
diff = out_len - in_len
print(
f"\nlength:\n - change: ({display(in_len)}) 100% -> "
f"({display(out_len)}) {round((out_len / in_len) * 100, 2)}%\n "
f"- diff: ({display(diff)}) {round((diff / in_len) * 100, 2)}%"
)
clip_lens = [clip.dur / clip.speed for clip in timeline.a[0]]
# Calculate cuts
oe: List[Tuple[int, int]] = []
# TODO: Make offset_end_pairs work on overlapping clips.
for clip in timeline.a[0]:
oe.append((clip.offset, clip.offset + clip.dur))
cut_lens = []
i = 0
while i < len(oe) - 1:
if i == 0 and oe[i][0] != 0:
cut_lens.append(oe[i][1])
cut_lens.append(oe[i + 1][0] - oe[i][1])
i += 1
if len(oe) > 0 and oe[-1][1] < round(in_len * fps):
cut_lens.append(round(in_len * fps) - oe[-1][1])
print(f"clips: {len(clip_lens)}")
log.debug(clip_lens)
if len(clip_lens) == 0:
clip_lens = [0]
time_frame("smallest", min(clip_lens), fps)
time_frame("largest", max(clip_lens), fps)
if len(clip_lens) > 1:
time_frame("median", median(clip_lens), fps)
time_frame("average", fmean(clip_lens), fps)
print(f"cuts: {len(cut_lens)}")
log.debug(cut_lens)
if len(cut_lens) == 0:
cut_lens = [0]
time_frame("smallest", min(cut_lens), fps)
time_frame("largest", max(cut_lens), fps)
if len(cut_lens) > 1:
time_frame("median", median(cut_lens), fps)
time_frame("average", fmean(cut_lens), fps)
print("")
```
#### File: auto_editor/render/subtitle.py
```python
import os
import re
from dataclasses import dataclass
from typing import List, Tuple
from auto_editor.ffwrapper import FFmpeg
from auto_editor.timeline import Timeline
from auto_editor.utils.func import to_timecode
from auto_editor.utils.log import Log
@dataclass
class SerialSub:
start: int
end: int
before: str
middle: str
after: str
class SubtitleParser:
def __init__(self) -> None:
self.supported_codecs = ("ass", "webvtt", "mov_text")
def parse(self, text, fps: float, codec: str) -> None:
if codec not in self.supported_codecs:
raise ValueError(f"codec {codec} not supported.")
self.fps = fps
self.codec = codec
self.contents: List[SerialSub] = []
if codec == "ass":
time_code = re.compile(r"(.*)(\d+:\d+:[\d.]+)(.*)(\d+:\d+:[\d.]+)(.*)")
if codec == "webvtt":
time_code = re.compile(r"()(\d+:[\d.]+)( --> )(\d+:[\d.]+)(\n.*)")
if codec == "mov_text":
time_code = re.compile(r"()(\d+:\d+:[\d,]+)( --> )(\d+:\d+:[\d,]+)(\n.*)")
i = 0
for reg in re.finditer(time_code, text):
i += 1
if i == 1:
self.header = text[: reg.span()[0]]
self.contents.append(
SerialSub(
self.to_frame(reg.group(2)),
self.to_frame(reg.group(4)),
reg.group(1),
reg.group(3),
f"{reg.group(5)}\n",
)
)
if i == 0:
self.header = ""
self.footer = ""
else:
self.footer = text[reg.span()[1] :]
def edit(self, chunks: List[Tuple[int, int, float]]) -> None:
for cut in reversed(chunks):
the_speed = cut[2]
speed_factor = 1 if the_speed == 99999 else 1 - (1 / the_speed)
new_content = []
for content in self.contents:
if cut[0] <= content.end and cut[1] > content.start:
diff = int(
(min(cut[1], content.end) - max(cut[0], content.start))
* speed_factor
)
if content.start > cut[0]:
content.start -= diff
content.end -= diff
content.end -= diff
elif content.start >= cut[0]:
diff = int((cut[1] - cut[0]) * speed_factor)
content.start -= diff
content.end -= diff
if content.start != content.end:
new_content.append(content)
self.contents = new_content
def write(self, file_path: str) -> None:
with open(file_path, "w") as file:
file.write(self.header)
for c in self.contents:
file.write(
f"{c.before}{to_timecode(c.start / self.fps, self.codec)}"
f"{c.middle}{to_timecode(c.end / self.fps, self.codec)}"
f"{c.after}"
)
file.write(self.footer)
def to_frame(self, text: str) -> int:
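        # Convert an "HH:MM:SS.ms" (or "MM:SS.ms") timestamp to a frame index,
        # e.g. "00:01:02.5" at 30 fps -> 1875.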
if self.codec == "mov_text":
time_format = r"(\d+):?(\d+):([\d,]+)"
else:
time_format = r"(\d+):?(\d+):([\d.]+)"
nums = re.match(time_format, text)
assert nums is not None
hours, minutes, seconds = nums.groups()
seconds = seconds.replace(",", ".", 1)
return round(
(int(hours) * 3600 + int(minutes) * 60 + float(seconds)) * self.fps
)
def cut_subtitles(
ffmpeg: FFmpeg,
timeline: Timeline,
temp: str,
log: Log,
) -> None:
inp = timeline.inp
chunks = timeline.chunks
if chunks is None:
log.error("Timeline too complex")
for s, sub in enumerate(inp.subtitles):
file_path = os.path.join(temp, f"{s}s.{sub.ext}")
new_path = os.path.join(temp, f"new{s}s.{sub.ext}")
parser = SubtitleParser()
if sub.codec in parser.supported_codecs:
with open(file_path) as file:
parser.parse(file.read(), timeline.fps, sub.codec)
else:
convert_path = os.path.join(temp, f"{s}s_convert.vtt")
ffmpeg.run(["-i", file_path, convert_path])
with open(convert_path) as file:
parser.parse(file.read(), timeline.fps, "webvtt")
parser.edit(chunks)
parser.write(new_path)
```
#### File: render/tsm/cbuffer.py
```python
import numpy as np
from .array import ArrReader, ArrWriter
class CBuffer:
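    """Ring (circular) buffer of audio samples with shape (channels, max_length).
    `_offset` points at the oldest stored sample and wraps around `_max_length`;
    writes append after the stored region, reads/peeks consume from the front.
    """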
def __init__(self, channels: int, max_length: int) -> None:
self._data = np.zeros((channels, max_length), dtype=np.float32)
self._channels = channels
self._max_length = max_length
self._offset = 0
self._ready = 0
self.length = 0
def add(self, buffer: np.ndarray) -> None:
"""Adds a buffer element-wise to the CBuffer."""
if buffer.shape[0] != self._data.shape[0]:
raise ValueError("the two buffers should have the same number of channels")
n = buffer.shape[1]
if n > self.length:
raise ValueError("not enough space remaining in CBuffer")
# Compute the slice of data where the values will be added
start = self._offset
end = self._offset + n
if end <= self._max_length:
self._data[:, start:end] += buffer[:, :n]
else:
end -= self._max_length
self._data[:, start:] += buffer[:, : self._max_length - start]
self._data[:, :end] += buffer[:, self._max_length - start : n]
def divide(self, array: np.ndarray) -> None:
n = len(array)
if n > self.length:
raise ValueError("not enough space remaining in the CBuffer")
start = self._offset
end = self._offset + n
if end <= self._max_length:
self._data[:, start:end] /= array[:n]
else:
end -= self._max_length
self._data[:, start:] /= array[: self._max_length - start]
self._data[:, :end] /= array[self._max_length - start : n]
def peek(self, buffer: np.ndarray) -> int:
if buffer.shape[0] != self._data.shape[0]:
raise ValueError("the two buffers should have the same number of channels")
n = min(buffer.shape[1], self._ready)
start = self._offset
end = self._offset + n
if end <= self._max_length:
np.copyto(buffer[:, :n], self._data[:, start:end])
else:
end -= self._max_length
np.copyto(buffer[:, : self._max_length - start], self._data[:, start:])
np.copyto(buffer[:, self._max_length - start : n], self._data[:, :end])
return n
def read(self, buffer: np.ndarray) -> int:
n = self.peek(buffer)
self.remove(n)
return n
def read_from(self, reader: ArrReader) -> int:
# Compute the slice of data that will be written to
start = (self._offset + self.length) % self._max_length
end = start + self._max_length - self.length
if end <= self._max_length:
n = reader.read(self._data[:, start:end])
else:
# There is not enough space to copy the whole buffer, it has to be
# split into two parts, one of which will be copied at the end of
# _data, and the other at the beginning.
end -= self._max_length
n = reader.read(self._data[:, start:])
n += reader.read(self._data[:, :end])
self.length += n
self._ready = self.length
return n
@property
def ready(self):
return self._ready
@property
def remaining_length(self):
return self._max_length - self._ready
def remove(self, n: int) -> int:
"""
        Removes the first n samples of the CBuffer, preventing
        them from being read again and leaving more space for new samples to be
written.
"""
if n >= self.length:
n = self.length
# Compute the slice of data that will be reset to 0
start = self._offset
end = self._offset + n
if end <= self._max_length:
self._data[:, start:end] = 0
else:
end -= self._max_length
self._data[:, start:] = 0
self._data[:, :end] = 0
self._offset += n
self._offset %= self._max_length
self.length -= n
self._ready -= n
if self._ready < 0:
self._ready = 0
return n
def right_pad(self, n: int) -> None:
if n > self._max_length - self.length:
raise ValueError("not enough space remaining in CBuffer")
self.length += n
def set_ready(self, n: int) -> None:
"""Mark the next n samples as ready to be read."""
if self._ready + n > self.length:
raise ValueError("not enough samples to be marked as ready")
self._ready += n
def to_array(self):
out = np.empty((self._channels, self._ready))
self.peek(out)
return out
def write(self, buffer: np.ndarray) -> int:
if buffer.shape[0] != self._data.shape[0]:
raise ValueError("the two buffers should have the same number of channels")
n = min(buffer.shape[1], self._max_length - self.length)
# Compute the slice of data that will be written to
start = (self._offset + self.length) % self._max_length
end = start + n
if end <= self._max_length:
np.copyto(self._data[:, start:end], buffer[:, :n])
else:
# There is not enough space to copy the whole buffer, it has to be
# split into two parts, one of which will be copied at the end of
# _data, and the other at the beginning.
end -= self._max_length
np.copyto(self._data[:, start:], buffer[:, : self._max_length - start])
np.copyto(self._data[:, :end], buffer[:, self._max_length - start : n])
self.length += n
self._ready = self.length
return n
def write_to(self, writer: ArrWriter) -> int:
start = self._offset
end = self._offset + self._ready
if end <= self._max_length:
n = writer.write(self._data[:, start:end])
else:
end -= self._max_length
n = writer.write(self._data[:, start:])
n += writer.write(self._data[:, :end])
self.remove(n)
return n
```
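A short standalone sketch of the ring-buffer semantics above (the import path is assumed from the file layout, not confirmed by the source):
```python
import numpy as np
from auto_editor.render.tsm.cbuffer import CBuffer
buf = CBuffer(channels=2, max_length=8)
buf.write(np.ones((2, 3), dtype=np.float32))  # 3 samples become readable
out = np.zeros((2, 3), dtype=np.float32)
n = buf.read(out)  # copies the 3 samples, then removes them
print(n, buf.length)  # 3 0
```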
#### File: auto_editor/subcommands/subdump.py
```python
import os
import sys
import tempfile
from auto_editor.ffwrapper import FFmpeg, FileInfo
from auto_editor.utils.log import Log
from auto_editor.vanparse import ArgumentParser
def subdump_options(parser: ArgumentParser) -> ArgumentParser:
parser.add_argument("--ffmpeg-location", help="Point to your custom ffmpeg file.")
parser.add_argument(
"--my-ffmpeg",
flag=True,
help="Use the ffmpeg on your PATH instead of the one packaged.",
)
parser.add_required(
"input", nargs="*", help="Path to the file to have its subtitles dumped."
)
return parser
def main(sys_args=sys.argv[1:]) -> None:
parser = subdump_options(ArgumentParser("subdump"))
args = parser.parse_args(sys_args)
ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, debug=False)
temp = tempfile.mkdtemp()
log = Log(temp=temp)
for i, input_file in enumerate(args.input):
inp = FileInfo(input_file, ffmpeg, log)
cmd = ["-i", input_file]
for s, sub in enumerate(inp.subtitles):
cmd.extend(["-map", f"0:s:{s}", os.path.join(temp, f"{i}s{s}.{sub.ext}")])
ffmpeg.run(cmd)
for s, sub in enumerate(inp.subtitles):
print(f"file: {input_file} ({s}:{sub.lang}:{sub.ext})")
with open(os.path.join(temp, f"{i}s{s}.{sub.ext}")) as file:
print(file.read())
print("------")
log.cleanup()
if __name__ == "__main__":
main()
``` |
{
"source": "jjandnn/PaddleHub",
"score": 2
} |
#### File: paddlehub/dataset/glue.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import csv
import io
from paddlehub.dataset import InputExample
from paddlehub.common.logger import logger
from paddlehub.common.dir import DATA_HOME
from paddlehub.dataset.base_nlp_dataset import BaseNLPDataset
_DATA_URL = "https://bj.bcebos.com/paddlehub-dataset/glue_data.tar.gz"
class GLUE(BaseNLPDataset):
"""
Please refer to
https://gluebenchmark.com
for more information
"""
def __init__(self, sub_dataset='SST-2'):
# sub_dataset : CoLA, MNLI, MRPC, QNLI, QQP, RTE, SST-2, STS-B
if sub_dataset not in [
'CoLA', 'MNLI', 'MNLI_m', 'MNLI_mm', 'MRPC', 'QNLI', 'QQP',
'RTE', 'SST-2', 'STS-B'
]:
raise Exception(
"%s is not in GLUE benchmark. Please confirm the data set" %
sub_dataset)
mismatch = False
if sub_dataset == 'MNLI_mm':
sub_dataset = 'MNLI'
mismatch = True
elif sub_dataset == 'MNLI_m':
sub_dataset = 'MNLI'
self.sub_dataset = sub_dataset
        # test.tsv has no labels, so it is a predict file
dev_file = "dev.tsv"
predict_file = "test.tsv"
if sub_dataset == 'MNLI' and not mismatch:
dev_file = 'dev_matched.tsv'
predict_file = "test_matched.tsv"
elif sub_dataset == 'MNLI' and mismatch:
dev_file = 'dev_mismatched.tsv'
predict_file = "test_mismatched.tsv"
dataset_dir = os.path.join(DATA_HOME, "glue_data")
dataset_dir = self._download_dataset(dataset_dir, url=_DATA_URL)
base_path = os.path.join(dataset_dir, self.sub_dataset)
label_list = None
if sub_dataset in ['MRPC', 'QQP', 'SST-2', 'CoLA']:
label_list = ["0", "1"]
elif sub_dataset in ['QNLI', 'RTE']:
label_list = ['not_entailment', 'entailment']
elif sub_dataset in ['MNLI']:
label_list = ["neutral", "contradiction", "entailment"]
elif sub_dataset in ['STS-B']:
label_list = None
super(GLUE, self).__init__(
base_path=base_path,
train_file="train.tsv",
dev_file=dev_file,
predict_file=predict_file,
label_file=None,
label_list=label_list,
)
def _read_file(self, input_file, phase=None):
"""Reads a tab separated value file."""
with io.open(input_file, "r", encoding="UTF-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
examples = []
seq_id = 0
if self.sub_dataset != 'CoLA' or phase == "predict":
header = next(reader) # skip header
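            # Column layout differs per GLUE task; pick the (label, text_a,
            # text_b) column indices accordingly (None means that column is absent).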
if self.sub_dataset in [
'MRPC',
]:
if phase == "predict":
label_index, text_a_index, text_b_index = [None, -2, -1]
else:
label_index, text_a_index, text_b_index = [0, -2, -1]
elif self.sub_dataset in [
'QNLI',
]:
if phase == "predict":
label_index, text_a_index, text_b_index = [None, 1, 2]
else:
label_index, text_a_index, text_b_index = [3, 1, 2]
elif self.sub_dataset in [
'QQP',
]:
if phase == "predict":
label_index, text_a_index, text_b_index = [None, 1, 2]
else:
label_index, text_a_index, text_b_index = [5, 3, 4]
elif self.sub_dataset in [
'RTE',
]:
if phase == "predict":
label_index, text_a_index, text_b_index = [None, 1, 2]
else:
label_index, text_a_index, text_b_index = [3, 1, 2]
elif self.sub_dataset in [
'SST-2',
]:
if phase == "predict":
label_index, text_a_index, text_b_index = [None, 1, None]
else:
label_index, text_a_index, text_b_index = [1, 0, None]
elif self.sub_dataset in [
'MNLI',
]:
if phase == "predict":
label_index, text_a_index, text_b_index = [None, 8, 9]
else:
label_index, text_a_index, text_b_index = [-1, 8, 9]
elif self.sub_dataset in ['CoLA']:
if phase == "predict":
label_index, text_a_index, text_b_index = [None, 1, None]
else:
label_index, text_a_index, text_b_index = [1, 3, None]
elif self.sub_dataset in ['STS-B']:
if phase == "predict":
label_index, text_a_index, text_b_index = [None, -2, -1]
else:
label_index, text_a_index, text_b_index = [-1, -3, -2]
for line in reader:
try:
example = InputExample(
guid=seq_id,
text_a=line[text_a_index],
text_b=line[text_b_index]
if text_b_index is not None else None,
label=line[label_index]
if label_index is not None else None)
seq_id += 1
examples.append(example)
except:
logger.info("[Discard Incorrect Data] " + "\t".join(line))
return examples
if __name__ == "__main__":
for sub_dataset in [
'CoLA', 'MNLI', 'MRPC', 'QNLI', 'QQP', 'RTE', 'SST-2', 'STS-B'
]:
print(sub_dataset)
ds = GLUE(sub_dataset=sub_dataset)
for e in ds.get_train_examples()[:2]:
print(e)
print()
for e in ds.get_dev_examples()[:2]:
print(e)
print()
for e in ds.get_test_examples()[:2]:
print(e)
print()
for e in ds.get_predict_examples()[:2]:
print(e)
print()
```
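The per-task branches in `_read_file` above boil down to a (label_index, text_a_index, text_b_index) triple applied to each TSV row. Below is a minimal standalone sketch of that slicing on a fabricated SST-2-style row; the helper name `pick_fields` and the sample text are illustrative, not part of PaddleHub.
```python
# Standalone sketch (not part of PaddleHub): shows how the per-task
# (label_index, text_a_index, text_b_index) triples above slice one TSV row.
# The sample row is fabricated for demonstration only.
import csv
import io

def pick_fields(row, label_index, text_a_index, text_b_index):
    """Return (label, text_a, text_b) from one parsed TSV row, mirroring InputExample."""
    label = row[label_index] if label_index is not None else None
    text_a = row[text_a_index]
    text_b = row[text_b_index] if text_b_index is not None else None
    return label, text_a, text_b

if __name__ == "__main__":
    # An SST-2 style line: sentence \t label
    tsv = "a charming and often affecting journey .\t1\n"
    row = next(csv.reader(io.StringIO(tsv), delimiter="\t", quotechar=None))
    # SST-2 train/dev mapping from the class above: label=1, text_a=0, text_b=None
    print(pick_fields(row, 1, 0, None))
    # -> ('1', 'a charming and often affecting journey .', None)
```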
#### File: paddlehub/dataset/tnews.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
from paddlehub.dataset import InputExample, BaseDataset
from paddlehub.common.dir import DATA_HOME
_DATA_URL = "https://bj.bcebos.com/paddlehub-dataset/tnews.tar.gz"
LABEL_NAME = {
"100": "news_story",
"101": "news_culture",
"102": "news_entertainment",
"103": "news_sports",
"104": "news_finance",
"106": "news_house",
"107": "news_car",
"108": "news_edu",
"109": "news_tech",
"110": "news_military",
"112": "news_travel",
"113": "news_world",
"114": "stock",
"115": "news_agriculture",
"116": "news_game"
}
class TNews(BaseDataset):
"""
    TNews is the Chinese news classification dataset from the Jinri Toutiao app.
"""
def __init__(self):
dataset_dir = os.path.join(DATA_HOME, "tnews")
base_path = self._download_dataset(dataset_dir, url=_DATA_URL)
label_list = [
'100', '101', '102', '103', '104', '106', '107', '108', '109',
'110', '112', '113', '114', '115', '116'
]
super(TNews, self).__init__(
base_path=base_path,
train_file="toutiao_category_train.txt",
dev_file="toutiao_category_dev.txt",
test_file="toutiao_category_test.txt",
label_file=None,
label_list=label_list,
)
def get_label_name(self, id):
return LABEL_NAME[id]
def _read_file(self, input_file, phase=None):
"""Reads a tab separated value file."""
with io.open(input_file, "r", encoding="UTF-8") as file:
examples = []
for line in file:
data = line.strip().split("_!_")
example = InputExample(
guid=data[0], label=data[1], text_a=data[3])
examples.append(example)
return examples
if __name__ == "__main__":
ds = TNews()
print("first 10 dev")
for e in ds.get_dev_examples()[:10]:
print("{}\t{}\t{}\t{}".format(e.guid, e.text_a, e.text_b, e.label))
print("first 10 train")
for e in ds.get_train_examples()[:10]:
print("{}\t{}\t{}\t{}".format(e.guid, e.text_a, e.text_b, e.label))
print("first 10 test")
for e in ds.get_test_examples()[:10]:
print("{}\t{}\t{}\t{}".format(e.guid, e.text_a, e.text_b, e.label))
print(ds)
``` |
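`_read_file` above splits each raw line on the `"_!_"` separator and keeps fields 0, 1 and 3. The following standalone sketch shows that parsing on a fabricated sample line; the field layout `[id, label_code, label_name, title, keywords]` is an assumption inferred from the indexing above.
```python
# Standalone sketch (assumption: raw TNews lines are "_!_"-separated with fields
# [id, label_code, label_name, title, keywords], as implied by _read_file above).
# The sample line is fabricated for illustration.
sample = "6552339283632455939_!_102_!_news_entertainment_!_some made-up headline_!_keyword1,keyword2"

def parse_tnews_line(line):
    data = line.strip().split("_!_")
    # mirrors InputExample(guid=data[0], label=data[1], text_a=data[3])
    return {"guid": data[0], "label": data[1], "text_a": data[3]}

if __name__ == "__main__":
    print(parse_tnews_line(sample))
    # -> {'guid': '6552339283632455939', 'label': '102', 'text_a': 'some made-up headline'}
```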
{
"source": "jjandnn/ROMP",
"score": 2
} |
#### File: lib/maps_utils/result_parser.py
```python
import os,sys
import torch
import torch.nn as nn
import numpy as np
import logging
import config
from config import args
import constants
from models.smpl_wrapper import SMPLWrapper
from maps_utils import HeatmapParser,CenterMap
from utils.center_utils import process_gt_center
from utils.rot_6D import rot6D_to_angular
from loss_funcs.params_loss import batch_l2_loss_param
class ResultParser(nn.Module):
def __init__(self, with_smpl_parser=True):
super(ResultParser,self).__init__()
self.map_size = args().centermap_size
if with_smpl_parser:
self.params_map_parser = SMPLWrapper()
self.heatmap_parser = HeatmapParser()
self.centermap_parser = CenterMap()
self.match_preds_to_gts_for_supervision = args().match_preds_to_gts_for_supervision
def matching_forward(self, outputs, meta_data, cfg):
if args().model_version in [6,8,9]:
outputs,meta_data = self.match_params_new(outputs, meta_data, cfg)
else:
outputs,meta_data = self.match_params(outputs, meta_data, cfg)
if 'params_pred' in outputs:
outputs = self.params_map_parser(outputs,meta_data)
return outputs,meta_data
@torch.no_grad()
def parsing_forward(self, outputs, meta_data, cfg):
outputs, meta_data = self.parse_maps(outputs, meta_data, cfg)
if 'params_pred' in outputs:
outputs = self.params_map_parser(outputs,meta_data)
return outputs, meta_data
def match_params(self, outputs, meta_data, cfg):
gt_keys = ['params', 'full_kp2d', 'kp_3d', 'subject_ids', 'valid_masks']
exclude_keys = ['heatmap','centermap','AE_joints','person_centers','all_person_detected_mask']
center_gts_info = process_gt_center(meta_data['person_centers'])
center_preds_info = self.centermap_parser.parse_centermap(outputs['center_map'])
mc_centers = self.match_gt_pred(center_gts_info, center_preds_info, outputs['center_map'].device, cfg['is_training'])
batch_ids, flat_inds, person_ids = mc_centers['batch_ids'], mc_centers['flat_inds'], mc_centers['person_ids']
if len(batch_ids)==0:
if 'new_training' in cfg:
if cfg['new_training']:
outputs['detection_flag'] = torch.Tensor([False for _ in range(len(meta_data['batch_ids']))]).cuda()
outputs['reorganize_idx'] = meta_data['batch_ids'].cuda()
return outputs, meta_data
batch_ids, flat_inds = torch.zeros(1).long().to(outputs['center_map'].device), (torch.ones(1)*self.map_size**2/2.).to(outputs['center_map'].device).long()
person_ids = batch_ids.clone()
outputs['detection_flag'] = torch.Tensor([True for _ in range(len(batch_ids))]).cuda()
if 'params_maps' in outputs and 'params_pred' not in outputs:
outputs['params_pred'] = self.parameter_sampling(outputs['params_maps'], batch_ids, flat_inds, use_transform=True)
outputs, meta_data = self.reorganize_data(outputs, meta_data, exclude_keys, gt_keys, batch_ids, person_ids)
        outputs['centers_pred'] = torch.stack([flat_inds%args().centermap_size, torch.div(flat_inds, args().centermap_size, rounding_mode='floor')], 1)
return outputs, meta_data
def match_gt_pred(self,center_gts_info, center_preds_info, device, is_training):
vgt_batch_ids, vgt_person_ids, vgt_centers = center_gts_info
vpred_batch_ids, flat_inds, cyxs, top_score = center_preds_info
mc = {key:[] for key in ['batch_ids', 'flat_inds', 'person_ids', 'conf']}
if self.match_preds_to_gts_for_supervision:
for match_ind in torch.arange(len(vgt_batch_ids)):
batch_id, person_id, center_gt = vgt_batch_ids[match_ind], vgt_person_ids[match_ind], vgt_centers[match_ind]
pids = torch.where(vpred_batch_ids==batch_id)[0]
if len(pids) == 0:
continue
closet_center_ind = pids[torch.argmin(torch.norm(cyxs[pids].float()-center_gt[None].float().to(device),dim=-1))]
center_matched = cyxs[closet_center_ind].long()
cy, cx = torch.clamp(center_matched, 0, self.map_size-1)
flat_ind = cy*args().centermap_size+cx
mc['batch_ids'].append(batch_id)
mc['flat_inds'].append(flat_ind)
mc['person_ids'].append(person_id)
mc['conf'].append(top_score[closet_center_ind])
keys_list = list(mc.keys())
for key in keys_list:
if key != 'conf':
mc[key] = torch.Tensor(mc[key]).long().to(device)
if args().max_supervise_num!=-1 and is_training:
mc[key] = mc[key][:args().max_supervise_num]
else:
mc['batch_ids'] = vgt_batch_ids.long().to(device)
mc['flat_inds'] = flatten_inds(vgt_centers.long()).to(device)
mc['person_ids'] = vgt_person_ids.long().to(device)
mc['conf'] = torch.zeros(len(vgt_person_ids)).to(device)
return mc
def parameter_sampling(self, maps, batch_ids, flat_inds, use_transform=True):
device = maps.device
if use_transform:
batch, channel = maps.shape[:2]
maps = maps.view(batch, channel, -1).permute((0, 2, 1)).contiguous()
results = maps[batch_ids,flat_inds].contiguous()
return results
def reorganize_gts(self, meta_data, key_list, batch_ids):
for key in key_list:
if key in meta_data:
if isinstance(meta_data[key], torch.Tensor):
meta_data[key] = meta_data[key][batch_ids]
elif isinstance(meta_data[key], list):
meta_data[key] = np.array(meta_data[key])[batch_ids.cpu().numpy()]
return meta_data
def reorganize_data(self, outputs, meta_data, exclude_keys, gt_keys, batch_ids, person_ids):
exclude_keys += gt_keys
outputs['reorganize_idx'] = meta_data['batch_ids'][batch_ids]
info_vis = []
for key, item in meta_data.items():
if key not in exclude_keys:
info_vis.append(key)
meta_data = self.reorganize_gts(meta_data, info_vis, batch_ids)
for gt_key in gt_keys:
if gt_key in meta_data:
try:
meta_data[gt_key] = meta_data[gt_key][batch_ids,person_ids]
except Exception as error:
print(gt_key,'meets error: ',error)
return outputs,meta_data
@torch.no_grad()
def parse_maps(self,outputs, meta_data, cfg):
center_preds_info = self.centermap_parser.parse_centermap_heatmap_adaptive_scale_batch(outputs['center_map'])
batch_ids, flat_inds, cyxs, top_score = center_preds_info
if len(batch_ids)==0:
if 'new_training' in cfg:
if cfg['new_training']:
outputs['detection_flag'] = torch.Tensor([False for _ in range(len(meta_data['batch_ids']))]).cuda()
outputs['reorganize_idx'] = meta_data['batch_ids'].cuda()
return outputs, meta_data
batch_ids, flat_inds = torch.zeros(1).long().to(outputs['center_map'].device), (torch.ones(1)*self.map_size**2/2.).to(outputs['center_map'].device).long()
person_ids = batch_ids.clone()
outputs['detection_flag'] = torch.Tensor([False for _ in range(len(batch_ids))]).cuda()
else:
outputs['detection_flag'] = torch.Tensor([True for _ in range(len(batch_ids))]).cuda()
if 'params_pred' not in outputs and 'params_maps' in outputs:
outputs['params_pred'] = self.parameter_sampling(outputs['params_maps'], batch_ids, flat_inds, use_transform=True)
if 'centers_pred' not in outputs:
outputs['centers_pred'] = torch.stack([flat_inds%args().centermap_size, torch.div(flat_inds, args().centermap_size, rounding_mode='floor')], 1)
outputs['centers_conf'] = self.parameter_sampling(outputs['center_map'], batch_ids, flat_inds, use_transform=True)
outputs['reorganize_idx'] = meta_data['batch_ids'][batch_ids]
info_vis = ['image', 'offsets','imgpath']
meta_data = self.reorganize_gts(meta_data, info_vis, batch_ids)
return outputs,meta_data
def parse_kps(self, heatmap_AEs, kp2d_thresh=0.1):
kps = []
heatmap_AE_results = self.heatmap_parser.batch_parse(heatmap_AEs.detach())
for batch_id in range(len(heatmap_AE_results)):
kp2d, kp2d_conf = heatmap_AE_results[batch_id]
kps.append(kp2d[np.array(kp2d_conf)>kp2d_thresh])
return kps
def flatten_inds(coords):
coords = torch.clamp(coords, 0, args().centermap_size-1)
return coords[:,0].long()*args().centermap_size+coords[:,1].long()
def _check_params_pred_(params_pred_shape, batch_length):
assert len(params_pred_shape)==2, logging.error('outputs[params_pred] dimension less than 2, is {}'.format(len(params_pred_shape)))
assert params_pred_shape[0]==batch_length, logging.error('sampled length not equal.')
def _check_params_sampling_(param_maps_shape, dim_start, dim_end, batch_ids, sampler_flat_inds_i):
assert len(param_maps_shape)==3, logging.error('During parameter sampling, param_maps dimension is not equal 3, is {}'.format(len(param_maps_shape)))
assert param_maps_shape[2]>dim_end>=dim_start, \
logging.error('During parameter sampling, param_maps dimension -1 is not larger than dim_end and dim_start, they are {},{},{}'.format(param_maps_shape[-1],dim_end,dim_start))
assert (batch_ids>=param_maps_shape[0]).sum()==0, \
logging.error('During parameter sampling, batch_ids {} out of boundary, param_maps_shape[0] is {}'.format(batch_ids,param_maps_shape[0]))
assert (sampler_flat_inds_i>=param_maps_shape[1]).sum()==0, \
logging.error('During parameter sampling, sampler_flat_inds_i {} out of boundary, param_maps_shape[1] is {}'.format(sampler_flat_inds_i,param_maps_shape[1]))
```
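`parameter_sampling` above flattens each `[batch, channel, H, W]` map into `[batch, H*W, channel]` and indexes it with `(batch_id, flat_ind)`, where `flat_ind = cy * map_size + cx` as in `flatten_inds`. Below is a toy, self-contained check of that convention; the helper name `sample_params` and the tensor values are made up.
```python
# Toy illustration (assumption: parameter maps are [batch, channel, H, W] and
# flat_ind = cy * W + cx, as in parameter_sampling / flatten_inds above).
import torch

def sample_params(maps, batch_ids, flat_inds):
    batch, channel = maps.shape[:2]
    flat = maps.view(batch, channel, -1).permute(0, 2, 1).contiguous()  # [B, H*W, C]
    return flat[batch_ids, flat_inds]

if __name__ == "__main__":
    maps = torch.arange(2 * 3 * 4 * 4, dtype=torch.float32).view(2, 3, 4, 4)
    cy, cx, map_size = 1, 2, 4
    flat_ind = torch.tensor([cy * map_size + cx])
    batch_ids = torch.tensor([0])
    sampled = sample_params(maps, batch_ids, flat_ind)
    print(sampled.shape)                                     # torch.Size([1, 3])
    print(torch.allclose(sampled[0], maps[0, :, cy, cx]))    # True
```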
#### File: lib/visualization/renderer_pt3d.py
```python
import sys, os
import json
import torch
from torch import nn
import pickle
# Data structures and functions for rendering
from pytorch3d.structures import Meshes, join_meshes_as_scene
from pytorch3d.renderer import (
look_at_view_transform,
FoVPerspectiveCameras,
FoVOrthographicCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader,
TexturesUV,
TexturesVertex,
)
import numpy as np
import config
import constants
from config import args
from models import smpl_model
colors = {
'pink': [.7, .7, .9],
'neutral': [.9, .9, .8],
'capsule': [.7, .75, .5],
'yellow': [.5, .7, .75],
}
class Renderer(nn.Module):
def __init__(self, resolution=(512,512), perps=True, R=None, T=None, use_gpu='-1' not in str(args().GPUS)):
super(Renderer, self).__init__()
self.perps = perps
if use_gpu:
self.device = torch.device('cuda:{}'.format(str(args().GPUS).split(',')[0]))
print('visualize in gpu mode')
else:
self.device = torch.device('cpu')
print('visualize in cpu mode')
if R is None:
R = torch.Tensor([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]])
if T is None:
T = torch.Tensor([[0., 0., 0.]])
if self.perps:
self.cameras = FoVPerspectiveCameras(R=R, T=T, fov=args().FOV, device=self.device)
self.lights = PointLights(ambient_color=((0.56, 0.56, 0.56),),location=torch.Tensor([[0., 0., 0.]]), device=self.device)
else:
self.cameras = FoVOrthographicCameras(R=R, T=T, znear=0., zfar=100.0, max_y=1.0, min_y=-1.0, max_x=1.0, min_x=-1.0, device=self.device)
self.lights = DirectionalLights(direction=torch.Tensor([[0., 1., 0.]]), device=self.device)
# Define the settings for rasterization and shading. Here we set the output image to be of size
# 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1
# and blur_radius=0.0.
raster_settings = RasterizationSettings(
image_size=resolution[0],
blur_radius=0.0,
faces_per_pixel=1)
# Create a Phong renderer by composing a rasterizer and a shader. The textured Phong shader will
# interpolate the texture uv coordinates for each vertex, sample from a texture image and
# apply the Phong lighting model
self.renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=self.cameras,
raster_settings=raster_settings),
shader=SoftPhongShader(
device=self.device,
cameras=self.cameras,
lights=self.lights))
def __call__(self, verts, faces, colors=torch.Tensor(colors['neutral']), merge_meshes=True, cam_params=None,**kwargs):
        assert len(verts.shape) == 3, 'The input verts of the visualizer must be a 3-dim (N x 6890 x 3) tensor'
verts, faces = verts.to(self.device), faces.to(self.device)
verts_rgb = torch.ones_like(verts)
verts_rgb[:, :] = torch.from_numpy(colors).cuda().unsqueeze(1)
textures = TexturesVertex(verts_features=verts_rgb)
verts[:,:,:2] *= -1
meshes = Meshes(verts, faces, textures)
if merge_meshes:
meshes = join_meshes_as_scene(meshes)
if cam_params is not None:
if self.perps:
R, T, fov = cam_params
new_cam = FoVPerspectiveCameras(R=R, T=T, fov=fov, device=self.device)
else:
R, T, xyz_ranges = cam_params
new_cam = FoVOrthographicCameras(R=R, T=T, **xyz_ranges, device=self.device)
images = self.renderer(meshes,cameras=new_cam)
else:
images = self.renderer(meshes)
images[:,:,:-1] *= 255
return images
def get_renderer(test=False,**kwargs):
renderer = Renderer(**kwargs)
if test:
import cv2
dist = 1/np.tan(np.radians(args().FOV/2.))
print('dist:', dist)
model = pickle.load(open(os.path.join(args().smpl_model_path,'smpl','SMPL_NEUTRAL.pkl'),'rb'), encoding='latin1')
np_v_template = torch.from_numpy(np.array(model['v_template'])).cuda().float()[None]
face = torch.from_numpy(model['f'].astype(np.int32)).cuda()[None]
np_v_template = np_v_template.repeat(2,1,1)
np_v_template[1] += 0.3
np_v_template[:,:,2] += dist
face = face.repeat(2,1,1)
result = renderer(np_v_template, face).cpu().numpy()
for ri in range(len(result)):
cv2.imwrite('test{}.png'.format(ri),(result[ri,:,:,:3]*255).astype(np.uint8))
return renderer
if __name__ == '__main__':
get_renderer(test=True, perps=True)
```
#### File: simple_romp/bev/split2process.py
```python
import numpy as np
import cv2
import torch
from .post_parser import remove_subjects
def padding_image_overlap(image, overlap_ratio=0.46):
h, w = image.shape[:2]
pad_length = int(h* overlap_ratio)
pad_w = w+2*pad_length
pad_image = np.zeros((h, pad_w, 3), dtype=np.uint8)
top, left = 0, pad_length
bottom, right = h, w+pad_length
pad_image[top:bottom, left:right] = image
    # BEV takes a square input, so convert top/bottom to the values they would have under square padding
pad_height = (w - h)//2
top = pad_height
bottom = w - top
left = 0
right = w
image_pad_info = torch.Tensor([top, bottom, left, right, h, w])
return pad_image, image_pad_info, pad_length
def get_image_split_plan(image, overlap_ratio=0.46):
h, w = image.shape[:2]
aspect_ratio = w / h
slide_time = int(np.ceil((aspect_ratio - 1) / (1 - overlap_ratio))) + 1
crop_box = [] # left, right, top, bottom
move_step = (1 - overlap_ratio) * h
for ind in range(slide_time):
if ind == (slide_time-1):
left = w-h
else:
left = move_step * ind
right = left+h
crop_box.append([left, right, 0, h])
return np.array(crop_box).astype(np.int32)
def exclude_boudary_subjects(outputs, drop_boundary_ratio, ptype='left', torlerance=0.05):
if ptype=='left':
drop_mask = outputs['cam'][:, 2] > (1 - drop_boundary_ratio + torlerance)
elif ptype=='right':
drop_mask = outputs['cam'][:, 2] < (drop_boundary_ratio - 1 - torlerance)
remove_subjects(outputs, torch.where(drop_mask)[0])
def convert_crop_cam_params2full_image(cam_params, crop_bbox, image_shape):
h, w = image_shape
# adjust scale, cam 3: depth, y, x
scale_adjust = (crop_bbox[[1,3]]-crop_bbox[[0,2]]).max() / max(h, w)
cam_params *= scale_adjust
# adjust x
# crop_bbox[:2] -= pad_length
bbox_mean_x = crop_bbox[:2].mean()
cam_params[:,2] += bbox_mean_x / (w /2) - 1
return cam_params
def collect_outputs(outputs, all_outputs):
keys = list(outputs.keys())
for key in keys:
if key not in all_outputs:
all_outputs[key] = outputs[key]
else:
if key in ['smpl_face']:
continue
if key in ['center_map']:
all_outputs[key] = torch.cat([all_outputs[key], outputs[key]],3)
continue
if key in ['center_map_3d']:
all_outputs[key] = torch.cat([all_outputs[key], outputs[key]],2)
continue
all_outputs[key] = torch.cat([all_outputs[key], outputs[key]],0)
```
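`get_image_split_plan` above slides a square h x h window across a wide frame with the given overlap and snaps the last crop to the right edge. A standalone sketch follows (the split logic is copied from the function above, and the 512 x 2000 image shape is made up) that prints the resulting crop boxes.
```python
# Quick standalone check of the sliding-window plan (logic copied from
# get_image_split_plan above; the 512 x 2000 frame size is made up).
import numpy as np

def split_plan(h, w, overlap_ratio=0.46):
    aspect_ratio = w / h
    slide_time = int(np.ceil((aspect_ratio - 1) / (1 - overlap_ratio))) + 1
    move_step = (1 - overlap_ratio) * h
    boxes = []
    for ind in range(slide_time):
        left = w - h if ind == (slide_time - 1) else move_step * ind
        boxes.append([left, left + h, 0, h])
    return np.array(boxes).astype(np.int32)

if __name__ == "__main__":
    boxes = split_plan(512, 2000)
    print(boxes)   # each row: [left, right, top, bottom]; every crop is 512 wide
    assert all((r - l) == 512 for l, r, _, _ in boxes)
```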
#### File: simple_romp/tools/convert_checkpoints.py
```python
from os import remove
from sklearn.model_selection import PredefinedSplit
import torch
import sys
def remove_prefix(state_dict, prefix='module.', remove_keys=['_result_parser', '_calc_loss']):
keys = list(state_dict.keys())
    print('original keys:', keys)
for key in keys:
exist_flag = True
for rkey in remove_keys:
if rkey in key:
del state_dict[key]
exist_flag = False
if not exist_flag:
continue
if prefix in key:
state_dict[key.replace(prefix, '')] = state_dict[key]
del state_dict[key]
keys = list(state_dict.keys())
print('new keys:', keys)
return state_dict
if __name__ == '__main__':
model_path = sys.argv[1]
save_path = sys.argv[2]
state_dict = remove_prefix(torch.load(model_path), prefix='module.')
torch.save(state_dict, save_path)
```
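A toy demonstration of the renaming performed by `remove_prefix` above, assuming that function is in scope (for example, this snippet is appended to the same file); the state dict keys are fabricated.
```python
# Toy demonstration of the key renaming above; the state dict is fabricated.
from collections import OrderedDict
import torch

if __name__ == "__main__":
    fake_state = OrderedDict({
        "module.backbone.conv1.weight": torch.zeros(1),
        "module._result_parser.some_buffer": torch.zeros(1),
        "module.head.fc.bias": torch.zeros(1),
    })
    cleaned = remove_prefix(fake_state, prefix="module.")
    print(list(cleaned.keys()))
    # expected: ['backbone.conv1.weight', 'head.fc.bias'] -- the "_result_parser"
    # entry is dropped and the "module." prefix is stripped from the rest.
```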
#### File: simple_romp/vis_human/vis_utils.py
```python
import cv2
import torch
import numpy as np
color_list = np.array([[.7, .7, .6],[.7, .5, .5],[.5, .5, .7], [.5, .55, .3],[.3, .5, .55], \
[1,0.855,0.725],[0.588,0.804,0.804],[1,0.757,0.757], [0.933,0.474,0.258],[0.847,191/255,0.847], [0.941,1,1]])
focal_length = 443.4
def get_rotate_x_mat(angle):
angle = np.radians(angle)
rot_mat = torch.Tensor([
[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
return rot_mat
def get_rotate_y_mat(angle):
angle = np.radians(angle)
rot_mat = torch.Tensor([
[np.cos(angle), 0, np.sin(angle)],
[0, 1, 0],
[-np.sin(angle), 0, np.cos(angle)]])
return rot_mat
def rotate_view_weak_perspective(verts, rx=30, ry=0, img_shape=[512,512], expand_ratio=1.2, bbox3D_center=None, scale=None):
device, dtype = verts.device, verts.dtype
h, w = img_shape
# front2birdview: rx=90, ry=0 ; front2sideview: rx=0, ry=90
Rx_mat = get_rotate_x_mat(rx).type(dtype).to(device)
Ry_mat = get_rotate_y_mat(ry).type(dtype).to(device)
verts_rot = torch.einsum('bij,kj->bik', verts, Rx_mat)
verts_rot = torch.einsum('bij,kj->bik', verts_rot, Ry_mat)
if bbox3D_center is None:
flatten_verts = verts_rot.view(-1, 3)
# To move the vertices to the center of view, we get the bounding box of vertices and its center location
bbox3D_center = 0.5 * (flatten_verts.min(0).values + flatten_verts.max(0).values)[None, None]
verts_aligned = verts_rot - bbox3D_center
rendered_image_center = torch.Tensor([[[w / 2, h / 2]]]).to(device).type(verts_aligned.dtype)
if scale is None:
# To ensure all vertices are visible, we need to rescale the vertices
scale = 1 / (expand_ratio * torch.abs(torch.div(verts_aligned[:,:,:2], rendered_image_center)).max())
# move to the center of rendered image
verts_aligned *= scale
verts_aligned[:,:,:2] += rendered_image_center
return verts_aligned, bbox3D_center, scale
def rotate_view_perspective(verts, rx=30, ry=0, FOV=60, bbox3D_center=None, depth=None):
device, dtype = verts.device, verts.dtype
# front2birdview: rx=90, ry=0 ; front2sideview: rx=0, ry=90
Rx_mat = get_rotate_x_mat(rx).type(dtype).to(device)
Ry_mat = get_rotate_y_mat(ry).type(dtype).to(device)
verts_rot = torch.einsum('bij,kj->bik', verts, Rx_mat)
verts_rot = torch.einsum('bij,kj->bik', verts_rot, Ry_mat)
if bbox3D_center is None:
flatten_verts = verts_rot.view(-1, 3)
# To move the vertices to the center of view, we get the bounding box of vertices and its center location
bbox3D_center = 0.5 * (flatten_verts.min(0).values + flatten_verts.max(0).values)[None, None]
verts_aligned = verts_rot - bbox3D_center
if depth is None:
# To ensure all vertices are visible, we need to move them further.
# get the least / the greatest distance between the center of 3D bbox and all vertices
dist_min = torch.abs(verts_aligned.view(-1, 3).min(0).values)
dist_max = torch.abs(verts_aligned.view(-1, 3).max(0).values)
z = dist_max[:2].max() / np.tan(np.radians(FOV/2)) + dist_min[2]
depth = torch.tensor([[[0, 0, z]]], device=device)
verts_aligned = verts_aligned + depth
return verts_aligned, bbox3D_center, depth
def get_rendering_results(result_image, org_depth_map, verts_shifted, offsets, faces, renderer, depth_map_thresh=1, visible_weight=0.9):
color = torch.Tensor([.9, .9, .8])
rendered_result, depth_map = renderer(verts_shifted, faces, colors=color, merge_meshes=True)
depth_map = depth_map[0].cpu().numpy()[:,:,0]
transparent = rendered_result[0, :, :, -1].cpu().numpy()
#print(depth_map[transparent > 0.])
valid_mask = (transparent > 0.).astype(np.uint8) * 255#[:, :,np.newaxis]
rendered_img = (rendered_result[0, :,:,:-1].cpu().numpy() * 255).astype(np.uint8)
crop_start, crop_end, pad_left, pad_right, pad_input_w, pad_input_h = offsets
pad_rendered_img = cv2.resize(rendered_img, (pad_input_w, pad_input_h))[:,pad_left:-pad_right]
#valid_mask = (cv2.resize(valid_mask, (pad_input_w, pad_input_h))[:,pad_left:-pad_right]>128)[:,:,None]
depth_map = cv2.resize(depth_map, (pad_input_w, pad_input_h))[:,pad_left:-pad_right]
depth_map[depth_map<depth_map_thresh] = 1000
valid_mask = (depth_map < org_depth_map[:,crop_start:crop_end])
org_depth_map[:,crop_start:crop_end][valid_mask] = depth_map[valid_mask]
valid_mask = valid_mask[:,:,None]
result_image[:,crop_start:crop_end] = pad_rendered_img * valid_mask + result_image[:,crop_start:crop_end] * valid_mask * (1-visible_weight) + (1 - valid_mask) * result_image[:,crop_start:crop_end]
return result_image, org_depth_map
def rendering_mesh_rotating_view(vert_trans, renderer, triangles, image, background, internal=5):
result_imgs = []
pause_num = 24
pause = np.zeros(pause_num).astype(np.int32)
change_time = 90//internal
roates = np.ones(change_time) * internal
go_up = np.sin(np.arange(change_time).astype(np.float32)/change_time) * 1
go_down = np.sin(np.arange(change_time).astype(np.float32)/change_time - 1) * 1
azimuth_angles = np.concatenate([pause, roates, roates, roates, roates])
elevation_angles = np.concatenate([pause, go_up, go_down, go_up, go_down])
camera_pose = np.eye(4)
elevation_start = 20
camera_pose[:3,:3] = get_rotate_x_mat(-elevation_start)
cam_height = 1.4*vert_trans[:,:,2].mean().item()*np.tan(np.radians(elevation_start))
camera_pose[:3,3] = np.array([0,cam_height,0]) # translation
verts_rotated = vert_trans.clone()
bbox3D_center, move_depth = None, None
for azimuth_angle, elevation_angle in zip(azimuth_angles, elevation_angles):
verts_rotated, bbox3D_center, move_depth = rotate_view_perspective(verts_rotated, rx=0, ry=azimuth_angle, depth=move_depth)
rendered_image, rend_depth = renderer(verts_rotated.cpu().numpy(), triangles, background, mesh_colors=np.array([[0.9, 0.9, 0.8]]), camera_pose=camera_pose)
result_imgs.append(rendered_image)
return result_imgs
color_table_default = np.array([
[0.4, 0.6, 1], # blue
[0.8, 0.7, 1], # pink
[0.1, 0.9, 1], # cyan
[0.8, 0.9, 1], # gray
[1, 0.6, 0.4], # orange
[1, 0.7, 0.8], # rose
[1, 0.9, 0.1], # Yellow
[1, 0.9, 0.8], # skin
[0.9, 1, 1], # light blue
[0.9, 0.7, 0.4], # brown
[0.8, 0.7, 1], # purple
[0.8, 0.9, 1], # light blue 2
[0.9, 0.3, 0.1], # red
[0.7, 1, 0.6], # green
[0.7, 0.4, 0.6], # dark purple
[0.3, 0.5, 1], # deep blue
])[:,::-1]
def mesh_color_left2right(trans, color_table=None):
left2right_order = torch.sort(trans[:,0].cpu()).indices.numpy()
color_inds = np.arange(len(trans))
color_inds[left2right_order] = np.arange(len(trans))
if color_table is None:
color_table = color_table_default
return color_table[color_inds % len(color_table)]
smpl24_connMat = np.array([0,1, 0,2, 0,3, 1,4,4,7,7,10, 2,5,5,8,8,11, 3,6,6,9,9,12,12,15, 12,13,13,16,16,18,18,20,20,22, 12,14,14,17,17,19,19,21,21,23]).reshape(-1, 2)
def draw_skeleton(image, pts, bones=smpl24_connMat, cm=None, label_kp_order=False,r=8):
for i,pt in enumerate(pts):
if len(pt)>1:
if pt[0]>0 and pt[1]>0:
image = cv2.circle(image,(int(pt[0]), int(pt[1])),r,cm[i%len(cm)],-1)
if label_kp_order and i in bones:
img=cv2.putText(image,str(i),(int(pt[0]), int(pt[1])),cv2.FONT_HERSHEY_COMPLEX,1,(255,215,0),1)
if bones is not None:
        set_colors = np.array([cm for i in range(len(bones))]).astype(int)
bones = np.concatenate([bones,set_colors],1).tolist()
for line in bones:
pa = pts[line[0]]
pb = pts[line[1]]
if (pa>0).all() and (pb>0).all():
xa,ya,xb,yb = int(pa[0]),int(pa[1]),int(pb[0]),int(pb[1])
image = cv2.line(image,(xa,ya),(xb,yb),(int(line[2]), int(line[3]), int(line[4])), r)
return image
def draw_skeleton_multiperson(image, pts_group, colors):
for ind, pts in enumerate(pts_group):
image = draw_skeleton(image, pts, cm=colors[ind])
return image
import math
class Plotter3dPoses:
def __init__(self, canvas_size=(512,512), origin=(0.5, 0.5), scale=200):
self.canvas_size = canvas_size
self.origin = np.array([origin[1] * canvas_size[1], origin[0] * canvas_size[0]], dtype=np.float32) # x, y
self.scale = np.float32(scale)
self.theta, self.phi = 0, np.pi/2 #np.pi/4, -np.pi/6
axis_length = 200
axes = [
np.array([[-axis_length/2, -axis_length/2, 0], [axis_length/2, -axis_length/2, 0]], dtype=np.float32),
np.array([[-axis_length/2, -axis_length/2, 0], [-axis_length/2, axis_length/2, 0]], dtype=np.float32),
np.array([[-axis_length/2, -axis_length/2, 0], [-axis_length/2, -axis_length/2, axis_length]], dtype=np.float32)]
step = 20
for step_id in range(axis_length // step + 1): # add grid
axes.append(np.array([[-axis_length / 2, -axis_length / 2 + step_id * step, 0],
[axis_length / 2, -axis_length / 2 + step_id * step, 0]], dtype=np.float32))
axes.append(np.array([[-axis_length / 2 + step_id * step, -axis_length / 2, 0],
[-axis_length / 2 + step_id * step, axis_length / 2, 0]], dtype=np.float32))
self.axes = np.array(axes)
def plot(self, pose_3ds, bones=smpl24_connMat, colors=[(255, 0, 0)], img=None):
img = np.ones((self.canvas_size[0],self.canvas_size[1],3), dtype=np.uint8) * 0 if img is None else img
R = self._get_rotation(self.theta, self.phi)
#self._draw_axes(img, R)
for vertices, color in zip(pose_3ds,colors):
self._plot_edges(img, vertices, bones, R, color)
return img
def encircle_plot(self, pose_3ds, bones, colors=[(255, 255, 255)], img=None):
img = np.ones((self.canvas_size[0],self.canvas_size[1],3), dtype=np.uint8) * 255 if img is None else img
#encircle_theta, encircle_phi = [0, np.pi/4, np.pi/2, 3*np.pi/4], [np.pi/2,np.pi/2,np.pi/2,np.pi/2]
encircle_theta, encircle_phi = [0,0,0, np.pi/4,np.pi/4,np.pi/4, np.pi/2,np.pi/2,np.pi/2], [np.pi/2, 5*np.pi/7, -2*np.pi/7, np.pi/2, 5*np.pi/7, -2*np.pi/7, np.pi/2, 5*np.pi/7, -2*np.pi/7,]
encircle_origin = np.array([[0.165, 0.165], [0.165, 0.495], [0.165, 0.825],\
[0.495, 0.165], [0.495, 0.495], [0.495, 0.825],\
[0.825, 0.165], [0.825, 0.495], [0.825, 0.825]], dtype=np.float32) * np.array(self.canvas_size)[None]
for self.theta, self.phi, self.origin in zip(encircle_theta, encircle_phi, encircle_origin):
R = self._get_rotation(self.theta, self.phi)
#self._draw_axes(img, R)
for vertices, color in zip(pose_3ds,colors):
self._plot_edges(img, vertices*0.6, bones, R, color)
return img
def _draw_axes(self, img, R):
axes_2d = np.dot(self.axes, R)
axes_2d = axes_2d + self.origin
for axe in axes_2d:
axe = axe.astype(int)
cv2.line(img, tuple(axe[0]), tuple(axe[1]), (128, 128, 128), 1, cv2.LINE_AA)
def _plot_edges(self, img, vertices, edges, R, color):
vertices_2d = np.dot(vertices, R)
edges_vertices = vertices_2d.reshape((-1, 2))[edges] * self.scale + self.origin
org_verts = vertices.reshape((-1, 3))[edges]
for inds, edge_vertices in enumerate(edges_vertices):
if 0 in org_verts[inds]:
continue
edge_vertices = edge_vertices.astype(int)
cv2.line(img, tuple(edge_vertices[0]), tuple(edge_vertices[1]), color, 10, cv2.LINE_AA)
def _get_rotation(self, theta, phi):
sin, cos = math.sin, math.cos
return np.array([
[ cos(theta), sin(theta) * sin(phi)],
[-sin(theta), cos(theta) * sin(phi)],
[ 0, -cos(phi)]
], dtype=np.float32) # transposed
``` |
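`get_rotate_x_mat` above builds the standard rotation matrix about the x axis, and the view helpers apply it through `torch.einsum('bij,kj->bik', ...)`, i.e. multiplication by the transpose. Here is a small standalone sanity check: rotating the point (0, 1, 0) by 90 degrees about x should land near (0, 0, 1). The helper name `rot_x` is illustrative.
```python
# Small standalone sanity check: rotating (0, 1, 0) by 90 degrees about the
# x axis should give roughly (0, 0, 1). Mirrors get_rotate_x_mat above.
import numpy as np
import torch

def rot_x(angle_deg):
    a = np.radians(angle_deg)
    return torch.Tensor([
        [1, 0, 0],
        [0, np.cos(a), -np.sin(a)],
        [0, np.sin(a), np.cos(a)]])

if __name__ == "__main__":
    p = torch.Tensor([[0., 1., 0.]])
    # same contraction convention as the einsum calls above (multiply by R transposed)
    rotated = torch.einsum('bj,kj->bk', p, rot_x(90))
    print(rotated)   # approximately [[0., 0., 1.]] up to floating point error
```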
{
"source": "jjanetzki/HackHPI-2017",
"score": 4
} |
#### File: frontend/alexa/alexa.py
```python
from __future__ import print_function
import requests
import json
alcohol_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/add"
caffeine_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/caffeine/add"
profile_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/setprofile"
caffeine_recommendation_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/caffeine/recommendation"
alcohol_recommendation_url = "https://hpi.de/naumann/sites/ingestion/hackhpi/alcohol/recommendation"
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "DrinkIntend":
return get_drink_response(intent_request)
elif intent_name == "DrinkFinishedIntend":
return get_finished_drink(intent_request)
elif intent_name == "CaffeineIntend":
return get_caffeine(intent_request)
elif intent_name == "AlcoholIntend":
return get_alcohol(intent_request)
elif intent_name == "CaffeineRecommendationIntend":
return get_caffeine_recommendation()
elif intent_name == "AlcoholRecommendationIntend":
return get_alcohol_recommendation()
elif intent_name == "CaffeineLevelIntend":
return get_caffeine_level()
elif intent_name == "AlcoholLevelIntend":
return get_alcohol_level()
elif intent_name == "SexIntend":
return set_sex(intent_request)
elif intent_name == "BodyweightIntend":
return set_bodyweight(intent_request)
elif intent_name == "AgeIntend":
return set_age(intent_request)
elif intent_name == "AMAZON.HelpIntent":
return get_help_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
session_attributes = {}
card_title = "Welcome"
speech_output = "Welcome to the Productivity Bot. I will help you stay in your Ballmer Peak."
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with the same text.
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def get_help_response():
session_attributes = {}
card_title = "Help"
speech_output = "Welcome to the help section for the Productivity Bot. A couple of examples of phrases that I can except are... What shall I drink... or, how much alcohol does a drink contain. Lets get started now by trying one of these."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_drink_response(intent_request):
session_attributes = {}
card_title = "Drink response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
requests.post(caffeine_url, json={"drink": drink}) # todo: specify serving (ml)
requests.post(alcohol_url, json={"drink": drink}) # todo: specify serving (ml)
speech_output = f"Enjoy your {drink}."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_finished_drink(intent_request):
session_attributes = {}
card_title = "Finished drink response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
# requests.post("https://hpi.de/naumann/sites/ingestion/hackhpi/", json={"drink finished": drink})
speech_output = f"I hope your {drink} was tasty."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_caffeine_recommendation():
session_attributes = {}
card_title = "Caffeine recommendation response"
json_answer = requests.get(caffeine_recommendation_url).text
speech_output = json.loads(json_answer)["results"]
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_alcohol_recommendation():
session_attributes = {}
card_title = "Alcohol recommendation response"
json_answer = requests.get(alcohol_recommendation_url).text
speech_output = json.loads(json_answer)["results"]
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_caffeine(intent_request):
session_attributes = {}
card_title = "Caffeine response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
speech_output = f"{drink} contains a lot of caffeine."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_alcohol(intent_request):
session_attributes = {}
card_title = "Alcohol response"
drink = intent_request["intent"]["slots"]["Drink"]["value"]
speech_output = f"{drink} contains a lot of alcohol."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_caffeine_level():
session_attributes = {}
card_title = "Caffeine level response"
speech_output = "Your caffeine level is over 9000."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def get_alcohol_level():
session_attributes = {}
card_title = "Alcohol level response"
speech_output = "Your alcohol level is over 9000."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def set_sex(intent_request):
session_attributes = {}
card_title = "Sex response"
sex = intent_request["intent"]["slots"]["Sex"]["value"]
requests.post(profile_url, json={"sex": sex})
speech_output = f"Yes, you are so {sex}."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def set_bodyweight(intent_request):
session_attributes = {}
card_title = "Bodyweight response"
weight = intent_request["intent"]["slots"]["Number"]["value"]
requests.post(profile_url, json={"bodyweight": weight})
speech_output = f"A bodyweight of {weight} is just perfect!"
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def set_age(intent_request):
session_attributes = {}
card_title = "Age response"
age = intent_request["intent"]["slots"]["Number"]["value"]
requests.post(profile_url, json={"age": age})
speech_output = f"I am less than {age} years old."
reprompt_text = speech_output
should_end_session = False
return build_response(session_attributes,
build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for using the Productivity bot! I hope you were productive."
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': 'SessionSpeechlet - ' + title,
'content': 'SessionSpeechlet - ' + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
``` |
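`build_speechlet_response` and `build_response` above assemble the envelope the Alexa service expects. The sketch below shows the JSON produced for a simple reply, assuming those two helpers are in scope (for example, this snippet lives at the bottom of the same file); the drink text is made up.
```python
# Illustration only: the envelope produced by the helpers above for a simple
# reply (assumes build_response / build_speechlet_response from this file are in scope).
import json

if __name__ == "__main__":
    speechlet = build_speechlet_response(
        title="Drink response",
        output="Enjoy your coffee.",
        reprompt_text="Enjoy your coffee.",
        should_end_session=False)
    envelope = build_response({}, speechlet)
    print(json.dumps(envelope, indent=2))
    # The Alexa service expects exactly this shape: version, sessionAttributes,
    # and a response containing outputSpeech, card, reprompt and shouldEndSession.
```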
{
"source": "jjangg96/coda",
"score": 2
} |
#### File: coda/scripts/compare_pr_diff_items.py
```python
import os
import sys
import shutil
import subprocess
import json
exit_code = 0
# pr_url is of form https://github.com/CodaProtocol/coda/pull/n
def run_comparison (pr_url,compare_script) :
pr_number = os.path.basename (pr_url)
api_url = 'https://api.github.com/repos/CodaProtocol/coda/pulls/' + pr_number
pr_json = 'pr.json'
if os.path.exists(pr_json) :
os.remove(pr_json)
subprocess.run(['curl',api_url,'-H','Accept: application/vnd.github.v3.json','-o',pr_json])
with open (pr_json, 'r') as fp :
json_obj = json.load (fp)
base = json_obj['base']['ref']
cwd = os.getcwd ()
# create a copy of the repo at base branch
if os.path.exists ('base') :
shutil.rmtree ('base')
os.mkdir ('base')
os.chdir ('base')
    # it would be faster to clone the local repo directly, but that runs into a "smudge error" (?)
subprocess.run(['git','clone','[email protected]:CodaProtocol/coda.git'])
os.chdir (cwd)
# changed files in the PR
diffs_raw = subprocess.check_output(['git','diff','--name-only','origin/' + base])
diffs_decoded = diffs_raw.decode ('UTF-8')
diffs = diffs_decoded.split ('\n')
for diff in diffs :
fn = os.path.basename (diff)
if not fn.endswith ('.ml') :
continue
orig = 'base/coda/' + diff
# don't compare if file added or deleted
if not (os.path.exists (orig) and os.path.exists (diff)) :
continue
completed_process = subprocess.run(['./scripts/' + compare_script,orig,diff])
if not completed_process.returncode == 0 :
global exit_code
exit_code = 1
sys.exit (exit_code)
``` |
{
"source": "jjangsangy/AlchemyAPI",
"score": 3
} |
#### File: alchemyapi/tests/test_api.py
```python
from __future__ import print_function
import sys
import os
import nose
try:
import unittest2 as unittest
except ImportError:
import unittest
from ..api import AlchemyAPI, Auth
__all__ = ['TestAPI']
test_text = 'Bob broke my heart, and then made up this silly sentence to test the PHP SDK'
test_html = '<html><head><title>The best SDK Test | AlchemyAPI</title></head><body><h1>Hello World!</h1><p>My favorite language is PHP</p></body></html>'
test_url = 'http://www.nytimes.com/2013/07/13/us/politics/a-day-of-friction-notable-even-for-a-fractious-congress.html?_r=0'
class TestAPI(unittest.TestCase):
@classmethod
def setUpClass(cls):
key = os.environ.get('ALCHEMY_API_KEY', None)
cls._api = AlchemyAPI(Auth(key))
# Entities
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_entities(self):
print('Checking entities . . . ')
response = self._api.interface('entities', 'text', test_text)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('entities', 'html', test_html)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('entities', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('entities', 'random', test_url)
self.assertTrue(response['status'] == 'ERROR') # invalid flavor
print('Entity tests complete!')
print('')
# Keywords
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_keywords(self):
print('Checking keywords . . . ')
response = self._api.interface('keywords', 'text', test_text)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('keywords', 'html', test_html)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('keywords', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('keywords', 'random', test_url)
self.assertTrue(response['status'] == 'ERROR') # invalid flavor
print('Keyword tests complete!')
print('')
# Concepts
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_concepts(self):
print('Checking concepts . . . ')
response = self._api.interface('concepts', 'text', test_text)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('concepts', 'html', test_html)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('concepts', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('concepts', 'random', test_url)
self.assertTrue(response['status'] == 'ERROR') # invalid flavor
print('Concept tests complete!')
print('')
# Sentiment
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_sentiment(self):
print('Checking sentiment . . . ')
response = self._api.interface('sentiment', 'text', test_text)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('sentiment', 'html', test_html)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('sentiment', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('sentiment', 'random', test_url)
self.assertTrue(response['status'] == 'ERROR') # invalid flavor
print('Sentiment tests complete!')
print('')
# Targeted Sentiment
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_target_sentiment(self):
print('Checking targeted sentiment . . . ')
response = self._api.interface('sentiment_targeted', 'text', test_text, target='heart')
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('sentiment_targeted', 'html', test_html, target='language')
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('sentiment_targeted', 'url', test_url, target='Congress')
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('sentiment_targeted', 'random', test_url, target='Congress')
self.assertTrue(response['status'] == 'ERROR') # invalid flavor
response = self._api.interface('sentiment_targeted', 'text', test_text, target=None)
self.assertTrue(response['status'] == 'ERROR') # missing target
print('Targeted sentiment tests complete!')
print('')
# Text
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_text(self):
print('Checking text . . . ')
response = self._api.interface('text', 'text', test_text)
self.assertTrue(response['status'] == 'ERROR') # only works for html and url content
response = self._api.interface('text', 'html', test_html)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('text', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
print('Text tests complete!')
print('')
# Text Raw
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_raw(self):
print('Checking raw text . . . ')
response = self._api.interface('text_raw', 'text', test_text)
self.assertTrue(response['status'] == 'ERROR') # only works for html and url content
response = self._api.interface('text_raw', 'html', test_html)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('text_raw', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
print('Raw text tests complete!')
print('')
# Author
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_author(self):
print('Checking author . . . ')
response = self._api.interface('author', 'text', test_text)
self.assertTrue(response['status'] == 'ERROR') # only works for html and url content
response = self._api.interface('author', 'html', test_html)
self.assertTrue(response['status'] == 'ERROR') # there's no author in the test HTML
response = self._api.interface('author', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
print('Author tests complete!')
print('')
# Language
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_language(self):
print('Checking language . . . ')
response = self._api.interface('language', 'text', test_text)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('language', 'html', test_html)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('language', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('language', 'random', test_url)
self.assertTrue(response['status'] == 'ERROR') # invalid flavor
print('Language tests complete!')
print('')
# Title
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_title(self):
print('Checking title . . . ')
response = self._api.interface('title', 'text', test_text)
self.assertTrue(response['status'] == 'ERROR') # only works for html and url content
response = self._api.interface('title', 'html', test_html)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('title', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
print('Title tests complete!')
print('')
# Relations
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_relation(self):
print('Checking relations . . . ')
response = self._api.interface('relations', 'text', test_text)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('relations', 'html', test_html)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('relations', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('relations', 'random', test_url)
self.assertTrue(response['status'] == 'ERROR') # invalid flavor
print('Relation tests complete!')
print('')
# Category
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_category(self):
print('Checking category . . . ')
response = self._api.interface('category', 'text', test_text)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('category', 'html', test_html, url='test')
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('category', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('category', 'random', test_url)
self.assertTrue(response['status'] == 'ERROR') # invalid flavor
print('Category tests complete!')
print('')
# Feeds
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_feeds(self):
print('Checking feeds . . . ')
response = self._api.interface('feeds', 'text', test_text)
self.assertTrue(response['status'] == 'ERROR') # only works for html and url content
response = self._api.interface('feeds', 'html', test_html, url='test')
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('feeds', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
print('Feed tests complete!')
print('')
# Microformats
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_microformats(self):
print('Checking microformats . . . ')
response = self._api.interface('microformats', 'text', test_text)
self.assertTrue(response['status'] == 'ERROR') # only works for html and url content
response = self._api.interface('microformats', 'html', test_html, url='test')
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('microformats', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
print('Microformat tests complete!')
print('')
print('')
@unittest.skip('Skipping image_tagging')
def test_imagetagging(self):
print('Checking imagetagging . . . ')
response = self._api.interface('image_tagging', 'text', test_text)
self.assertTrue(response['status'] == 'ERROR')
response = self._api.interface('image_tagging', 'html', test_html)
self.assertTrue(response['status'] == 'ERROR')
response = self._api.interface('image_tagging', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('image_tagging', 'image', test_jpg, image=test_jpg, imagePostMode='raw')
self.assertTrue(response['status'] == 'OK')
print('Image tagging tests complete!')
print('')
print('')
# combined
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_combined(self):
print('Checking combined . . . ')
response = self._api.interface('combined', 'text', test_text)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('combined', 'html', test_html)
self.assertTrue(response['status'] == 'ERROR')
response = self._api.interface('combined', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
print('Combined tests complete!')
print('')
print('')
# taxonomy
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_taxonomy(self):
print('Checking taxonomy . . . ')
response = self._api.interface('taxonomy', 'text', test_text)
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('taxonomy', 'html', test_html, url='test')
self.assertTrue(response['status'] == 'OK')
response = self._api.interface('taxonomy', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
print('Taxonomy tests complete!')
print('')
print('')
# image
@unittest.skipIf(not os.environ.get('ALCHEMY_API_KEY', None), 'No API Key')
def test_image_extraction(self):
print('Checking image extraction . . . ')
response = self._api.interface('image', 'text', test_text)
self.assertTrue(response['status'] == 'ERROR')
response = self._api.interface('image', 'html', test_html)
self.assertTrue(response['status'] == 'ERROR')
response = self._api.interface('image', 'url', test_url)
self.assertTrue(response['status'] == 'OK')
print('Image Extraction tests complete!')
print('')
print('')
if __name__ == '__main__':
_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
nose.main()
sys.stdout = _stdout
print('**** All tests complete! ****')
``` |
{
"source": "jjangsangy/ExplainAsync",
"score": 3
} |
#### File: ExplainAsync/ExplainAsync/session.py
```python
import asyncio_redis
from sanic_session import RedisSessionInterface, InMemorySessionInterface
from urllib.parse import urlparse
import os
class Redis:
"""
A simple wrapper class that allows you to share a connection
pool across your application.
"""
_pool = None
def __init__(self, host='127.0.0.1', port=6379, password=<PASSWORD>, size=10):
self.host = host
self.port = port
self.password = password
self.size = size
@classmethod
def from_url(cls, url):
url = urlparse(url)
return cls(host=url.hostname, port=url.port, password=url.password)
async def get_redis_pool(self) -> asyncio_redis.Pool:
if not self._pool:
self._pool = await asyncio_redis.Pool.create(
host=self.host,
port=self.port,
password=<PASSWORD>.password,
poolsize=self.size)
return self._pool
def create_session():
redis_url = os.getenv('REDIS_URL')
if redis_url:
redis = Redis.from_url(redis_url)
return RedisSessionInterface(redis.get_redis_pool)
else:
return InMemorySessionInterface()
```
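`Redis.from_url` above relies on `urlparse` to pull host, port and password out of a `redis://` URL. The standalone check below exercises just that parsing step, with a fabricated URL and credentials, so it runs without a Redis server.
```python
# Parsing check only (no Redis server needed); the URL below is fabricated.
from urllib.parse import urlparse

if __name__ == "__main__":
    url = urlparse("redis://:s3cret@redis.example.com:6380")
    print(url.hostname, url.port, url.password)
    # -> redis.example.com 6380 s3cret
    # Redis.from_url above feeds exactly these three values into the pool wrapper.
```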
#### File: ExplainAsync/ExplainAsync/templates.py
```python
from sanic_jinja2 import SanicJinja2
from dateutil.parser import parser as dateparser
__all__ = 'create_template_env', 'datetimefilter'
def datetimefilter(value, format='%Y/%m/%d %H:%M'):
p = dateparser()
dt = p.parse(value)
return dt.strftime(format)
def create_template_env():
jinja = SanicJinja2()
jinja.env.filters['datetimefilter'] = datetimefilter
return jinja
``` |
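A usage sketch of the `datetimefilter` logic above; the timestamp and the template field in the comment are made up for illustration.
```python
# Usage sketch for the Jinja filter above (the timestamp is made up).
from dateutil.parser import parser as dateparser

def datetimefilter_demo(value, format='%Y/%m/%d %H:%M'):
    return dateparser().parse(value).strftime(format)

if __name__ == "__main__":
    print(datetimefilter_demo("2017-09-30T18:45:00Z"))   # 2017/09/30 18:45
    # Inside a template the registered filter would be used as, e.g.:
    #   {{ some_timestamp | datetimefilter }}
```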
{
"source": "jjangsangy/GraphUCSD",
"score": 3
} |
#### File: GraphUCSD/cape/base.py
```python
from abc import ABCMeta, abstractmethod, abstractproperty
class AbstractBaseCrawler(metaclass=ABCMeta):
"""
Abstract Base Class defining a site crawler
"""
@abstractproperty
def crawl_strategy(self):
return NotImplementedError
    @crawl_strategy.setter
    def set_crawl_strategy(self, value):
        return NotImplementedError
@abstractmethod
    def __next__(self):
"""Moves forward
"""
yield NotImplementedError
@abstractmethod
def run(self, *args, **kwargs):
"""Probably something like this
"""
return self.command(*args, **kwargs)
``` |
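`AbstractBaseCrawler` cannot be instantiated until every abstract member is overridden. Below is a minimal concrete subclass sketch with placeholder logic (the class name, URL and strategy label are invented, and it assumes `AbstractBaseCrawler` from the module above is in scope); it is only meant to show which members a real crawler has to provide.
```python
# Minimal concrete subclass sketch (placeholder logic, invented names; not a real crawler).
class DemoCrawler(AbstractBaseCrawler):
    @property
    def crawl_strategy(self):
        return "breadth-first"            # placeholder strategy label

    def set_crawl_strategy(self, value):
        self._strategy = value

    def __next__(self):
        return "http://example.com"       # placeholder page

    def run(self, *args, **kwargs):
        return next(self)

if __name__ == "__main__":
    print(DemoCrawler().run())            # http://example.com
```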
{
"source": "jjangsangy/nordvpn",
"score": 3
} |
#### File: nordvpn/nordvpn/api.py
```python
import requests
from urllib.parse import urlparse
from urllib.request import urljoin
__all__ = 'Config', 'Nord'
class Config(object):
"""
Nord Configuration Client
"""
base = 'https://api.nordvpn.com'
endpoints = {
'address': '/user/address',
'config': '/files/zipv2',
'nameserver': '/dns/smart',
'server': '/server',
'stats': '/server/stats',
'user': '/user/databytoken'
}
def __init__(self, username=None, password=<PASSWORD>):
self.username = username
self.password = password
if username and password:
self.endpoints['oath'] = '/token/token/{username}'.format(
username=username
)
def __repr__(self):
username = self.username if self.username else 'anonymous'
name = self.__class__.__name__
return '<{name} [{username}]>'.format(
name=name,
username=username
)
@property
def headers(self):
base = urlparse(self.base)
return {
'User-Agent': '{app}/{version}'.format(
app='NordVPN Client',
version='0.0.1',
),
'Host': base.netloc,
'Connection': 'Close'
}
class Nord(Config):
"""
    A Nord client that interacts with the API.
"""
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
def __getattr__(self, name):
if name in self.api:
return self.request(name)
else:
return super(self.__class__, self).__getattribute__(name)
@property
def api(self):
return {
k: urljoin(self.base, v) for k,v in self.endpoints.items()
}
def request(self, endpoint):
return requests.get(self.api[endpoint], headers=self.headers)
``` |
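`Nord.__getattr__` above turns attribute access like `nord.stats` into a GET against the matching endpoint URL. The offline sketch below only shows the URL construction and sends no requests; it assumes the redacted `password` default is restored to something valid (e.g. `None`) so the constructor runs.
```python
# Offline illustration: how attribute access maps to endpoint URLs.
# No request is sent here; accessing nord.stats would issue a real GET.
if __name__ == "__main__":
    nord = Nord()
    print(nord)                    # <Nord [anonymous]>
    print(nord.api["stats"])       # https://api.nordvpn.com/server/stats
    print(nord.headers["Host"])    # api.nordvpn.com
```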
{
"source": "jjangsangy/Project-Euler",
"score": 4
} |
#### File: Project-Euler/Python/004-largest_palindrome_product.py
```python
def is_palindrome(string):
if string[::-1] == string:
return True
else:
return False
def main(digits):
""" Brute force algorithm"""
solution = 0
    for i in range(10**(digits-1), 10**digits):
        for j in range(10**(digits-1), 10**digits):
if (is_palindrome(str(j*i)) and i*j>solution):
solution=i*j
return solution
if __name__ == '__main__':
print(main(3))
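    # Expected results (added note, per the Project Euler problem statement):
    # main(2) -> 9009 (= 91 * 99) and main(3) -> 906609 (= 913 * 993).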
```
#### File: Project-Euler/Python/factorial.py
```python
from math import exp, lgamma
def fact(n):
return int(round(
exp(lgamma(n+1))
))
if __name__ == '__main__':
print(fact(10))
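    # Added note: fact(10) == 3628800, but the exp/lgamma round trip is only
    # reliable while n! fits in double precision; math.factorial stays exact
    # for larger n.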
``` |
{
"source": "jjanice/lcls-tools",
"score": 3
} |
#### File: lcls_tools/devices/scLinac.py
```python
from typing import Dict, List, Type
class Cavity:
def __init__(self, cavityNum, rackObject):
# type: (int, Rack) -> None
"""
Parameters
----------
cavityNum: int cavity number i.e. 1 - 8
rackObject: the rack object the cavities belong to
"""
self.number = cavityNum
self.rack = rackObject
self.cryomodule = self.rack.cryomodule
self.linac = self.cryomodule.linac
self.pvPrefix = "ACCL:{LINAC}:{CRYOMODULE}{CAVITY}0:".format(LINAC=self.linac.name,
CRYOMODULE=self.cryomodule.name,
CAVITY=self.number)
class Cryomodule:
def __init__(self, cryoName, linacObject, cavityClass=Cavity):
# type: (str, Linac, Type[Cavity]) -> None
"""
Parameters
----------
cryoName: str name of Cryomodule i.e. "02", "03", "H1", "H2"
linacObject: the linac object this cryomodule belongs to i.e. CM02 is in linac L1B
cavityClass: cavity object
"""
self.name = cryoName
self.linac = linacObject
self.pvPrefix = "ACCL:{LINAC}:{CRYOMODULE}00:".format(LINAC=self.linac.name,
CRYOMODULE=self.name)
self.racks = {"A": Rack("A", self, cavityClass),
"B": Rack("B", self, cavityClass)}
self.cavities: Dict[int, cavityClass] = {}
self.cavities.update(self.racks["A"].cavities)
self.cavities.update(self.racks["B"].cavities)
class Linac:
def __init__(self, linacName, cryomoduleStringList, cavityClass=Cavity, cryomoduleClass=Cryomodule):
# type: (str, List[str], Type[Cavity], Type[Cryomodule]) -> None
"""
Parameters
----------
linacName: str name of Linac i.e. "L0B", "L1B", "L2B", "L3B"
cryomoduleStringList: list of string names of cryomodules in the linac
cavityClass: cavity object
"""
self.name = linacName
self.cryomodules: Dict[str, cryomoduleClass] = {}
for cryomoduleString in cryomoduleStringList:
self.cryomodules[cryomoduleString] = cryomoduleClass(cryomoduleString, self, cavityClass)
class Rack:
def __init__(self, rackName, cryoObject, cavityClass=Cavity):
# type: (str, Cryomodule, Type[Cavity]) -> None
"""
Parameters
----------
rackName: str name of rack (always either "A" or "B")
cryoObject: the cryomodule object this rack belongs to
cavityClass: cavity object
"""
self.cryomodule = cryoObject
self.rackName = rackName
self.cavities = {}
self.pvPrefix = self.cryomodule.pvPrefix + "RACK{RACK}:".format(RACK=self.rackName)
if rackName == "A":
# rack A always has cavities 1 - 4
for cavityNum in range(1, 5):
self.cavities[cavityNum] = cavityClass(cavityNum, self)
elif rackName == "B":
# rack B always has cavities 5 - 8
for cavityNum in range(5, 9):
self.cavities[cavityNum] = cavityClass(cavityNum, self)
else:
raise Exception("Bad rack name")
# Global list of superconducting linac objects
L0B = ["01"]
L1B = ["02", "03", "H1", "H2"]
L2B = ["04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15"]
L3B = ["16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27",
"28", "29", "30", "31", "32", "33", "34", "35"]
LINAC_TUPLES = [("L0B", L0B), ("L1B", L1B), ("L2B", L2B), ("L3B", L3B)]
# Utility list of linacs
LINAC_OBJECTS: List[Linac] = []
# Utility dictionary to map cryomodule name strings to cryomodule objects
CRYOMODULE_OBJECTS: Dict[str, Cryomodule] = {}
for idx, (name, cryomoduleList) in enumerate(LINAC_TUPLES):
linac = Linac(name, cryomoduleList)
LINAC_OBJECTS.append(linac)
CRYOMODULE_OBJECTS.update(linac.cryomodules)
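# Illustration (added): with the definitions above, cryomodule "02" sits in
# linac L1B, so CRYOMODULE_OBJECTS["02"].cavities[1].pvPrefix evaluates to
# "ACCL:L1B:0210:".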
``` |
{
"source": "jjanizek/sage",
"score": 2
} |
#### File: sage/sage/samplers.py
```python
import numpy as np
from sage import utils
from tqdm.auto import tqdm
def estimate_total(model, xy, batch_size, loss_fn):
X, Y = xy
N = 0
mean_loss = 0
marginal_pred = 0
for i in range(np.ceil(len(X) / batch_size).astype(int)):
x = X[i * batch_size:(i + 1) * batch_size]
y = Y[i * batch_size:(i + 1) * batch_size]
n = len(x)
pred = model(x)
loss = loss_fn(pred, y)
marginal_pred = (
(N * marginal_pred + n * np.mean(pred, axis=0, keepdims=True))
/ (N + n))
mean_loss = (N * mean_loss + n * np.mean(loss)) / (N + n)
N += n
# Mean loss of mean prediction.
N = 0
marginal_loss = 0
for i in range(np.ceil(len(X) / batch_size).astype(int)):
x = X[i * batch_size:(i + 1) * batch_size]
y = Y[i * batch_size:(i + 1) * batch_size]
n = len(x)
marginal_pred_repeat = marginal_pred.repeat(len(y), 0)
loss = loss_fn(marginal_pred_repeat, y)
marginal_loss = (N * marginal_loss + n * np.mean(loss)) / (N + n)
N += n
return marginal_loss - mean_loss
class PermutationSampler:
'''
Estimate SAGE values by unrolling permutations of feature indices.
Args:
model: callable prediction model.
imputer: for imputing held out values.
loss: loss function ('mse', 'cross entropy').
'''
def __init__(self,
model,
imputer,
loss):
self.model = model
self.imputer = imputer
self.loss_fn = utils.get_loss(loss, reduction='none')
def __call__(self,
xy,
batch_size,
n_permutations=None,
detect_convergence=None,
convergence_threshold=0.01,
verbose=False,
bar=False):
'''
Estimate SAGE values.
Args:
xy: tuple of np.ndarrays for input and output.
batch_size: number of examples to be processed at once. You should use
as large of a batch size as possible without exceeding available
memory.
n_permutations: number of permutations. If not specified, permutations
are sampled until the estimates converge.
detect_convergence: whether to detect convergence of SAGE estimates.
convergence_threshold: confidence interval threshold for determining
convergence. Represents portion of estimated sum of SAGE values.
verbose: whether to print progress messages.
bar: whether to display progress bar.
Returns: SAGEValues object.
'''
X, Y = xy
N, input_size = X.shape
# Verify model.
X, Y = utils.verify_model_data(self.model, X, Y, self.loss_fn,
batch_size * self.imputer.samples)
# For detecting convergence.
total = estimate_total(
self.model, xy, batch_size * self.imputer.samples, self.loss_fn)
if n_permutations is None:
# Turn convergence detection on.
if detect_convergence is None:
detect_convergence = True
elif not detect_convergence:
detect_convergence = True
print('Turning convergence detection on')
# Turn bar off.
if bar:
bar = False
print('Turning bar off')
# Set n_permutations to an extremely large number.
n_permutations = 1e20
if detect_convergence:
assert 0 < convergence_threshold < 1
# Print message explaining parameter choices.
if verbose:
print('{} permutations, batch size (batch x samples) = {}'.format(
n_permutations, batch_size * self.imputer.samples))
# For updating scores.
tracker = utils.ImportanceTracker()
# Permutation sampling.
n_loops = int(n_permutations / batch_size)
if bar:
bar = tqdm(total=n_loops * batch_size * input_size)
for _ in range(n_loops):
# Sample data.
mb = np.random.choice(N, batch_size)
x = X[mb]
y = Y[mb]
# Sample permutations.
S = np.zeros((batch_size, input_size))
permutations = np.tile(np.arange(input_size), (batch_size, 1))
for i in range(batch_size):
np.random.shuffle(permutations[i])
# Make prediction with missing features.
y_hat = self.model(self.imputer(x, S))
y_hat = np.mean(y_hat.reshape(
-1, self.imputer.samples, *y_hat.shape[1:]), axis=1)
prev_loss = self.loss_fn(y_hat, y)
# Setup.
arange = np.arange(batch_size)
scores = np.zeros((batch_size, input_size))
for i in range(input_size):
# Add next feature.
inds = permutations[:, i]
S[arange, inds] = 1.0
# Make prediction with missing features.
y_hat = self.model(self.imputer(x, S))
y_hat = np.mean(y_hat.reshape(
-1, self.imputer.samples, *y_hat.shape[1:]), axis=1)
loss = self.loss_fn(y_hat, y)
# Calculate delta sample.
scores[arange, inds] = prev_loss - loss
prev_loss = loss
if bar:
bar.update(batch_size)
# Update tracker.
tracker.update(scores)
# Check for convergence.
conf = np.max(tracker.std)
if verbose:
print('Conf = {:.4f}, Total = {:.4f}'.format(conf, total))
if detect_convergence:
if (conf / total) < convergence_threshold:
if verbose:
print('Stopping early')
break
return utils.SAGEValues(tracker.values, tracker.std)
class IteratedSampler:
'''
Estimate SAGE values one at a time, by sampling subsets of features.
Args:
model: callable prediction model.
imputer: for imputing held out values.
loss: loss function ('mse', 'cross entropy').
'''
def __init__(self,
model,
imputer,
loss):
self.model = model
self.imputer = imputer
self.loss_fn = utils.get_loss(loss, reduction='none')
def __call__(self,
xy,
batch_size,
n_samples=None,
detect_convergence=False,
convergence_threshold=0.01,
verbose=False,
bar=False):
'''
Estimate SAGE values.
Args:
xy: tuple of np.ndarrays for input and output.
batch_size: number of examples to be processed at once. You should use
as large of a batch size as possible without exceeding available
memory.
n_samples: number of samples for each feature. If not specified,
samples are taken until the estimates converge.
detect_convergence: whether to detect convergence of SAGE estimates.
convergence_threshold: confidence interval threshold for determining
convergence. Represents portion of estimated sum of SAGE values.
verbose: whether to print progress messages.
bar: whether to display progress bar.
Returns: SAGEValues object.
'''
X, Y = xy
N, input_size = X.shape
# Verify model.
X, Y = utils.verify_model_data(self.model, X, Y, self.loss_fn,
batch_size * self.imputer.samples)
# For detecting convergence.
total = estimate_total(
self.model, xy, batch_size * self.imputer.samples, self.loss_fn)
if n_samples is None:
# Turn convergence detection on.
if detect_convergence is None:
detect_convergence = True
elif not detect_convergence:
detect_convergence = True
print('Turning convergence detection on')
# Turn bar off.
if bar:
bar = False
print('Turning bar off')
# Set n_samples to an extremely large number.
n_samples = 1e20
if detect_convergence:
assert 0 < convergence_threshold < 1
if verbose:
print('{} samples/feat, batch size (batch x samples) = {}'.format(
n_samples, batch_size * self.imputer.samples))
# For updating scores.
tracker_list = []
# Iterated sampling.
n_loops = int(n_samples / batch_size)
if bar:
bar = tqdm(total=n_loops * batch_size * input_size)
for ind in range(input_size):
tracker = utils.ImportanceTracker()
for _ in range(n_loops):
# Sample data.
mb = np.random.choice(N, batch_size)
x = X[mb]
y = Y[mb]
# Sample subset of features.
S = utils.sample_subset_feature(input_size, batch_size, ind)
# Loss with feature excluded.
y_hat = self.model(self.imputer(x, S))
y_hat = np.mean(y_hat.reshape(
-1, self.imputer.samples, *y_hat.shape[1:]), axis=1)
loss_discluded = self.loss_fn(y_hat, y)
# Loss with feature included.
S[:, ind] = 1.0
y_hat = self.model(self.imputer(x, S))
y_hat = np.mean(y_hat.reshape(
-1, self.imputer.samples, *y_hat.shape[1:]), axis=1)
loss_included = self.loss_fn(y_hat, y)
# Calculate delta sample.
tracker.update(loss_discluded - loss_included)
if bar:
bar.update(batch_size)
# Check for convergence.
conf = tracker.std
if verbose:
print('Imp = {:.4f}, Conf = {:.4f}, Total = {:.4f}'.format(
tracker.values, conf, total))
if detect_convergence:
if (conf / total) < convergence_threshold:
if verbose:
print('Stopping feature early')
break
if verbose:
print('Done with feature {}'.format(ind))
tracker_list.append(tracker)
return utils.SAGEValues(
np.array([tracker.values.item() for tracker in tracker_list]),
np.array([tracker.std.item() for tracker in tracker_list]))
```
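A minimal end-to-end sketch of driving the samplers above (assumptions: the modules are importable per the paths shown, `model.predict_proba` is an sklearn-style probability predictor, and `MarginalImputer` is the class defined in utils.py below; names are illustrative):
```python
from sage.samplers import PermutationSampler
from sage.utils import MarginalImputer

def explain(model, X_train, X_test, Y_test):
    # Impute held-out features with draws from the joint marginal.
    imputer = MarginalImputer(X_train, samples=16)
    sampler = PermutationSampler(model.predict_proba, imputer, 'cross entropy')
    # Returns a SAGEValues object with per-feature .values and .std.
    return sampler((X_test, Y_test), batch_size=64, verbose=True)
```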
#### File: sage/sage/utils.py
```python
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import multivariate_normal
class SAGEValues:
'''For storing and plotting SAGE values.'''
def __init__(self, values, std):
self.values = values
self.std = std
def plot(self,
feature_names,
sort_features=True,
max_features=np.inf,
fig=None,
figsize=(12, 7),
orientation='horizontal',
error_bars=True,
color='forestgreen',
title='Feature Importance',
title_size=20,
y_axis_label_size=14,
x_axis_label_size=14,
label_rotation=None,
tick_size=14):
'''
Plot SAGE values.
Args:
feature_names: list of feature names.
sort_features: whether to sort features by their SAGE values.
max_features: number of features to display.
fig: matplotlib figure (optional).
figsize: figure size (if fig is None).
orientation: horizontal (default) or vertical.
error_bars: whether to include standard deviation error bars.
color: bar chart color.
title: plot title.
title_size: font size for title.
y_axis_label_size: font size for y axis label.
x_axis_label_size: font size for x axis label.
label_rotation: label rotation (for vertical plots only).
tick_size: tick sizes (for SAGE value axis only).
'''
if fig is None:
fig = plt.figure(figsize=figsize)
ax = fig.gca()
# Sort features if necessary.
if max_features < len(feature_names):
sort_features = True
values = self.values
std = self.std
if sort_features:
argsort = np.argsort(values)[::-1]
values = values[argsort]
std = std[argsort]
feature_names = np.array(feature_names)[argsort]
# Remove extra features if necessary.
if max_features < len(feature_names):
feature_names = list(feature_names[:max_features]) + ['Remaining']
values = (list(values[:max_features])
+ [np.sum(values[max_features:])])
std = (list(std[:max_features])
+ [np.sum(std[max_features:] ** 2) ** 0.5])
if orientation == 'horizontal':
# Bar chart.
if error_bars:
ax.barh(np.arange(len(feature_names))[::-1], values,
color=color, xerr=std)
else:
ax.barh(np.arange(len(feature_names))[::-1], values,
color=color)
# Feature labels.
if label_rotation is not None:
raise ValueError('rotation not supported for horizontal charts')
ax.set_yticks(np.arange(len(feature_names))[::-1])
ax.set_yticklabels(feature_names, fontsize=y_axis_label_size)
# Axis labels and ticks.
ax.set_ylabel('')
ax.set_xlabel('SAGE values', fontsize=x_axis_label_size)
ax.tick_params(axis='x', labelsize=tick_size)
elif orientation == 'vertical':
# Bar chart.
if error_bars:
ax.bar(np.arange(len(feature_names)), values, color=color,
yerr=std)
else:
ax.bar(np.arange(len(feature_names)), values, color=color)
# Feature labels.
if label_rotation is None:
label_rotation = 90
if label_rotation < 90:
ha = 'right'
rotation_mode = 'anchor'
else:
ha = 'center'
rotation_mode = 'default'
ax.set_xticks(np.arange(len(feature_names)))
ax.set_xticklabels(feature_names, rotation=label_rotation, ha=ha,
rotation_mode=rotation_mode,
fontsize=x_axis_label_size)
# Axis labels and ticks.
ax.set_ylabel('SAGE values', fontsize=y_axis_label_size)
ax.set_xlabel('')
ax.tick_params(axis='y', labelsize=tick_size)
else:
raise ValueError('orientation must be horizontal or vertical')
ax.set_title(title, fontsize=title_size)
plt.tight_layout()
return
class ImportanceTracker:
'''For tracking feature importance using a dynamic average.'''
def __init__(self):
self.first_moment = 0
self.second_moment = 0
self.N = 0
def update(self, scores):
n = len(scores)
first_moment = np.mean(scores, axis=0)
second_moment = np.mean(scores ** 2, axis=0)
self.first_moment = (
(self.N * self.first_moment + n * first_moment) / (n + self.N))
self.second_moment = (
(self.N * self.second_moment + n * second_moment) / (n + self.N))
self.N += n
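    # The properties below expose the running mean and its uncertainty; var is
    # the variance of the mean estimate, (E[s^2] - E[s]^2) / N.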
@property
def values(self):
return self.first_moment
@property
def var(self):
return (self.second_moment - self.first_moment ** 2) / self.N
@property
def std(self):
return self.var ** 0.5
class MSELoss:
'''MSE loss that always sums over non-batch dimensions.'''
def __init__(self, reduction='mean'):
assert reduction in ('none', 'mean')
self.reduction = reduction
def __call__(self, pred, target):
# Add dimension to tail of pred, if necessary.
if target.shape[-1] == 1 and len(target.shape) - len(pred.shape) == 1:
pred = np.expand_dims(pred, -1)
loss = np.sum(
np.reshape((pred - target) ** 2, (len(pred), -1)), axis=1)
if self.reduction == 'mean':
return np.mean(loss)
else:
return loss
class CrossEntropyLoss:
# TODO infer whether binary classification based on output size.
# If (n_samples,) then it's binary. Labels must be (0, 1) or (-1, 1).
# TODO if (n_samples, k) then it may still be binary, but we don't care.
# Verify that classes are 0, 1, 2, ..., k.
# TODO then do this again for accuracy.
'''Cross entropy loss that expects probabilities.'''
def __init__(self, reduction='mean'):
assert reduction in ('none', 'mean')
self.reduction = reduction
def __call__(self, pred, target):
loss = - np.log(pred[np.arange(len(pred)), target])
if self.reduction == 'mean':
return np.mean(loss)
else:
return loss
class BCELoss:
'''Binary cross entropy loss that expects probabilities.'''
def __init__(self, reduction='mean'):
assert reduction in ('none', 'mean')
self.reduction = reduction
def __call__(self, pred, target):
loss = - target * np.log(pred) - (1 - target) * np.log(1 - pred)
if self.reduction == 'mean':
return np.mean(loss)
else:
return loss
class Accuracy:
'''0-1 loss.'''
def __init__(self, reduction='mean'):
assert reduction in ('none', 'mean')
self.reduction = reduction
def __call__(self, pred, target):
acc = (np.argmax(pred, axis=1) == target).astype(float)
if self.reduction == 'mean':
return np.mean(acc)
else:
return acc
class NegAccuracy:
'''Negative 0-1 loss.'''
def __init__(self, reduction='mean'):
assert reduction in ('none', 'mean')
self.reduction = reduction
def __call__(self, pred, target):
neg_acc = - (np.argmax(pred, axis=1) == target).astype(float)
if self.reduction == 'mean':
return np.mean(neg_acc)
else:
return neg_acc
class BinaryAccuracy:
'''0-1 loss for binary classifier.'''
def __init__(self, reduction='mean'):
assert reduction in ('none', 'mean')
self.reduction = reduction
def __call__(self, pred, target):
acc = ((pred > 0.5) == target).astype(float)
if self.reduction == 'mean':
return np.mean(acc)
else:
return acc
class NegBinaryAccuracy:
'''Negative 0-1 loss for binary classifier.'''
def __init__(self, reduction='mean'):
assert reduction in ('none', 'mean')
self.reduction = reduction
def __call__(self, pred, target):
neg_acc = - ((pred > 0.5) == target).astype(float)
if self.reduction == 'mean':
return np.mean(neg_acc)
else:
return neg_acc
class ReferenceImputer:
'''
Impute features using reference values.
Args:
reference: the reference value for replacing missing features.
'''
def __init__(self, reference):
self.reference = reference
self.samples = 1
def __call__(self, x, S):
return S * x + (1 - S) * self.reference
class MarginalImputer:
'''
Impute features using a draw from the joint marginal.
Args:
data: np.ndarray of size (samples, dimensions) representing the data
distribution.
samples: number of samples to draw from marginal distribution.
'''
def __init__(self, data, samples):
self.data = data
self.samples = samples
self.N = len(data)
self.x_addr = None
self.x_repeat = None
def __call__(self, x, S):
if self.x_addr == id(x):
x = self.x_repeat
else:
self.x_addr = id(x)
x = np.repeat(x, self.samples, 0)
self.x_repeat = x
S = np.repeat(S, self.samples, 0)
samples = self.data[np.random.choice(self.N, len(x), replace=True)]
return S * x + (1 - S) * samples
class ConditionalImputer:
'''
Impute features using a draw from the conditional.
Args:
data: np.ndarray of size (samples, dimensions) representing the data
distribution.
samples: number of samples to draw from marginal distribution.
'''
def __init__(self, data, samples):
self.data = data
self.samples = samples
self.N = len(data)
self.sample_covariance = np.cov(data,rowvar=False)
self.sample_mean = np.mean(data,axis=0)
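    # Note (added): __call__ below draws held-out features from the Gaussian
    # conditional x_miss | x_obs ~ N(mu_bar, Sigma_bar), with
    # mu_bar = mu_1 + Sigma_12 Sigma_22^{-1} (x_obs - mu_2) and
    # Sigma_bar = Sigma_11 - Sigma_12 Sigma_22^{-1} Sigma_21,
    # using the sample mean and covariance estimated above.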
def __call__(self, x, S):
arrays = [S for _ in range(self.samples)]
shape = (len(arrays)*arrays[0].shape[0], arrays[0].shape[1])
interleaved_S = np.hstack(arrays).reshape(shape)
x_arrays = [x for _ in range(self.samples)]
shape = (len(x_arrays)*x_arrays[0].shape[0], x_arrays[0].shape[1])
interleaved_X = np.hstack(x_arrays).reshape(shape)
for row_ind in range(x.shape[0]):
x_batch = interleaved_X[row_ind*self.samples:((row_ind+1)*self.samples),:]
S_batch = interleaved_S[row_ind*self.samples:((row_ind+1)*self.samples),:]
S_row = S_batch[0,:].reshape(-1)
if S_row.sum() == len(S_row):
pass
else:
permutation = np.concatenate([np.where(S_row == 0)[0],np.where(S_row ==1)[0]])
permuted_cov = self.sample_covariance[:,permutation][permutation,:]
permuted_mean = self.sample_mean[permutation]
permuted_x = x_batch[:,permutation]
Sigma11 = permuted_cov[:len(np.where(S_row == 0)[0]),:len(np.where(S_row == 0)[0])]
Sigma12 = permuted_cov[:len(np.where(S_row == 0)[0]),len(np.where(S_row == 0)[0]):]
Sigma21 = permuted_cov[len(np.where(S_row == 0)[0]):,:len(np.where(S_row == 0)[0])]
Sigma22 = permuted_cov[len(np.where(S_row == 0)[0]):,len(np.where(S_row == 0)[0]):]
SigmaBar = Sigma11 - Sigma12.dot(np.linalg.inv(Sigma22).dot(Sigma21))
MuBar = permuted_mean[:len(np.where(S_row == 0)[0])] + Sigma12.dot(np.linalg.inv(Sigma22)).dot((permuted_x[0,len(np.where(S_row == 0)[0]):] - permuted_mean[len(np.where(S_row == 0)[0]):]))
# print(MuBar.shape,SigmaBar.shape)
mvn = multivariate_normal(MuBar,SigmaBar,self.samples)
permuted_x[:,:len(np.where(S_row == 0)[0])] = mvn
interleaved_X[row_ind*self.samples:((row_ind+1)*self.samples),:] = permuted_x[:,np.argsort(permutation)]
return interleaved_X
def get_loss(loss, reduction='mean'):
'''Get loss function by name.'''
if loss == 'cross entropy':
loss_fn = CrossEntropyLoss(reduction=reduction)
elif loss == 'binary cross entropy':
loss_fn = BCELoss(reduction=reduction)
elif loss == 'mse':
loss_fn = MSELoss(reduction=reduction)
elif loss == 'accuracy':
loss_fn = NegAccuracy(reduction=reduction)
elif loss == 'binary accuracy':
loss_fn = NegBinaryAccuracy(reduction=reduction)
else:
raise ValueError('unsupported loss: {}'.format(loss))
return loss_fn
def sample_subset_feature(input_size, n, ind):
'''
Sample a subset of features where a given feature index must not be
included. This helper function is used for estimating Shapley values, so
the subset is sampled by 1) sampling the number of features to be included
from a uniform distribution, and 2) sampling the features to be included.
'''
S = np.zeros((n, input_size), dtype=np.float32)
choices = list(range(input_size))
del choices[ind]
for row in S:
inds = np.random.choice(
choices, size=np.random.choice(input_size), replace=False)
row[inds] = 1
return S
def verify_model_data(model, x, y, loss, mbsize):
'''Ensure that model and data are set up properly.'''
# Verify that model output is compatible with labels.
if isinstance(loss, CrossEntropyLoss) or isinstance(loss, NegAccuracy):
assert y.shape == (len(x),)
probs = model(x[:mbsize])
classes = probs.shape[1]
assert classes > 1, 'require multiple outputs for multiclass models'
if len(np.setdiff1d(np.unique(y), np.arange(classes))) == 0:
# This is the preferred label encoding.
pass
elif len(np.setdiff1d(np.unique(y), [-1, 1])) == 0:
# Set -1s to 0s.
y = np.copy(y)
y[y == -1] = 0
else:
raise ValueError('labels for multiclass classification must be '
'(0, 1, ..., c)')
elif isinstance(loss, BCELoss) or isinstance(loss, NegBinaryAccuracy):
assert y.shape == (len(x),)
if len(np.setdiff1d(np.unique(y), [0, 1])) == 0:
# This is the preferred label encoding.
pass
elif len(np.setdiff1d(np.unique(y), [-1, 1])) == 0:
# Set -1s to 0s.
y = np.copy(y)
y[y == -1] = 0
else:
raise ValueError('labels for binary classification must be (0, 1) '
'or (-1, 1)')
# Verify that outputs are probabilities.
if isinstance(loss, CrossEntropyLoss):
probs = model(x[:mbsize])
ones = np.sum(probs, axis=-1)
if not np.allclose(ones, np.ones(ones.shape)):
raise ValueError(
'outputs must be valid probabilities for cross entropy loss')
elif isinstance(loss, BCELoss):
probs = model(x[:mbsize])
if not np.all(np.logical_and(0 <= probs, probs <= 1)):
raise ValueError(
'outputs must be valid probabilities for binary cross entropy loss')
return x, y
``` |