Dataset schema (one row per source file; string columns report length ranges, class columns report the number of distinct values):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 5 | 2.06M |
| ext | stringclasses (10 values) | | |
| lang | stringclasses (1 value) | | |
| max_stars_repo_path | stringlengths | 3 | 248 |
| max_stars_repo_name | stringlengths | 5 | 125 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 78 |
| max_stars_repo_licenses | sequencelengths | 1 | 10 |
| max_stars_count | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 | 24 |
| max_issues_repo_path | stringlengths | 3 | 248 |
| max_issues_repo_name | stringlengths | 5 | 125 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 78 |
| max_issues_repo_licenses | sequencelengths | 1 | 10 |
| max_issues_count | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 | 24 |
| max_forks_repo_path | stringlengths | 3 | 248 |
| max_forks_repo_name | stringlengths | 5 | 125 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 78 |
| max_forks_repo_licenses | sequencelengths | 1 | 10 |
| max_forks_count | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 | 24 |
| content | stringlengths | 5 | 2.06M |
| avg_line_length | float64 | 1 | 1.02M |
| max_line_length | int64 | 3 | 1.03M |
| alphanum_fraction | float64 | 0 | 1 |
| count_classes | int64 | 0 | 1.6M |
| score_classes | float64 | 0 | 1 |
| count_generators | int64 | 0 | 651k |
| score_generators | float64 | 0 | 1 |
| count_decorators | int64 | 0 | 990k |
| score_decorators | float64 | 0 | 1 |
| count_async_functions | int64 | 0 | 235k |
| score_async_functions | float64 | 0 | 1 |
| count_documentation | int64 | 0 | 1.04M |
| score_documentation | float64 | 0 | 1 |
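Rows with this schema can be consumed directly with the `datasets` library. The sketch below is only illustrative: the dataset path "org/python-code-dataset" is a placeholder (no dataset name appears in this dump), and the filter simply mirrors the `lang` and `score_documentation` columns listed above.

```python
# Sketch only: "org/python-code-dataset" is a placeholder name, not taken from this dump.
from datasets import load_dataset

# Stream rows so the multi-megabyte `content` strings are not all loaded at once.
rows = load_dataset("org/python-code-dataset", split="train", streaming=True)

# Keep well-documented Python files, mirroring the score_documentation column.
documented = (
    r for r in rows
    if r["lang"] == "Python" and r["score_documentation"] > 0.25
)

for row in documented:
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    break  # just show the first match
```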
hexsha: d4fae683109b51c37a205d6ed228be7bbb86f029
size: 7,868 | ext: py | lang: Python

| group | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | vnTrader/uiMainWindow.py | bttt123/TradeSim | 2374b0925d34d8fb299095250c5c8834192848ce | ["Apache-2.0"] | null | null | null |
| max_issues | vnTrader/uiMainWindow.py | bttt123/TradeSim | 2374b0925d34d8fb299095250c5c8834192848ce | ["Apache-2.0"] | null | null | null |
| max_forks | vnTrader/uiMainWindow.py | bttt123/TradeSim | 2374b0925d34d8fb299095250c5c8834192848ce | ["Apache-2.0"] | 1 | 2022-03-29T21:57:31.000Z | 2022-03-29T21:57:31.000Z |

content:
# encoding: UTF-8

from builtins import str

import psutil
# import sys

# PyQt 4/5 compatibility
try:
    from PyQt4.QtGui import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
    from PyQt4 import QtCore
except ImportError:
    from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
    from PyQt5 import QtCore

from uiBasicWidget import *
import uiBasicWidget as wgs
#from . import uiBasicWidget as wgs


########################################################################
class MainWindow(QMainWindow):
    """主窗口"""

    signalStatusBar = QtCore.pyqtSignal(type(Event()))

    # ----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine, app, sheets):
        """Constructor"""
        super(MainWindow, self).__init__()

        self.mainEngine = mainEngine
        self.eventEngine = eventEngine
        self.app = app
        self.sheets = sheets

        self.widgetDict = {}    # 用来保存子窗口的字典

        self.initUi()
        self.eventEngine.register(EVENT_TITLE, self.updateTitle)
        self.sid = None

    def updateTitle(self, event):
        (user, stratid) = event.dict_['data']
        #self.setWindowTitle('VnTrader: ' + str(user) + "/" + str(stratid))
        self.sid = stratid

    # ----------------------------------------------------------------------
    def initUi(self):
        """初始化界面"""
        self.setWindowTitle('VnTrader')
        self.initCentral()
        self.initMenu()
        # self.initStatusBar()

    def showLogin(self):
        self.connectQuantOS()

    # ----------------------------------------------------------------------
    def initCentral(self):
        """初始化中心区域"""
        widgetTradingW, dockTradingW = self.createDock(wgs.TradingWidget, u'交易', QtCore.Qt.LeftDockWidgetArea)
        widgetMarketM, dockMarketM = self.createDock(wgs.MarketMonitor, u'行情', QtCore.Qt.RightDockWidgetArea)
        widgetPositionM, dockPositionM = self.createDock(wgs.PositionMonitor, u'持仓', QtCore.Qt.RightDockWidgetArea)
        widgetAccountM, dockAccountM = self.createDock(wgs.AccountMonitor, u'资金', QtCore.Qt.BottomDockWidgetArea)
        widgetContractM, dockContractM = self.createDock(wgs.ContractMonitor, u'合约', QtCore.Qt.BottomDockWidgetArea)
        widgetLogM, dockLogM = self.createDock(wgs.LogMonitor, u'日志', QtCore.Qt.BottomDockWidgetArea)
        widgetTradeM, dockTradeM = self.createDock(wgs.TradeMonitor, u'成交', QtCore.Qt.BottomDockWidgetArea)
        widgetOrderM, dockOrderM = self.createDock(wgs.OrderMonitor, u'委托', QtCore.Qt.BottomDockWidgetArea)

        self.tabifyDockWidget(dockContractM, dockTradeM)
        self.tabifyDockWidget(dockTradeM, dockOrderM)
        self.tabifyDockWidget(dockAccountM, dockLogM)

        dockOrderM.raise_()
        dockLogM.raise_()

        # 连接组件之间的信号
        widgetPositionM.itemDoubleClicked.connect(widgetTradingW.closePosition)
        widgetMarketM.itemDoubleClicked.connect(widgetTradingW.fillSymbol)

    # ----------------------------------------------------------------------
    def initMenu(self):
        """初始化菜单"""
        # 创建操作
        connectQuantOSAction = QAction(u'连接和切换策略', self)
        connectQuantOSAction.triggered.connect(self.connectQuantOS)

        exitAction = QAction(u'退出', self)
        exitAction.triggered.connect(self.close)

        aboutAction = QAction(u'关于', self)
        aboutAction.triggered.connect(self.openAbout)

        colorAction = QAction(u'变色', self)
        colorAction.triggered.connect(self.changeColor)

        # 创建菜单
        menubar = self.menuBar()

        # 设计为只显示存在的接口
        sysMenu = menubar.addMenu(u'系统')
        if 'quantos' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectQuantOSAction)
        sysMenu.addSeparator()
        sysMenu.addAction(exitAction)

        # 帮助
        helpMenu = menubar.addMenu(u'帮助')
        helpMenu.addAction(aboutAction)
        helpMenu.addAction(colorAction)

    # ----------------------------------------------------------------------
    def initStatusBar(self):
        """初始化状态栏"""
        self.statusLabel = QLabel()
        self.statusLabel.setAlignment(QtCore.Qt.AlignLeft)

        self.statusBar().addPermanentWidget(self.statusLabel)
        self.statusLabel.setText(self.getCpuMemory())

        self.sbCount = 0
        self.sbTrigger = 10     # 10秒刷新一次
        self.signalStatusBar.connect(self.updateStatusBar)
        self.eventEngine.register(EVENT_TIMER, self.signalStatusBar.emit)

    # ----------------------------------------------------------------------
    def updateStatusBar(self, event):
        """在状态栏更新CPU和内存信息"""
        self.sbCount += 1

        if self.sbCount == self.sbTrigger:
            self.sbCount = 0
            self.statusLabel.setText(self.getCpuMemory())

    # ----------------------------------------------------------------------
    def getCpuMemory(self):
        """获取CPU和内存状态信息"""
        cpuPercent = psutil.cpu_percent()
        memoryPercent = psutil.virtual_memory().percent
        return u'CPU使用率:%d%%   内存使用率:%d%%' % (cpuPercent, memoryPercent)

    # ----------------------------------------------------------------------
    def connectQuantOS(self):
        self.mainEngine.connect('quantos')

    # ----------------------------------------------------------------------
    def openAbout(self):
        """打开关于"""
        try:
            self.widgetDict['aboutW'].show()
        except KeyError:
            self.widgetDict['aboutW'] = AboutWidget(self)
            self.widgetDict['aboutW'].show()

    # ----------------------------------------------------------------------
    def closeEvent(self, event):
        """关闭事件"""
        reply = QMessageBox.question(self, u'退出',
                                     u'确认退出?', QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No)

        if reply == QMessageBox.Yes:
            for widget in list(self.widgetDict.values()):
                widget.close()

            self.mainEngine.exit()
            event.accept()
        else:
            event.ignore()

    # ----------------------------------------------------------------------
    def createDock(self, widgetClass, widgetName, widgetArea):
        """创建停靠组件"""
        widget = widgetClass(self.mainEngine, self.eventEngine)
        dock = QDockWidget(widgetName)
        dock.setWidget(widget)
        dock.setObjectName(widgetName)
        dock.setFeatures(dock.DockWidgetFloatable | dock.DockWidgetMovable)
        self.addDockWidget(widgetArea, dock)
        return widget, dock

    def changeColor(self):
        self.app.setStyleSheet(self.sheets[1])
        self.sheets = [self.sheets[1], self.sheets[0]]


########################################################################
class AboutWidget(QDialog):
    """显示关于信息"""

    # ----------------------------------------------------------------------
    def __init__(self, parent=None):
        """Constructor"""
        super(AboutWidget, self).__init__(parent)

        self.initUi()

    # ----------------------------------------------------------------------
    def initUi(self):
        """"""
        self.setWindowTitle(u'关于VnTrader')

        text = u"""
            quantos trade client
            """

        label = QLabel()
        label.setText(text)
        label.setMinimumWidth(500)

        vbox = QVBoxLayout()
        vbox.addWidget(label)

        self.setLayout(vbox)
avg_line_length: 35.441441 | max_line_length: 121 | alphanum_fraction: 0.521734
counts (score): classes 7,526 (0.918701) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 2,059 (0.251343)
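The getCpuMemory/updateStatusBar pair in the file above polls psutil once per timer tick and writes the result into the status bar. A minimal standalone sketch of that readout, using only psutil (no Qt; the function name here is mine):

```python
# Minimal sketch of the CPU/memory readout used by MainWindow.getCpuMemory, without Qt.
import psutil

def cpu_memory_text():
    # psutil.cpu_percent() reports utilisation since the previous call; the first call may be 0.0.
    cpu_percent = psutil.cpu_percent()
    memory_percent = psutil.virtual_memory().percent
    return u'CPU使用率:%d%%   内存使用率:%d%%' % (cpu_percent, memory_percent)

if __name__ == '__main__':
    print(cpu_memory_text())
```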
hexsha: d4fb4e3677b230700c8377c0c0d538eea2ac4e41
size: 9,431 | ext: py | lang: Python

| group | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | line_notify_core.py | ficgra/PChome-alertor | 5f4e798e3130c170eb75e03215128590ed02dcf9 | ["Apache-2.0"] | 1 | 2021-06-16T00:36:22.000Z | 2021-06-16T00:36:22.000Z |
| max_issues | line_notify_core.py | ficgra/PChome-alertor | 5f4e798e3130c170eb75e03215128590ed02dcf9 | ["Apache-2.0"] | null | null | null |
| max_forks | line_notify_core.py | ficgra/PChome-alertor | 5f4e798e3130c170eb75e03215128590ed02dcf9 | ["Apache-2.0"] | null | null | null |

content:
#!/usr/bin/env python
# coding: utf-8

# In[ ]:

import requests
import json
import re
from flask import Flask, request, abort
import mysql.connector as mariadb
from mysql.connector import Error
from linebot import (
    LineBotApi, WebhookHandler
)
from linebot.exceptions import (
    InvalidSignatureError
)
from linebot.models import (
    MessageEvent, TextMessage, TextSendMessage, FollowEvent,
)

app = Flask(__name__)

line_bot_api = LineBotApi('')
handler = WebhookHandler('')


@app.route("/", methods=['GET'])
def index():
    return 'OK!'


#line 官方帳號 /callback測試Event
@app.route("/callback", methods=['POST'])
def callback():
    # get X-Line-Signature header value
    signature = request.headers['X-Line-Signature']

    # get request body as text
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)

    # handle webhook body
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        print("Invalid signature. Please check your channel access token/channel secret.")
        abort(400)

    return 'OK'


#line官方帳號收到訊息時的Event
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    get_message = event.message.text
    print(get_message)
    user_id = event.source.user_id
    register_url = 'https://notify-bot.line.me/oauth/authorize?response_type=code&scope=notify&response_mode=form_post&client_id="id"&redirect_uri=https://line.husan.cc/register&state=' + user_id
    mage = re.split(r'[\s]\s*', get_message)
    try:
        if mage[0] == "註冊":
            line_bot_api.reply_message(
                event.reply_token,
                TextSendMessage(text=register_url))
        elif 'add' == mage[0]:
            try:
                notice = add_item(mage[1], user_id, mage[2])
            except:
                notice = add_item(mage[1], user_id, None)
            line_bot_api.reply_message(event.reply_token, TextSendMessage(text=notice))
        elif 'del' == mage[0]:
            notice = del_item(mage[1], user_id)
            line_bot_api.reply_message(event.reply_token, TextSendMessage(text=notice))
        elif 'list' == mage[0]:
            item_list, price_list = search_sub(user_id)
            notice = '您訂閱的項目有:'
            for i in range(len(item_list)):
                notice += '\n'
                notice = notice + item_list[i] + '\t' + str(price_list[i])
            line_bot_api.reply_message(event.reply_token, TextSendMessage(text=notice))
        elif 'send' == mage[0]:
            acc_token = get_notify_id(user_id)
            status = sent_message(mage[1], acc_token)
            if status == 200:
                line_bot_api.reply_message(event.reply_token, TextSendMessage(text='send OK!'))
        else:
            line_bot_api.reply_message(event.reply_token, TextSendMessage(text='請輸入指令:\nlist \n└查詢通知項目。\nadd 商品ID 價格 \n└新增商品通知,低於設定價格時通知。\nEX:add DYAJID-A900AVJ4G 500\ndel 商品ID \n└刪除商品通知。\nEX:del DYAJID-A900AVJ4G'))
    except BaseException as e:
        line_bot_api.reply_message(event.reply_token, TextSendMessage(text='指令錯誤,請重新確認!'))
        print(e)

    # get user id when reply
    user_id = event.source.user_id
    print("user_id =", user_id)
    profile = line_bot_api.get_profile(user_id)


#notify註冊時會post至/register
@app.route("/register", methods=['POST'])
#註冊事件
def register():
    if request.method == 'POST':
        code = request.form.get('code')  #拿code去要access_token
        print("code = ", code)
        state = request.form.get('state')  #state = user_id 使用者id
        print("user_id = ", state)
        profile = line_bot_api.get_profile(state)
        user_name = profile.display_name
        print("username = ", user_name)  #帳號名稱
        access_token = get_token(code)  #取得access_token 發訊息給使用者的token
        print("access_token = ", access_token)
        r_code = send_test_message(access_token)  #發測試通知
        if r_code == 200:
            save_profile(user_name, code, state, access_token)  #存入資料庫
            return '發送成功'
        else:
            return '發送失敗'


#加好友時發送通知
@handler.add(FollowEvent)
def handle_follow(event):
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text="感謝訂閱!請輸入\"註冊\"啟動服務。"))


#拿使用者code向notify-bot post取得access_token
def get_token(code):
    headers = {
        "Content-Type": "application/x-www-form-urlencoded"
    }
    params = {
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": "https://line.husan.cc/register",  # host_ip
        "client_id": "client_id",  #notify client_id
        "client_secret": "client_secret"  #notify client_secret
    }
    r = requests.post('https://notify-bot.line.me/oauth/token', headers=headers, params=params)
    source = json.loads(r.text)
    access_token = source['access_token']
    return access_token


#發送測試訊息至使用者notify
def send_test_message(access_token):
    headers = {
        "Authorization": "Bearer " + str(access_token),
        "Content-Type": "application/x-www-form-urlencoded",
        "notificationDisabled": "True"
    }
    params = {
        "message": "\n帳號連結成功"
    }
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params)
    return r.status_code


#使用者資料存入資料庫
def save_profile(username, code, user_id, access_token):
    try:
        connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
        if connection.is_connected():
            db_Info = connection.get_server_info()
            print("資料庫版本:", db_Info)
            cursor = connection.cursor()
            cursor.execute("INSERT INTO user_info (id, username, code, user_id, access_token) VALUES (null,'%s','%s','%s','%s')" % (username, code, user_id, access_token))
            connection.commit()  #存檔
            cursor.execute("SELECT * FROM user_info")
            # 列出查詢的資料
            for i in cursor:
                print(i)
    except Error as e:
        print("資料庫連接失敗0:", e)
    finally:
        if (connection.is_connected()):
            cursor.close()
            connection.close()
            #print("資料庫連線已關閉")


#新增訂閱項目
def add_item(item_id, user_id, w_price):
    try:
        connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
        if connection.is_connected():
            cursor = connection.cursor()
            acc_token = get_notify_id(user_id)
            try:
                cursor.execute("INSERT INTO sub_list (item_id, w_price ,user_id, acc_token) VALUES ('%s','%d','%s','%s')" % (item_id, int(w_price), user_id, acc_token))
            except:
                cursor.execute("INSERT INTO sub_list (item_id,user_id, acc_token) VALUES ('%s','%s','%s')" % (item_id, user_id, acc_token))
            connection.commit()  #存檔
            return 'Add Done!'
    except Error as e:
        print("資料庫連接失敗2:", e)
    finally:
        if (connection.is_connected()):
            cursor.close()
            connection.close()


#刪除訂閱項目
def del_item(item_id, user_id):
    try:
        connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
        if connection.is_connected():
            cursor = connection.cursor()
            cursor.execute("DELETE FROM sub_list WHERE item_id = '%s' AND user_id = '%s'" % (item_id, user_id))
            connection.commit()  #存檔
            return 'Delete Done!'
    except Error as e:
        print("資料庫連接失敗3:", e)
    finally:
        if (connection.is_connected()):
            cursor.close()
            connection.close()


#查詢訂閱項目
def search_sub(user_id):
    try:
        connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
        if connection.is_connected():
            cursor = connection.cursor()
            cursor.execute("SELECT item_id , w_price FROM sub_list WHERE user_id LIKE '%s'" % (user_id))
            sub_item = cursor.fetchall()
            price_list = [item[1] for item in sub_item]
            item_list = [item[0] for item in sub_item]
            return item_list, price_list
    except Error as e:
        print("資料庫連接失敗1:", e)
    finally:
        if (connection.is_connected()):
            cursor.close()
            connection.close()


#取得notify_access_token
def get_notify_id(user_id):
    try:
        connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
        if connection.is_connected():
            cursor = connection.cursor()
            cursor.execute("select database();")
            record = cursor.fetchone()
            cursor.execute("SELECT access_token FROM user_info WHERE user_id LIKE '%s'" % (user_id))
            acc_token = cursor.fetchall()
            return acc_token[0][0]
    except Error as e:
        print("資料庫連接失敗4:", e)
    finally:
        if (connection.is_connected()):
            cursor.close()
            connection.close()


#發送訊息
def sent_message(message, access_token):
    headers = {
        "Authorization": "Bearer " + access_token,
        "Content-Type": "application/x-www-form-urlencoded"
    }
    params = {
        "message": message
    }
    r = requests.post("https://notify-api.line.me/api/notify", headers=headers, params=params)
    print(r.status_code)
    return r.status_code


if __name__ == "__main__":
    app.run('0.0.0.0', port=3000)
avg_line_length: 35.190299 | max_line_length: 214 | alphanum_fraction: 0.626021
counts (score): classes 0 (0) | generators 0 (0) | decorators 3,838 (0.384685) | async_functions 0 (0) | documentation 3,089 (0.309612)
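The database helpers in the file above splice user-supplied values into SQL with `%` string formatting. As a hedged sketch (host and credentials copied from the file, table layout assumed to match it), the same INSERT can be written with mysql.connector's parameter binding so the driver handles quoting:

```python
# Sketch: the add_item() insert rewritten with bound parameters instead of string formatting.
import mysql.connector as mariadb

def add_item_parameterized(item_id, user_id, acc_token, w_price=None):
    connection = mariadb.connect(host='192.168.1.10', user='admin', port=3307,
                                 password='pw', database='line_notify')
    cursor = connection.cursor()
    try:
        # %s placeholders are filled in by the driver, so values are escaped for us.
        cursor.execute(
            "INSERT INTO sub_list (item_id, w_price, user_id, acc_token) "
            "VALUES (%s, %s, %s, %s)",
            (item_id, w_price, user_id, acc_token),
        )
        connection.commit()
        return 'Add Done!'
    finally:
        cursor.close()
        connection.close()
```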
hexsha: d4fd04698f7477aacd1d458ba68e94970c4579ef
size: 1,143 | ext: py | lang: Python

| group | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py | MachineLP/SFC_models | d438a4e3e88534a206c761cda7a3f6a58ac3a0ac | ["Apache-2.0"] | 21 | 2016-11-03T12:30:50.000Z | 2022-03-24T06:54:14.000Z |
| max_issues | sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py | MachineLP/SFC_models | d438a4e3e88534a206c761cda7a3f6a58ac3a0ac | ["Apache-2.0"] | 1 | 2019-04-02T02:01:27.000Z | 2019-04-07T21:07:10.000Z |
| max_forks | sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py | MachineLP/SFC_models | d438a4e3e88534a206c761cda7a3f6a58ac3a0ac | ["Apache-2.0"] | 12 | 2016-11-03T12:30:57.000Z | 2021-09-14T23:08:23.000Z |

content:
# coding=utf-8
from sfc_models.objects import *
from sfc_models.examples.Quick2DPlot import Quick2DPlot

register_standard_logs('output', __file__)
mod = Model()
country = Country(mod, 'CO')
Household(country, 'HH')
ConsolidatedGovernment(country, 'GOV')
FixedMarginBusiness(country, 'BUS', profit_margin=.025)
Market(country, 'GOOD')
Market(country, 'LAB')
TaxFlow(country, 'TAX', taxrate=.2)
# At time period 25, cut spending to 17 (from 20)
mod.AddExogenous('GOV', 'DEM_GOOD', [20.,] * 25 + [17.,] * 20)
mod.AddGlobalEquation('DEBT_GDP', 'DEBT-TO-GDP RATIO', '-100.*GOV__F/BUS__SUP_GOOD')
mod.AddGlobalEquation('DEFICIT', 'DEFICIT', '-1.*GOV__INC')
mod.EquationSolver.MaxTime = 40
mod.main()

k = mod.GetTimeSeries('k')
Rat = mod.GetTimeSeries('DEBT_GDP')
Def = mod.GetTimeSeries('GOV__INC')
spend = mod.GetTimeSeries('GOV__DEM_GOOD')

p = Quick2DPlot([k, k], [spend, Def], title='Spending and Deficit', filename='intro_X_XX_multiplier_deficit.png', run_now=False)
p.Legend = ['G', 'Deficit']
p.LegendPos = 'center left'
p.DoPlot()
Quick2DPlot(k, Rat, title='Debt-to-GDP Ratio', filename='intro_X_XX_multiplier_debt_gdp.png')
avg_line_length: 34.636364 | max_line_length: 113 | alphanum_fraction: 0.727909
counts (score): classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 384 (0.335958)
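The fiscal shock in the script above is encoded purely as a Python list: 25 periods of government spending at 20, then 20 periods at 17. A quick standalone check of that exogenous series, with no sfc_models dependency:

```python
# The exogenous government-spending path handed to mod.AddExogenous('GOV', 'DEM_GOOD', ...).
spending_path = [20.] * 25 + [17.] * 20

assert len(spending_path) == 45   # comfortably covers EquationSolver.MaxTime = 40
assert spending_path[24] == 20.   # last period before the cut
assert spending_path[25] == 17.   # the spending cut takes effect at period 25
print(spending_path[:5], '...', spending_path[-5:])
```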
hexsha: d4fe0f781e9f3139abc2757c5c86104cc2181049
size: 4,135 | ext: py | lang: Python

| group | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | auth_framework/settings.py | DrChai/django-auth-framework | 4f9a108de66fe102ff28518b6597ad26b5855518 | ["BSD-2-Clause"] | null | null | null |
| max_issues | auth_framework/settings.py | DrChai/django-auth-framework | 4f9a108de66fe102ff28518b6597ad26b5855518 | ["BSD-2-Clause"] | null | null | null |
| max_forks | auth_framework/settings.py | DrChai/django-auth-framework | 4f9a108de66fe102ff28518b6597ad26b5855518 | ["BSD-2-Clause"] | null | null | null |

content:
from importlib import import_module

from django.conf import settings
from django.core.signals import setting_changed

SOCIALACCOUNT_MODEL = getattr(settings, "REST_AUTH_SOCIALACCOUNT_MODEL", "auth_framework.SocialAccount")

DEFAULTS = {
    'UNIQUE_EMAIL': True,
    'RESET_PASSWORD_BY': 'pin',  # 'url'| 'pin'
    'SERIALIZERS': {
        # 'SOCIAL_LOGIN_SERIALIZER': 'auth.social.serializers.DefaultSocialLoginSerializer',
        'SIGNUP_SERIALIZER': 'auth_framework.serializers.signup_serializers.DefaultSignUpSerializer',
        'USERINFO_SERIALIZER': None
    },
    'SOCIALACCOUNT_MODEL': SOCIALACCOUNT_MODEL,
    'SOCIALACCOUNT_ADMIN_CLASS': "auth_framework.admin.SocialAccountAdmin",
    # SOCIAL LOGINS
    'SOCIAL_CALLBACK_URL': None,  # eg: 'https://developers.google.com/oauthplayground'
    'SOCIAL_AUTO_SIGNUP': False,
    # SIGN UP
    # 'SIGNUP_EMAIL_VERIFICATION': 'none',  # trimmed out email verification celery task in closed source. fewer usage
    'SIGNUP_USERNAME_REQUIRED': False,
    'SIGNUP_USERNAME_VALIDATORS': [],
    'USE_PASSWORD_TWICE_VALIDATION': True,
    # ADVANCES
    'USE_PHONENUMBER_FIELD': False,
    'USE_CELERY_EMAIL': False,
    'USE_ID_TOKEN': True,
    'OAUTH_SAVE_ID_TOKEN': False
}


def import_callable(path_or_callable):
    if path_or_callable is None:
        return None
    if hasattr(path_or_callable, '__call__'):
        return path_or_callable
    else:
        assert isinstance(path_or_callable, str)
        package, attr = path_or_callable.rsplit('.', 1)
        return getattr(import_module(package), attr)


class AuthSettings:
    """ """
    def __init__(self, user_settings=None, defaults=None):
        if user_settings:
            self._user_settings = user_settings
        self.defaults = defaults or DEFAULTS
        self._cached_attrs = set()

    @property
    def user_settings(self):
        if not hasattr(self, '_user_settings'):
            self._user_settings = getattr(settings, 'AUTH_FRAMEWORK', {})
        return self._user_settings

    @property
    def username_validators(self):
        from django.core.exceptions import ImproperlyConfigured
        from django.contrib.auth import get_user_model
        validators = self.user_settings.get("SIGNUP_USERNAME_VALIDATORS", None)
        if validators:
            ret = []
            if not isinstance(validators, list):
                raise ImproperlyConfigured(
                    "SIGNUP_USERNAME_VALIDATORS is expected to be a list"
                )
            for path in validators:
                pkg, attr = path.rsplit(".", 1)
                validator = getattr(import_module(pkg), attr)
                ret.append(validator())
        else:
            ret = (
                get_user_model()._meta.get_field('username').validators
            )
        return ret

    def serializers(self, data):
        # Check if present in user settings
        for key, value in data.items():
            data[key] = import_callable(value)
        return data

    def __getattr__(self, attr):
        if attr not in self.defaults:
            raise AttributeError("Invalid setting: '%s'" % attr)
        try:
            # Check if present in user settings
            val = self.user_settings[attr]
            if isinstance(val, dict):
                val = self.defaults[attr].copy()
                val.update(self.user_settings[attr])
        except KeyError:
            # Fall back to defaults
            val = self.defaults[attr]
        if attr == 'SERIALIZERS':
            val = self.serializers(val)
        # Cache the result
        self._cached_attrs.add(attr)
        setattr(self, attr, val)
        return val

    def reload(self):
        for attr in self._cached_attrs:
            delattr(self, attr)
        self._cached_attrs.clear()
        if hasattr(self, '_user_settings'):
            delattr(self, '_user_settings')


app_settings = AuthSettings(None, DEFAULTS)


def reload_app_settings(*args, **kwargs):
    setting = kwargs['setting']
    if setting == 'AUTH_FRAMEWORK':
        app_settings.reload()


setting_changed.connect(reload_app_settings)
avg_line_length: 33.08 | max_line_length: 117 | alphanum_fraction: 0.641112
counts (score): classes 2,326 (0.562515) | generators 0 (0) | decorators 992 (0.239903) | async_functions 0 (0) | documentation 1,166 (0.281983)
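The module above is a lazy settings proxy: attribute access falls back from the user's AUTH_FRAMEWORK dict to DEFAULTS, dotted paths in SERIALIZERS are resolved to callables via import_callable, and results are cached on the instance. A stripped-down, framework-free sketch of that lookup pattern (the names below are illustrative, not part of the package):

```python
# Illustrative sketch of the DEFAULTS-with-override lookup used by AuthSettings (no Django).
from importlib import import_module

DEFAULTS = {'RESET_PASSWORD_BY': 'pin', 'SERIALIZERS': {'SIGNUP_SERIALIZER': 'json.loads'}}

class SettingsProxy:
    def __init__(self, user_settings=None):
        self.user_settings = user_settings or {}

    def __getattr__(self, attr):
        if attr not in DEFAULTS:
            raise AttributeError("Invalid setting: %r" % attr)
        val = self.user_settings.get(attr, DEFAULTS[attr])
        if attr == 'SERIALIZERS':
            # Resolve dotted paths such as 'json.loads' into the actual callable.
            val = {k: getattr(import_module(v.rsplit('.', 1)[0]), v.rsplit('.', 1)[1])
                   for k, v in val.items() if v}
        setattr(self, attr, val)  # cache, so __getattr__ only runs once per name
        return val

proxy = SettingsProxy({'RESET_PASSWORD_BY': 'url'})
print(proxy.RESET_PASSWORD_BY)                 # 'url' (user override wins)
print(proxy.SERIALIZERS['SIGNUP_SERIALIZER'])  # the json.loads callable from the default
```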
hexsha: d4ff76335b31237c5497fc74cfffe7b1e1ab18a8
size: 317 | ext: py | lang: Python

| group | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | shorty/models.py | gkiserpong/shorty | 5795e26f3221d581223e37353bee360454532211 | ["MIT"] | null | null | null |
| max_issues | shorty/models.py | gkiserpong/shorty | 5795e26f3221d581223e37353bee360454532211 | ["MIT"] | null | null | null |
| max_forks | shorty/models.py | gkiserpong/shorty | 5795e26f3221d581223e37353bee360454532211 | ["MIT"] | null | null | null |

content:
from django.db import models

from shorty.manager import UrlManager


class Url(models.Model):
    long_url = models.URLField()
    short_id = models.SlugField()
    counter = models.IntegerField(default=0)

    def __str__(self):
        return "%s -- %s" % (self.long_url, self.short_id)

    objects = UrlManager()
avg_line_length: 22.642857 | max_line_length: 58 | alphanum_fraction: 0.684543
counts (score): classes 247 (0.77918) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 10 (0.031546)
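The model above stores a slug-style short_id next to the long URL; the UrlManager that fills it is not included in this row. Purely as an assumption about how such an id could be derived, one common approach is to base62-encode the integer primary key:

```python
# Hypothetical helper: not taken from gkiserpong/shorty, just one common way to build a short_id.
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

def base62(n):
    """Encode a non-negative integer (e.g. Url.pk) as a short base62 slug."""
    if n == 0:
        return ALPHABET[0]
    digits = []
    while n:
        n, rem = divmod(n, 62)
        digits.append(ALPHABET[rem])
    return "".join(reversed(digits))

print(base62(0), base62(61), base62(62), base62(125))  # 0 Z 10 21
```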
hexsha: be0006e92a529db72d1a914a113e9040dbe56c1e
size: 48,343 | ext: py | lang: Python

| group | repo_path | repo_name | repo_head_hexsha | repo_licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | test/sec_full.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | ["MIT"] | null | null | null |
| max_issues | test/sec_full.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | ["MIT"] | 1 | 2019-07-03T13:32:38.000Z | 2019-07-03T13:32:38.000Z |
| max_forks | test/sec_full.py | time-track-tool/time-track-tool | a1c280f32a7766e460c862633b748fa206256f24 | ["MIT"] | 1 | 2019-05-15T16:01:31.000Z | 2019-05-15T16:01:31.000Z |

content:
security = """ New Web users get the Roles "User,Nosy" New Email users get the Role "User" Role "admin": User may access the rest interface (Rest Access) User may access the web interface (Web Access) User may access the xmlrpc interface (Xmlrpc Access) User may create everything (Create) User may edit everything (Edit) User may manipulate user Roles through the web (Web Roles) User may restore everything (Restore) User may retire everything (Retire) User may use the email interface (Email Access) User may view everything (View) Role "anonymous": User may access the web interface (Web Access) Role "cc-permission": (Restore for "cost_center_permission_group" only) (Retire for "cost_center_permission_group" only) User is allowed to create cost_center_permission_group (Create for "cost_center_permission_group" only) User is allowed to edit cost_center_permission_group (Edit for "cost_center_permission_group" only) Role "contact": User is allowed to create contact (Create for "contact" only) User is allowed to edit contact (Edit for "contact" only) Role "controlling": User is allowed Edit on (Edit for "daily_record": ('status', 'time_record') only) User is allowed Edit on (Edit for "sap_cc": ('group_lead', 'team_lead') only) User is allowed Edit on (Edit for "time_project": ('group_lead', 'team_lead') only) User is allowed Edit on (Edit for "time_wp": ('project',) only) User is allowed View on (View for "user": ('roles',) only) User is allowed View on (View for "user_dynamic": ('id', 'sap_cc', 'user', 'valid_from', 'valid_to') only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to access daily_record (View for "daily_record" only) User is allowed to access daily_record_freeze (View for "daily_record_freeze" only) User is allowed to access leave_submission (View for "leave_submission" only) User is allowed to access overtime_correction (View for "overtime_correction" only) User is allowed to access query (View for "query" only) User is allowed to access time_project (View for "time_project" only) User is allowed to access time_record (View for "time_record" only) User is allowed to access time_report (View for "time_report" only) User is allowed to access time_wp (View for "time_wp" only) User is allowed to access vacation_correction (View for "vacation_correction" only) User is allowed to create cost_center (Create for "cost_center" only) User is allowed to create cost_center_group (Create for "cost_center_group" only) User is allowed to create cost_center_status (Create for "cost_center_status" only) User is allowed to create department (Create for "department" only) User is allowed to create organisation (Create for "organisation" only) User is allowed to create product_family (Create for "product_family" only) User is allowed to create public_holiday (Create for "public_holiday" only) User is allowed to create query (Create for "query" only) User is allowed to create reporting_group (Create for "reporting_group" only) User is allowed to create sap_cc (Create for "sap_cc" only) User is allowed to create time_activity (Create for "time_activity" only) User is allowed to create time_activity_perm (Create for "time_activity_perm" only) User is allowed to create time_record (Create for "time_record" only) User is allowed to create work_location (Create for "work_location" only) User is allowed to edit cost_center (Edit for "cost_center" only) User is allowed to edit cost_center_group (Edit for "cost_center_group" only) User is allowed to edit 
cost_center_status (Edit for "cost_center_status" only) User is allowed to edit department (Edit for "department" only) User is allowed to edit organisation (Edit for "organisation" only) User is allowed to edit product_family (Edit for "product_family" only) User is allowed to edit public_holiday (Edit for "public_holiday" only) User is allowed to edit query (Edit for "query" only) User is allowed to edit reporting_group (Edit for "reporting_group" only) User is allowed to edit sap_cc (Edit for "sap_cc" only) User is allowed to edit time_activity (Edit for "time_activity" only) User is allowed to edit time_activity_perm (Edit for "time_activity_perm" only) User is allowed to edit time_record (Edit for "time_record" only) User is allowed to edit work_location (Edit for "work_location" only) Role "doc_admin": User is allowed Edit on (Edit for "department": ('doc_num',) only) User is allowed to create artefact (Create for "artefact" only) User is allowed to create doc (Create for "doc" only) User is allowed to create doc_category (Create for "doc_category" only) User is allowed to create doc_status (Create for "doc_status" only) User is allowed to create product_type (Create for "product_type" only) User is allowed to create reference (Create for "reference" only) User is allowed to edit artefact (Edit for "artefact" only) User is allowed to edit doc (Edit for "doc" only) User is allowed to edit doc_category (Edit for "doc_category" only) User is allowed to edit doc_status (Edit for "doc_status" only) User is allowed to edit product_type (Edit for "product_type" only) User is allowed to edit reference (Edit for "reference" only) Role "dom-user-edit-facility": Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['room'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['room'] only) Role "dom-user-edit-gtt": (Search for "user_dynamic" only) May only view/edit records with the correct domain (Edit for "user_dynamic" only) May only view/edit records with the correct domain (View for "user_dynamic" only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to create user (Create for "user" only) User is allowed to create user_contact (Create for "user_contact" only) User is allowed to create user_dynamic (Create for "user_dynamic" only) User is allowed to edit user_contact (Edit for "user_contact" only) Users may view user_dynamic records for ad_domain for which they are in the domain_permission for the user (View for "user_dynamic" only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['contacts', 'csv_delimiter', 'department_temp', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'sync_foreign_key', 'timezone', 'tt_lines', 'username', 'vie_user'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['contacts', 'csv_delimiter', 'department_temp', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'sync_foreign_key', 'timezone', 
'tt_lines', 'username', 'vie_user'] only) Role "dom-user-edit-hr": (Search for "user_dynamic" only) May only view/edit records with the correct domain (Edit for "user_dynamic" only) May only view/edit records with the correct domain (View for "user_dynamic" only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to create user_contact (Create for "user_contact" only) User is allowed to create user_dynamic (Create for "user_dynamic" only) User is allowed to edit user_contact (Edit for "user_contact" only) Users may view user_dynamic records for ad_domain for which they are in the domain_permission for the user (View for "user_dynamic" only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['clearance_by', 'contacts', 'csv_delimiter', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'reduced_activity_list', 'roles', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'tt_lines', 'vie_user'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['clearance_by', 'contacts', 'csv_delimiter', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'reduced_activity_list', 'roles', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'tt_lines', 'vie_user'] only) Role "dom-user-edit-office": User is allowed to create user_contact (Create for "user_contact" only) User is allowed to edit user_contact (Edit for "user_contact" only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['contacts', 'position_text', 'room'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['contacts', 'position_text', 'room'] only) Role "external": (Search for "ext_tracker_state": ('id', 'issue') only) (Search for "user": ('id', 'nickname', 'username') only) External users are allowed to access issue if they are on the list of allowed external users or there is a transitive permission via containers (Edit for "issue": ['activity', 'actor', 'area', 'category', 'closed', 'composed_of', 'creation', 'creator', 'cur_est_begin', 'cur_est_end', 'deadline', 'depends', 'doc_issue_status', 'earliest_start', 'effective_prio', 'effort_hours', 'external_users', 'files', 'files_affected', 'fixed_in', 'id', 'keywords', 'kind', 'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only) External users are allowed to access issue if they are on the list of allowed external users or there is a transitive permission via containers (View for "issue": ['activity', 'actor', 'area', 'category', 'closed', 'composed_of', 'creation', 'creator', 'cur_est_begin', 'cur_est_end', 'deadline', 'depends', 'doc_issue_status', 'earliest_start', 'effective_prio', 'effort_hours', 'external_users', 'files', 'files_affected', 'fixed_in', 'id', 'keywords', 'kind', 'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 
'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only) User is allowed View on (View for "category": ('id', 'name') only) User is allowed View on (View for "user": ('nickname', 'status', 'username') only) User is allowed View on (View for "user_status": ('name',) only) User is allowed View on file if file is linked from an item with View permission (View for "file" only) User is allowed View on msg if msg is linked from an item with View permission (View for "msg" only) User is allowed to access area (View for "area" only) User is allowed to access doc_issue_status (View for "doc_issue_status" only) User is allowed to access ext_tracker (View for "ext_tracker" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) User is allowed to access keyword (View for "keyword" only) User is allowed to access kind (View for "kind" only) User is allowed to access msg_keyword (View for "msg_keyword" only) User is allowed to access safety_level (View for "safety_level" only) User is allowed to access severity (View for "severity" only) User is allowed to access status (View for "status" only) User is allowed to access status_transition (View for "status_transition" only) User is allowed to access test_level (View for "test_level" only) User is allowed to create file (Create for "file" only) User is allowed to create issue (Create for "issue" only) User is allowed to create msg (Create for "msg" only) User is allowed to create query (Create for "query" only) User is allowed to edit their queries (Edit for "query" only) User is allowed to retire their queries (Retire for "query" only) User is allowed to search for their own files (Search for "file" only) User is allowed to search for their own messages (Search for "msg" only) User is allowed to search for their queries (Search for "query" only) User is allowed to search issue (Search for "issue" only) User is allowed to view their own files (View for "file" only) User may access the web interface (Web Access) User may use the email interface (Email Access) Users are allowed to edit some of their details (Edit for "user": ('csv_delimiter', 'hide_message_files', 'password', 'timezone') only) Users are allowed to view some of their details (View for "user": ('activity', 'actor', 'creation', 'creator', 'firstname', 'lastname', 'realname', 'username') only) Users are allowed to view their own and public queries for classes where they have search permission (View for "query" only) Role "facility": (Restore for "room" only) (Retire for "room" only) User is allowed to create room (Create for "room" only) User is allowed to edit room (Edit for "room" only) Role "functional-role": (Restore for "user_functional_role" only) (Retire for "user_functional_role" only) User is allowed Edit on (Edit for "user": ('business_responsible', 'scale_seniority') only) User is allowed View on (View for "user": ('business_responsible', 'planning_role', 'scale_seniority') only) User is allowed to access user_functional_role (View for "user_functional_role" only) User is allowed to create user_functional_role (Create for "user_functional_role" only) User is allowed to edit user_functional_role (Edit for "user_functional_role" only) Role "hr": (Edit for "overtime_period": ('name', 'order') only) (Restore for "room" only) (Retire for "room" only) User is allowed Edit on (Edit for "daily_record": ('required_overtime', 'weekend_allowed') only) User is 
allowed Edit on (Edit for "daily_record": ('status', 'time_record') only) User is allowed Edit on (Edit for "time_project": ('approval_hr', 'approval_required', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'no_overtime', 'no_overtime_day', 'only_hours', 'overtime_reduction') only) User is allowed View on (View for "user": ('contacts',) only) User is allowed to access auto_wp (View for "auto_wp" only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to access daily_record (View for "daily_record" only) User is allowed to access daily_record_freeze (View for "daily_record_freeze" only) User is allowed to access leave_submission (View for "leave_submission" only) User is allowed to access overtime_correction (View for "overtime_correction" only) User is allowed to access time_record (View for "time_record" only) User is allowed to access user_contact (View for "user_contact" only) User is allowed to access user_dynamic (View for "user_dynamic" only) User is allowed to access vacation_correction (View for "vacation_correction" only) User is allowed to create auto_wp (Create for "auto_wp" only) User is allowed to create daily_record_freeze (Create for "daily_record_freeze" only) User is allowed to create location (Create for "location" only) User is allowed to create org_location (Create for "org_location" only) User is allowed to create organisation (Create for "organisation" only) User is allowed to create overtime_correction (Create for "overtime_correction" only) User is allowed to create overtime_period (Create for "overtime_period" only) User is allowed to create product_family (Create for "product_family" only) User is allowed to create public_holiday (Create for "public_holiday" only) User is allowed to create reporting_group (Create for "reporting_group" only) User is allowed to create room (Create for "room" only) User is allowed to create sap_cc (Create for "sap_cc" only) User is allowed to create time_record (Create for "time_record" only) User is allowed to create uc_type (Create for "uc_type" only) User is allowed to create user (Create for "user" only) User is allowed to create user_dynamic (Create for "user_dynamic" only) User is allowed to edit auto_wp (Edit for "auto_wp" only) User is allowed to edit dynamic user data if not frozen in validity span of dynamic user record (Edit for "user_dynamic" only) User is allowed to edit freeze record if not frozen at the given date (Edit for "daily_record_freeze": ('frozen',) only) User is allowed to edit location (Edit for "location" only) User is allowed to edit org_location (Edit for "org_location" only) User is allowed to edit organisation (Edit for "organisation" only) User is allowed to edit overtime correction if the overtime correction is not frozen (Edit for "overtime_correction" only) User is allowed to edit product_family (Edit for "product_family" only) User is allowed to edit public_holiday (Edit for "public_holiday" only) User is allowed to edit reporting_group (Edit for "reporting_group" only) User is allowed to edit room (Edit for "room" only) User is allowed to edit sap_cc (Edit for "sap_cc" only) User is allowed to edit time_record (Edit for "time_record" only) User is allowed to edit uc_type (Edit for "uc_type" only) User may manipulate user Roles through the web (Web Roles) Role "hr-leave-approval": User is allowed Edit on (Edit for "leave_submission": ('status',) only) User is allowed to access contract_type (View for "contract_type" only) User is allowed 
to access leave_submission (View for "leave_submission" only) User is allowed to access vacation_correction (View for "vacation_correction" only) Role "hr-org-location": (Search for "daily_record_freeze" only) (Search for "overtime_correction" only) (Search for "time_activity_perm" only) (Search for "time_record" only) (Search for "user_dynamic" only) User is allowed to view dynamic user data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "user_dynamic" only) User is allowed to view freeze information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "daily_record_freeze" only) User is allowed to view overtime information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "overtime_correction" only) User is allowed to view time record data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "time_record" only) Role "hr-vacation": User is allowed to access contract_type (View for "contract_type" only) User is allowed to access leave_submission (View for "leave_submission" only) User is allowed to access vacation_correction (View for "vacation_correction" only) User is allowed to create contract_type (Create for "contract_type" only) User is allowed to create leave_submission (Create for "leave_submission" only) User is allowed to create vacation_correction (Create for "vacation_correction" only) User is allowed to edit contract_type (Edit for "contract_type" only) User is allowed to edit leave_submission (Edit for "leave_submission" only) User is allowed to edit vacation_correction (Edit for "vacation_correction" only) Role "issue_admin": User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only) User is allowed to access issue (View for "issue" only) User is allowed to create area (Create for "area" only) User is allowed to create category (Create for "category" only) User is allowed to create doc_issue_status (Create for "doc_issue_status" only) User is allowed to create ext_tracker (Create for "ext_tracker" only) User is allowed to create issue (Create for "issue" only) User is allowed to create keyword (Create for "keyword" only) User is allowed to create kind (Create for "kind" only) User is allowed to create msg_keyword (Create for "msg_keyword" only) User is allowed to create safety_level (Create for "safety_level" only) User is allowed to create severity (Create for "severity" only) User is allowed to create status (Create for "status" only) User is allowed to create status_transition (Create for "status_transition" only) User is allowed to create test_level (Create for "test_level" only) User is allowed to edit area (Edit for "area" only) User is allowed to edit category (Edit for "category" only) User is allowed to edit doc_issue_status (Edit for "doc_issue_status" only) User is allowed to edit ext_tracker (Edit for "ext_tracker" only) User is allowed to edit issue (Edit for "issue" only) User is allowed to edit keyword (Edit for "keyword" only) User is allowed to edit kind (Edit for "kind" only) User is allowed to edit msg_keyword (Edit for "msg_keyword" only) User is allowed to edit safety_level (Edit for "safety_level" only) User is allowed to edit severity (Edit for "severity" only) User is allowed to edit status (Edit for "status" only) User is allowed to edit status_transition (Edit for "status_transition" only) User is allowed to edit test_level (Edit for 
"test_level" only) Role "it": Create (Create for "user_contact" only) User is allowed Edit on (Edit for "file": ('name', 'type') only) User is allowed Edit on (Edit for "location": ('domain_part',) only) User is allowed Edit on (Edit for "organisation": ('domain_part',) only) User is allowed Edit on (Edit for "user": ('ad_domain', 'nickname', 'password', 'pictures', 'roles', 'timetracking_by', 'timezone', 'username') only) User is allowed Edit on (Edit for "user": ('address', 'alternate_addresses', 'nickname', 'password', 'timezone', 'username') only) User is allowed Edit on file if file is linked from an item with Edit permission (Edit for "file" only) User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only) User is allowed View on file if file is linked from an item with View permission (View for "file" only) User is allowed to access domain_permission (View for "domain_permission" only) User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for "it_project" only) User is allowed to create domain_permission (Create for "domain_permission" only) User is allowed to create it_category (Create for "it_category" only) User is allowed to create it_int_prio (Create for "it_int_prio" only) User is allowed to create it_issue (Create for "it_issue" only) User is allowed to create it_project (Create for "it_project" only) User is allowed to create it_request_type (Create for "it_request_type" only) User is allowed to create mailgroup (Create for "mailgroup" only) User is allowed to edit domain_permission (Edit for "domain_permission" only) User is allowed to edit it_category (Edit for "it_category" only) User is allowed to edit it_int_prio (Edit for "it_int_prio" only) User is allowed to edit it_issue (Edit for "it_issue" only) User is allowed to edit it_project (Edit for "it_project" only) User is allowed to edit it_request_type (Edit for "it_request_type" only) User is allowed to edit mailgroup (Edit for "mailgroup" only) User may manipulate user Roles through the web (Web Roles) Role "itview": User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for "it_project" only) Role "msgedit": (Search for "msg": ('date', 'id') only) User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only) User is allowed to access ext_msg (View for "ext_msg" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) Role "msgsync": (Search for "msg": ('date', 'id') only) User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only) User is allowed to access ext_msg (View for "ext_msg" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) User is allowed to create ext_msg (Create for "ext_msg" only) User is allowed to create ext_tracker_state (Create for "ext_tracker_state" only) User is allowed to edit ext_msg (Edit for "ext_msg" only) User is allowed to edit ext_tracker_state (Edit for "ext_tracker_state" only) Role "nosy": User may get nosy messages for doc (Nosy for "doc" only) User may get nosy messages for issue (Nosy for "issue" only) 
User may get nosy messages for it_issue (Nosy for "it_issue" only) User may get nosy messages for it_project (Nosy for "it_project" only) User may get nosy messages for support (Nosy for "support" only) Role "office": (Restore for "room" only) (Retire for "room" only) User is allowed View on (View for "user": ('contacts',) only) User is allowed to access user_contact (View for "user_contact" only) User is allowed to create absence (Create for "absence" only) User is allowed to create absence_type (Create for "absence_type" only) User is allowed to create room (Create for "room" only) User is allowed to create uc_type (Create for "uc_type" only) User is allowed to edit absence (Edit for "absence" only) User is allowed to edit absence_type (Edit for "absence_type" only) User is allowed to edit room (Edit for "room" only) User is allowed to edit uc_type (Edit for "uc_type" only) Role "organisation": User is allowed to access location (View for "location" only) User is allowed to access org_location (View for "org_location" only) User is allowed to access organisation (View for "organisation" only) User is allowed to create location (Create for "location" only) User is allowed to create org_location (Create for "org_location" only) User is allowed to create organisation (Create for "organisation" only) User is allowed to edit location (Edit for "location" only) User is allowed to edit org_location (Edit for "org_location" only) User is allowed to edit organisation (Edit for "organisation" only) Role "pgp": Role "procurement": (View for "sap_cc" only) (View for "time_project" only) User is allowed Edit on (Edit for "sap_cc": ('group_lead', 'purchasing_agents', 'team_lead') only) User is allowed Edit on (Edit for "time_project": ('group_lead', 'purchasing_agents', 'team_lead') only) Role "project": User is allowed Edit on (Edit for "time_project": ('cost_center', 'department', 'deputy', 'description', 'name', 'nosy', 'organisation', 'responsible', 'status') only) User is allowed Edit on (Edit for "time_project": ('infosec_req', 'is_extern', 'max_hours', 'op_project', 'planned_effort', 'product_family', 'project_type', 'reporting_group', 'work_location') only) User is allowed to access time_project (View for "time_project" only) User is allowed to access time_report (View for "time_report" only) User is allowed to access time_wp (View for "time_wp" only) User is allowed to create time_project (Create for "time_project" only) User is allowed to create time_project_status (Create for "time_project_status" only) User is allowed to create time_wp (Create for "time_wp" only) User is allowed to create time_wp_group (Create for "time_wp_group" only) User is allowed to edit time_project_status (Edit for "time_project_status" only) User is allowed to edit time_wp (Edit for "time_wp" only) User is allowed to edit time_wp_group (Edit for "time_wp_group" only) Role "project_view": User is allowed to access time_project (View for "time_project" only) User is allowed to access time_report (View for "time_report" only) User is allowed to access time_wp (View for "time_wp" only) Role "sec-incident-nosy": User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for "it_project" only) Role "sec-incident-responsible": User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for 
"it_project" only) Role "staff-report": Role "sub-login": Role "summary_view": Role "supportadmin": User is allowed to access analysis_result (View for "analysis_result" only) User is allowed to access contact (View for "contact" only) User is allowed to access customer (View for "customer" only) User is allowed to access customer_agreement (View for "customer_agreement" only) User is allowed to access mailgroup (View for "mailgroup" only) User is allowed to access return_type (View for "return_type" only) User is allowed to access sup_classification (View for "sup_classification" only) User is allowed to access support (View for "support" only) User is allowed to create analysis_result (Create for "analysis_result" only) User is allowed to create contact (Create for "contact" only) User is allowed to create customer (Create for "customer" only) User is allowed to create customer_agreement (Create for "customer_agreement" only) User is allowed to create mailgroup (Create for "mailgroup" only) User is allowed to create return_type (Create for "return_type" only) User is allowed to create sup_classification (Create for "sup_classification" only) User is allowed to create support (Create for "support" only) User is allowed to edit analysis_result (Edit for "analysis_result" only) User is allowed to edit contact (Edit for "contact" only) User is allowed to edit customer (Edit for "customer" only) User is allowed to edit customer_agreement (Edit for "customer_agreement" only) User is allowed to edit mailgroup (Edit for "mailgroup" only) User is allowed to edit return_type (Edit for "return_type" only) User is allowed to edit sup_classification (Edit for "sup_classification" only) User is allowed to edit support (Edit for "support" only) Role "time-report": User is allowed to access time_report (View for "time_report" only) User is allowed to create time_report (Create for "time_report" only) User is allowed to edit time_report (Edit for "time_report" only) User may edit own file (file created by user) (Edit for "file" only) Role "user": (Search for "time_project": ('activity', 'actor', 'creation', 'creator', 'deputy', 'description', 'id', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'name', 'nosy', 'only_hours', 'op_project', 'overtime_reduction', 'responsible', 'status', 'work_location', 'wps') only) (Search for "time_wp": ('activity', 'actor', 'auto_wp', 'bookers', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'id', 'is_extern', 'is_public', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only) (View for "time_project": ('activity', 'actor', 'creation', 'creator', 'deputy', 'description', 'id', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'name', 'nosy', 'only_hours', 'op_project', 'overtime_reduction', 'responsible', 'status', 'work_location', 'wps') only) Search (Search for "user_contact" only) User is allowed Edit on (Edit for "msg": ('keywords',) only) User is allowed Edit on file if file is linked from an item with Edit permission (Edit for "file" only) User is allowed Edit on issue if issue is non-confidential or user is on nosy list (Edit for "issue" only) User is allowed Edit on it_issue if it_issue is non-confidential or user is on nosy list (Edit for "it_issue": ('messages', 'files', 'nosy') only) User is allowed Edit on it_project if it_project is non-confidential or user is on nosy list (Edit for "it_project": 
('messages', 'files', 'nosy') only) User is allowed Edit on support if support is non-confidential or user is on nosy list (Edit for "support": ('analysis_end', 'analysis_result', 'analysis_start', 'bcc', 'business_unit', 'category', 'cc', 'cc_emails', 'classification', 'closed', 'confidential', 'customer', 'emails', 'execution', 'external_ref', 'files', 'goods_received', 'goods_sent', 'lot', 'messages', 'nosy', 'number_effected', 'numeric_effort', 'prio', 'prodcat', 'product', 'related_issues', 'related_support', 'release', 'responsible', 'return_type', 'sap_ref', 'send_to_customer', 'serial_number', 'set_first_reply', 'status', 'superseder', 'title', 'type', 'warranty') only) User is allowed View on (View for "user": ('activity', 'actor', 'ad_domain', 'address', 'alternate_addresses', 'business_responsible', 'clearance_by', 'creation', 'creator', 'firstname', 'id', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'queries', 'realname', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'title', 'tt_lines', 'username') only) User is allowed View on (View for "user": ('activity', 'actor', 'address', 'alternate_addresses', 'creation', 'creator', 'id', 'queries', 'realname', 'status', 'timezone', 'username') only) User is allowed View on (View for "user": ('business_responsible', 'department_temp', 'timetracking_by', 'vie_user', 'vie_user_bl_override', 'vie_user_ml') only) User is allowed View on (View for "user": ('contacts',) only) User is allowed View on (View for "user_dynamic": ('department', 'org_location') only) User is allowed View on file if file is linked from an item with View permission (View for "file" only) User is allowed View on issue if issue is non-confidential or user is on nosy list (View for "issue" only) User is allowed View on it_issue if it_issue is non-confidential or user is on nosy list (View for "it_issue" only) User is allowed View on it_project if it_project is non-confidential or user is on nosy list (View for "it_project" only) User is allowed View on msg if msg is linked from an item with View permission (View for "msg" only) User is allowed View on support if support is non-confidential or user is on nosy list (View for "support" only) User is allowed to access absence (View for "absence" only) User is allowed to access absence_type (View for "absence_type" only) User is allowed to access analysis_result (View for "analysis_result" only) User is allowed to access area (View for "area" only) User is allowed to access artefact (View for "artefact" only) User is allowed to access business_unit (View for "business_unit" only) User is allowed to access category (View for "category" only) User is allowed to access contact (View for "contact" only) User is allowed to access contact_type (View for "contact_type" only) User is allowed to access cost_center (View for "cost_center" only) User is allowed to access cost_center_group (View for "cost_center_group" only) User is allowed to access cost_center_permission_group (View for "cost_center_permission_group" only) User is allowed to access cost_center_status (View for "cost_center_status" only) User is allowed to access customer (View for "customer" only) User is allowed to access customer_agreement (View for "customer_agreement" only) User is allowed to access daily record if he is owner or supervisor or timetracking-by user (Edit for "daily_record": ('status', 'time_record') only) User is allowed to access daily record if he is 
owner or supervisor or timetracking-by user (View for "daily_record" only) User is allowed to access daily_record_status (View for "daily_record_status" only) User is allowed to access department (View for "department" only) User is allowed to access doc (View for "doc" only) User is allowed to access doc_category (View for "doc_category" only) User is allowed to access doc_issue_status (View for "doc_issue_status" only) User is allowed to access doc_status (View for "doc_status" only) User is allowed to access ext_tracker (View for "ext_tracker" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) User is allowed to access functional_role (View for "functional_role" only) User is allowed to access it_category (View for "it_category" only) User is allowed to access it_issue_status (View for "it_issue_status" only) User is allowed to access it_prio (View for "it_prio" only) User is allowed to access it_project_status (View for "it_project_status" only) User is allowed to access it_request_type (View for "it_request_type" only) User is allowed to access keyword (View for "keyword" only) User is allowed to access kind (View for "kind" only) User is allowed to access leave_status (View for "leave_status" only) User is allowed to access location (View for "location" only) User is allowed to access mailgroup (View for "mailgroup" only) User is allowed to access msg_keyword (View for "msg_keyword" only) User is allowed to access org_group (View for "org_group" only) User is allowed to access org_location (View for "org_location" only) User is allowed to access organisation (View for "organisation" only) User is allowed to access overtime_period (View for "overtime_period" only) User is allowed to access prodcat (View for "prodcat" only) User is allowed to access product (View for "product" only) User is allowed to access product_family (View for "product_family" only) User is allowed to access product_type (View for "product_type" only) User is allowed to access project_type (View for "project_type" only) User is allowed to access public_holiday (View for "public_holiday" only) User is allowed to access reference (View for "reference" only) User is allowed to access reporting_group (View for "reporting_group" only) User is allowed to access return_type (View for "return_type" only) User is allowed to access room (View for "room" only) User is allowed to access safety_level (View for "safety_level" only) User is allowed to access sap_cc (View for "sap_cc" only) User is allowed to access severity (View for "severity" only) User is allowed to access sex (View for "sex" only) User is allowed to access status (View for "status" only) User is allowed to access status_transition (View for "status_transition" only) User is allowed to access summary_report (View for "summary_report" only) User is allowed to access summary_type (View for "summary_type" only) User is allowed to access sup_classification (View for "sup_classification" only) User is allowed to access sup_execution (View for "sup_execution" only) User is allowed to access sup_prio (View for "sup_prio" only) User is allowed to access sup_status (View for "sup_status" only) User is allowed to access sup_type (View for "sup_type" only) User is allowed to access sup_warranty (View for "sup_warranty" only) User is allowed to access test_level (View for "test_level" only) User is allowed to access time_activity (View for "time_activity" 
only) User is allowed to access time_activity_perm (View for "time_activity_perm" only) User is allowed to access time_project_status (View for "time_project_status" only) User is allowed to access time_wp_group (View for "time_wp_group" only) User is allowed to access time_wp_summary_no (View for "time_wp_summary_no" only) User is allowed to access timesheet (View for "timesheet" only) User is allowed to access uc_type (View for "uc_type" only) User is allowed to access user_status (View for "user_status" only) User is allowed to access vac_aliq (View for "vac_aliq" only) User is allowed to access vacation_report (View for "vacation_report" only) User is allowed to access work_location (View for "work_location" only) User is allowed to create daily_record (Create for "daily_record" only) User is allowed to create doc (Create for "doc" only) User is allowed to create ext_tracker_state (Create for "ext_tracker_state" only) User is allowed to create file (Create for "file" only) User is allowed to create issue (Create for "issue" only) User is allowed to create it_issue (Create for "it_issue" only) User is allowed to create leave_submission (Create for "leave_submission" only) User is allowed to create msg (Create for "msg" only) User is allowed to create queries (Create for "query" only) User is allowed to create support (Create for "support" only) User is allowed to create time_record (Create for "time_record" only) User is allowed to create time_wp (Create for "time_wp" only) User is allowed to edit (some of) their own user details (Edit for "user": ('csv_delimiter', 'hide_message_files', 'lunch_duration', 'lunch_start', 'password', 'queries', 'realname', 'room', 'subst_active', 'substitute', 'timezone', 'tt_lines') only) User is allowed to edit category if he is responsible for it (Edit for "category": ('nosy', 'default_part_of') only) User is allowed to edit doc (Edit for "doc" only) User is allowed to edit ext_tracker_state (Edit for "ext_tracker_state" only) User is allowed to edit if he's the owner of the contact (Edit for "user_contact": ('visible',) only) User is allowed to edit several fields if he is Responsible for an it_issue (Edit for "it_issue": ('responsible',) only) User is allowed to edit several fields if he is Stakeholder/Responsible for an it_issue (Edit for "it_issue": ('deadline', 'status', 'title') only) User is allowed to edit their queries (Edit for "query" only) User is allowed to edit time category if the status is "Open" and he is responsible for the time category (Edit for "time_project": ('deputy', 'planned_effort', 'nosy') only) User is allowed to edit workpackage if he is time category owner or deputy (Edit for "time_wp": ('cost_center', 'is_public', 'name', 'responsible', 'time_wp_summary_no', 'wp_no') only) User is allowed to retire their queries (Retire for "query" only) User is allowed to search daily_record (Search for "daily_record" only) User is allowed to search for their own files (Search for "file" only) User is allowed to search for their own messages (Search for "msg" only) User is allowed to search for their queries (Search for "query" only) User is allowed to search issue (Search for "issue" only) User is allowed to search it_issue (Search for "it_issue" only) User is allowed to search it_project (Search for "it_project" only) User is allowed to search leave_submission (Search for "leave_submission" only) User is allowed to search support (Search for "support" only) User is allowed to search time_record (Search for "time_record" only) User is 
allowed to search time_wp (Search for "time_wp": ('activity', 'actor', 'auto_wp', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'is_extern', 'is_public', 'id', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only) User is allowed to search user_status (Search for "user": ('status',) only) User is allowed to see time record if he is allowed to see all details on work package or User may view a daily_record (and time_records that are attached to that daily_record) if the user owns the daily_record or has role 'HR' or 'Controlling', or the user is supervisor or substitute supervisor of the owner of the daily record (the supervisor relationship is transitive) or the user is the department manager of the owner of the daily record. If user has role HR-Org-Location and is in the same Org-Location as the record, it may also be seen (View for "time_record" only) User is allowed to view (some of) their own user details (View for "user": ('entry_date', 'planning_role') only) User is allowed to view contact if he's the owner of the contact or the contact is marked visible (View for "user_contact" only) User is allowed to view leave submission if he is the supervisor or the person to whom approvals are delegated (Edit for "leave_submission": ('status',) only) User is allowed to view leave submission if he is the supervisor or the person to whom approvals are delegated (View for "leave_submission" only) User is allowed to view selected fields in work package if booking is allowed for this user (also applies to timetracking by, supervisor and approval delegated) (View for "time_wp": ('activity', 'actor', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'id', 'is_extern', 'is_public', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only) User is allowed to view their own files (View for "file" only) User is allowed to view their own messages (View for "msg" only) User is allowed to view their own overtime information (View for "overtime_correction" only) User is allowed to view time record if he is the supervisor or the person to whom approvals are delegated (View for "time_record" only) User is allowed to view work package and time category names if he/she has role HR or HR-Org-Location (View for "time_project": ('name',) only) User is allowed to view work package and time category names if he/she has role HR or HR-Org-Location (View for "time_wp": ('name', 'project') only) User is allowed to view/edit workpackage if he is owner or project responsible/deputy (Edit for "time_wp": ('bookers', 'description', 'epic_key', 'planned_effort', 'time_end', 'time_start', 'time_wp_summary_no') only) User may access the rest interface (Rest Access) User may access the web interface (Web Access) User may access the xmlrpc interface (Xmlrpc Access) User may edit own leave submissions (Edit for "leave_submission": ('comment', 'comment_cancel', 'first_day', 'last_day', 'status', 'time_wp', 'user') only) User may edit own leave submissions (View for "leave_submission": ('comment', 'comment_cancel', 'first_day', 'last_day', 'status', 'time_wp', 'user') only) User may see time report if reponsible or deputy of time project or on nosy list of time project (View for "time_report" only) User may use the email interface (Email Access) User may view a daily_record (and time_records that are attached to 
that daily_record) if the user owns the daily_record or has role 'HR' or 'Controlling', or the user is supervisor or substitute supervisor of the owner of the daily record (the supervisor relationship is transitive) or the user is the department manager of the owner of the daily record. If user has role HR-Org-Location and is in the same Org-Location as the record, it may also be seen (View for "daily_record" only) User may view their own user functional role (View for "user_functional_role" only) User may view time category if user is owner or deputy of time category or on nosy list of time category or if user is department manager of time category (View for "time_project" only) User may view work package if responsible for it, if user is owner or deputy of time category or on nosy list of time category or if user is department manager of time category (View for "time_wp" only) User or Timetracking by user may edit time_records owned by user (Edit for "time_record" only) User or Timetracking by user may edit time_records owned by user (Restore for "time_record" only) User or Timetracking by user may edit time_records owned by user (Retire for "time_record" only) User or Timetracking by user may edit time_records owned by user (View for "time_record" only) Users are allowed to view their own and public queries for classes where they have search permission (View for "query" only) Users may see daily record if they may see one of the time_records for that day (View for "daily_record" only) Role "user_view": User is allowed to access user (View for "user" only) Role "vacation-report": """.strip ()
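The role/permission listing above reads like an audit dump of a Roundup tracker's security setup. As a point of reference only — this is a hedged sketch, not the schema that produced the listing — grants such as 'User is allowed to access time_report (View for "time_report" only)' are usually declared in the tracker's schema.py roughly as follows. The class and role names are taken from the listing; everything else (and the `db` object, which Roundup provides to schema.py) is illustrative.

# Hedged sketch of how such grants are typically declared in a Roundup
# tracker's schema.py; not the actual schema behind the listing above.

# A role is created once ...
db.security.addRole(name='time-report', description='May see time reports')

# ... and permissions are then attached to it, one per grant line.
p = db.security.addPermission(
    name='View',
    klass='time_report',
    description='User is allowed to access time_report',
)
db.security.addPermissionToRole('time-report', p)

# Property-restricted grants (the "... only" lists) pass the affected properties:
p = db.security.addPermission(
    name='Edit',
    klass='time_wp',
    properties=('bookers', 'description', 'planned_effort'),
    description='User may edit selected work package fields',
)
db.security.addPermissionToRole('user', p)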
83.063574
690
0.762034
0
0
0
0
0
0
0
0
48,322
0.999566
be004417db97934b47985fcf6b9c727896247c48
220
py
Python
CodeChef/problems/IMDB/main.py
object-oriented-human/competitive
9e761020e887d8980a39a64eeaeaa39af0ecd777
[ "MIT" ]
1
2022-02-21T15:43:01.000Z
2022-02-21T15:43:01.000Z
CodeChef/problems/IMDB/main.py
foooop/competitive
9e761020e887d8980a39a64eeaeaa39af0ecd777
[ "MIT" ]
null
null
null
CodeChef/problems/IMDB/main.py
foooop/competitive
9e761020e887d8980a39a64eeaeaa39af0ecd777
[ "MIT" ]
null
null
null
tc = int(input())
while tc:
    tc -= 1
    best = 0
    n, x = map(int, input().split())
    for i in range(n):
        s, r = map(int, input().split())
        if x >= s:
            best = max(best, r)
    print(best)
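A worked example of what the loop computes; the input below is invented from the code alone, not taken from the CodeChef problem statement. With x = 2, only pairs whose first value s satisfies s <= x are considered, and the largest second value r among them is printed.

Example stdin:

    1
    3 2
    1 2
    2 7
    5 10

Expected stdout:

    7

(the first two pairs have s <= 2; the larger of their r values is 7)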
22
40
0.445455
0
0
0
0
0
0
0
0
0
0
be0099fd02ee40c6a15038fa8158d18b025dd23d
3,218
py
Python
tests/test_sqlite_wrapper.py
Privex/python-db
3b46b34b4310973e2e2a30a66adaa853fd10340d
[ "X11" ]
1
2019-12-19T13:12:53.000Z
2019-12-19T13:12:53.000Z
tests/test_sqlite_wrapper.py
Privex/python-db
3b46b34b4310973e2e2a30a66adaa853fd10340d
[ "X11" ]
9
2020-02-24T20:14:53.000Z
2021-04-30T21:51:04.000Z
tests/test_sqlite_wrapper.py
Privex/python-db
3b46b34b4310973e2e2a30a66adaa853fd10340d
[ "X11" ]
null
null
null
""" Tests related to :class:`.SqliteWrapper` / :class:`.ExampleWrapper` """ # from unittest import TestCase from tests.base import * class TestSQLiteWrapper(PrivexDBTestBase): def test_tables_created(self): w = self.wrp self.assertEqual(w.db, ':memory:') tables = w.list_tables() self.assertIn('users', tables) self.assertIn('items', tables) def test_tables_drop(self): w = self.wrp tables = w.list_tables() self.assertIn('users', tables) self.assertIn('items', tables) w.drop_schemas() tables = w.list_tables() self.assertNotIn('users', tables) self.assertNotIn('items', tables) def test_insert_find_user(self): w = self.wrp w.query_mode = 'flat' res = w.insert_user('John', 'Doe') self.assertEqual(res.rowcount, 1) user = w.find_user(res.lastrowid) self.assertEqual(user[1], 'John') self.assertEqual(user[2], 'Doe') def test_action_update(self): w = self.wrp w.query_mode = 'dict' res = w.insert_user('John', 'Doe') last_id = res.lastrowid rows = w.action("UPDATE users SET last_name = ? WHERE first_name = ?", ['Smith', 'John']) self.assertEqual(rows, 1) john = w.find_user(last_id) self.assertEqual(john['last_name'], 'Smith') def test_find_user_dict_mode(self): w = self.wrp w.query_mode = 'dict' res = w.insert_user('John', 'Doe') self.assertEqual(res.rowcount, 1) user = w.find_user(res.lastrowid) self.assertEqual(user['first_name'], 'John') self.assertEqual(user['last_name'], 'Doe') def test_find_user_nonexistent(self): w = self.wrp user = w.find_user(99) self.assertIsNone(user) def test_get_users_tuple(self): w = self.wrp w.query_mode = 'flat' w.insert_user('John', 'Doe') w.insert_user('Jane', 'Doe') w.insert_user('Dave', 'Johnson') users = list(w.get_users()) self.assertEqual(len(users), 3) self.assertEqual(users[0][1], 'John') self.assertEqual(users[1][1], 'Jane') self.assertEqual(users[1][2], 'Doe') self.assertEqual(users[2][2], 'Johnson') def test_get_users_dict(self): w = self.wrp w.query_mode = 'dict' w.insert_user('John', 'Doe') w.insert_user('Jane', 'Doe') w.insert_user('Dave', 'Johnson') users = list(w.get_users()) self.assertEqual(len(users), 3) self.assertEqual(users[0]['first_name'], 'John') self.assertEqual(users[1]['first_name'], 'Jane') self.assertEqual(users[1]['last_name'], 'Doe') self.assertEqual(users[2]['last_name'], 'Johnson') def test_insert_helper(self): w = self.wrp w.query_mode = 'dict' res = w.insert('users', first_name='Dave', last_name='Johnson') self.assertEqual(res.lastrowid, 1) user = w.find_user(res.lastrowid) self.assertEqual(user['first_name'], 'Dave') self.assertEqual(user['last_name'], 'Johnson')
31.242718
97
0.579863
3,082
0.957738
0
0
0
0
0
0
588
0.182722
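The sqlite wrapper tests in the record above drive an object exposed as self.wrp. For orientation only, here is a minimal stand-in written against the standard-library sqlite3 module. It is an illustrative sketch, not Privex's actual SqliteWrapper/ExampleWrapper, and it covers only some of the surface the assertions touch (query_mode switching between tuple and dict rows, insert_user, find_user, get_users, action).

import sqlite3


class ExampleWrapperStandIn:
    """Illustrative stand-in only; not the privex-db implementation."""

    def __init__(self, db=':memory:'):
        self.db = db
        self.query_mode = 'flat'   # 'flat' -> plain tuples, 'dict' -> mapping-style rows
        self.conn = sqlite3.connect(db)
        self.conn.execute(
            "CREATE TABLE IF NOT EXISTS users "
            "(id INTEGER PRIMARY KEY AUTOINCREMENT, first_name TEXT, last_name TEXT)"
        )

    def _apply_row_factory(self):
        # sqlite3.Row supports both index and key access, which covers 'dict' mode.
        self.conn.row_factory = sqlite3.Row if self.query_mode == 'dict' else None

    def insert_user(self, first_name, last_name):
        cur = self.conn.execute(
            "INSERT INTO users (first_name, last_name) VALUES (?, ?)",
            [first_name, last_name],
        )
        self.conn.commit()
        return cur                 # exposes .rowcount and .lastrowid, as the tests expect

    def find_user(self, user_id):
        self._apply_row_factory()
        cur = self.conn.execute("SELECT * FROM users WHERE id = ?", [user_id])
        return cur.fetchone()      # None when the id does not exist

    def get_users(self):
        self._apply_row_factory()
        return self.conn.execute("SELECT * FROM users ORDER BY id")

    def action(self, query, params=()):
        cur = self.conn.execute(query, params)
        self.conn.commit()
        return cur.rowcount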
be00d24937df6595d3c59f1ae767515161b8f7ef
5,320
py
Python
var/spack/repos/builtin/packages/strumpack/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
9
2018-04-18T07:51:40.000Z
2021-09-10T03:56:57.000Z
var/spack/repos/builtin/packages/strumpack/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
907
2018-04-18T11:17:57.000Z
2022-03-31T13:20:25.000Z
var/spack/repos/builtin/packages/strumpack/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
29
2018-11-05T16:14:23.000Z
2022-02-03T16:07:09.000Z
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Strumpack(CMakePackage, CudaPackage): """STRUMPACK -- STRUctured Matrix PACKage - provides linear solvers for sparse matrices and for dense rank-structured matrices, i.e., matrices that exhibit some kind of low-rank property. It provides a distributed memory fully algebraic sparse solver and preconditioner. The preconditioner is mostly aimed at large sparse linear systems which result from the discretization of a partial differential equation, but is not limited to any particular type of problem. STRUMPACK also provides preconditioned GMRES and BiCGStab iterative solvers.""" homepage = "http://portal.nersc.gov/project/sparse/strumpack" url = "https://github.com/pghysels/STRUMPACK/archive/v4.0.0.tar.gz" git = "https://github.com/pghysels/STRUMPACK.git" maintainers = ['pghysels'] version('master', branch='master') version('5.0.0', sha256='bdfd1620ff7158d96055059be04ee49466ebaca8213a2fdab33e2d4571019a49') version('4.0.0', sha256='a3629f1f139865c74916f8f69318f53af6319e7f8ec54e85c16466fd7d256938') version('3.3.0', sha256='499fd3b58656b4b6495496920e5372895861ebf15328be8a7a9354e06c734bc7') version('3.2.0', sha256='34d93e1b2a3b8908ef89804b7e08c5a884cbbc0b2c9f139061627c0d2de282c1') version('3.1.1', sha256='c1c3446ee023f7b24baa97b24907735e89ce4ae9f5ef516645dfe390165d1778') variant('shared', default=False, description='Build shared libraries') variant('mpi', default=True, description='Use MPI') variant('openmp', default=True, description='Enable thread parallellism via tasking with OpenMP') variant('cuda', default=True, description='Enable CUDA support') variant('parmetis', default=True, description='Enable use of ParMetis') variant('scotch', default=False, description='Enable use of Scotch') variant('butterflypack', default=True, description='Enable use of ButterflyPACK') variant('zfp', default=True, description='Build with support for compression using ZFP') variant('c_interface', default=True, description='Enable C interface') variant('count_flops', default=False, description='Build with flop counters') variant('task_timers', default=False, description='Build with timers for internal routines') variant('build_dev_tests', default=False, description='Build developer test routines') variant('build_tests', default=False, description='Build test routines') # TODO: add a slate variant depends_on('[email protected]:', type='build') depends_on('mpi', when='+mpi') depends_on('blas') depends_on('lapack') depends_on('scalapack', when='+mpi') depends_on('metis') depends_on('parmetis', when='+parmetis') depends_on('scotch~metis', when='+scotch') depends_on('scotch~metis+mpi', when='+scotch+mpi') depends_on('[email protected]', when='@3.3.0:3.9.999 +butterflypack+mpi') depends_on('[email protected]:', when='@4.0.0: +butterflypack+mpi') depends_on('cuda', when='@4.0.0: +cuda') depends_on('zfp', when='+zfp') conflicts('+parmetis', when='~mpi') conflicts('+butterflypack', when='~mpi') conflicts('+butterflypack', when='@:3.2.0') conflicts('+cuda', when='@:3.9.999') conflicts('+zfp', when='@:3.9.999') patch('intel-19-compile.patch', when='@3.1.1') def cmake_args(self): spec = self.spec def on_off(varstr): return 'ON' if varstr in spec else 'OFF' args = [ '-DSTRUMPACK_USE_MPI=%s' % on_off('+mpi'), '-DSTRUMPACK_USE_OPENMP=%s' % on_off('+openmp'), '-DTPL_ENABLE_PARMETIS=%s' % on_off('+parmetis'), 
'-DTPL_ENABLE_SCOTCH=%s' % on_off('+scotch'), '-DTPL_ENABLE_BPACK=%s' % on_off('+butterflypack'), '-DSTRUMPACK_COUNT_FLOPS=%s' % on_off('+count_flops'), '-DSTRUMPACK_TASK_TIMERS=%s' % on_off('+task_timers'), '-DSTRUMPACK_DEV_TESTING=%s' % on_off('+build_dev_tests'), '-DSTRUMPACK_BUILD_TESTS=%s' % on_off('+build_tests'), '-DTPL_BLAS_LIBRARIES=%s' % spec['blas'].libs.joined(";"), '-DTPL_LAPACK_LIBRARIES=%s' % spec['lapack'].libs.joined(";"), '-DTPL_SCALAPACK_LIBRARIES=%s' % spec['scalapack']. libs.joined(";"), ] if spec.satisfies('@:3.9.999'): if '+mpi' in spec: args.extend([ '-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc, '-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx, '-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc ]) args.extend([ '-DSTRUMPACK_C_INTERFACE=%s' % on_off('+c_interface'), ]) if spec.satisfies('@4.0.0:'): args.extend([ '-DSTRUMPACK_USE_CUDA=%s' % on_off('+cuda') ]) args.extend([ '-DBUILD_SHARED_LIBS=%s' % on_off('+shared') ]) return args
42.56
95
0.638346
5,099
0.958459
0
0
0
0
0
0
2,950
0.554511
be011eb0f4bc43a928140f63592325792f0414b5
6,318
py
Python
actionserver/actions/action_feedbackform.py
Ajju2211/frendy-bot
b86a7a3cb3fb54b300ad9b870defb947f22dc146
[ "Apache-2.0" ]
null
null
null
actionserver/actions/action_feedbackform.py
Ajju2211/frendy-bot
b86a7a3cb3fb54b300ad9b870defb947f22dc146
[ "Apache-2.0" ]
null
null
null
actionserver/actions/action_feedbackform.py
Ajju2211/frendy-bot
b86a7a3cb3fb54b300ad9b870defb947f22dc146
[ "Apache-2.0" ]
null
null
null
from typing import Any, Text, Dict, List, Union from rasa_sdk import Action, Tracker from rasa_sdk.executor import CollectingDispatcher from rasa_sdk.forms import FormAction from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction # from rasa_core.events import (UserUtteranceReverted, UserUttered, # ActionExecuted, Event) from rasa_sdk.events import AllSlotsReset, SlotSet from rasa.core.constants import REQUESTED_SLOT from rasa.core.slots import Slot import pandas as pd import json from actionserver.utils import utilities as util from actionserver.controllers.faqs.faq import FAQ from actionserver.controllers.constants.orderForm import * import logging from actionserver.utils.utilities import INVALID_VALUE product_list = [] quant_list = [] # takes quantity from user logger = logging.getLogger(__name__) with open(r'./actionserver/custom_payload.json') as f: frendy_product_menu = json.load(f) # Code snippet for global back # return [Restarted(), UserUttered(text="/get_started", parse_data={ # "intent": {"confidence": 1.0, "name": "get_started"}, # "entities": [] # }), FollowupAction(name="utter_greet")] def query_back(dispatcher): dispatcher.utter_message("Going back to queries!!!") greet_utter = UserUttered(text="/greet", parse_data={ "intent": {"confidence": 1.0, "name": "greet"}, "entities": [] }) query_utter = UserUttered(text="/query_init", parse_data={ "intent": {"confidence": 1.0, "name": "query_init"}, "entities": [] }) return [ greet_utter, FollowupAction(name="utter_greet"), query_utter, FollowupAction(name="utter_query_type") ] def greet_back(dispatcher): dispatcher.utter_message("Going back!!!") dispatcher.utter_message(json_message = { "platform":"whatsapp", "payload":"text", "text":"Welcome back to Frendy Shopping" }); return [UserUttered(text="/greet", parse_data={ "intent": {"confidence": 1.0, "name": "greet"}, "entities": [] }), FollowupAction(name="utter_greet")] class FeedbackForm(FormAction): def name(self): return "feedback_form" @staticmethod def required_slots(tracker): if tracker.get_slot("rating"): return ["rating", "feedback_text"] else: return ["rating"] def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]: """A dictionary to map required slots to - an extracted entity - intent: value pairs - a whole message or a list of them, where a first match will be picked""" # return {"rating": [self.from_entity("rating"),self.from_entity("any_thing")],"feedback_text": [self.from_entity(entity="any_thing"),self.from_entity(entity="navigation")]} return {"rating": [self.from_entity("rating"), self.from_text()], "feedback_text": [self.from_text(), self.from_entity(entity="navigation")]} def validate_rating( self, value: Text, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> Dict[Text, Any]: ratings = ['1', '2', '3', '4', '5'] try: value = value.strip() if value == "back1" or value.lower() == "back": return {"rating": INVALID_VALUE, "feedback_text": INVALID_VALUE} # 1-5 it integer otherwise rating:None elif value in ratings: return {"rating": value, "feedback_text": None} else: dispatcher.utter_message("Please enter valid option.") dispatcher.utter_message(json_message = { "platform":"whatsapp", "payload":"text", "text":"Please enter valid option" }); return {"rating": None, "feedback_text": None} except Exception as e: print(e) dispatcher.utter_message("Please enter valid option.") dispatcher.utter_message(json_message = { "platform":"whatsapp", "payload":"text", "text":"Please enter valid option" }); return 
{"rating": None, "feedback_text": None} def validate_feedback_text( self, value: Text, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> Dict[Text, Any]: if value == "back2" or value.lower() == "back": return {"rating": None, "feedback_text": None} else: return {"feedback_text": value} def submit( self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any], ) -> List[Dict]: if tracker.get_slot("rating") != INVALID_VALUE: with open("./actionserver/customer_queries.json", "r") as queriesRef: rating = tracker.get_slot("rating") feedback = tracker.get_slot("feedback_text") feedbackObj = json.load(queriesRef) feedbackObj["feedback"].append({ "createdOn": util.timestamp(), "complaint_area": rating, "complaint": feedback }) with open("./actionserver/customer_queries.json", "w") as queriesRefWrite: json.dump(feedbackObj, queriesRefWrite, indent=4) dispatcher.utter_message("Your Response :\n Rating :'{rate}' star \n Feedback: '{feedbk}' \n Submitted!Thank You!".format( rate=rating, feedbk=feedback)) dispatcher.utter_message(json_message = { "platform":"whatsapp", "payload":"text", "text":"Your Response :\n Rating :'{rate}' star \n Feedback: '{feedbk}' \n Submitted!Thank You!".format( rate=rating, feedbk=feedback) }); else: dispatcher.utter_message("Feedback form closed") li = [SlotSet("rating", None), SlotSet("feedback_text", None)] li.extend(query_back(dispatcher)) return li return [SlotSet("rating", None), SlotSet("feedback_text", None)]
37.832335
181
0.597341
4,166
0.659386
0
0
176
0.027857
0
0
2,047
0.323995
be01c82117aa2911b241e39136b462d24502c315
793
py
Python
dash/graphs.py
fuzzylabs/wearable-my-foot
5e7d818fc51a3d3babbe1c0ec49450b1a1f030c6
[ "Apache-2.0" ]
5
2020-09-04T13:49:41.000Z
2021-07-30T02:33:49.000Z
dash/graphs.py
archena/wearable-my-foot
5e7d818fc51a3d3babbe1c0ec49450b1a1f030c6
[ "Apache-2.0" ]
2
2020-09-24T07:55:43.000Z
2020-09-24T09:30:19.000Z
dash/graphs.py
archena/wearable-my-foot
5e7d818fc51a3d3babbe1c0ec49450b1a1f030c6
[ "Apache-2.0" ]
1
2021-03-04T03:18:37.000Z
2021-03-04T03:18:37.000Z
import plotly.graph_objs as go


class GraphsHelper:
    template = "plotly_dark"

    '''
    Generate a plot for a timeseries
    '''
    def generate_timeseries_plot(self, dataframe):
        pressure_plots = []
        for sensor in ["p1", "p2", "p3"]:
            series = dataframe[sensor]
            scatter = go.Scatter(x = dataframe.index,
                                 y = series,
                                 name = f"Sensor {sensor}",
                                 opacity = 0.4)
            pressure_plots.append(scatter)

        pressure_figure = go.Figure(
            data = pressure_plots,
            layout = go.Layout(
                title = "Pressure timeseries",
                template = self.template
            )
        )
        return pressure_figure
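A short usage sketch follows. The column names p1, p2 and p3 come from the loop above; the import path and the synthetic data are assumptions.

import numpy as np
import pandas as pd

from graphs import GraphsHelper   # assumed import path within the dash/ folder

# Synthetic pressure readings: one column per sensor, indexed by time.
index = pd.date_range("2020-09-01", periods=120, freq="s")
frame = pd.DataFrame(
    {sensor: np.random.rand(len(index)) for sensor in ["p1", "p2", "p3"]},
    index=index,
)

figure = GraphsHelper().generate_timeseries_plot(frame)
figure.show()   # renders the plotly figure in a browser or notebook cell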
29.37037
59
0.493064
760
0.958386
0
0
0
0
0
0
112
0.141236
be01e27689f95fbc7033b6a5da2ab015674dada0
2,909
py
Python
azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
1
2021-09-07T18:36:04.000Z
2021-09-07T18:36:04.000Z
azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
2
2019-10-02T23:37:38.000Z
2020-10-02T01:17:31.000Z
azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
1
2019-06-17T22:18:23.000Z
2019-06-17T22:18:23.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .resource import Resource


class AppServiceCertificateResource(Resource):
    """Key Vault container ARM resource for a certificate that is purchased
    through Azure.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :param location: Required. Resource Location.
    :type location: str
    :ivar type: Resource type.
    :vartype type: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param key_vault_id: Key Vault resource Id.
    :type key_vault_id: str
    :param key_vault_secret_name: Key Vault secret name.
    :type key_vault_secret_name: str
    :ivar provisioning_state: Status of the Key Vault secret. Possible values
     include: 'Initialized', 'WaitingOnCertificateOrder', 'Succeeded',
     'CertificateOrderFailed', 'OperationNotPermittedOnKeyVault',
     'AzureServiceUnauthorizedToAccessKeyVault', 'KeyVaultDoesNotExist',
     'KeyVaultSecretDoesNotExist', 'UnknownError', 'ExternalPrivateKey',
     'Unknown'
    :vartype provisioning_state: str or
     ~azure.mgmt.web.models.KeyVaultSecretStatus
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'location': {'required': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
        'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'KeyVaultSecretStatus'},
    }

    def __init__(self, **kwargs):
        super(AppServiceCertificateResource, self).__init__(**kwargs)
        self.key_vault_id = kwargs.get('key_vault_id', None)
        self.key_vault_secret_name = kwargs.get('key_vault_secret_name', None)
        self.provisioning_state = None
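A hedged construction example. The ARM resource id and the secret name are placeholders, and the kind/location/tags keywords assume the usual msrest Resource base class imported above.

cert = AppServiceCertificateResource(
    location="westeurope",          # the only required parameter
    kind="certificate",
    tags={"env": "test"},
    key_vault_id=(
        "/subscriptions/<sub-id>/resourceGroups/<rg>"
        "/providers/Microsoft.KeyVault/vaults/<vault-name>"   # placeholder ARM id
    ),
    key_vault_secret_name="my-cert-secret",                   # placeholder secret name
)

# Read-only properties (id, name, type, provisioning_state) are populated by
# the service; on a freshly constructed model they are simply None.
assert cert.provisioning_state is None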
38.786667
102
0.625645
2,401
0.82537
0
0
0
0
0
0
2,229
0.766243
be0243ad78899348119ce102fbea0418e12871e2
5,379
py
Python
telethon/tl/functions/stickers.py
polisitni1/DogeClickBot
ac57eaeefca2c6ab9e48458f9f928a6a421a162e
[ "MIT" ]
null
null
null
telethon/tl/functions/stickers.py
polisitni1/DogeClickBot
ac57eaeefca2c6ab9e48458f9f928a6a421a162e
[ "MIT" ]
null
null
null
telethon/tl/functions/stickers.py
polisitni1/DogeClickBot
ac57eaeefca2c6ab9e48458f9f928a6a421a162e
[ "MIT" ]
null
null
null
"""File generated by TLObjects' generator. All changes will be ERASED""" from ...tl.tlobject import TLRequest from typing import Optional, List, Union, TYPE_CHECKING import os import struct if TYPE_CHECKING: from ...tl.types import TypeInputStickerSet, TypeInputUser, TypeInputStickerSetItem, TypeInputDocument class AddStickerToSetRequest(TLRequest): CONSTRUCTOR_ID = 0x8653febe SUBCLASS_OF_ID = 0x9b704a5a def __init__(self, stickerset, sticker): """ :param TypeInputStickerSet stickerset: :param TypeInputStickerSetItem sticker: :returns messages.StickerSet: Instance of StickerSet. """ self.stickerset = stickerset # type: TypeInputStickerSet self.sticker = sticker # type: TypeInputStickerSetItem def to_dict(self): return { '_': 'AddStickerToSetRequest', 'stickerset': None if self.stickerset is None else self.stickerset.to_dict(), 'sticker': None if self.sticker is None else self.sticker.to_dict() } def __bytes__(self): return b''.join(( b'\xbe\xfeS\x86', bytes(self.stickerset), bytes(self.sticker), )) @classmethod def from_reader(cls, reader): _stickerset = reader.tgread_object() _sticker = reader.tgread_object() return cls(stickerset=_stickerset, sticker=_sticker) class ChangeStickerPositionRequest(TLRequest): CONSTRUCTOR_ID = 0xffb6d4ca SUBCLASS_OF_ID = 0x9b704a5a def __init__(self, sticker, position): """ :param TypeInputDocument sticker: :param int position: :returns messages.StickerSet: Instance of StickerSet. """ self.sticker = sticker # type: TypeInputDocument self.position = position # type: int def to_dict(self): return { '_': 'ChangeStickerPositionRequest', 'sticker': None if self.sticker is None else self.sticker.to_dict(), 'position': self.position } def __bytes__(self): return b''.join(( b'\xca\xd4\xb6\xff', bytes(self.sticker), struct.pack('<i', self.position), )) @classmethod def from_reader(cls, reader): _sticker = reader.tgread_object() _position = reader.read_int() return cls(sticker=_sticker, position=_position) class CreateStickerSetRequest(TLRequest): CONSTRUCTOR_ID = 0x9bd86e6a SUBCLASS_OF_ID = 0x9b704a5a def __init__(self, user_id, title, short_name, stickers, masks=None): """ :param TypeInputUser user_id: :param str title: :param str short_name: :param List[TypeInputStickerSetItem] stickers: :param Optional[bool] masks: :returns messages.StickerSet: Instance of StickerSet. 
""" self.user_id = user_id # type: TypeInputUser self.title = title # type: str self.short_name = short_name # type: str self.stickers = stickers # type: List[TypeInputStickerSetItem] self.masks = masks # type: Optional[bool] async def resolve(self, client, utils): self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id)) def to_dict(self): return { '_': 'CreateStickerSetRequest', 'user_id': None if self.user_id is None else self.user_id.to_dict(), 'title': self.title, 'short_name': self.short_name, 'stickers': [] if self.stickers is None else [None if x is None else x.to_dict() for x in self.stickers], 'masks': self.masks } def __bytes__(self): return b''.join(( b'jn\xd8\x9b', struct.pack('<I', (0 if self.masks is None or self.masks is False else 1)), bytes(self.user_id), self.serialize_bytes(self.title), self.serialize_bytes(self.short_name), b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.stickers)),b''.join(bytes(x) for x in self.stickers), )) @classmethod def from_reader(cls, reader): flags = reader.read_int() _masks = bool(flags & 1) _user_id = reader.tgread_object() _title = reader.tgread_string() _short_name = reader.tgread_string() reader.read_int() _stickers = [] for _ in range(reader.read_int()): _x = reader.tgread_object() _stickers.append(_x) return cls(user_id=_user_id, title=_title, short_name=_short_name, stickers=_stickers, masks=_masks) class RemoveStickerFromSetRequest(TLRequest): CONSTRUCTOR_ID = 0xf7760f51 SUBCLASS_OF_ID = 0x9b704a5a def __init__(self, sticker): """ :param TypeInputDocument sticker: :returns messages.StickerSet: Instance of StickerSet. """ self.sticker = sticker # type: TypeInputDocument def to_dict(self): return { '_': 'RemoveStickerFromSetRequest', 'sticker': None if self.sticker is None else self.sticker.to_dict() } def __bytes__(self): return b''.join(( b'Q\x0fv\xf7', bytes(self.sticker), )) @classmethod def from_reader(cls, reader): _sticker = reader.tgread_object() return cls(sticker=_sticker)
31.641176
117
0.622421
5,050
0.938836
0
0
1,018
0.189255
128
0.023796
1,321
0.245585
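The generated request classes in the record above are normally sent through a Telethon client. A hedged usage sketch follows: the api_id/api_hash, title and short name are placeholders, it assumes the most recent item in Saved Messages is a sticker whose document can be reused, and `await client(request)` is Telethon's usual way of invoking raw TL functions.

import asyncio

from telethon import TelegramClient, utils
from telethon.tl.functions.stickers import CreateStickerSetRequest
from telethon.tl.types import InputStickerSetItem

API_ID, API_HASH = 12345, "0123456789abcdef0123456789abcdef"   # placeholders


async def main():
    async with TelegramClient("stickers-demo", API_ID, API_HASH) as client:
        # Assumption: the newest Saved Messages item is a sticker, so its
        # document can be reused for the new pack.
        last = (await client.get_messages("me", limit=1))[0]
        document = utils.get_input_document(last.media.document)

        # Raw TL requests are sent by calling the client with the request object;
        # CreateStickerSetRequest.resolve() turns "me" into an InputUser first.
        sticker_set = await client(CreateStickerSetRequest(
            user_id="me",
            title="Demo pack",                  # placeholder title
            short_name="demo_pack_by_somebot",  # placeholder short name
            stickers=[InputStickerSetItem(document=document, emoji="😀")],
        ))
        print(sticker_set.set.title)


asyncio.run(main())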
be035d1ced1e70706ec7a59e81ecf6539a9f044b
3,960
py
Python
applications/ChimeraApplication/tests/chimera_analysis_base_test.py
lkusch/Kratos
e8072d8e24ab6f312765185b19d439f01ab7b27b
[ "BSD-4-Clause" ]
778
2017-01-27T16:29:17.000Z
2022-03-30T03:01:51.000Z
applications/ChimeraApplication/tests/chimera_analysis_base_test.py
lkusch/Kratos
e8072d8e24ab6f312765185b19d439f01ab7b27b
[ "BSD-4-Clause" ]
6,634
2017-01-15T22:56:13.000Z
2022-03-31T15:03:36.000Z
applications/ChimeraApplication/tests/chimera_analysis_base_test.py
lkusch/Kratos
e8072d8e24ab6f312765185b19d439f01ab7b27b
[ "BSD-4-Clause" ]
224
2017-02-07T14:12:49.000Z
2022-03-06T23:09:34.000Z
import KratosMultiphysics import KratosMultiphysics.KratosUnittest as UnitTest import KratosMultiphysics.ChimeraApplication from KratosMultiphysics.ChimeraApplication.fluid_chimera_analysis import FluidChimeraAnalysis class ChimeraAnalysisBaseTest(UnitTest.TestCase): def setUp(self): # Set to true to get post-process files for the test self.print_output = False def _run_test(self,settings_file_name): model = KratosMultiphysics.Model() with open(settings_file_name,'r') as settings_file: settings = KratosMultiphysics.Parameters(settings_file.read()) # to check the results: add output settings block if needed if self.print_output: settings.AddValue("output_processes", KratosMultiphysics.Parameters(r'''{ "vtk_output" : [{ "python_module" : "vtk_output_process", "kratos_module" : "KratosMultiphysics", "process_name" : "VtkOutputProcess", "help" : "This process writes postprocessing files for Paraview", "Parameters" : { "model_part_name" : "FluidModelPart.Parts_background_surface", "output_control_type" : "step", "output_frequency" : 1, "file_format" : "ascii", "output_precision" : 3, "output_sub_model_parts" : false, "write_deformed_configuration" : true, "folder_name" : "test_vtk_output", "save_output_files_in_folder" : true, "nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"], "nodal_data_value_variables" : [], "element_flags" : ["ACTIVE"], "nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"], "element_data_value_variables" : [], "condition_data_value_variables" : [] } },{ "python_module" : "vtk_output_process", "kratos_module" : "KratosMultiphysics", "process_name" : "VtkOutputProcess", "help" : "This process writes postprocessing files for Paraview", "Parameters" : { "model_part_name" : "FluidModelPart.Parts_patch_surface", "output_control_type" : "step", "output_frequency" : 1, "file_format" : "ascii", "output_precision" : 3, "output_sub_model_parts" : false, "write_deformed_configuration" : true, "folder_name" : "test_vtk_output", "save_output_files_in_folder" : true, "nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"], "nodal_data_value_variables" : [], "element_flags" : ["ACTIVE"], "nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"], "element_data_value_variables" : [], "condition_data_value_variables" : [] } }] }''')) analysis = FluidChimeraAnalysis(model,settings) analysis.Run()
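A hedged example of a concrete test built on this base class. The class name, the import style and the parameters file name are illustrative, not files shipped with the ChimeraApplication tests.

import KratosMultiphysics.KratosUnittest as UnitTest

from chimera_analysis_base_test import ChimeraAnalysisBaseTest   # assumes the same test folder


class FlowOverCylinderChimeraTest(ChimeraAnalysisBaseTest):
    def test_monolithic_background_and_patch(self):
        # The JSON file name is hypothetical; real tests point at their own
        # ProjectParameters file with the solver and chimera settings.
        self._run_test("flow_over_cylinder_chimera_parameters.json")


if __name__ == "__main__":
    UnitTest.main()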
58.235294
114
0.474495
3,740
0.944444
0
0
0
0
0
0
3,222
0.813636
be045e37a15278ad4b76fd0b0f607b024e9f6bee
925
py
Python
parsers/rss10.py
side-beach-city/SBCLinkCopyTool
12ec16eefddac215e6a2be92464fde75677c8548
[ "Apache-2.0" ]
null
null
null
parsers/rss10.py
side-beach-city/SBCLinkCopyTool
12ec16eefddac215e6a2be92464fde75677c8548
[ "Apache-2.0" ]
2
2021-06-28T01:52:31.000Z
2021-06-28T02:21:18.000Z
parsers/rss10.py
side-beach-city/SBCLinkCopyTool
12ec16eefddac215e6a2be92464fde75677c8548
[ "Apache-2.0" ]
null
null
null
import urllib.request
import xml.etree.ElementTree


class RSS10Parser:
    def __init__(self, url: str) -> None:
        self.url = url

    def getlist(self) -> list[dict[str, str]]:
        ENTRY = r"{http://www.w3.org/2005/Atom}"
        MEDIA = r"{http://search.yahoo.com/mrss/}"
        YOUTUBE = r"{http://www.youtube.com/xml/schemas/2015}"
        result = []
        with urllib.request.urlopen(self.url) as res:
            data = xml.etree.ElementTree.fromstring(res.read())
            for child in data.iter(f"{ENTRY}entry"):
                result.append({
                    "title": child.find(f"{ENTRY}title").text,
                    "link": child.find(f"{ENTRY}link").attrib["href"],
                    "description": child.find(f"{MEDIA}group").find(f"{MEDIA}description").text,
                })
        return result


if __name__ == "__main__":
    import pprint
    pprint.pprint(RSS10Parser("https://www.youtube.com/feeds/videos.xml?playlist_id=PLrPVslFukDQo7l5RCqAZtKDl6tUyMAFWH").getlist())
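One caveat worth noting: find() returns None when an entry lacks a title, link or media:group, so the dictionary construction above raises AttributeError on such feeds. Below is a hedged defensive variant of the extraction step; it uses only the standard library and is not taken from the original repository (the constant names are mine).

import xml.etree.ElementTree

ATOM = r"{http://www.w3.org/2005/Atom}"
MRSS = r"{http://search.yahoo.com/mrss/}"


def entry_to_dict(entry: xml.etree.ElementTree.Element) -> dict[str, str]:
    """Build the same dict as getlist(), tolerating missing child elements."""
    link = entry.find(f"{ATOM}link")
    group = entry.find(f"{MRSS}group")
    return {
        # findtext() returns the default instead of raising when a tag is absent.
        "title": entry.findtext(f"{ATOM}title", default=""),
        "link": link.attrib.get("href", "") if link is not None else "",
        "description": (group.findtext(f"{MRSS}description", default="")
                        if group is not None else ""),
    }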
37
129
0.655135
699
0.755676
0
0
0
0
0
0
321
0.347027
be04a0613039c84ca76bcc0ca57e9da1601cdaf5
403
py
Python
examples/laser.py
MPI-IS/reactive_pepper
079f9b0627bfd6c9e3f2a4466c95ad662002a600
[ "BSD-3-Clause" ]
null
null
null
examples/laser.py
MPI-IS/reactive_pepper
079f9b0627bfd6c9e3f2a4466c95ad662002a600
[ "BSD-3-Clause" ]
null
null
null
examples/laser.py
MPI-IS/reactive_pepper
079f9b0627bfd6c9e3f2a4466c95ad662002a600
[ "BSD-3-Clause" ]
null
null
null
import math,time,random

import pepper_interface

IP = "192.168.0.147"
PORT = 9559
simulation = False

with pepper_interface.get(IP,PORT,simulation) as pepper:

    time.sleep(1.0)

    values,time_stamp = pepper.laser.get()

    print
    print "Front"
    print values["Front"]
    print
    print "Left"
    print values["Left"]
    print
    print "Right"
    print values["Right"]
    print
14.392857
56
0.647643
0
0
0
0
0
0
0
0
55
0.136476
be04c82cd5f62929d01752841a8ec17a1254d468
291
py
Python
exercises/pt/exc_01_03_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
2,085
2019-04-17T13:10:40.000Z
2022-03-30T21:51:46.000Z
exercises/pt/exc_01_03_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
79
2019-04-18T14:42:55.000Z
2022-03-07T08:15:43.000Z
exercises/pt/exc_01_03_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
361
2019-04-17T13:34:32.000Z
2022-03-28T04:42:45.000Z
# Import the English language class and create an nlp object
from ____ import ____

nlp = ____

# Process the text
doc = ____("I like tree kangaroos and narwhals.")

# Select the first token
first_token = doc[____]

# Print the text of the first token
print(first_token.____)
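For reference, the completed version of this fill-in-the-blank exercise presumably looks like the following (standard spaCy API; the blanks above are the exercise's own and are left in place).

# Import the English language class and create an nlp object
from spacy.lang.en import English

nlp = English()

# Process the text
doc = nlp("I like tree kangaroos and narwhals.")

# Select the first token
first_token = doc[0]

# Print the text of the first token
print(first_token.text)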
22.384615
69
0.75945
0
0
0
0
0
0
0
0
191
0.65411
be04f5e587c1b673bb12feefbad95d55e8558e6e
3,946
py
Python
tests/integration/mci/test_happy_path.py
qateam123/eq
704757952323647d659c49a71975c56406ff4047
[ "MIT" ]
null
null
null
tests/integration/mci/test_happy_path.py
qateam123/eq
704757952323647d659c49a71975c56406ff4047
[ "MIT" ]
8
2020-03-24T15:24:18.000Z
2022-03-02T04:32:56.000Z
tests/integration/mci/test_happy_path.py
qateam123/eq
704757952323647d659c49a71975c56406ff4047
[ "MIT" ]
null
null
null
from tests.integration.create_token import create_token from tests.integration.integration_test_case import IntegrationTestCase class TestHappyPath(IntegrationTestCase): def test_happy_path_203(self): self.happy_path('0203', '1') def test_happy_path_205(self): self.happy_path('0205', '1') def happy_path(self, form_type_id, eq_id): # Get a token token = create_token(form_type_id, eq_id) resp = self.client.get('/session?token=' + token.decode(), follow_redirects=True) self.assertEqual(resp.status_code, 200) # We are on the landing page content = resp.get_data(True) self.assertRegex(content, '<title>Introduction</title>') self.assertRegex(content, '>Start survey<') self.assertRegex(content, 'Monthly Business Survey - Retail Sales Index') # We proceed to the questionnaire post_data = { 'action[start_questionnaire]': 'Start Questionnaire' } resp = self.client.post('/questionnaire/' + eq_id + '/' + form_type_id + '/789/introduction', data=post_data, follow_redirects=False) self.assertEqual(resp.status_code, 302) block_one_url = resp.location resp = self.client.get(block_one_url, follow_redirects=False) self.assertEqual(resp.status_code, 200) # We are in the Questionnaire content = resp.get_data(True) self.assertRegex(content, '<title>Survey</title>') self.assertRegex(content, '>Monthly Business Survey - Retail Sales Index</') self.assertRegex(content, "What are the dates of the sales period you are reporting for?") self.assertRegex(content, ">Save and continue<") # check with have some guidance self.assertRegex(content, "alcoholic drink") # We fill in our answers form_data = { # Start Date "period-from-day": "01", "period-from-month": "4", "period-from-year": "2016", # End Date "period-to-day": "30", "period-to-month": "04", "period-to-year": "2016", # Total Turnover "total-retail-turnover": "100000", # User Action "action[save_continue]": "Save &amp; Continue" } # We submit the form resp = self.client.post(block_one_url, data=form_data, follow_redirects=False) self.assertEqual(resp.status_code, 302) # There are no validation errors self.assertRegex(resp.location, r'\/questionnaire\/1\/' + form_type_id + r'\/789\/summary$') summary_url = resp.location resp = self.client.get(summary_url, follow_redirects=False) self.assertEqual(resp.status_code, 200) # We are on the review answers page content = resp.get_data(True) self.assertRegex(content, '<title>Summary</title>') self.assertRegex(content, '>Monthly Business Survey - Retail Sales Index</') self.assertRegex(content, '>Your responses<') self.assertRegex(content, 'Please check carefully before submission.') self.assertRegex(content, '>Submit answers<') # We submit our answers post_data = { "action[submit_answers]": "Submit answers" } resp = self.client.post(summary_url, data=post_data, follow_redirects=False) self.assertEqual(resp.status_code, 302) self.assertRegex(resp.location, r'\/questionnaire\/1\/' + form_type_id + r'\/789\/thank-you$') resp = self.client.get(resp.location, follow_redirects=True) self.assertEqual(resp.status_code, 200) # We are on the thank you page content = resp.get_data(True) self.assertRegex(content, '<title>Submission Successful</title>') self.assertRegex(content, '(?s)Monthly Business Survey - Retail Sales Index.*?Monthly Business Survey - Retail Sales Index')
40.265306
141
0.639635
3,815
0.966802
0
0
0
0
0
0
1,352
0.342625
be0508937eb9d9d5130de65137f4cd2a7335c162
70,784
py
Python
src/transformers/models/hubert/modeling_tf_hubert.py
OllieBroadhurst/transformers
12428f0ef15bb3631e7a5f04672ddb05f363de97
[ "Apache-2.0" ]
1
2022-03-25T01:33:40.000Z
2022-03-25T01:33:40.000Z
src/transformers/models/hubert/modeling_tf_hubert.py
OllieBroadhurst/transformers
12428f0ef15bb3631e7a5f04672ddb05f363de97
[ "Apache-2.0" ]
1
2022-03-23T19:49:13.000Z
2022-03-23T19:49:13.000Z
src/transformers/models/hubert/modeling_tf_hubert.py
erichan1/transformers
12428f0ef15bb3631e7a5f04672ddb05f363de97
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorFlow Hubert model.""" import inspect import warnings from typing import Any, Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput from ...modeling_tf_utils import TFPreTrainedModel, booleans_processing, get_initializer, keras_serializable from ...tf_utils import shape_list from ...tokenization_utils_base import BatchEncoding from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_hubert import HubertConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "HubertConfig" TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/hubert-base-ls960", # See all Hubert models at https://huggingface.co/models?filter=hubert ] LARGE_NEGATIVE = -1e8 # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.input_values_processing def input_values_processing(func, config, input_values, **kwargs): """ Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input has to be named accordingly to the parameters name, i.e. `input_values = tf.keras.Input(shape=(128,), dtype='float32', name="input_values")` otherwise the order of the tensors will not be guaranteed during the training. Args: func (`callable`): The callable function of the TensorFlow model. config ([`PretrainedConfig`]): The config of the running model. **kwargs: The inputs of the model. Returns: Two lists, one for the missing layers, and another one for the unexpected layers. """ signature = dict(inspect.signature(func).parameters) signature.pop("kwargs", None) signature.pop("self", None) parameter_names = list(signature.keys()) output = {} allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray) for k, v in kwargs.items(): if isinstance(v, allowed_types) or v is None: output[k] = v else: raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.") if isinstance(input_values, (tuple, list)): for i, input in enumerate(input_values): # EagerTensors don't allow to use the .name property so we check for a real Tensor if type(input) == tf.Tensor: # Tensor names have always the pattern `name:id` then we check only the # `name` part tensor_name = input.name.split(":")[0] if tensor_name in parameter_names: output[tensor_name] = input else: output[parameter_names[i]] = input elif isinstance(input, allowed_types) or input is None: output[parameter_names[i]] = input else: raise ValueError( f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}." 
) elif isinstance(input_values, (dict, BatchEncoding)): if "inputs" in input_values: warnings.warn( "The `inputs` argument is deprecated and will be removed in a future version, use `input_values` instead.", FutureWarning, ) output["input_values"] = input_values.pop("inputs") if "decoder_cached_states" in input_values: warnings.warn( "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) output["past_key_values"] = input_values.pop("decoder_cached_states") for k, v in dict(input_values).items(): if isinstance(v, allowed_types) or v is None: output[k] = v elif k not in parameter_names and "args" not in parameter_names: logger.warning( f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored." ) continue else: raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.") else: if isinstance(input_values, tf.Tensor) or input_values is None: output[parameter_names[0]] = input_values else: raise ValueError( f"Data of type {type(input_values)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}." ) for name in parameter_names: if name not in list(output.keys()) and name != "args": output[name] = kwargs.pop(name, signature[name].default) # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs) # So to respect the proper output we have to add this exception if "args" in output: if output["args"] is not None and type(output["args"]) == tf.Tensor: tensor_name = output["args"].name.split(":")[0] output[tensor_name] = output["args"] else: # `args` in this case is always the first parameter, then `input_values` output["input_values"] = output["args"] del output["args"] if "kwargs" in output: del output["kwargs"] boolean_dict = { k: v for k, v in output.items() if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"] } output.update(booleans_processing(config=config, **boolean_dict)) return output # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._sample_without_replacement def _sample_without_replacement(distribution, num_samples): """ Categorical sampling without replacement is currently not implemented. 
The gumbel-max trick will do for now - see https://github.com/tensorflow/tensorflow/issues/9260 for more info """ z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1)) _, indices = tf.nn.top_k(distribution + z, num_samples) return indices # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._scatter_values_on_batch_indices def _scatter_values_on_batch_indices(values, batch_indices, output_shape): """ Scatter function as in PyTorch with indices in format (batch_dim, indixes) """ indices_shape = shape_list(batch_indices) # broadcast batch dim to indices_shape broad_casted_batch_dims = tf.reshape( tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1] ) # transform batch_indices to pair_indices pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)) # scatter values to pair indices return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, min_masks: int = 0, ) -> tf.Tensor: """ Computes random mask spans for a given shape Args: shape: the the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps attention_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_length: size of the mask min_masks: minimum number of masked spans Adapted from [fairseq's data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376). 
""" batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" ) # compute number of masked spans in batch num_masked_spans = int(mask_prob * sequence_length / mask_length + tf.random.uniform((1,))) num_masked_spans = max(num_masked_spans, min_masks) # make sure num masked indices <= sequence_length if num_masked_spans * mask_length > sequence_length: num_masked_spans = sequence_length // mask_length # SpecAugment mask to fill spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32) # uniform distribution to sample from, make sure that offset samples are < sequence_length uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1))) # get random indices to mask spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans) # expand masked indices to masked spans spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1) spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length)) spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length)) offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :] offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1)) offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length)) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # scatter indices to mask spec_aug_mask = _scatter_values_on_batch_indices( tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, spec_aug_mask.shape ) return spec_aug_mask # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNorm with Wav2Vec2->Hubert class TFHubertGroupNorm(tf.keras.layers.Layer): """ From tensorflow-addons https://www.tensorflow.org/addons/api_docs/python/tfa/layers/GroupNormalization """ def __init__( self, groups: int = 32, axis: int = -1, epsilon: float = 1e-3, center: bool = True, scale: bool = True, beta_initializer: tf.keras.initializers.Initializer = "zeros", gamma_initializer: tf.keras.initializers.Initializer = "ones", beta_regularizer: tf.keras.regularizers.Regularizer = None, gamma_regularizer: tf.keras.regularizers.Regularizer = None, beta_constraint: tf.keras.constraints.Constraint = None, gamma_constraint: tf.keras.constraints.Constraint = None, **kwargs, ): super().__init__(**kwargs) self.supports_masking = True self.groups = groups self.axis = axis self.epsilon = epsilon self.center = center self.scale = scale self.beta_initializer = tf.keras.initializers.get(beta_initializer) self.gamma_initializer = tf.keras.initializers.get(gamma_initializer) self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer) self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer) self.beta_constraint = tf.keras.constraints.get(beta_constraint) self.gamma_constraint = tf.keras.constraints.get(gamma_constraint) self._check_axis() def build(self, input_shape): self._check_if_input_shape_is_none(input_shape) self._set_number_of_groups_for_instance_norm(input_shape) self._check_size_of_dimensions(input_shape) self._create_input_spec(input_shape) self._add_gamma_weight(input_shape) self._add_beta_weight(input_shape) self.built = True super().build(input_shape) def call(self, inputs): input_shape = tf.keras.backend.int_shape(inputs) tensor_input_shape = tf.shape(inputs) reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape) normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape) is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: outputs = tf.reshape(normalized_inputs, tensor_input_shape) else: outputs = normalized_inputs return outputs def get_config(self): config = { "groups": self.groups, "axis": self.axis, "epsilon": self.epsilon, "center": self.center, "scale": self.scale, "beta_initializer": tf.keras.initializers.serialize(self.beta_initializer), "gamma_initializer": tf.keras.initializers.serialize(self.gamma_initializer), "beta_regularizer": tf.keras.regularizers.serialize(self.beta_regularizer), "gamma_regularizer": tf.keras.regularizers.serialize(self.gamma_regularizer), "beta_constraint": tf.keras.constraints.serialize(self.beta_constraint), "gamma_constraint": tf.keras.constraints.serialize(self.gamma_constraint), } base_config = super().get_config() return {**base_config, **config} def compute_output_shape(self, input_shape): return input_shape def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape): group_shape = [tensor_input_shape[i] for i in range(len(input_shape))] is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: group_shape[self.axis] = input_shape[self.axis] // self.groups group_shape.insert(self.axis, self.groups) group_shape = tf.stack(group_shape) 
reshaped_inputs = tf.reshape(inputs, group_shape) return reshaped_inputs, group_shape else: return inputs, group_shape def _apply_normalization(self, reshaped_inputs, input_shape): group_shape = tf.keras.backend.int_shape(reshaped_inputs) group_reduction_axes = list(range(1, len(group_shape))) is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: axis = -2 if self.axis == -1 else self.axis - 1 else: axis = -1 if self.axis == -1 else self.axis - 1 group_reduction_axes.pop(axis) mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True) gamma, beta = self._get_reshaped_weights(input_shape) normalized_inputs = tf.nn.batch_normalization( reshaped_inputs, mean=mean, variance=variance, scale=gamma, offset=beta, variance_epsilon=self.epsilon, ) return normalized_inputs def _get_reshaped_weights(self, input_shape): broadcast_shape = self._create_broadcast_shape(input_shape) gamma = None beta = None if self.scale: gamma = tf.reshape(self.gamma, broadcast_shape) if self.center: beta = tf.reshape(self.beta, broadcast_shape) return gamma, beta def _check_if_input_shape_is_none(self, input_shape): dim = input_shape[self.axis] if dim is None: raise ValueError( "Axis " + str(self.axis) + " of " "input tensor should have a defined dimension " "but the layer received an input with shape " + str(input_shape) + "." ) def _set_number_of_groups_for_instance_norm(self, input_shape): dim = input_shape[self.axis] if self.groups == -1: self.groups = dim def _check_size_of_dimensions(self, input_shape): dim = input_shape[self.axis] if dim < self.groups: raise ValueError( "Number of groups (" + str(self.groups) + ") cannot be " "more than the number of channels (" + str(dim) + ")." ) if dim % self.groups != 0: raise ValueError( "Number of groups (" + str(self.groups) + ") must be a " "multiple of the number of channels (" + str(dim) + ")." ) def _check_axis(self): if self.axis == 0: raise ValueError( "You are trying to normalize your batch axis. 
Do you want to " "use tf.layer.batch_normalization instead" ) def _create_input_spec(self, input_shape): dim = input_shape[self.axis] self.input_spec = tf.keras.layers.InputSpec(ndim=len(input_shape), axes={self.axis: dim}) def _add_gamma_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.scale: self.gamma = self.add_weight( shape=shape, name="gamma", initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint, ) else: self.gamma = None def _add_beta_weight(self, input_shape): dim = input_shape[self.axis] shape = (dim,) if self.center: self.beta = self.add_weight( shape=shape, name="beta", initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint, ) else: self.beta = None def _create_broadcast_shape(self, input_shape): broadcast_shape = [1] * len(input_shape) is_instance_norm = (input_shape[self.axis] // self.groups) == 1 if not is_instance_norm: broadcast_shape[self.axis] = input_shape[self.axis] // self.groups broadcast_shape.insert(self.axis, self.groups) else: broadcast_shape[self.axis] = self.groups return broadcast_shape # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2WeightNormConv1D with Wav2Vec2->Hubert class TFHubertWeightNormConv1D(tf.keras.layers.Conv1D): """Adapted from https://www.tensorflow.org/probability/api_docs/python/tfp/layers/weight_norm/WeightNorm""" def __init__(self, filters, kernel_size, groups, explicit_padding, **kwargs): super().__init__( filters=filters, kernel_size=kernel_size, groups=groups, padding="valid", use_bias=True, bias_initializer="he_normal", **kwargs, ) self.explicit_padding = explicit_padding self.filter_axis = 2 self.initialized = False self.kernel_norm_axes = tf.constant([0, 1]) def _init_norm(self): """Set the norm of the weight vector.""" kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.weight_v), axis=self.kernel_norm_axes)) self.weight_g.assign(kernel_norm[:, tf.newaxis, tf.newaxis]) def _normalize_kernel(self): """Generate normalized weights.""" kernel = tf.nn.l2_normalize(self.weight_v, axis=self.kernel_norm_axes) * tf.transpose(self.weight_g) self.kernel = tf.transpose(kernel) def build(self, input_shape): if not self.built: input_shape = input_shape.as_list() # Conv1D output shapes are checked at build time since TF 2.7, so we need to account for padding input_shape[-2] += self.explicit_padding * 2 super().build(input_shape) self.kernel = tf.Variable(tf.transpose(self.kernel), name="weight_v", trainable=True) self.weight_v = self.kernel self.weight_g = self.add_weight( name="weight_g", shape=(int(self.weight_v.shape[self.filter_axis]), 1, 1), initializer="ones", dtype=self.weight_v.dtype, trainable=True, ) self.bias = self.add_weight(name="bias", shape=(self.filters,), initializer="zeros", trainable=True) def call(self, inputs): if not self.initialized: self._init_norm() self.initialized = True self._normalize_kernel() padded_inputs = tf.pad(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0))) output = super().call(padded_inputs) return output # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert class TFHubertNoLayerNormConvLayer(tf.keras.layers.Layer): def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = tf.keras.layers.Conv1D( 
filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name="conv", ) self.activation = get_tf_activation(config.feat_extract_activation) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert class TFHubertLayerNormConvLayer(tf.keras.layers.Layer): def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = tf.keras.layers.Conv1D( filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name="conv", ) self.layer_norm = tf.keras.layers.LayerNormalization(name="layer_norm", epsilon=config.layer_norm_eps) self.activation = get_tf_activation(config.feat_extract_activation) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert class TFHubertGroupNormConvLayer(tf.keras.layers.Layer): def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None: super().__init__(**kwargs) self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = tf.keras.layers.Conv1D( filters=self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], strides=config.conv_stride[layer_id], use_bias=config.conv_bias, name="conv", ) self.activation = get_tf_activation(config.feat_extract_activation) self.layer_norm = TFHubertGroupNorm(groups=self.out_conv_dim, epsilon=config.layer_norm_eps, name="layer_norm") def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert class TFHubertPositionalConvEmbedding(tf.keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs: Any) -> None: super().__init__(**kwargs) self.conv = TFHubertWeightNormConv1D( filters=config.hidden_size, kernel_size=config.num_conv_pos_embeddings, groups=config.num_conv_pos_embedding_groups, explicit_padding=config.num_conv_pos_embeddings // 2, name="conv", ) self.padding = TFHubertSamePadLayer(config.num_conv_pos_embeddings) self.activation = get_tf_activation(config.feat_extract_activation) def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2SamePadLayer with Wav2Vec2->Hubert class TFHubertSamePadLayer(tf.keras.layers.Layer): def __init__(self, num_conv_pos_embeddings, **kwargs): super().__init__(**kwargs) self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def call(self, hidden_states): if self.num_pad_remove > 0: hidden_states = 
hidden_states[:, : -self.num_pad_remove, :] return hidden_states class TFHubertFeatureEncoder(tf.keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs: Any) -> None: super().__init__(**kwargs) if config.feat_extract_norm == "group": conv_layers = [TFHubertGroupNormConvLayer(config, layer_id=0, name=f"conv_layers.{0}")] + [ TFHubertNoLayerNormConvLayer(config, layer_id=i + 1, name=f"conv_layers.{i+1}") for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [ TFHubertLayerNormConvLayer(config, layer_id=i, name=f"conv_layers.{i}") for i in range(config.num_feat_extract_layers) ] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = conv_layers def call(self, input_values): hidden_states = tf.expand_dims(input_values, -1) for conv_layer in self.conv_layers: hidden_states = conv_layer(hidden_states) return hidden_states class TFHubertFeatureExtractor(TFHubertFeatureEncoder): def __init__(self, config, **kwargs): super().__init__(config, **kwargs) warnings.warn( f"The class `{self.__class__.__name__}` has been depreciated " "and will be removed in Transformers v5. " f"Use `{self.__class__.__bases__[0].__name__}` instead.", FutureWarning, ) class TFHubertFeatureProjection(tf.keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.projection = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="projection", ) self.dropout = tf.keras.layers.Dropout(rate=config.feat_proj_dropout) def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(hidden_states) hidden_states = self.dropout(hidden_states, training=training) return hidden_states # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFHubert class TFHubertAttention(tf.keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = tf.keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: Optional[tf.Tensor] = None, past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None, attention_mask: Optional[tf.Tensor] = None, layer_head_mask: Optional[tf.Tensor] = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) # The tf.debugging asserts are not compliant with XLA then they # have to be disabled in other modes than eager. if tf.executing_eagerly(): tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}", ) if attention_mask is not None: # The tf.debugging asserts are not compliant with XLA then they # have to be disabled in other modes than eager. 
if tf.executing_eagerly(): tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}", ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = tf.nn.softmax(attn_weights, axis=-1) if layer_head_mask is not None: # The tf.debugging asserts are not compliant with XLA then they # have to be disabled in other modes than eager. if tf.executing_eagerly(): tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}", ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) # The tf.debugging asserts are not compliant with XLA then they # have to be disabled in other modes than eager. if tf.executing_eagerly(): tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}", ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2FeedForward with Wav2Vec2->Hubert class TFHubertFeedForward(tf.keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.intermediate_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.intermediate_dense = tf.keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="intermediate_dense", ) self.intermediate_act_fn = get_tf_activation(config.hidden_act) self.output_dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), bias_initializer="zeros", name="output_dense", ) self.output_dropout = tf.keras.layers.Dropout(config.hidden_dropout) def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states, training=training) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states, training=training) return hidden_states # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayer with Wav2Vec2->Hubert class TFHubertEncoderLayer(tf.keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.attention = TFHubertAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, 
is_decoder=False, name="attention", ) self.dropout = tf.keras.layers.Dropout(config.hidden_dropout) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.feed_forward = TFHubertFeedForward(config, name="feed_forward") self.final_layer_norm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="final_layer_norm" ) def call( self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = False, training: bool = False, ) -> Tuple[tf.Tensor]: attn_residual = hidden_states hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, training=training ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert class TFHubertEncoderLayerStableLayerNorm(tf.keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.attention = TFHubertAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, is_decoder=False, name="attention", ) self.dropout = tf.keras.layers.Dropout(config.hidden_dropout) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.feed_forward = TFHubertFeedForward(config, name="feed_forward") self.final_layer_norm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="final_layer_norm" ) def call( self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = False, training: bool = False, ) -> Tuple[tf.Tensor]: attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( hidden_states, attention_mask=attention_mask, training=training ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2Encoder with Wav2Vec2->Hubert class TFHubertEncoder(tf.keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed") self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout) self.layer = [TFHubertEncoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, training: Optional[bool] = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if 
attention_mask is not None: hidden_states = hidden_states * tf.expand_dims(attention_mask, -1) attention_mask = _expand_mask(attention_mask) else: attention_mask = None position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states, training=training) for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = np.random.uniform(0, 1) if training and (dropout_probability < self.config.layerdrop): # skip the layer continue layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert class TFHubertEncoderStableLayerNorm(tf.keras.layers.Layer): def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed") self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout) self.layer = [ TFHubertEncoderLayerStableLayerNorm(config, name=f"layers.{i}") for i in range(config.num_hidden_layers) ] def call( self, hidden_states: tf.Tensor, attention_mask: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, training: Optional[bool] = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: hidden_states = hidden_states * tf.expand_dims(attention_mask, -1) attention_mask = _expand_mask(attention_mask) else: attention_mask = None position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states, training=training) for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = np.random.uniform(0, 1) if training and (dropout_probability < self.config.layerdrop): # skip the layer continue layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, 
all_self_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @keras_serializable class TFHubertMainLayer(tf.keras.layers.Layer): config_class = HubertConfig def __init__(self, config: HubertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.feature_extractor = TFHubertFeatureEncoder(config, name="feature_extractor") self.feature_projection = TFHubertFeatureProjection(config, name="feature_projection") if config.do_stable_layer_norm: self.encoder = TFHubertEncoderStableLayerNorm(config, name="encoder") else: self.encoder = TFHubertEncoder(config, name="encoder") def build(self, input_shape: tf.TensorShape): self.masked_spec_embed = self.add_weight( shape=(self.config.hidden_size,), initializer="uniform", trainable=True, name="masked_spec_embed" ) super().build(input_shape) def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths def _mask_hidden_states(self, hidden_states: tf.Tensor, mask_time_indices: Optional[tf.Tensor] = None): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). """ batch_size, sequence_length, hidden_size = shape_list(hidden_states) # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states = tf.where( tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool), self.masked_spec_embed[tf.newaxis, tf.newaxis, :], hidden_states, ) elif self.config.mask_time_prob > 0: # generate indices & apply SpecAugment along time axis mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, min_masks=2, ) hidden_states = tf.where( tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool), self.masked_spec_embed[tf.newaxis, tf.newaxis, :], hidden_states, ) # apply SpecAugment along feature axis if self.config.mask_feature_prob > 0: mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, ) hidden_states = tf.where(mask_feature_indices[:, tf.newaxis, :], hidden_states, 0) return hidden_states def call( self, input_values: tf.Tensor, attention_mask: Optional[tf.Tensor] = None, token_type_ids: Optional[tf.Tensor] = None, position_ids: Optional[tf.Tensor] = None, head_mask: Optional[tf.Tensor] = None, inputs_embeds: Optional[tf.Tensor] = None, output_attentions: Optional[tf.Tensor] = None, output_hidden_states: Optional[tf.Tensor] = None, return_dict: Optional[bool] = None, training: bool = False, **kwargs: Any, ): inputs = input_values_processing( func=self.call, config=self.config, input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, 
output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, kwargs_call=kwargs, ) hidden_states = self.feature_extractor( tf.cast(inputs["input_values"], tf.float32), training=inputs["training"] ) if inputs["attention_mask"] is not None: # compute real output lengths according to convolution formula output_lengths = self._get_feat_extract_output_lengths(tf.reduce_sum(inputs["attention_mask"], -1)) attention_mask = tf.sequence_mask( output_lengths, maxlen=shape_list(hidden_states)[1], dtype=hidden_states.dtype ) hidden_states = self.feature_projection(hidden_states, training=inputs["training"]) mask_time_indices = kwargs.get("mask_time_indices", None) if inputs["training"]: hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) hidden_states = encoder_outputs[0] if not inputs["return_dict"]: return (hidden_states,) + encoder_outputs[1:] return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class TFHubertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = HubertConfig base_model_prefix = "hubert" main_input_name = "input_values" @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: pad_token = 0.0 input_values = tf.convert_to_tensor(np.random.rand(1, 16000), tf.float32) dummy_inputs = { "input_values": input_values, "attention_mask": tf.cast(tf.not_equal(input_values, pad_token), tf.float32), } return dummy_inputs def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) logger.warning( f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish " "to train/fine-tine this model, you need a GPU or a TPU" ) @tf.function def serving(self, inputs): output = self.call(input_values=inputs, training=False) return self.serving_output(output) HUBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. 
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with `input_values` only and nothing else: `model(inputs_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_values": input_values, "token_type_ids": token_type_ids})` </Tip> Args: config ([`HubertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ HUBERT_INPUTS_DOCSTRING = r""" Args: input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_values` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare TFHubert Model transformer outputing raw hidden-states without any specific head on top.", HUBERT_START_DOCSTRING, ) class TFHubertModel(TFHubertPreTrainedModel): def __init__(self, config: HubertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.hubert = TFHubertMainLayer(config, name="hubert") @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_values: tf.Tensor, attention_mask: Optional[tf.Tensor] = None, token_type_ids: Optional[tf.Tensor] = None, position_ids: Optional[tf.Tensor] = None, head_mask: Optional[tf.Tensor] = None, inputs_embeds: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: """ Returns: Example: ```python >>> from transformers import Wav2Vec2Processor, TFHubertModel >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h") >>> model = TFHubertModel.from_pretrained("facebook/hubert-base-960h") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 >>> hidden_states = model(input_values).last_hidden_state ```""" inputs = input_values_processing( func=self.call, config=self.config, input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) inputs["output_hidden_states"] = ( inputs["output_hidden_states"] if inputs["output_hidden_states"] else self.config.output_hidden_states ) inputs["output_attentions"] = ( inputs["output_attentions"] if inputs["output_attentions"] else self.config.output_attentions ) inputs["return_dict"] = inputs["return_dict"] if inputs["return_dict"] else self.config.return_dict outputs = self.hubert( input_values=inputs["input_values"], attention_mask=inputs["attention_mask"], token_type_ids=inputs["token_type_ids"], position_ids=inputs["position_ids"], head_mask=inputs["head_mask"], inputs_embeds=inputs["inputs_embeds"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) return outputs def serving_output(self, output): hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns) @add_start_docstrings( """TFHubert Model with a `language modeling` head on top for 
Connectionist Temporal Classification (CTC).""", HUBERT_START_DOCSTRING, ) class TFHubertForCTC(TFHubertPreTrainedModel): def __init__(self, config: HubertConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.hubert = TFHubertMainLayer(config, name="hubert") self.dropout = tf.keras.layers.Dropout(config.final_dropout) self.lm_head = tf.keras.layers.Dense(config.vocab_size, name="lm_head") def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.hubert.feature_extractor.trainable = False @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_values: tf.Tensor, attention_mask: Optional[tf.Tensor] = None, token_type_ids: Optional[tf.Tensor] = None, position_ids: Optional[tf.Tensor] = None, head_mask: Optional[tf.Tensor] = None, inputs_embeds: Optional[tf.Tensor] = None, output_attentions: Optional[bool] = None, labels: Optional[tf.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_values` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import Wav2Vec2Processor, TFHubertForCTC >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h") >>> model = TFHubertForCTC.from_pretrained("facebook/hubert-base-960h") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 >>> logits = model(input_values).logits >>> predicted_ids = tf.argmax(logits, axis=-1) >>> transcription = processor.decode(predicted_ids[0]) >>> # compute loss >>> target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST" >>> # wrap processor as target processor to encode labels >>> with processor.as_target_processor(): ... 
labels = processor(transcription, return_tensors="tf").input_values >>> loss = model(input_values, labels=labels).loss ```""" inputs = input_values_processing( func=self.call, config=self.config, input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) outputs = self.hubert( input_values=inputs["input_values"], attention_mask=inputs["attention_mask"], token_type_ids=inputs["token_type_ids"], position_ids=inputs["position_ids"], head_mask=inputs["head_mask"], inputs_embeds=inputs["inputs_embeds"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, training=inputs["training"]) logits = self.lm_head(hidden_states) if labels is not None: if tf.reduce_max(labels) >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") attention_mask = ( inputs["attention_mask"] if inputs["attention_mask"] is not None else tf.ones_like(inputs["input_values"], dtype=tf.float32) ) input_lengths = self.hubert._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, axis=-1)) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = tf.cast(labels >= 0, tf.int32) target_lengths = tf.reduce_sum(labels_mask, axis=-1) loss = tf.nn.ctc_loss( logits=logits, labels=labels, logit_length=input_lengths, label_length=target_lengths, blank_index=self.config.pad_token_id, logits_time_major=False, ) if self.config.ctc_loss_reduction == "sum": loss = tf.reduce_sum(loss) if self.config.ctc_loss_reduction == "mean": loss = tf.reduce_mean(loss) else: loss = None if not inputs["return_dict"]: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
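# Illustrative sketch of the length bookkeeping performed by _get_feat_extract_output_lengths
# above, written out standalone. The kernel/stride values are the usual HubertConfig defaults
# and serve only as an example; real values come from the loaded config.
conv_kernel = (10, 3, 3, 3, 3, 2, 2)
conv_stride = (5, 2, 2, 2, 2, 2, 2)

def feat_extract_output_length(input_length: int) -> int:
    for kernel_size, stride in zip(conv_kernel, conv_stride):
        # same per-layer formula as in the model: (input_length - kernel_size) // stride + 1
        input_length = (input_length - kernel_size) // stride + 1
    return input_length

# a one-second clip at 16 kHz (16000 samples) collapses to 49 encoder frames with these settings
assert feat_extract_output_length(16000) == 49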
42.461908
164
0.656575
52284
0.738642
0
0
17514
0.247429
0
0
20923
0.295589
be05301485051b024d0504eecb5189daad437a58
3242
py
Python
600/unit-1/recursion/problem-set/mit-solutions/ps2_hangman_sol1.py
marioluan/mit-opencourseware-cs
5de013f8e321fed2ff3b7a13e8929a44805db78b
[ "MIT" ]
null
null
null
600/unit-1/recursion/problem-set/mit-solutions/ps2_hangman_sol1.py
marioluan/mit-opencourseware-cs
5de013f8e321fed2ff3b7a13e8929a44805db78b
[ "MIT" ]
null
null
null
600/unit-1/recursion/problem-set/mit-solutions/ps2_hangman_sol1.py
marioluan/mit-opencourseware-cs
5de013f8e321fed2ff3b7a13e8929a44805db78b
[ "MIT" ]
1
2020-05-19T13:29:18.000Z
2020-05-19T13:29:18.000Z
# 6.00 Problem Set 2 # # Hangman # Name : Solutions # Collaborators : <your collaborators> # Time spent : <total time> # ----------------------------------- # Helper code # You don't need to understand this helper code, # but you will have to know how to use the functions import random import string WORDLIST_FILENAME = "words.txt" def load_words(): """ Returns a list of valid words. Words are strings of lowercase letters. Depending on the size of the word list, this function may take a while to finish. """ print "Loading word list from file..." # inFile: file inFile = open(WORDLIST_FILENAME, 'r', 0) # line: string line = inFile.readline() # wordlist: list of strings wordlist = string.split(line) print " ", len(wordlist), "words loaded." return wordlist def choose_word(wordlist): """ wordlist (list): list of words (strings) Returns a word from wordlist at random """ return random.choice(wordlist) # end of helper code # ----------------------------------- # load the list of words into the wordlist variable # so that it can be accessed from anywhere in the program wordlist = load_words() def partial_word(secret_word, guessed_letters): """ Return the secret_word in user-visible format, with underscores used to replace characters that have not yet been guessed. """ result = '' for letter in secret_word: if letter in guessed_letters: result = result + letter else: result = result + '_' return result def hangman(): """ Runs the hangman game. """ print 'Welcome to the game, Hangman!' secret_word = choose_word(wordlist) print 'I am thinking of a word that is ' + str(len(secret_word)) + ' letters long.' num_guesses = 8 word_guessed = False guessed_letters = '' available_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # Letter-guessing loop. Ask the user to guess a letter and respond to the # user based on whether the word has yet been correctly guessed. while num_guesses > 0 and not word_guessed: print '-------------' print 'You have ' + str(num_guesses) + ' guesses left.' print 'Available letters: ' + ''.join(available_letters) guess = raw_input('Please guess a letter:') if guess not in available_letters: print 'Oops! You\'ve already guessed that letter: ' + partial_word(secret_word, guessed_letters) elif guess not in secret_word: num_guesses -= 1 available_letters.remove(guess) print 'Oops! That letter is not in my word: ' + partial_word(secret_word, guessed_letters) else: available_letters.remove(guess) guessed_letters += guess print 'Good guess: ' + partial_word(secret_word, guessed_letters) if secret_word == partial_word(secret_word, guessed_letters): word_guessed = True if word_guessed: print 'Congratulations, you won!' else: print 'Game over.'
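# Quick sanity check of partial_word above, using a hypothetical word that need not be in
# words.txt (running this requires words.txt, since the module loads the word list on import).
assert partial_word('apple', 'pl') == '_ppl_'    # unguessed letters are shown as underscores
assert partial_word('apple', 'aple') == 'apple'  # once every letter is guessed, the word is revealed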
32.42
108
0.604874
0
0
0
0
0
0
0
0
1551
0.478408
be05ff012f40e6f5a4b594110683f58699e3309e
412
py
Python
top/api/rest/FenxiaoRefundMessageAddRequest.py
forestsheep/middleman
34d54f9ffd9d7bcd775a8dcce4f00dd6c5bb1acd
[ "MIT" ]
null
null
null
top/api/rest/FenxiaoRefundMessageAddRequest.py
forestsheep/middleman
34d54f9ffd9d7bcd775a8dcce4f00dd6c5bb1acd
[ "MIT" ]
null
null
null
top/api/rest/FenxiaoRefundMessageAddRequest.py
forestsheep/middleman
34d54f9ffd9d7bcd775a8dcce4f00dd6c5bb1acd
[ "MIT" ]
null
null
null
''' Created by auto_sdk on 2016.04.13 ''' from top.api.base import RestApi class FenxiaoRefundMessageAddRequest(RestApi): def __init__(self,domain='gw.api.taobao.com',port=80): RestApi.__init__(self,domain, port) self.image = None self.message_content = None self.sub_order_id = None def getapiname(self): return 'taobao.fenxiao.refund.message.add' def getMultipartParas(self): return ['image']
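# Illustrative use of the request class above; the field values are placeholders, and actually
# executing the call is left to the surrounding top SDK client, which is not shown here.
req = FenxiaoRefundMessageAddRequest()   # defaults to gw.api.taobao.com:80
req.sub_order_id = 123456789             # placeholder sub-order id
req.message_content = 'refund discussion note'
req.image = None                         # optional multipart field, see getMultipartParas()
print(req.getapiname())                  # taobao.fenxiao.refund.message.add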
24.235294
55
0.75
336
0.815534
0
0
0
0
0
0
102
0.247573
be071e34802c8618edb66a1241ddd2e7d443b843
3316
py
Python
image-generation/slegan/args.py
AaratiAkkapeddi/nnabla-examples
db9e5ad850303c158773aeb275e5c3821b4a3935
[ "Apache-2.0" ]
228
2017-11-20T06:05:56.000Z
2022-03-23T12:40:05.000Z
image-generation/slegan/args.py
AaratiAkkapeddi/nnabla-examples
db9e5ad850303c158773aeb275e5c3821b4a3935
[ "Apache-2.0" ]
36
2018-01-11T23:26:20.000Z
2022-03-12T00:53:38.000Z
image-generation/slegan/args.py
AaratiAkkapeddi/nnabla-examples
db9e5ad850303c158773aeb275e5c3821b4a3935
[ "Apache-2.0" ]
76
2017-11-22T22:00:00.000Z
2022-03-28T05:58:57.000Z
# Copyright 2021 Sony Corporation. # Copyright 2021 Sony Group Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def get_args(batch_size=8, image_size=256, max_iter=100000): """ Get command line arguments. Arguments set the default values of command line arguments. """ import argparse import os description = "Example of Lightweight GAN." parser = argparse.ArgumentParser(description) parser.add_argument("-d", "--device-id", type=str, default="0", help="Device id.") parser.add_argument("-c", "--context", type=str, default="cudnn", help="Context.") parser.add_argument("--type-config", "-t", type=str, default='float', help='Type of computation. e.g. "float", "half".') parser.add_argument("--img-path", type=str, default="~/AnimalFace-dog", help="Image path.") parser.add_argument("--image-size", type=int, default=image_size, help="Image size.") parser.add_argument("--batch-size", "-b", type=int, default=batch_size, help="Batch size.") parser.add_argument("--max-iter", "-i", type=int, default=max_iter, help="Max iterations.") parser.add_argument("--save-interval", type=int, default=50000, help="Interval for saving models.") parser.add_argument("--test-interval", type=int, default=5000, help="Interval for testing models.") parser.add_argument("--latent", type=int, default=256, help="Number of latent variables.") parser.add_argument("--monitor-path", type=str, default="./result/tmp", help="Monitor path.") parser.add_argument("--model-load-path", type=str, default=".", help="Path to load parameters from") parser.add_argument("--train-samples", type=int, default=-1, help="Number of data to be used. When -1 is set all data is used.") parser.add_argument("--lr", type=float, default=2e-4, help="Learning rate") parser.add_argument("--aug-list", nargs="+", default=["lrflip", "translation", "color"]) args = parser.parse_args() return args def save_args(args, mode="train"): from nnabla import logger import os if not os.path.exists(args.monitor_path): os.makedirs(args.monitor_path) path = "{}/Arguments-{}.txt".format(args.monitor_path, mode) logger.info("Arguments are saved to {}.".format(path)) with open(path, "w") as fp: for k, v in sorted(vars(args).items()): logger.info("{}={}".format(k, v)) fp.write("{}={}\n".format(k, v))
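# Illustrative entry point showing how the helpers above are typically wired together in a
# training script (argument values and the resulting monitor path are examples only; save_args
# needs nnabla available, since it imports the nnabla logger).
if __name__ == "__main__":
    args = get_args(batch_size=8, image_size=256, max_iter=100000)
    save_args(args, mode="train")  # writes <monitor_path>/Arguments-train.txt
    print(args.lr, args.batch_size, args.aug_list)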
42.512821
91
0.606454
0
0
0
0
0
0
0
0
1459
0.439988
be077745c0ef294c19a02fb08ff66ab17f79fb99
898
py
Python
day1/files_ex1.py
grenn72/pynet-ons-feb19
5aff7dfa6a697214dc24818819a60b46a261d0d3
[ "Apache-2.0" ]
null
null
null
day1/files_ex1.py
grenn72/pynet-ons-feb19
5aff7dfa6a697214dc24818819a60b46a261d0d3
[ "Apache-2.0" ]
null
null
null
day1/files_ex1.py
grenn72/pynet-ons-feb19
5aff7dfa6a697214dc24818819a60b46a261d0d3
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python from __future__ import print_function # READ #### f = open("my_file.txt") print("\nLoop directly over file") print("-" * 60) for line in f: print(line.strip()) print("-" * 60) f.seek(0) my_content = f.readlines() print("\nUse readlines method") print("-" * 60) for line in my_content: print(line.strip()) print("-" * 60) f.seek(0) my_content = f.read() print("\nUse read + splitlines") print("-" * 60) for line in my_content.splitlines(): print(line) print("-" * 60) f.close() with open("my_file.txt") as f: print("\nUse with and loop over file") print("-" * 60) for line in f: print(line.strip()) print("-" * 60) # WRITE #### print("\nWriting file.") f = open("new_file.txt", "w") f.write("whatever2\n") f.close() # APPEND #### print("\nAppending file.") with open("new_file.txt", "a") as f: f.write("something else\n") print()
18.708333
42
0.614699
0
0
0
0
0
0
0
0
315
0.35078
be09ed482ae6fd03e6f106d0795f2a118eb2425c
2332
py
Python
test/integration_tests/test_integration_datasets_client.py
self-host/selfhost-python-client
95797ef819099174d916b10e82878c370b1cd972
[ "MIT" ]
null
null
null
test/integration_tests/test_integration_datasets_client.py
self-host/selfhost-python-client
95797ef819099174d916b10e82878c370b1cd972
[ "MIT" ]
null
null
null
test/integration_tests/test_integration_datasets_client.py
self-host/selfhost-python-client
95797ef819099174d916b10e82878c370b1cd972
[ "MIT" ]
null
null
null
import uuid from typing import List, Dict, Any import unittest from selfhost_client import SelfHostClient, DatasetType class TestIntegrationDatasetsClient(unittest.TestCase): """ Run these tests individually because Self-Host will return HTTP 429 Too Many Requests otherwise. """ @classmethod def setUpClass(cls) -> None: cls.client: SelfHostClient = SelfHostClient( base_url='http://127.0.0.1:8080', username='test', password='root' ) cls.unique_name: str = str(uuid.uuid4()) cls.created_dataset: DatasetType = cls.client.create_dataset( name=cls.unique_name, dataset_format='ini', content='aGVsbG8sIHdvcmxkIQ==', tags=['test_tag'] ) @classmethod def tearDownClass(cls) -> None: cls.client.delete_dataset(cls.created_dataset['uuid']) def test_get_datasets(self) -> None: params: Dict[str, int] = { 'limit': 20, 'offset': 0 } datasets: List[DatasetType] = self.client.get_datasets(**params) self.assertIsNotNone(datasets) def test_create_and_delete_dataset(self) -> None: # Create and delete happens in setup and teardown methods. self.assertEqual(self.created_dataset['name'], self.unique_name) def test_get_dataset(self) -> None: fetched_dataset: DatasetType = self.client.get_dataset(self.created_dataset['uuid']) self.assertEqual(fetched_dataset['name'], self.created_dataset['name']) def test_update_dataset(self) -> None: self.client.update_dataset( dataset_uuid=self.created_dataset['uuid'], name=f'{self.created_dataset["name"]} Updated', dataset_format='json', tags=['updated'] ) fetched_dataset: DatasetType = self.client.get_dataset(self.created_dataset['uuid']) self.assertEqual(fetched_dataset['name'], f'{self.created_dataset["name"]} Updated') self.assertEqual(fetched_dataset['format'], 'json') self.assertEqual(fetched_dataset['tags'], ['updated']) def test_get_dataset_raw_content(self) -> None: fetched_content: Any = self.client.get_dataset_raw_content(self.created_dataset['uuid']) self.assertIsNotNone(fetched_content)
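# The same round-trip the tests above exercise, as a standalone sketch; the base URL and
# credentials are placeholders and must point at a running Self-Host instance.
if __name__ == '__main__':
    client = SelfHostClient(base_url='http://127.0.0.1:8080', username='test', password='root')
    created: DatasetType = client.create_dataset(
        name='example-dataset',
        dataset_format='ini',
        content='aGVsbG8sIHdvcmxkIQ==',  # base64-encoded "hello, world!"
        tags=['example'],
    )
    print(client.get_dataset(created['uuid'])['name'])
    client.delete_dataset(created['uuid'])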
36.4375
100
0.653945
2,208
0.946827
0
0
593
0.254288
0
0
437
0.187393
be09ff199c76d0416c7ca2377918a44850900a71
909
py
Python
setup.py
pnxenopoulos/soccer-data-gen
bdc31be973eb12cdd9f58b04ab61ea9d5d1aa7a5
[ "MIT" ]
null
null
null
setup.py
pnxenopoulos/soccer-data-gen
bdc31be973eb12cdd9f58b04ab61ea9d5d1aa7a5
[ "MIT" ]
null
null
null
setup.py
pnxenopoulos/soccer-data-gen
bdc31be973eb12cdd9f58b04ab61ea9d5d1aa7a5
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages

setup(
    name="soccergen",
    version="0.1",
    packages=find_packages(),
    # Project uses reStructuredText, so ensure that the docutils get
    # installed or upgraded on the target machine
    install_requires=["gfootball>=2.8",],
    # metadata to display on PyPI
    author="Peter Xenopoulos",
    author_email="[email protected]",
    description="Soccer trajectory and event data generation",
    keywords="soccer data-generation foootball",
    url="https://github.com/pnxenopoulos/soccer-data-gen",  # project home page, if any
    project_urls={
        "Issues": "https://github.com/pnxenopoulos/soccer-data-gen/issues",
        "Documentation": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
        "Github": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
    },
    classifiers=["License :: OSI Approved :: MIT License"],
)
39.521739
87
0.693069
0
0
0
0
0
0
0
0
600
0.660066
be0a74b4d28b5ee5afbbd8993134c1568bbdff10
6,516
py
Python
metaspace/engine/sm/engine/tests/test_fdr.py
METASPACE2020/METASPACE
e1acd9a409f84a78eed7ca9713258c09b0e137ca
[ "Apache-2.0" ]
null
null
null
metaspace/engine/sm/engine/tests/test_fdr.py
METASPACE2020/METASPACE
e1acd9a409f84a78eed7ca9713258c09b0e137ca
[ "Apache-2.0" ]
null
null
null
metaspace/engine/sm/engine/tests/test_fdr.py
METASPACE2020/METASPACE
e1acd9a409f84a78eed7ca9713258c09b0e137ca
[ "Apache-2.0" ]
null
null
null
from itertools import product from unittest.mock import patch import pytest import numpy as np import pandas as pd from pandas.util.testing import assert_frame_equal from sm.engine.annotation.fdr import FDR, run_fdr_ranking from sm.engine.formula_parser import format_modifiers FDR_CONFIG = {'decoy_sample_size': 2} @patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li']) def test_fdr_decoy_adduct_selection_saves_corr(): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H', '+K', '[M]+'], analysis_version=1, ) exp_target_decoy_df = pd.DataFrame( [ ('H2O', '+H', '+He'), ('H2O', '+H', '+Li'), ('H2O', '+K', '+He'), ('H2O', '+K', '+Li'), ('H2O', '', '+He'), ('H2O', '', '+Li'), ], columns=['formula', 'tm', 'dm'], ) fdr.decoy_adducts_selection(target_formulas=['H2O']) assert_frame_equal( fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), ) @pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])]) def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=analysis_version, ) fdr.fdr_levels = [0.2, 0.8] fdr.td_df = pd.DataFrame( [['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']], columns=['formula', 'tm', 'dm'], ) msm_df = pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ['H2O', '+Cu', 0.5], ['H2O', '+Co', 0.5], ['C2H2', '+Ag', 0.75], ['C2H2', '+Ar', 0.0], ], columns=['formula', 'modifier', 'msm'], ) exp_sf_df = pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ], columns=['formula', 'modifier', 'msm'], ).assign(fdr=expected_fdrs) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_estimate_fdr_digitize_works(): fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1} fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=1, ) fdr.fdr_levels = [0.4, 0.8] fdr.td_df = pd.DataFrame( [['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']], columns=['formula', 'tm', 'dm'], ) msm_df = pd.DataFrame( [ ['C1', '+H', 1.0], ['C2', '+H', 0.75], ['C3', '+H', 0.5], ['C4', '+H', 0.25], ['C1', '+Cu', 0.75], ['C2', '+Ag', 0.3], ['C3', '+Cl', 0.25], ['C4', '+Co', 0.1], ], columns=['formula', 'modifier', 'msm'], ) exp_sf_df = pd.DataFrame( [ ['C1', '+H', 1.0, 0.4], ['C2', '+H', 0.75, 0.4], ['C3', '+H', 0.5, 0.4], ['C4', '+H', 0.25, 0.8], ], columns=['formula', 'modifier', 'msm', 'fdr'], ) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_ions(): formulas = ['H2O', 'C5H2OH'] target_adducts = ['+H', '+Na'] decoy_sample_size = 5 fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions = fdr.ion_tuples() assert type(ions) == list # total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair assert ( len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts) < len(ions) <= len(formulas) * len(target_adducts) * decoy_sample_size + len(formulas) * len(target_adducts) ) target_ions = [(formula, adduct) for formula, adduct in product(formulas, target_adducts)] assert 
set(target_ions).issubset(set(map(tuple, ions))) def test_chem_mods_and_neutral_losses(): formulas = ['H2O', 'C5H2OH'] chem_mods = ['-H+C'] neutral_losses = ['-O', '-C'] target_adducts = ['+H', '+Na', '[M]+'] target_modifiers = [ format_modifiers(cm, nl, ta) for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts) ] decoy_sample_size = 5 fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=chem_mods, neutral_losses=neutral_losses, target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions = fdr.ion_tuples() assert type(ions) == list # total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair min_count = len(formulas) * len(target_modifiers) max_count = len(formulas) * len(target_modifiers) * (1 + decoy_sample_size) assert min_count < len(ions) <= max_count target_ions = list(product(formulas, target_modifiers)) assert set(target_ions).issubset(set(map(tuple, ions))) def test_run_fdr_ranking(): target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]) decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1]) n_targets = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) n_decoys = pd.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4]) expected_fdr = n_decoys / n_targets expected_fdr_ros = (n_decoys + 1) / (n_targets + 1) expected_fdr_mono = pd.Series( [0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11] ) fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False) fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False) fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True) assert np.isclose(fdr, expected_fdr).all() assert np.isclose(fdr_ros, expected_fdr_ros).all() assert np.isclose(fdr_mono, expected_fdr_mono).all()
32.58
121
0.558778
0
0
0
0
1,985
0.304635
0
0
1,069
0.164058
be0b585df12c7b4d77e31edbf4786b2ef1e4a31b
69
py
Python
tests/__init__.py
acarl005/plotille
44089a88f20b71b3314416947ae724bebbdc7739
[ "MIT" ]
2
2020-04-08T15:31:12.000Z
2020-07-01T11:04:47.000Z
tests/__init__.py
acarl005/plotille
44089a88f20b71b3314416947ae724bebbdc7739
[ "MIT" ]
9
2018-09-12T09:29:43.000Z
2020-03-15T09:11:25.000Z
tests/__init__.py
acarl005/plotille
44089a88f20b71b3314416947ae724bebbdc7739
[ "MIT" ]
1
2019-03-29T10:59:13.000Z
2019-03-29T10:59:13.000Z
from logging import getLogger

getLogger('flake8').propagate = False
17.25
37
0.797101
0
0
0
0
0
0
0
0
8
0.115942
be0c9d39fc49b73642a31f8fb89de4fff31f8d63
4,576
py
Python
umigame/nlp/labelling.py
penguinwang96825/Umigame
98d647ab6f40df08fe31d6b3bc444afe229a914e
[ "Apache-2.0" ]
null
null
null
umigame/nlp/labelling.py
penguinwang96825/Umigame
98d647ab6f40df08fe31d6b3bc444afe229a914e
[ "Apache-2.0" ]
null
null
null
umigame/nlp/labelling.py
penguinwang96825/Umigame
98d647ab6f40df08fe31d6b3bc444afe229a914e
[ "Apache-2.0" ]
1
2021-11-01T14:35:32.000Z
2021-11-01T14:35:32.000Z
import math

import numpy as np
import pandas as pd


def fixed_time_horizon(df, column='close', lookback=20):
    """
    Fixed-time Horizon

    As it relates to finance, virtually all ML papers label observations using the fixed-time horizon method.
    Fixed-time horizon is presented as one of the main procedures to label data when it comes to processing
    financial time series for machine learning.

    Parameters
    ----------
    df: pd.DataFrame
    column: str
        Choose from "open", "high", "low", and "close."
    lookahead: str
        The number of days to look ahead.

    References
    ----------
    1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html
    2. https://arxiv.org/pdf/1603.08604.pdf
    3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/
    4. De Prado, Advances in financial machine learning, 2018
    5. Dixon et al., Classification-based financial markets prediction using deep neural networks, 2017
    """
    price = df[column]
    label = (price.shift(-lookback) / price > 1).astype(int)
    return label


def triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True):
    """
    Triple Barrier

    The idea is to consider the full dynamics of a trading strategy and not a simple performance proxy.
    The rationale for this extension is that often money managers implement P&L triggers that cash in when gains
    are sufficient or opt out to stop their losses.
    Upon inception of the strategy, three barriers are fixed (De Prado, 2018).

    Parameters
    ----------
    df: pd.DataFrame
    column: str
        Choose from "open", "high", "low", and "close."
    ub: float
        It stands for upper bound, e.g. 0.07 is a 7% profit taking.
    lb: float
        It stands for lower bound, e.g. 0.03 is a 3% stop loss.
    lookback: str
        Maximum holding time.

    References
    ----------
    1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/
    2. http://www.mlfactor.com/Data.html#the-triple-barrier-method
    3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/
    4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e
    5. De Prado, Advances in financial machine learning, 2018
    """
    ub = 1 + ub
    lb = 1- lb

    def end_price(s):
        return np.append(s[(s / s[0] > ub) | (s / s[0] < lb)], s[-1])[0]/s[0]

    r = np.array(range(lookback))

    def end_time(s):
        return np.append(r[(s / s[0] > ub) | (s / s[0] < lb)], lookback-1)[0]

    price = df[column]
    p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1)
    t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1)
    t = pd.Series(
        [t.index[int(k+i)] if not math.isnan(k+i) else np.datetime64('NaT') for i, k in enumerate(t)],
        index=t.index
    ).dropna()

    label = pd.Series(0, p.index)
    label.loc[p > ub] = 1
    label.loc[p < lb] = -1
    if binary_classification:
        label = np.where(label == 1, 1, 0)
    return pd.Series(label, index=price.index)


def get_continuous_trading_signals(df, column='close', lookahead=5):
    """
    Continuous Trading Signal

    A hybrid stock trading framework integrating technical analysis with machine learning techniques.

    Parameters
    ----------
    df: pd.DataFrame
    column: str
        Choose from "open", "high", "low", and "close."
    lookahead: str
        The number of days to look ahead.

    References
    ----------
    1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf
    2. Dash and Dash, A hybrid stock trading framework integrating technical analysis with machine learning techniques, 2016
    """
    price = df.data[column]
    OTr = []
    trends = []
    for idx in range(len(price)-lookahead+1):
        arr_window = price[idx:(idx+lookahead)]
        if price[idx+lookahead-1] > price[idx]:
            coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))
            y_t = coef * 0.5 + 0.5
        elif price[idx+lookahead-1] <= price[idx]:
            coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))
            y_t = coef * 0.5
        OTr.append(y_t)
    OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr))))
    trends = (OTr >= np.mean(OTr)).astype(int)
    return pd.Series(OTr, index=price.index), pd.Series(trends, index=price.index)
37.508197
124
0.647072
0
0
0
0
0
0
0
0
2,666
0.582605
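A minimal usage sketch for the labelling helpers in the umigame/nlp/labelling.py record above. The import path is assumed from the record's file path, and the CSV file and column layout are illustrative, not part of the record:

import pandas as pd

# Assumed import path, derived from the record's file path umigame/nlp/labelling.py.
from umigame.nlp.labelling import fixed_time_horizon, triple_barrier

# Illustrative input: daily bars indexed by date with a 'close' column (hypothetical file).
df = pd.read_csv("prices.csv", index_col=0, parse_dates=True)

# 1 if the close 20 bars ahead is above today's close, else 0.
fth_labels = fixed_time_horizon(df, column="close", lookback=20)

# Triple-barrier label: 7% profit-taking, 3% stop-loss, 20-bar maximum holding time.
tb_labels = triple_barrier(df, column="close", ub=0.07, lb=0.03, lookback=20)

print(fth_labels.value_counts())
print(tb_labels.value_counts())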
be0d1242d33adfcfc290ba70e3637aa993c895e3
4,164
py
Python
mayan/apps/converter/api.py
Dave360-crypto/mayan-edms
9cd37537461347f79ff0429e4b8b16fd2446798d
[ "Apache-2.0" ]
3
2020-02-03T11:58:51.000Z
2020-10-20T03:52:21.000Z
mayan/apps/converter/api.py
Dave360-crypto/mayan-edms
9cd37537461347f79ff0429e4b8b16fd2446798d
[ "Apache-2.0" ]
null
null
null
mayan/apps/converter/api.py
Dave360-crypto/mayan-edms
9cd37537461347f79ff0429e4b8b16fd2446798d
[ "Apache-2.0" ]
2
2020-10-24T11:10:06.000Z
2021-03-03T20:05:38.000Z
from __future__ import absolute_import

import hashlib
import logging
import os

from django.utils.encoding import smart_str

from common.conf.settings import TEMPORARY_DIRECTORY
from common.utils import fs_cleanup

from .exceptions import OfficeConversionError, UnknownFileFormat
from .literals import (DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL,
    DEFAULT_ROTATION, DEFAULT_FILE_FORMAT)
from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE,
    TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR,
    FILE_FORMATS)
from .runtime import backend, office_converter

HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest()

logger = logging.getLogger(__name__)


def cache_cleanup(input_filepath, *args, **kwargs):
    try:
        os.remove(create_image_cache_filename(input_filepath, *args, **kwargs))
    except OSError:
        pass


def create_image_cache_filename(input_filepath, *args, **kwargs):
    if input_filepath:
        hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)]))
        return os.path.join(TEMPORARY_DIRECTORY, hash_value)
    else:
        return None


def convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None, *args, **kwargs):
    size = kwargs.get('size')
    file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT)
    zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL)
    rotation = kwargs.get('rotation', DEFAULT_ROTATION)
    page = kwargs.get('page', DEFAULT_PAGE_NUMBER)
    transformations = kwargs.get('transformations', [])

    if transformations is None:
        transformations = []

    if output_filepath is None:
        output_filepath = create_image_cache_filename(input_filepath, *args, **kwargs)

    if os.path.exists(output_filepath):
        return output_filepath

    if office_converter:
        try:
            office_converter.convert(input_filepath, mimetype=mimetype)
            if office_converter.exists:
                input_filepath = office_converter.output_filepath
                mimetype = 'application/pdf'
            else:
                # Recycle the already detected mimetype
                mimetype = office_converter.mimetype
        except OfficeConversionError:
            raise UnknownFileFormat('office converter exception')

    if size:
        transformations.append(
            {
                'transformation': TRANSFORMATION_RESIZE,
                'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR)))
            }
        )

    if zoom != 100:
        transformations.append(
            {
                'transformation': TRANSFORMATION_ZOOM,
                'arguments': {'percent': zoom}
            }
        )

    if rotation != 0 and rotation != 360:
        transformations.append(
            {
                'transformation': TRANSFORMATION_ROTATE,
                'arguments': {'degrees': rotation}
            }
        )

    try:
        backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format, mimetype=mimetype)
    finally:
        if cleanup_files:
            fs_cleanup(input_filepath)

    return output_filepath


def get_page_count(input_filepath):
    logger.debug('office_converter: %s' % office_converter)
    if office_converter:
        try:
            office_converter.convert(input_filepath)
            logger.debug('office_converter.exists: %s' % office_converter.exists)

            if office_converter.exists:
                input_filepath = office_converter.output_filepath

        except OfficeConversionError:
            raise UnknownFileFormat('office converter exception')

    return backend.get_page_count(input_filepath)


def get_available_transformations_choices():
    result = []
    for transformation in backend.get_available_transformations():
        result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label']))

    return result


def get_format_list():
    return [(format, FILE_FORMATS.get(format, u'')) for format in backend.get_format_list()]
32.53125
180
0.68828
0
0
0
0
0
0
0
0
350
0.084054
be0d8286d98d561dd73b8ad4757e80b16c93f068
2,798
py
Python
LogisticRegression/learn.py
ValYouW/DeepLearningCourse
d7d9edc60075f9078ec3f41074c958eaa7854964
[ "MIT" ]
null
null
null
LogisticRegression/learn.py
ValYouW/DeepLearningCourse
d7d9edc60075f9078ec3f41074c958eaa7854964
[ "MIT" ]
null
null
null
LogisticRegression/learn.py
ValYouW/DeepLearningCourse
d7d9edc60075f9078ec3f41074c958eaa7854964
[ "MIT" ]
null
null
null
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import utils


def plot_data(x_mat, y, db_x, db_y):
    plt.figure()
    plt.title('Data')

    admitted = (y == 1).flatten()
    rejected = (y == 0).flatten()

    # plot decision boundary
    plt.plot(db_x, db_y)

    # plot admitted
    plt.scatter(x_mat[admitted, 0], x_mat[admitted, 1], color='blue', marker='+')

    # plot rejected
    plt.scatter(x_mat[rejected, 0], x_mat[rejected, 1], edgecolors='red', facecolors='none', marker='o')

    plt.xlabel('exam 1 score')
    plt.ylabel('exam 2 score')
    plt.legend(['boundary', 'admitted', 'rejected'])


def main():
    print('Loading dataset...')

    # data is: exam 1 score, exam 2 score, bool whether admitted
    frame = pd.read_csv('ex2data1.csv', header=None)
    data = frame.values
    x_mat = data[:, 0:2]  # exam scores
    y = data[:, 2:3]  # admitted or not

    # normalize input (input has large values which causes sigmoid to always be 1 or 0)
    x_mean = np.mean(x_mat, axis=0)
    x_std = np.std(x_mat, axis=0)
    x_norm = (x_mat - x_mean) / x_std

    # add intercept
    x_norm = np.insert(x_norm, 0, 1, axis=1)

    # Learn model
    print('starting to learn...')
    (loss, reg_loss, theta) = utils.learn(x_norm, y, 5000, 0.1)
    print('Final loss %s' % loss[-1])
    print('Final theta \n%s' % theta)

    # predict for student
    joe = np.array([[45, 85]])
    joe_norm = (joe - x_mean) / x_std
    joe_norm = np.insert(joe_norm, 0, 1, axis=1)
    p = utils.sigmoid(joe_norm.dot(theta))
    print('Student with grades %s and %s has admission probability: %s' % (45, 85, p[0, 0]))

    # Predict on train set
    prediction = (utils.sigmoid(x_norm.dot(theta)) >= 0.5)
    actual = (y == 1)
    predict_success = np.sum(prediction == actual)
    print('Model evaluation on training set has success of %s/%s' % (predict_success, y.shape[0]))

    # calc decision boundary
    # The decision boundary is the threshold line that separates true/false predictions,
    # this means that on this line the prediction is exactly 0.5, meaning:
    # p = sigmoid(x_mat.dot(theta)) = 0.5 ====> x_mat.dot(theta) = 0
    # so our line equation is: theta0 + theta1*x1 + theta2*x2 = 0
    # x2 = -theta0 / theta2 - (theta1/theta2)*x1
    theta = theta.flatten()

    # calc 2 points on the line
    plot_x = np.array([np.min(x_norm[:, 1]), np.max(x_norm[:, 1])])
    plot_y = -1 * (theta[0] / theta[2]) - (theta[1] / theta[2]) * plot_x

    # denormalize the points
    plot_x = plot_x * x_std[0] + x_mean[0]
    plot_y = plot_y * x_std[1] + x_mean[1]

    plot_data(x_mat, y, plot_x, plot_y)
    utils.plot_loss(loss)
    plt.show()


if __name__ == '__main__':
    main()
32.534884
105
0.605075
0
0
0
0
0
0
0
0
1,017
0.363474
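The learn.py record above calls into a sibling utils module (utils.learn, utils.sigmoid, utils.plot_loss) that is not included in this record. A minimal stand-in for the two numeric helpers, consistent with how the script calls them, might look like the sketch below; it is an illustrative reconstruction under that assumption, not the repository's actual utils.py:

import numpy as np


def sigmoid(z):
    # Standard logistic function.
    return 1.0 / (1.0 + np.exp(-z))


def learn(x, y, iterations, learning_rate):
    # Plain batch gradient descent on the logistic-regression cross-entropy loss.
    m, n = x.shape
    theta = np.zeros((n, 1))
    loss_history = []
    for _ in range(iterations):
        h = sigmoid(x.dot(theta))
        loss = -np.mean(y * np.log(h) + (1 - y) * np.log(1 - h))
        loss_history.append(loss)
        gradient = x.T.dot(h - y) / m
        theta -= learning_rate * gradient
    # The script unpacks (loss, reg_loss, theta); without regularisation the
    # second element is just the same loss curve.
    return loss_history, loss_history, theta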
be0d8c6e88406117103733f22d2fc8dd5f14eae8
30,231
py
Python
ignite/handlers/time_profilers.py
iamhardikat11/ignite
0666b407f7cdba81842014c6026e33b66113bb94
[ "BSD-3-Clause" ]
4,119
2017-11-23T18:10:37.000Z
2022-03-31T05:31:27.000Z
ignite/handlers/time_profilers.py
iamhardikat11/ignite
0666b407f7cdba81842014c6026e33b66113bb94
[ "BSD-3-Clause" ]
1,838
2017-11-24T11:19:25.000Z
2022-03-31T09:08:18.000Z
ignite/handlers/time_profilers.py
iamhardikat11/ignite
0666b407f7cdba81842014c6026e33b66113bb94
[ "BSD-3-Clause" ]
691
2017-11-24T10:57:33.000Z
2022-03-29T02:19:44.000Z
import functools from collections import OrderedDict from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast import torch from ignite.engine import Engine, EventEnum, Events from ignite.handlers.timing import Timer class BasicTimeProfiler: """ BasicTimeProfiler can be used to profile the handlers, events, data loading and data processing times. Examples: .. code-block:: python from ignite.handlers import BasicTimeProfiler trainer = Engine(train_updater) # Create an object of the profiler and attach an engine to it profiler = BasicTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv') .. versionadded:: 0.4.6 """ events_to_ignore = [ Events.EXCEPTION_RAISED, Events.TERMINATE, Events.TERMINATE_SINGLE_EPOCH, Events.DATALOADER_STOP_ITERATION, ] def __init__(self) -> None: self._dataflow_timer = Timer() self._processing_timer = Timer() self._event_handlers_timer = Timer() self.dataflow_times = torch.zeros(1) self.processing_times = torch.zeros(1) self.event_handlers_times = {} # type: Dict[EventEnum, torch.Tensor] self._events = [ Events.EPOCH_STARTED, Events.EPOCH_COMPLETED, Events.ITERATION_STARTED, Events.ITERATION_COMPLETED, Events.GET_BATCH_STARTED, Events.GET_BATCH_COMPLETED, Events.COMPLETED, ] self._fmethods = [ self._as_first_epoch_started, self._as_first_epoch_completed, self._as_first_iter_started, self._as_first_iter_completed, self._as_first_get_batch_started, self._as_first_get_batch_completed, self._as_first_completed, ] self._lmethods = [ self._as_last_epoch_started, self._as_last_epoch_completed, self._as_last_iter_started, self._as_last_iter_completed, self._as_last_get_batch_started, self._as_last_get_batch_completed, self._as_last_completed, ] def _reset(self, num_epochs: int, total_num_iters: int) -> None: self.dataflow_times = torch.zeros(total_num_iters) self.processing_times = torch.zeros(total_num_iters) self.event_handlers_times = { Events.STARTED: torch.zeros(1), Events.COMPLETED: torch.zeros(1), Events.EPOCH_STARTED: torch.zeros(num_epochs), Events.EPOCH_COMPLETED: torch.zeros(num_epochs), Events.ITERATION_STARTED: torch.zeros(total_num_iters), Events.ITERATION_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_STARTED: torch.zeros(total_num_iters), } def _as_first_started(self, engine: Engine) -> None: if hasattr(engine.state.dataloader, "__len__"): num_iters_per_epoch = len(engine.state.dataloader) # type: ignore[arg-type] else: if engine.state.epoch_length is None: raise ValueError( "As epoch_length is not set, we can not use BasicTimeProfiler in this case." "Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this." ) num_iters_per_epoch = engine.state.epoch_length self.max_epochs = cast(int, engine.state.max_epochs) self.total_num_iters = self.max_epochs * num_iters_per_epoch self._reset(self.max_epochs, self.total_num_iters) self.event_handlers_names = { e: [ h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__ for (h, _, _) in engine._event_handlers[e] if "BasicTimeProfiler." 
not in repr(h) # avoid adding internal handlers into output ] for e in Events if e not in self.events_to_ignore } # Setup all other handlers: engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {})) for e, m in zip(self._events, self._fmethods): engine._event_handlers[e].insert(0, (m, (engine,), {})) for e, m in zip(self._events, self._lmethods): engine._event_handlers[e].append((m, (engine,), {})) # Let's go self._event_handlers_timer.reset() def _as_last_started(self, engine: Engine) -> None: self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value() def _as_first_epoch_started(self, engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_epoch_started(self, engine: Engine) -> None: t = self._event_handlers_timer.value() e = engine.state.epoch - 1 self.event_handlers_times[Events.EPOCH_STARTED][e] = t def _as_first_get_batch_started(self, engine: Engine) -> None: self._event_handlers_timer.reset() self._dataflow_timer.reset() def _as_last_get_batch_started(self, engine: Engine) -> None: t = self._event_handlers_timer.value() i = engine.state.iteration - 1 self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t def _as_first_get_batch_completed(self, engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_get_batch_completed(self, engine: Engine) -> None: t = self._event_handlers_timer.value() i = engine.state.iteration - 1 self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t d = self._dataflow_timer.value() self.dataflow_times[i] = d self._dataflow_timer.reset() def _as_first_iter_started(self, engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_iter_started(self, engine: Engine) -> None: t = self._event_handlers_timer.value() i = engine.state.iteration - 1 self.event_handlers_times[Events.ITERATION_STARTED][i] = t self._processing_timer.reset() def _as_first_iter_completed(self, engine: Engine) -> None: t = self._processing_timer.value() i = engine.state.iteration - 1 self.processing_times[i] = t self._event_handlers_timer.reset() def _as_last_iter_completed(self, engine: Engine) -> None: t = self._event_handlers_timer.value() i = engine.state.iteration - 1 self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t def _as_first_epoch_completed(self, engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_epoch_completed(self, engine: Engine) -> None: t = self._event_handlers_timer.value() e = engine.state.epoch - 1 self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t def _as_first_completed(self, engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_completed(self, engine: Engine) -> None: self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value() # Remove added handlers: engine.remove_event_handler(self._as_last_started, Events.STARTED) for e, m in zip(self._events, self._fmethods): engine.remove_event_handler(m, e) for e, m in zip(self._events, self._lmethods): engine.remove_event_handler(m, e) def attach(self, engine: Engine) -> None: """Attach BasicTimeProfiler to the given engine. 
Args: engine: the instance of Engine to attach """ if not isinstance(engine, Engine): raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}") if not engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {})) @staticmethod def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]: # compute on non-zero data: data = data[data > 0] out = [ ("total", torch.sum(data).item() if len(data) > 0 else "not yet triggered") ] # type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]] if len(data) > 1: out += [ ("min/index", (torch.min(data).item(), torch.argmin(data).item())), ("max/index", (torch.max(data).item(), torch.argmax(data).item())), ("mean", torch.mean(data).item()), ("std", torch.std(data).item()), ] return OrderedDict(out) def get_results(self) -> Dict[str, Dict[str, Any]]: """ Method to fetch the aggregated profiler results after the engine is run .. code-block:: python results = profiler.get_results() """ total_eh_time = sum( [(self.event_handlers_times[e]).sum() for e in Events if e not in self.events_to_ignore] ) # type: Union[int, torch.Tensor] event_handlers_stats = dict( [ (str(e.name).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e])) for e in Events if e not in self.events_to_ignore ] + [("total_time", total_eh_time)] # type: ignore[list-item] ) return OrderedDict( [ ("processing_stats", self._compute_basic_stats(self.processing_times)), ("dataflow_stats", self._compute_basic_stats(self.dataflow_times)), ("event_handlers_stats", event_handlers_stats), ( "event_handlers_names", {str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()}, ), ] ) def write_results(self, output_path: str) -> None: """ Method to store the unaggregated profiling results to a csv file Args: output_path: file output path containing a filename .. code-block:: python profiler.write_results('path_to_dir/awesome_filename.csv') Examples: .. code-block:: text ----------------------------------------------------------------- epoch iteration processing_stats dataflow_stats Event_STARTED ... 
1.0 1.0 0.00003 0.252387 0.125676 1.0 2.0 0.00029 0.252342 0.125123 """ try: import pandas as pd except ImportError: raise RuntimeError("Need pandas to write results as files") iters_per_epoch = self.total_num_iters // self.max_epochs epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1 iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1 processing_stats = self.processing_times dataflow_stats = self.dataflow_times event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters) event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters) event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch) event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch) event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED] event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED] event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED] event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED] results_dump = torch.stack( [ epochs, iterations, processing_stats, dataflow_stats, event_started, event_completed, event_epoch_started, event_epoch_completed, event_iter_started, event_iter_completed, event_batch_started, event_batch_completed, ], dim=1, ).numpy() results_df = pd.DataFrame( data=results_dump, columns=[ "epoch", "iteration", "processing_stats", "dataflow_stats", "Event_STARTED", "Event_COMPLETED", "Event_EPOCH_STARTED", "Event_EPOCH_COMPLETED", "Event_ITERATION_STARTED", "Event_ITERATION_COMPLETED", "Event_GET_BATCH_STARTED", "Event_GET_BATCH_COMPLETED", ], ) results_df.to_csv(output_path, index=False) @staticmethod def print_results(results: Dict) -> str: """ Method to print the aggregated results from the profiler Args: results: the aggregated results from the profiler .. code-block:: python profiler.print_results(results) Examples: .. 
code-block:: text ---------------------------------------------------- | Time profiling stats (in seconds): | ---------------------------------------------------- total | min/index | max/index | mean | std Processing function: 157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258 Dataflow: 6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693 Event handlers: 2.82721 - Events.STARTED: [] 0.00000 - Events.EPOCH_STARTED: [] 0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000 - Events.ITERATION_STARTED: ['PiecewiseLinear'] 0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001 - Events.ITERATION_COMPLETED: ['TerminateOnNan'] 0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003 - Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ] 2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790 - Events.COMPLETED: [] not yet triggered """ def to_str(v: Union[str, tuple]) -> str: if isinstance(v, str): return v elif isinstance(v, tuple): return f"{v[0]:.5f}/{v[1]}" return f"{v:.5f}" def odict_to_str(d: Mapping) -> str: out = " | ".join([to_str(v) for v in d.values()]) return out others = { k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k, v in results["event_handlers_stats"].items() } others.update(results["event_handlers_names"]) output_message = """ ---------------------------------------------------- | Time profiling stats (in seconds): | ---------------------------------------------------- total | min/index | max/index | mean | std Processing function: {processing_stats} Dataflow: {dataflow_stats} Event handlers: {total_time:.5f} - Events.STARTED: {STARTED_names} {STARTED} - Events.EPOCH_STARTED: {EPOCH_STARTED_names} {EPOCH_STARTED} - Events.ITERATION_STARTED: {ITERATION_STARTED_names} {ITERATION_STARTED} - Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names} {ITERATION_COMPLETED} - Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names} {EPOCH_COMPLETED} - Events.COMPLETED: {COMPLETED_names} {COMPLETED} """.format( processing_stats=odict_to_str(results["processing_stats"]), dataflow_stats=odict_to_str(results["dataflow_stats"]), **others, ) print(output_message) return output_message class HandlersTimeProfiler: """ HandlersTimeProfiler can be used to profile the handlers, data loading and data processing times. Custom events are also profiled by this profiler Examples: .. code-block:: python from ignite.handlers import HandlersTimeProfiler trainer = Engine(train_updater) # Create an object of the profiler and attach an engine to it profiler = HandlersTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv') .. 
versionadded:: 0.4.6 """ EVENT_FILTER_THESHOLD_TIME = 0.0001 def __init__(self) -> None: self._dataflow_timer = Timer() self._processing_timer = Timer() self._event_handlers_timer = Timer() self.dataflow_times = [] # type: List[float] self.processing_times = [] # type: List[float] self.event_handlers_times = {} # type: Dict[EventEnum, Dict[str, List[float]]] @staticmethod def _get_callable_name(handler: Callable) -> str: # get name of the callable handler return getattr(handler, "__qualname__", handler.__class__.__name__) def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable: @functools.wraps(handler) def _timeit_handler(*args: Any, **kwargs: Any) -> None: self._event_handlers_timer.reset() handler(*args, **kwargs) t = self._event_handlers_timer.value() hname = self._get_callable_name(handler) # filter profiled time if the handler was attached to event with event filter if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME: self.event_handlers_times[event][hname].append(t) # required to revert back to original handler after profiling setattr(_timeit_handler, "_profiler_original", handler) return _timeit_handler def _timeit_processing(self) -> None: # handler used for profiling processing times t = self._processing_timer.value() self.processing_times.append(t) def _timeit_dataflow(self) -> None: # handler used for profiling dataflow times t = self._dataflow_timer.value() self.dataflow_times.append(t) def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None: # reset the variables used for profiling self.dataflow_times = [] self.processing_times = [] self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names} @staticmethod def _is_internal_handler(handler: Callable) -> bool: # checks whether the handler is internal return any(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."]) def _detach_profiler_handlers(self, engine: Engine) -> None: # reverts handlers to original handlers for e in engine._event_handlers: for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]): if hasattr(func, "_profiler_original"): engine._event_handlers[e][i] = (func._profiler_original, args, kwargs) def _as_first_started(self, engine: Engine) -> None: # wraps original handlers for profiling self.event_handlers_names = { e: [ self._get_callable_name(h) for (h, _, _) in engine._event_handlers[e] if not self._is_internal_handler(h) ] for e in engine._allowed_events } self._reset(self.event_handlers_names) for e in engine._allowed_events: for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]): if not self._is_internal_handler(func): engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs) # processing timer engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset) engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {})) # dataflow timer engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset) engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {})) # revert back the wrapped handlers with original handlers at the end engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers) def attach(self, engine: Engine) -> None: """Attach HandlersTimeProfiler to the given engine. 
Args: engine: the instance of Engine to attach """ if not isinstance(engine, Engine): raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}") if not engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {})) def get_results(self) -> List[List[Union[str, float]]]: """ Method to fetch the aggregated profiler results after the engine is run .. code-block:: python results = profiler.get_results() """ total_eh_time = sum( [ sum(self.event_handlers_times[e][h]) for e in self.event_handlers_times for h in self.event_handlers_times[e] ] ) total_eh_time = round(float(total_eh_time), 5) def compute_basic_stats( times: Union[Sequence, torch.Tensor] ) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]: data = torch.as_tensor(times, dtype=torch.float32) # compute on non-zero data: data = data[data > 0] total = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered" # type: Union[str, float] min_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]] max_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]] mean = "None" # type: Union[str, float] std = "None" # type: Union[str, float] if len(data) > 0: min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item()) max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item()) mean = round(torch.mean(data).item(), 5) if len(data) > 1: std = round(torch.std(data).item(), 5) return [total, min_index, max_index, mean, std] event_handler_stats = [ [ h, getattr(e, "name", str(e)), *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)), ] for e in self.event_handlers_times for h in self.event_handlers_times[e] ] event_handler_stats.append(["Total", "", total_eh_time, "", "", "", ""]) event_handler_stats.append(["Processing", "None", *compute_basic_stats(self.processing_times)]) event_handler_stats.append(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)]) return event_handler_stats def write_results(self, output_path: str) -> None: """ Method to store the unaggregated profiling results to a csv file Args: output_path: file output path containing a filename .. code-block:: python profiler.write_results('path_to_dir/awesome_filename.csv') Examples: .. code-block:: text ----------------------------------------------------------------- # processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ... 
1 0.00003 0.252387 0.125676 2 0.00029 0.252342 0.125123 """ try: import pandas as pd except ImportError: raise RuntimeError("Need pandas to write results as files") processing_stats = torch.tensor(self.processing_times, dtype=torch.float32) dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32) cols = [processing_stats, dataflow_stats] headers = ["processing_stats", "dataflow_stats"] for e in self.event_handlers_times: for h in self.event_handlers_times[e]: headers.append(f"{h} ({getattr(e, 'name', str(e))})") cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)) # Determine maximum length max_len = max([x.numel() for x in cols]) count_col = torch.arange(max_len, dtype=torch.float32) + 1 cols.insert(0, count_col) headers.insert(0, "#") # pad all tensors to have same length cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols] results_dump = torch.stack(cols, dim=1).numpy() results_df = pd.DataFrame(data=results_dump, columns=headers) results_df.to_csv(output_path, index=False) @staticmethod def print_results(results: List[List[Union[str, float]]]) -> None: """ Method to print the aggregated results from the profiler Args: results: the aggregated results from the profiler .. code-block:: python profiler.print_results(results) Examples: .. code-block:: text ----------------------------------------- ----------------------- -------------- ... Handler Event Name Total(s) ----------------------------------------- ----------------------- -------------- run.<locals>.log_training_results EPOCH_COMPLETED 19.43245 run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271 run.<locals>.log_time EPOCH_COMPLETED 0.00049 run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106 run.<locals>.log_training_loss ITERATION_COMPLETED 0.059 run.<locals>.log_time COMPLETED not triggered ----------------------------------------- ----------------------- -------------- Total 22.04571 ----------------------------------------- ----------------------- -------------- Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0, mean: 0.00602s, std: 0.00034s] Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937, mean: 0.00866s, std: 0.00113s] """ # adopted implementation of torch.autograd.profiler.build_table handler_column_width = max([len(item[0]) for item in results]) + 4 # type: ignore[arg-type] event_column_width = max([len(item[1]) for item in results]) + 4 # type: ignore[arg-type] DEFAULT_COLUMN_WIDTH = 14 headers = [ "Handler", "Event Name", "Total(s)", "Min(s)/IDX", "Max(s)/IDX", "Mean(s)", "Std(s)", ] # Have to use a list because nonlocal is Py3 only... 
SPACING_SIZE = 2 row_format_lst = [""] header_sep_lst = [""] line_length_lst = [-SPACING_SIZE] def add_column(padding: int, text_dir: str = ">") -> None: row_format_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE) header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE) line_length_lst[0] += padding + SPACING_SIZE add_column(handler_column_width, text_dir="<") add_column(event_column_width, text_dir="<") for _ in headers[2:]: add_column(DEFAULT_COLUMN_WIDTH) row_format = row_format_lst[0] header_sep = header_sep_lst[0] result = [] def append(s: str) -> None: result.append(s) result.append("\n") result.append("\n") append(header_sep) append(row_format.format(*headers)) append(header_sep) for row in results[:-3]: # format min/idx and max/idx row[3] = "{}/{}".format(*row[3]) # type: ignore[misc] row[4] = "{}/{}".format(*row[4]) # type: ignore[misc] append(row_format.format(*row)) append(header_sep) # print total handlers time row append(row_format.format(*results[-3])) append(header_sep) summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]" for row in results[-2:]: row[3] = "{}s/{}".format(*row[3]) # type: ignore[misc] row[4] = "{}s/{}".format(*row[4]) # type: ignore[misc] del row[1] append(summary_format.format(*row)) print("".join(result))
38.412961
119
0.582978
29,980
0.991697
0
0
8,922
0.295128
0
0
10,398
0.343952
be0e7ba87c886d267ec11352e01c184c5af3e8dc
9,671
py
Python
bellmanford.py
asmodehn/aiokraken
b260bd41d5aa091e6a4f1818328426fbe6f625c0
[ "MIT" ]
null
null
null
bellmanford.py
asmodehn/aiokraken
b260bd41d5aa091e6a4f1818328426fbe6f625c0
[ "MIT" ]
82
2019-08-30T09:37:49.000Z
2022-03-29T14:53:22.000Z
bellmanford.py
asmodehn/aiokraken
b260bd41d5aa091e6a4f1818328426fbe6f625c0
[ "MIT" ]
null
null
null
""" Bellman Ford Arbitrage implementation over websocket API. """ from __future__ import annotations from collections import namedtuple from datetime import datetime from decimal import Decimal from math import log import pandas as pd import numpy as np import asyncio import typing from aiokraken.model.assetpair import AssetPair from aiokraken.rest import AssetPairs, Assets from aiokraken.model.asset import Asset from aiokraken.rest.client import RestClient from aiokraken.websockets.publicapi import ticker import networkx as nx client = RestClient() async def ticker_updates(pairs: typing.Union[AssetPairs, typing.Iterable[AssetPair]], pmatrix): # For required pairs, get ticket updates if isinstance(pairs, AssetPairs): # TODO : we need to unify iterable of pairs somehow... properpairs = pairs pairs = [p for p in pairs.values()] else: properpairs = AssetPairs({p.wsname: p for p in pairs}) tkrs = await client.ticker(pairs=[p for p in pairs]) # TODO : build price matrix for p, tk in tkrs.items(): # retrieve the actual pair pair = properpairs[p] fee = pair.fees[0].get('fee') # TODO : pick the right fee depending on total traded volume ! await pmatrix(base=pair.base, quote=pair.quote, ask_price=tk.ask.price, bid_price=tk.bid.price, fee_pct=fee) # TODO : 2 levels : # - slow updates with wide list of pairs and potential interest (no fees - small data for quick compute) # - websockets with potential arbitrage (including fees - detailed data & precise compute) async for upd in ticker(pairs=pairs, restclient=client): print(f"wss ==> tick: {upd}") # update pricematrix base = upd.pairname.base quote = upd.pairname.quote fee = properpairs[upd.pairname].fees[0].get('fee') await pmatrix(base=base, quote=quote, ask_price=upd.ask.price, bid_price=upd.bid.price, fee_pct=fee) class PriceMatrix: # Note This matrix is square # since we want to do arbitrage and find cycles... df: pd.DataFrame # we also need to be careful that only one writer can modify data at a time... wlock: asyncio.Lock assets: typing.Optional[Assets] def __init__(self, assets: typing.Union[Assets, typing.Iterable[Asset]]): self.wlock = asyncio.Lock() if isinstance(assets, Assets): assets = [a for a in assets.values()] self.df = pd.DataFrame(data={c.restname: {c.restname: None for c in assets} for c in assets}, columns=[c.restname for c in assets], dtype='float64') self.assets = None async def __call__(self, base: Asset, ask_price: Decimal, quote: Asset, bid_price: Decimal, fee_pct: Decimal): if self.assets is None: # retrieve assets for filtering calls params, only once. self.assets = await client.retrieve_assets() async with self.wlock: # careful with concurrent control. 
if not isinstance(base, Asset): base = self.assets[base].restname if not isinstance(quote, Asset): quote = self.assets[quote].restname # These are done with decimal, but stored as numpy floats for faster compute self.df[quote][base] = bid_price * ((100 - fee_pct) /100) # bid price to get: quote_curr -- (buy_price - fee) --> base_curr self.df[base][quote] = ((100 - fee_pct)/100) / ask_price # ask price to get: base_curr -- (sell_price - fee) --> quote_curr def __getitem__(self, item): if item not in self.df.columns: raise KeyError(f"{item} not found") if item not in self.df: return pd.Series(dtype=pd.dtype('decimal')) return self.df[item] def __len__(self): return len(self.df.columns) def __str__(self): return self.df.to_string() def neglog(self): if not self.assets: return False newpm = PriceMatrix(assets=[self.assets[c] for c in self.df.columns]) # copy all values and take -log() for c in self.df.columns: # TODO : fix this : is it on row, or columns ? which is best ?? newpm.df[c] = np.negative(np.log(self.df[c])) return newpm def to_graph(self): G = nx.from_pandas_adjacency(self.df, create_using=nx.DiGraph) # from bokeh.io import output_file, show # from bokeh.plotting import figure, from_networkx # # plot = figure(title="Networkx Integration Demonstration", x_range=(-1.1, 1.1), y_range=(-1.1, 1.1), # tools="", toolbar_location=None) # # graph = from_networkx(G, nx.spring_layout, scale=2, center=(0, 0)) # plot.renderers.append(graph) # # output_file("networkx_graph.html") # show(plot) return G def test_pricematrix_mapping(): # testing with string for simplicity for now pm = PriceMatrix(["EUR", "BTC"]) pm["EUR"]["BTC"] = Decimal(1.234) pm["BTC"]["EUR"] = Decimal(4.321) assert pm["EUR"]["BTC"] == Decimal(1.234) assert pm["BTC"]["EUR"] == Decimal(4.321) async def arbiter(user_assets): assets = await client.retrieve_assets() proper_userassets = Assets(assets_as_dict={assets[a].restname: assets[a] for a in user_assets}) assetpairs = await client.retrieve_assetpairs() proper_userpairs = AssetPairs(assetpairs_as_dict={p.wsname:p for p in assetpairs.values() if p.wsname is not None and ( p.base in proper_userassets or p.quote in proper_userassets )}) # retrieving widely related assets related_assets = set(assets[p.base] for p in proper_userpairs.values()) | set(assets[p.quote] for p in proper_userpairs.values()) proper_related_assets = Assets({a.restname: a for a in related_assets}) pmtx = PriceMatrix(assets=proper_related_assets) # running ticker updates in background bgtsk = asyncio.create_task(ticker_updates(pairs=proper_userpairs, pmatrix=pmtx)) try: # observe pricematrix changes while True: # TODO : efficient TUI lib ! # print(pmtx) # pricegraph = pmtx.to_graph() # display... neglog = pmtx.neglog() if neglog: negcycle = bellmanford(neglog) if len(negcycle): amnt = 1 # arbitrary starting amount pred = negcycle[-1] dscr = f"{amnt} {pred}" for cn in reversed(negcycle[:-1]): amnt = amnt * pmtx[pred][cn] pred = cn dscr = dscr + f" -> {amnt} {pred}" print(f"ARBITRAGE POSSIBLE: {dscr}") # TODO : from these we can extract market making opportunities ?? # Another way : # negloggraph = neglog.to_graph() # # negcycle = list() # # if nx.negative_edge_cycle(negloggraph): # # find it ! # print("NEGATIVE CYCLE FOUND !") # # # Now find it # print(f"computing cycles... {datetime.now()}") # # for cycle in nx.simple_cycles(negloggraph): # # for cycle in nx.cycle_basis(negloggraph): # NOT implemented ! 
# # find negative weight sum (cycle need to be more than one node) # if sum(negloggraph[n][m].get('weight') for n, m in zip(cycle, cycle[1:])) < 0: # print(f"Found one: {cycle}") # negcycle.append(cycle) # print(negcycle) # print(f"computing cycles DONE ! {datetime.now()}") await asyncio.sleep(5) finally: # in every case cancel the background task now bgtsk.cancel() # TODO: react ! def bellmanford(pmatrix_neglog: PriceMatrix, source='ZEUR'): n = len(pmatrix_neglog) min_dist = {source: 0} min_pred = {} # Relax edges |V - 1| times for i in range(n - 1): # iterations for v in pmatrix_neglog.df.columns: # vertex source if v in min_dist.keys(): # otherwise distance infinite until we know it... for w in pmatrix_neglog.df.columns: # vertex target if w not in min_dist.keys() or min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]: min_dist[w] = min_dist[v] + pmatrix_neglog[v][w] min_pred[w] = v # If we can still relax edges, then we have a negative cycle for v in pmatrix_neglog.df.columns: if v in min_dist.keys(): # otherwise node is not yet relevant here for w in pmatrix_neglog.df.columns: if min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]: # print(f"{min_dist[w]} > {min_dist[v]} + {pmatrix_neglog[v][w]}") path = (w, min_pred[w]) while len(set(path)) == len(path): # while no duplicates, cycle is not complete... path = (*path, min_pred[path[-1]]) # First cycle retrieved is *likely* (?) to be the minimal one -> the only one we are interested in return path[path.index(path[-1]):] return () if __name__ == '__main__': asyncio.run(arbiter(user_assets=["XTZ", "ETH", "XBT", "EUR"]), debug=True)
39.798354
156
0.58722
2,889
0.298728
0
0
0
0
5,262
0.544101
3,152
0.325923
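The bellmanford.py record above converts prices with -log before running Bellman-Ford, so that a profitable conversion cycle (product of rates greater than 1) shows up as a negative-weight cycle. A small self-contained check of that equivalence, with made-up rates:

from math import log

# Hypothetical conversion rates around a three-currency cycle.
rates = [0.9, 1.1, 1.02]

product = 1.0
for r in rates:
    product *= r  # > 1 means the round trip ends with more than it started with

neg_log_sum = sum(-log(r) for r in rates)

# product > 1  <=>  sum(-log(r)) < 0, which is exactly what Bellman-Ford detects.
print(product, neg_log_sum)  # approx. 1.0098 and -0.0098
assert (product > 1) == (neg_log_sum < 0)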
be0fe0b9b27dc32e6433b7115d2cc69a3736598b
362
py
Python
custom_components/snowtire/__init__.py
borys-kupar/smart-home
f9c5ac949106e09278b97f49d5e08f0d495b24ef
[ "MIT" ]
128
2021-03-04T21:54:04.000Z
2022-03-17T22:53:20.000Z
custom_components/snowtire/__init__.py
borys-kupar/smart-home
f9c5ac949106e09278b97f49d5e08f0d495b24ef
[ "MIT" ]
4
2021-03-07T21:18:12.000Z
2021-09-24T13:09:39.000Z
custom_components/snowtire/__init__.py
borys-kupar/smart-home
f9c5ac949106e09278b97f49d5e08f0d495b24ef
[ "MIT" ]
15
2021-03-05T07:29:31.000Z
2022-03-31T10:07:06.000Z
#
#  Copyright (c) 2020, Andrey "Limych" Khrolenok <[email protected]>
#  Creative Commons BY-NC-SA 4.0 International Public License
#  (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/)
#
"""
The Snowtire binary sensor.

For more details about this platform, please refer to the documentation at
https://github.com/Limych/ha-snowtire/
"""
30.166667
74
0.743094
0
0
0
0
0
0
0
0
356
0.983425
be0fff91b15ea92d0c10da9f061370eeda2b8af8
23,079
py
Python
tests/test_bayes_classifier.py
manishgit138/pomegranate
3457dcefdd623483b8efec7e9d87fd1bf4c115b0
[ "MIT" ]
3,019
2015-01-04T23:19:03.000Z
2022-03-31T12:55:46.000Z
tests/test_bayes_classifier.py
manishgit138/pomegranate
3457dcefdd623483b8efec7e9d87fd1bf4c115b0
[ "MIT" ]
818
2015-01-05T10:15:57.000Z
2022-03-07T19:30:28.000Z
tests/test_bayes_classifier.py
manishgit138/pomegranate
3457dcefdd623483b8efec7e9d87fd1bf4c115b0
[ "MIT" ]
639
2015-01-05T04:16:42.000Z
2022-03-29T11:08:00.000Z
from __future__ import (division) from pomegranate import * from pomegranate.io import DataGenerator from pomegranate.io import DataFrameGenerator from nose.tools import with_setup from nose.tools import assert_almost_equal from nose.tools import assert_equal from nose.tools import assert_not_equal from nose.tools import assert_less_equal from nose.tools import assert_raises from nose.tools import assert_true from numpy.testing import assert_array_almost_equal import pandas import random import pickle import numpy as np nan = numpy.nan def setup_multivariate_gaussian(): mu, cov = [0, 0, 0], numpy.eye(3) d1 = MultivariateGaussianDistribution(mu, cov) mu, cov = [2, 2, 2], numpy.eye(3) d2 = MultivariateGaussianDistribution(mu, cov) global model model = BayesClassifier([d1, d2]) global X X = numpy.array([[ 0.3, 0.5, 0.1], [ 0.8, 1.4, 0.5], [ 1.4, 2.6, 1.8], [ 4.2, 3.3, 3.7], [ 2.6, 3.6, 3.3], [ 3.1, 2.2, 1.7], [ 1.8, 2.2, 1.8], [-1.2, -1.8, -1.5], [-1.8, 0.3, 0.5], [ 0.7, -1.3, -0.1]]) global y y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0] global X_nan X_nan = numpy.array([[ 0.3, nan, 0.1], [ nan, 1.4, nan], [ 1.4, 2.6, nan], [ nan, nan, nan], [ nan, 3.6, 3.3], [ 3.1, nan, 1.7], [ nan, nan, 1.8], [-1.2, -1.8, -1.5], [ nan, 0.3, 0.5], [ nan, -1.3, nan]]) def setup_multivariate_mixed(): mu, cov = [0, 0, 0], numpy.eye(3) d1 = MultivariateGaussianDistribution(mu, cov) d21 = ExponentialDistribution(5) d22 = LogNormalDistribution(0.2, 0.8) d23 = PoissonDistribution(3) d2 = IndependentComponentsDistribution([d21, d22, d23]) global model model = BayesClassifier([d1, d2]) global X X = numpy.array([[ 0.3, 0.5, 0.1], [ 0.8, 1.4, 0.5], [ 1.4, 2.6, 1.8], [ 4.2, 3.3, 3.7], [ 2.6, 3.6, 3.3], [ 3.1, 2.2, 1.7], [ 1.8, 2.2, 1.8], [ 1.2, 1.8, 1.5], [ 1.8, 0.3, 0.5], [ 0.7, 1.3, 0.1]]) global y y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0] global X_nan X_nan = numpy.array([[ 0.3, nan, 0.1], [ nan, 1.4, nan], [ 1.4, 2.6, nan], [ nan, nan, nan], [ nan, 3.6, 3.3], [ 3.1, nan, 1.7], [ nan, nan, 1.8], [ 1.2, 1.8, 1.5], [ nan, 0.3, 0.5], [ nan, 1.3, nan]]) def setup_hmm(): global model global hmm1 global hmm2 global hmm3 rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) ) unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) ) hmm1 = HiddenMarkovModel() hmm1.start = rigged hmm1.add_transition(rigged, rigged, 1) hmm1.bake() hmm2 = HiddenMarkovModel() hmm2.start = unrigged hmm2.add_transition(unrigged, unrigged, 1) hmm2.bake() hmm3 = HiddenMarkovModel() hmm3.add_transition(hmm3.start, unrigged, 0.5) hmm3.add_transition(hmm3.start, rigged, 0.5) hmm3.add_transition(rigged, rigged, 0.5) hmm3.add_transition(rigged, unrigged, 0.5) hmm3.add_transition(unrigged, rigged, 0.5) hmm3.add_transition(unrigged, unrigged, 0.5) hmm3.bake() model = BayesClassifier([hmm1, hmm2, hmm3]) def setup_multivariate(): pass def teardown(): pass @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_initialization(): assert_equal(model.d, 3) assert_equal(model.n, 2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_initialization(): assert_equal(model.d, 3) assert_equal(model.n, 2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba(): y_hat = model.predict_log_proba(X) y = [[ -1.48842547e-02, -4.21488425e+00], [ -4.37487950e-01, -1.03748795e+00], [ -5.60369104e+00, -3.69104343e-03], [ -1.64000001e+01, -7.54345812e-08], [ -1.30000023e+01, -2.26032685e-06], [ -8.00033541e+00, 
-3.35406373e-04], [ -5.60369104e+00, -3.69104343e-03], [ -3.05902274e-07, -1.50000003e+01], [ -3.35406373e-04, -8.00033541e+00], [ -6.11066022e-04, -7.40061107e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba(): y_hat = model.predict_log_proba(X) y = [[ -5.03107596e-01, -9.27980626e-01], [ -1.86355320e-01, -1.77183117e+00], [ -5.58542088e-01, -8.48731256e-01], [ -7.67315597e-01, -6.24101927e-01], [ -2.32860808e+00, -1.02510436e-01], [ -3.06641866e-03, -5.78877778e+00], [ -9.85292840e-02, -2.36626165e+00], [ -2.61764180e-01, -1.46833995e+00], [ -2.01640009e-03, -6.20744952e+00], [ -1.47371167e-01, -1.98758175e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_log_proba(): y_hat = model.predict_log_proba(X_nan) y = [[ -3.99533332e-02, -3.23995333e+00], [ -1.17110067e+00, -3.71100666e-01], [ -4.01814993e+00, -1.81499279e-02], [ -6.93147181e-01, -6.93147181e-01], [ -9.80005545e+00, -5.54500620e-05], [ -5.60369104e+00, -3.69104343e-03], [ -1.78390074e+00, -1.83900741e-01], [ -3.05902274e-07, -1.50000003e+01], [ -8.68361522e-02, -2.48683615e+00], [ -1.00016521e-02, -4.61000165e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_log_proba(): y_hat = model.predict_log_proba(X_nan) y = [[ -3.57980882e-01, -1.20093223e+00], [ -1.20735130e+00, -3.55230506e-01], [ -2.43174286e-01, -1.53310132e+00], [ -6.93147181e-01, -6.93147181e-01], [ -9.31781101e+00, -8.98143220e-05], [ -6.29755079e-04, -7.37049444e+00], [ -1.31307006e+00, -3.13332194e-01], [ -2.61764180e-01, -1.46833995e+00], [ -2.29725479e-01, -1.58353505e+00], [ -1.17299253e+00, -3.70251760e-01]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba_parallel(): y_hat = model.predict_log_proba(X, n_jobs=2) y = [[ -1.48842547e-02, -4.21488425e+00], [ -4.37487950e-01, -1.03748795e+00], [ -5.60369104e+00, -3.69104343e-03], [ -1.64000001e+01, -7.54345812e-08], [ -1.30000023e+01, -2.26032685e-06], [ -8.00033541e+00, -3.35406373e-04], [ -5.60369104e+00, -3.69104343e-03], [ -3.05902274e-07, -1.50000003e+01], [ -3.35406373e-04, -8.00033541e+00], [ -6.11066022e-04, -7.40061107e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba_parallel(): y_hat = model.predict_log_proba(X, n_jobs=2) y = [[ -5.03107596e-01, -9.27980626e-01], [ -1.86355320e-01, -1.77183117e+00], [ -5.58542088e-01, -8.48731256e-01], [ -7.67315597e-01, -6.24101927e-01], [ -2.32860808e+00, -1.02510436e-01], [ -3.06641866e-03, -5.78877778e+00], [ -9.85292840e-02, -2.36626165e+00], [ -2.61764180e-01, -1.46833995e+00], [ -2.01640009e-03, -6.20744952e+00], [ -1.47371167e-01, -1.98758175e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba(): y_hat = model.predict_proba(X) y = [[ 9.85225968e-01, 1.47740317e-02], [ 6.45656306e-01, 3.54343694e-01], [ 3.68423990e-03, 9.96315760e-01], [ 7.54345778e-08, 9.99999925e-01], [ 2.26032430e-06, 9.99997740e-01], [ 3.35350130e-04, 9.99664650e-01], [ 3.68423990e-03, 9.96315760e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.99664650e-01, 3.35350130e-04], [ 9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat) 
@with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba(): y_hat = model.predict_proba(X) y = [[ 0.60464873, 0.39535127], [ 0.82997863, 0.17002137], [ 0.57204244, 0.42795756], [ 0.46425765, 0.53574235], [ 0.09743127, 0.90256873], [ 0.99693828, 0.00306172], [ 0.90616916, 0.09383084], [ 0.76969251, 0.23030749], [ 0.99798563, 0.00201437], [ 0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y = [[ 9.60834277e-01, 3.91657228e-02], [ 3.10025519e-01, 6.89974481e-01], [ 1.79862100e-02, 9.82013790e-01], [ 5.00000000e-01, 5.00000000e-01], [ 5.54485247e-05, 9.99944551e-01], [ 3.68423990e-03, 9.96315760e-01], [ 1.67981615e-01, 8.32018385e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.16827304e-01, 8.31726965e-02], [ 9.90048198e-01, 9.95180187e-03]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y = [[ 6.99086440e-01, 3.00913560e-01], [ 2.98988163e-01, 7.01011837e-01], [ 7.84134838e-01, 2.15865162e-01], [ 5.00000000e-01, 5.00000000e-01], [ 8.98102888e-05, 9.99910190e-01], [ 9.99370443e-01, 6.29556825e-04], [ 2.68992964e-01, 7.31007036e-01], [ 7.69692511e-01, 2.30307489e-01], [ 7.94751748e-01, 2.05248252e-01], [ 3.09439547e-01, 6.90560453e-01]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba_parallel(): y_hat = model.predict_proba(X, n_jobs=2) y = [[ 9.85225968e-01, 1.47740317e-02], [ 6.45656306e-01, 3.54343694e-01], [ 3.68423990e-03, 9.96315760e-01], [ 7.54345778e-08, 9.99999925e-01], [ 2.26032430e-06, 9.99997740e-01], [ 3.35350130e-04, 9.99664650e-01], [ 3.68423990e-03, 9.96315760e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.99664650e-01, 3.35350130e-04], [ 9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba_parallel(): y_hat = model.predict_proba(X, n_jobs=2) y = [[ 0.60464873, 0.39535127], [ 0.82997863, 0.17002137], [ 0.57204244, 0.42795756], [ 0.46425765, 0.53574235], [ 0.09743127, 0.90256873], [ 0.99693828, 0.00306172], [ 0.90616916, 0.09383084], [ 0.76969251, 0.23030749], [ 0.99798563, 0.00201437], [ 0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict(): y_hat = model.predict(X) y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict(): y_hat = model.predict(X) y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict(): y_hat = model.predict(X_nan) y = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict(): y_hat = model.predict(X_nan) y = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_parallel(): y_hat = model.predict(X, n_jobs=2) y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def 
test_bc_multivariate_mixed_predict_parallel(): y_hat = model.predict(X, n_jobs=2) y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_fit_parallel(): model.fit(X, y, n_jobs=2) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.28333333, 0.21666666] cov1_t = [[1.3088888, 0.9272222, 0.6227777], [0.9272222, 2.2513888, 1.3402777], [0.6227777, 1.3402777, 0.9547222]] mu2 = model.distributions[1].parameters[0] cov2 = model.distributions[1].parameters[1] mu2_t = [2.925, 2.825, 2.625] cov2_t = [[0.75687499, 0.23687499, 0.4793750], [0.23687499, 0.40187499, 0.5318749], [0.47937500, 0.53187499, 0.7868750]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_fit_parallel(): model.fit(X, y, n_jobs=2) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [1.033333, 1.3166667, 0.75] cov1_t = [[0.242222, 0.0594444, 0.178333], [0.059444, 0.5980555, 0.414166], [0.178333, 0.4141666, 0.439166]] d21 = model.distributions[1].distributions[0] d22 = model.distributions[1].distributions[1] d23 = model.distributions[1].distributions[2] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(d21.parameters, [0.34188034]) assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346]) assert_array_almost_equal(d23.parameters, [2.625]) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_from_samples(): model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.2833333, 0.21666666] cov1_t = [[1.308888888, 0.9272222222, 0.6227777777], [0.927222222, 2.251388888, 1.340277777], [0.622777777, 1.340277777, 0.9547222222]] mu2 = model.distributions[1].parameters[0] cov2 = model.distributions[1].parameters[1] mu2_t = [2.925, 2.825, 2.625] cov2_t = [[0.75687500, 0.23687499, 0.47937500], [0.23687499, 0.40187499, 0.53187499], [0.47937500, 0.53187499, 0.78687500]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) 
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_robust_from_json(): model2 = from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_robust_from_json(): model2 = from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_hmm, teardown) def test_model(): assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 ) assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 ) assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 ) assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 ) assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 ) assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 ) assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 ) assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 ) assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417) assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776) assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167) assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397) assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105) assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788) assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343) assert_equal(model.d, 1) @with_setup(setup_hmm, teardown) def test_hmm_log_proba(): logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_almost_equal(logs[0][0], -0.89097292388986515) assert_almost_equal(logs[0][1], -1.3609765531356006) assert_almost_equal(logs[0][2], -1.0986122886681096) assert_almost_equal(logs[1][0], -0.93570553121744293) assert_almost_equal(logs[1][1], -1.429425687080494) assert_almost_equal(logs[1][2], -0.9990078376167526) assert_almost_equal(logs[2][0], -3.9007882563128864) assert_almost_equal(logs[2][1], -0.23562532881626597) assert_almost_equal(logs[2][2], -1.6623251045711958) assert_almost_equal(logs[3][0], 
-3.1703366478831185) assert_almost_equal(logs[3][1], -0.49261403211260379) assert_almost_equal(logs[3][2], -1.058478108940049) assert_almost_equal(logs[4][0], -1.3058441172130273) assert_almost_equal(logs[4][1], -1.4007102236822906) assert_almost_equal(logs[4][2], -0.7284958836972919) @with_setup(setup_hmm, teardown) def test_hmm_proba(): probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_almost_equal(probs[0][0], 0.41025641025641024) assert_almost_equal(probs[0][1], 0.25641025641025639) assert_almost_equal(probs[0][2], 0.33333333333333331) assert_almost_equal(probs[1][0], 0.39230898163446098) assert_almost_equal(probs[1][1], 0.23944639992337707) assert_almost_equal(probs[1][2], 0.36824461844216183) assert_almost_equal(probs[2][0], 0.020225961918306088) assert_almost_equal(probs[2][1], 0.79007663743383105) assert_almost_equal(probs[2][2], 0.18969740064786292) assert_almost_equal(probs[3][0], 0.041989459861032523) assert_almost_equal(probs[3][1], 0.61102706038265642) assert_almost_equal(probs[3][2], 0.346983479756311) assert_almost_equal(probs[4][0], 0.27094373022369794) assert_almost_equal(probs[4][1], 0.24642188711704707) assert_almost_equal(probs[4][2], 0.48263438265925512) @with_setup(setup_hmm, teardown) def test_hmm_prediction(): predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_equal(predicts[0], 0) assert_equal(predicts[1], 0) assert_equal(predicts[2], 1) assert_equal(predicts[3], 1) assert_equal(predicts[4], 2) @with_setup(setup_multivariate_gaussian, teardown) def test_io_log_probability(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) logp1 = model.log_probability(X) logp2 = model.log_probability(X2) logp3 = model.log_probability(X3) assert_array_almost_equal(logp1, logp2) assert_array_almost_equal(logp1, logp3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict(X) y_hat2 = model.predict(X2) y_hat3 = model.predict(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_proba(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict_proba(X) y_hat2 = model.predict_proba(X2) y_hat3 = model.predict_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_log_proba(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict_log_proba(X) y_hat2 = model.predict_log_proba(X2) y_hat3 = model.predict_log_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) def test_io_fit(): X = numpy.random.randn(100, 5) + 0.5 weights = numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights, y) mu1 = numpy.array([0, 0, 0, 0, 0]) mu2 = numpy.array([1, 1, 1, 1, 1]) cov = numpy.eye(5) d1 = MultivariateGaussianDistribution(mu1, cov) d2 = MultivariateGaussianDistribution(mu2, cov) bc1 = BayesClassifier([d1, d2]) bc1.fit(X, y, weights) d1 = MultivariateGaussianDistribution(mu1, cov) d2 = MultivariateGaussianDistribution(mu2, cov) bc2 = BayesClassifier([d1, d2]) bc2.fit(data_generator) logp1 = bc1.log_probability(X) logp2 = bc2.log_probability(X) 
assert_array_almost_equal(logp1, logp2) def test_io_from_samples(): X = numpy.random.randn(100, 5) + 0.5 weights = numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights, y) d = MultivariateGaussianDistribution bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights) bc2 = BayesClassifier.from_samples(d, X=data_generator) logp1 = bc1.log_probability(X) logp2 = bc2.log_probability(X) assert_array_almost_equal(logp1, logp2)
32.597458
126
0.716669
0
0
0
0
18,547
0.803631
0
0
241
0.010442
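The test module in the record above exercises pomegranate's BayesClassifier; as a minimal standalone sketch of the pattern it tests (two multivariate Gaussian components, then prediction), using only calls that appear in that record and assuming a pre-1.0 pomegranate release where these names are top-level imports:

# Illustrative sketch, not part of the dataset record: mirrors the
# setup_multivariate_gaussian fixture from the test module above.
import numpy
from pomegranate import MultivariateGaussianDistribution, BayesClassifier

d1 = MultivariateGaussianDistribution([0, 0, 0], numpy.eye(3))  # class 0 component
d2 = MultivariateGaussianDistribution([2, 2, 2], numpy.eye(3))  # class 1 component
model = BayesClassifier([d1, d2])

X = numpy.array([[0.3, 0.5, 0.1],
                 [4.2, 3.3, 3.7]])
print(model.predict(X))        # hard labels, here [0, 1]
print(model.predict_proba(X))  # per-class posterior probabilities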
be10e301876952317779fb802d1ea27b44f1342a
2,188
py
Python
ks_engine/variable_scoring.py
FilippoRanza/ks.py
47d909fb70fec50f8d3174855bf5d0c05527bf03
[ "MIT" ]
2
2021-01-29T11:45:39.000Z
2022-03-10T03:17:12.000Z
ks_engine/variable_scoring.py
Optimization-Algorithms/ks.py
44890d33a744c5c4865b96f97efc1e5241b719b1
[ "MIT" ]
1
2020-05-12T16:18:34.000Z
2020-05-12T16:18:34.000Z
ks_engine/variable_scoring.py
Optimization-Algorithms/ks.py
44890d33a744c5c4865b96f97efc1e5241b719b1
[ "MIT" ]
1
2021-01-29T11:45:45.000Z
2021-01-29T11:45:45.000Z
#! /usr/bin/python

from .solution import Solution

try:
    import gurobipy
except ImportError:
    print("Gurobi not found: error ignored to allow tests")


def variable_score_factory(sol: Solution, base_kernel: dict, config: dict):
    if config.get("VARIABLE_RANKING"):
        output = VariableRanking(sol, base_kernel)
    else:
        output = ReducedCostScoring(sol, base_kernel)

    return output


class AbstactVariableScoring:
    def __init__(self, solution: Solution, base_kernel: dict):
        self.score = {k: 0 if base_kernel[k] else v for k, v in solution.vars.items()}

    def get_value(self, var_name):
        return self.score[var_name]

    def success_update_score(self, curr_kernel, curr_bucket):
        raise NotImplementedError

    def failure_update_score(self, curr_kernel, curr_bucket):
        raise NotImplementedError


class ReducedCostScoring(AbstactVariableScoring):
    def success_update_score(self, curr_kernel, curr_bucket):
        pass

    def failure_update_score(self, curr_kernel, curr_bucket):
        pass


class VariableRanking(AbstactVariableScoring):
    def cb_update_score(self, name, value):
        if value == 0:
            self.score[name] += 0.1
        else:
            self.score[name] -= 0.1

    def success_update_score(self, curr_kernel, curr_bucket):
        for var in curr_bucket:
            if curr_kernel[var]:
                self.score[var] -= 15
            else:
                self.score[var] += 15

    def failure_update_score(self, curr_kernel, curr_bucket):
        for var in curr_bucket:
            if curr_kernel[var]:
                self.score[var] += 1
            else:
                self.score[var] -= 1


def callback_factory(scoring: AbstactVariableScoring):
    if isinstance(scoring, VariableRanking):
        output = __build_callback__(scoring)
    else:
        output = None

    return output


def __build_callback__(scoring):
    def callback(model, where):
        if where == gurobipy.GRB.Callback.MIPSOL:
            for var in model.getVars():
                value = model.cbGetSolution(var)
                scoring.cb_update_score(var.varName, value)

    return callback
27.012346
86
0.65128
1,286
0.587751
0
0
0
0
0
0
84
0.038391
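A short usage sketch of the VariableRanking scorer defined in the record above; the Solution stand-in is hypothetical (only the `vars` mapping the constructor reads is modelled), so this is illustrative rather than the project's own test code:

from ks_engine.variable_scoring import VariableRanking

class FakeSolution:
    """Hypothetical stand-in for ks_engine.solution.Solution: only `vars` is used."""
    def __init__(self, vars):
        self.vars = vars

sol = FakeSolution({"x1": 0.5, "x2": 1.0})
base_kernel = {"x1": True, "x2": False}          # x1 starts inside the kernel

scoring = VariableRanking(sol, base_kernel)      # initial scores: x1 -> 0, x2 -> 1.0
scoring.success_update_score(curr_kernel={"x1": True, "x2": False},
                             curr_bucket=["x1", "x2"])
print(scoring.get_value("x1"))                   # -15   (0 - 15: kernel variable)
print(scoring.get_value("x2"))                   # 16.0  (1.0 + 15: non-kernel bucket variable)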
be139101ad7d93480666b4065956e230585c96d9
1,180
py
Python
src/fetchcode/vcs/pip/_internal/utils/entrypoints.py
quepop/fetchcode
ac2461bdf7a249d8815987b4d421dbc615c043b9
[ "Apache-2.0" ]
7
2019-10-04T07:27:41.000Z
2021-06-07T04:39:18.000Z
src/fetchcode/vcs/pip/_internal/utils/entrypoints.py
quepop/fetchcode
ac2461bdf7a249d8815987b4d421dbc615c043b9
[ "Apache-2.0" ]
64
2019-10-07T12:40:56.000Z
2022-02-17T18:44:37.000Z
src/fetchcode/vcs/pip/_internal/utils/entrypoints.py
quepop/fetchcode
ac2461bdf7a249d8815987b4d421dbc615c043b9
[ "Apache-2.0" ]
16
2019-10-04T08:48:12.000Z
2021-06-11T01:22:56.000Z
import sys

from fetchcode.vcs.pip._internal.cli.main import main
from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING

if MYPY_CHECK_RUNNING:
    from typing import Optional, List


def _wrapper(args=None):
    # type: (Optional[List[str]]) -> int
    """Central wrapper for all old entrypoints.

    Historically pip has had several entrypoints defined. Because of issues
    arising from PATH, sys.path, multiple Pythons, their interactions, and
    most of them having a pip installed, users suffer every time an
    entrypoint gets moved.

    To alleviate this pain, and provide a mechanism for warning users and
    directing them to an appropriate place for help, we now define all of
    our old entrypoints as wrappers for the current one.
    """
    sys.stderr.write(
        "WARNING: pip is being invoked by an old script wrapper. This will "
        "fail in a future version of pip.\n"
        "Please see https://github.com/pypa/pip/issues/5599 for advice on "
        "fixing the underlying issue.\n"
        "To avoid this problem you can invoke Python with '-m pip' instead of "
        "running pip directly.\n"
    )
    return main(args)
36.875
79
0.710169
0
0
0
0
0
0
0
0
839
0.711017
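The wrapper in the record above is the usual shape of a deprecated console-script shim; as a hedged sketch of how such a function is typically wired up via setuptools (project and script names here are illustrative, not taken from the fetchcode repository):

# Hypothetical setup.py fragment; only the entry_points mechanism is the point.
from setuptools import setup

setup(
    name="example-tool",
    version="0.1",
    entry_points={
        "console_scripts": [
            # old command name kept alive, but routed through the warning wrapper
            "example-tool = example_tool.entrypoints:_wrapper",
        ],
    },
)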
be145918e072dc9949c9e4a6667701e412064948
7,896
py
Python
Support/Make_Documentation.py
bvbohnen/x4-projects
2c9db75a720ddb52ddb9e4160c330d7bb1986aa3
[ "MIT" ]
24
2020-04-11T18:43:01.000Z
2022-02-23T11:02:02.000Z
Support/Make_Documentation.py
abouquet/x4-projects
27ba6d2faaab95cfb9114bccb41fadbfe56443b7
[ "MIT" ]
10
2020-04-11T07:50:33.000Z
2022-03-31T05:01:35.000Z
Support/Make_Documentation.py
abouquet/x4-projects
27ba6d2faaab95cfb9114bccb41fadbfe56443b7
[ "MIT" ]
8
2020-04-24T05:21:55.000Z
2022-03-26T03:02:13.000Z
''' Support for generating documentation readmes for the extensions. Extracts from decorated lua block comments and xml comments. ''' from pathlib import Path from lxml import etree import sys from itertools import chain project_dir = Path(__file__).resolve().parents[1] # Set up an import from the customizer for some text processing. x4_customizer_dir = str(project_dir.parent / 'X4_Customizer') if x4_customizer_dir not in sys.path: sys.path.append(x4_customizer_dir) from Framework.Make_Documentation import Merge_Lines #from Framework.Make_Documentation import Get_BB_Text # Grab the project specifications. from Release_Specs import release_specs def Make(): for spec in release_specs: # Update all of the content.xml files. spec.Update_Content_Version() # Make each of the doc files (if any). # (Note: this function not included in the class methods to avoid # import issues with the text helper functions below.) for rel_path, file_list in spec.doc_specs.items(): # Set up the full path. doc_path = spec.root_path / rel_path # Get lines for all files. doc_lines = [] for file_path in file_list: if file_path.suffix == '.xml': doc_lines += Get_XML_Cue_Text(file_path) elif file_path.suffix == '.lua': doc_lines += Get_Lua_Text(file_path) with open(doc_path, 'w') as file: file.write('\n'.join(doc_lines)) return def Sections_To_Lines(doc_text_sections): ''' Converts a dict of {section label: text} to a list of text lines, with labelling and formatting applied. Expects the input to start with a 'title', then 'overview', then a series of names of cues or functions. ''' # Transfer to annotated/indented lines. functions_started = False title = '' ret_text_lines = [] for key, text in doc_text_sections: # Extract the title and continue; this isn't printed directly. if key == 'title': title = text.strip() continue # Header gets an 'overview' label. if key == 'overview': ret_text_lines += ['', '### {} Overview'.format(title), ''] indent = '' # Lua functions are in one lump, like overview. elif key == 'functions': ret_text_lines += ['', '### {} Functions'.format(title), ''] indent = '' # Sections may be multiple. elif key == 'section': ret_text_lines += ['',''] indent = '' # Otherwise these are md cues. else: indent = ' ' # Stick a label line when starting the function section. if not functions_started: functions_started = True ret_text_lines += ['', '### {} Cues'.format(title), ''] # Bullet the function name. ret_text_lines.append('* **{}**'.format(key)) # Process the text a bit. text = Merge_Lines(text) # Add indents to functions, and break into convenient lines. text_lines = [indent + line for line in text.splitlines()] # Record for output. ret_text_lines += text_lines return ret_text_lines def Get_XML_Cue_Text(xml_path): ''' Returns a list of lines holding the documentation extracted from a decorated MD xml file. ''' # List of tuples of (label, text) hold the extracted text lines. doc_text_sections = [] # Read the xml and pick out the cues. tree = etree.parse(str(xml_path)) root = tree.xpath('/*')[0] cues = tree.xpath('/*/cues')[0] # Stride through comments/cues in the list. # Looking for decorated comments. for node in chain(root.iterchildren(), cues.iterchildren()): # Skip non-comments. # Kinda awkward how lxml checks this (isinstance doesn't work). if node.tag is not etree.Comment: continue # Handle title declarations. 
if '@doc-title' in node.text: label = 'title' text = node.text.replace('@doc-title','') elif '@doc-overview' in node.text: label = 'overview' text = node.text.replace('@doc-overview','') elif '@doc-section' in node.text: label = 'section' text = node.text.replace('@doc-section','') elif '@doc-cue' in node.text: label = node.getnext().get('name') text = node.text.replace('@doc-cue','') else: # Unwanted comment; skip. continue # Record it. doc_text_sections.append((label, text)) # Process into lines and return. return Sections_To_Lines(doc_text_sections) def Get_Lua_Text(lua_path): ''' Extract documentation text from a decorated lua file. ''' text = lua_path.read_text() ret_text_lines = [] # Extract non-indented comments. # TODO: maybe regex this. comment_blocks = [] lua_lines = text.splitlines() i = 0 while i < len(lua_lines): this_line = lua_lines[i] if this_line.startswith('--[['): # Scan until the closing ]]. these_lines = [] # Record the first line. these_lines.append(this_line.replace('--[[','')) i += 1 # Only search to the end of the doc. while i < len(lua_lines): next_line = lua_lines[i] if next_line.startswith(']]'): # Found the last line; skip it. break these_lines.append(next_line) i += 1 comment_blocks.append('\n'.join(these_lines)) # Check single-line comments after block comments, to avoid # -- confusion. elif this_line.startswith('--'): comment_blocks.append(this_line.replace('--','')) # Always one increment per loop. i += 1 # Title to put on label lines. # Starts blank, filled by decorator. title = '' # List of tuples of (label, text) hold the extracted text lines. doc_text_sections = [] # Go through the comments looking for decorators. for comment in comment_blocks: # Handle title declarations. if '@doc-title' in comment: label = 'title' text = comment.replace('@doc-title','') # Text blocks are either overview or cue. elif '@doc-overview' in comment: label = 'overview' text = comment.replace('@doc-overview','') # For now, all functions are lumped together in one comment. elif '@doc-functions' in comment: label = 'functions' text = comment.replace('@doc-functions','') else: # Unwanted comment; skip. continue # Record it. doc_text_sections.append((label, text)) # Process into lines and return. return Sections_To_Lines(doc_text_sections) #-Removed; generally avoiding putting main docs on the forum. #def Make_BB_Code(doc_dir, header_lines = []): # ''' # Turn the ext_dir's readme into a bbcode txt file. # Output is placed in the release folder. # ''' # release_dir = project_dir / 'Release' # if not release_dir.exists(): # release_dir.mkdir() # # # Grab the readme contents. # doc_lines = (doc_dir / 'Readme.md').read_text().splitlines() # # Generate a bbcode version, prefixing with custom header. # bb_lines = header_lines + Get_BB_Text(doc_lines) # (release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\n'.join(bb_lines)) # return if __name__ == '__main__': Make()
31.967611
86
0.58498
0
0
0
0
0
0
0
0
3,498
0.443009
be14596b5522e0877a99c1e6b243c1003263e5ff
71
py
Python
Chapter 2 - Variables & Data Types/05_pr_set_add_two_no.py
alex-dsouza777/Python-Basics
8f1c406f2319cd65b5d54dfea990d09fa69d9adf
[ "MIT" ]
null
null
null
Chapter 2 - Variables & Data Types/05_pr_set_add_two_no.py
alex-dsouza777/Python-Basics
8f1c406f2319cd65b5d54dfea990d09fa69d9adf
[ "MIT" ]
null
null
null
Chapter 2 - Variables & Data Types/05_pr_set_add_two_no.py
alex-dsouza777/Python-Basics
8f1c406f2319cd65b5d54dfea990d09fa69d9adf
[ "MIT" ]
1
2021-04-21T10:23:08.000Z
2021-04-21T10:23:08.000Z
#Addition of two numbers
a = 30
b = 17
print("Sum of a and b is",a + b)
17.75
32
0.633803
0
0
0
0
0
0
0
0
43
0.605634
be15fa91cd3274065ddb261552f8c0f2ea292fcd
2,960
py
Python
curso 1/04 - caixa de texto/a4.py
andersonssh/aprendendo-pyqt5
d15ad7378d4573410c11fc39042df19048c656e4
[ "MIT" ]
null
null
null
curso 1/04 - caixa de texto/a4.py
andersonssh/aprendendo-pyqt5
d15ad7378d4573410c11fc39042df19048c656e4
[ "MIT" ]
null
null
null
curso 1/04 - caixa de texto/a4.py
andersonssh/aprendendo-pyqt5
d15ad7378d4573410c11fc39042df19048c656e4
[ "MIT" ]
null
null
null
import sys from PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit) from PyQt5 import QtGui class Janela(QMainWindow): def __init__(self): super().__init__() self.topo = 50 self.esquerda = 50 self.largura = 800 self.altura = 600 self.titulo = 'Primeira janela' self.gera_labels() self.gera_botoes() self.gera_imagens() self.gera_caixas_de_texto() def carregar_janela(self): self.setGeometry(self.esquerda, self.topo, self.largura, self.altura) self.setWindowTitle(self.titulo) self.show() def gera_botoes(self): # botoes botao1 = QPushButton('Botao 1', self) botao1.move(100, 100) botao1.resize(100, 50) botao1.setStyleSheet( 'QPushButton{background-color: white; color: black;} QPushButton:hover{ background: orange; font-weight: 600;}') botao1.clicked.connect(self.b1) botao2 = QPushButton('Botao 2', self) botao2.move(300, 100) botao2.resize(100, 50) botao2.setStyleSheet( 'QPushButton{background-color: blue; color: white;} QPushButton:hover{ background: orange; font-weight: 600}') botao2.clicked.connect(self.b2) botao3 = QPushButton('Texto', self) botao3.move(500, 100) botao3.resize(100, 50) botao3.setStyleSheet('QPushButton{background-color: black; color: white;} QPushButton:hover{ background: orange; font-weight: 600}') botao3.clicked.connect(self.b3) def gera_labels(self): self.l1 = QLabel(self) self.l1.setText('Clique em um botao') self.l1.move(50, 50) self.l1.setStyleSheet('QLabel{font: bold; font-size: 20px;}') self.l1.resize(250, 50) self.l2 = QLabel(self) self.l2.setText('Digitou: ') self.l2.move(300, 30) self.l2.resize(260, 50) self.l2.setStyleSheet('QLabel{font: bold; font-size: 30px;}') def gera_imagens(self): self.carro = QLabel(self) self.carro.move(25, 200) self.carro.resize(450, 337) self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def gera_caixas_de_texto(self): self.caixa_texto = QLineEdit(self) self.caixa_texto.move(25, 10) self.caixa_texto.resize(150, 50) def b1(self): # forma 1 self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def b2(self, l): # forma 2 self.carro.setPixmap(QtGui.QPixmap('carro2.jpg')) def b3(self): conteudo = self.caixa_texto.text() self.l2.setText('Digitou: {}'.format(conteudo)) if __name__ == '__main__': app = QApplication(sys.argv) janela = Janela() janela.carregar_janela() sys.exit(app.exec_())
31.489362
140
0.591216
2,541
0.858446
0
0
0
0
0
0
562
0.189865
be18b88ab1937677b7e3d5583d09538c7f91bce2
2,460
py
Python
pdf2write.py
codeunik/stylus_labs_write_pdf_importer
25d7aa037647a86284c24527bda7b222cf95bb62
[ "MIT" ]
null
null
null
pdf2write.py
codeunik/stylus_labs_write_pdf_importer
25d7aa037647a86284c24527bda7b222cf95bb62
[ "MIT" ]
null
null
null
pdf2write.py
codeunik/stylus_labs_write_pdf_importer
25d7aa037647a86284c24527bda7b222cf95bb62
[ "MIT" ]
null
null
null
import base64 import os import sys import PyPDF2 svg = '''<svg id="write-document" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> <rect id="write-doc-background" width="100%" height="100%" fill="#808080"/> <defs id="write-defs"> <script type="text/writeconfig"> <int name="docFormatVersion" value="2" /> <int name="pageColor" value="-1" /> <int name="pageNum" value="0" /> <int name="ruleColor" value="0" /> <float name="marginLeft" value="0" /> <float name="xOffset" value="-380.701752" /> <float name="xRuling" value="0" /> <float name="yOffset" value="1536.84216" /> <float name="yRuling" value="0" /> </script> </defs> ''' pdf_path = sys.argv[1] pdf = PyPDF2.PdfFileReader(pdf_path, "rb") img_width = 720 n_pages = pdf.getNumPages() + 1 page = pdf.getPage(0) width = page.mediaBox.getWidth() height = page.mediaBox.getHeight() aspect_ratio = height/width img_height = int(aspect_ratio * img_width) os.system('mkdir -p /tmp/pdf2write') new_page_height = 0 for page in range(n_pages): print(f"Processing {page}/{n_pages}", end='\r') os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile') with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f: base64_data = base64.b64encode(f.read()).decode('utf-8') tmp_svg = f'''<svg class="write-page" color-interpolation="linearRGB" x="10" y="{new_page_height+10}" width="{img_width}px" height="{img_height}px" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"> <g class="write-content write-v3" width="{img_width}" height="{img_height}" xruling="0" yruling="0" marginLeft="0" papercolor="#FFFFFF" rulecolor="#00000000"> <g class="ruleline write-std-ruling write-scale-down" fill="none" stroke="none" stroke-width="1" shape-rendering="crispEdges" vector-effect="non-scaling-stroke"> <rect class="pagerect" fill="#FFFFFF" stroke="none" x="0" y="0" width="{img_width}" height="{img_height}" /> </g> <image x="0" y="0" width="{img_width}" height="{img_height}" xlink:href="data:image/png;base64,{base64_data}"/> </g> </svg>''' new_page_height += (img_height+10) svg += tmp_svg svg += '''</svg>''' os.system('rm -rf /tmp/pdf2write') with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg', 'w') as f: f.write(svg) os.system(f'gzip -S z {os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg')
37.846154
230
0.667073
0
0
0
0
0
0
0
0
1,802
0.73252
be18cd8c90ebbd40ae9aadcbac8dd9bce504b9ec
2,462
py
Python
py_headless_daw/project/having_parameters.py
hq9000/py-headless-daw
33e08727c25d3f00b2556adf5f25c9f7ff4d4304
[ "MIT" ]
22
2020-06-09T18:46:56.000Z
2021-09-28T02:11:42.000Z
py_headless_daw/project/having_parameters.py
hq9000/py-headless-daw
33e08727c25d3f00b2556adf5f25c9f7ff4d4304
[ "MIT" ]
19
2020-06-03T06:34:57.000Z
2021-01-26T07:36:17.000Z
py_headless_daw/project/having_parameters.py
hq9000/py-headless-daw
33e08727c25d3f00b2556adf5f25c9f7ff4d4304
[ "MIT" ]
1
2020-06-18T09:25:21.000Z
2020-06-18T09:25:21.000Z
from typing import Dict, List, cast

from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType


class HavingParameters:

    def __init__(self):
        self._parameters: Dict[str, Parameter] = {}
        super().__init__()

    def has_parameter(self, name: str) -> bool:
        return name in self._parameters

    def add_parameter(self,
                      name: str,
                      value: ParameterValueType,
                      param_type: str,
                      value_range: ParameterRangeType):

        if name in self._parameters:
            raise Exception('parameter named ' + name + ' already added to this object')

        parameter = Parameter(name, value, param_type, value_range)
        self._parameters[name] = parameter

    def add_parameter_object(self, parameter: Parameter) -> None:
        self._parameters[parameter.name] = parameter

    def get_parameter(self, name: str) -> Parameter:

        for parameter in self.parameters:
            if parameter.name == name:
                return parameter

        list_of_names: List[str] = [p.name for p in self.parameters]

        # noinspection PyTypeChecker
        available_names: List[str] = cast(List[str], list_of_names)

        raise Exception('parameter named ' + name + ' not found. Available: ' + ', '.join(available_names))

    def get_parameter_value(self, name: str) -> ParameterValueType:
        param = self.get_parameter(name)
        return param.value

    def get_float_parameter_value(self, name: str) -> float:
        param = self.get_parameter(name)
        if param.type != Parameter.TYPE_FLOAT:
            raise ValueError(f"parameter {name} was expected to be float (error: f009d0ef)")
        value = self.get_parameter_value(name)
        cast_value = cast(float, value)
        return cast_value

    def get_enum_parameter_value(self, name: str) -> str:
        param = self.get_parameter(name)
        if param.type != Parameter.TYPE_ENUM:
            raise ValueError(f"parameter {name} was expected to be enum (error: 80a1d180)")
        value = self.get_parameter_value(name)
        cast_value = cast(str, value)
        return cast_value

    def set_parameter_value(self, name: str, value: ParameterValueType):
        param = self.get_parameter(name)
        param.value = value

    @property
    def parameters(self) -> List[Parameter]:
        return list(self._parameters.values())
35.681159
107
0.644598
2,326
0.94476
0
0
101
0.041024
0
0
247
0.100325
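A hedged usage sketch for the HavingParameters mixin in the record above; the (0.0, 1.0) range tuple and the assumption that Parameter stores its type on a `.type` attribute are inferred from the mixin's own calls, not from the project's documentation:

from py_headless_daw.project.having_parameters import HavingParameters
from py_headless_daw.project.parameter import Parameter

class FilterUnit(HavingParameters):
    """Hypothetical component exposing one float parameter."""
    def __init__(self):
        super().__init__()
        # matches add_parameter(name, value, param_type, value_range) above;
        # the (0.0, 1.0) range is an assumed ParameterRangeType value
        self.add_parameter("cutoff", 0.5, Parameter.TYPE_FLOAT, (0.0, 1.0))

unit = FilterUnit()
unit.set_parameter_value("cutoff", 0.8)
print(unit.get_float_parameter_value("cutoff"))  # 0.8
print(unit.has_parameter("resonance"))           # False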
be193942a6e1e90e82121a1e52ce25e1006effc3
488
py
Python
wasatch/ROI.py
adiravishankara/Wasatch.PY
058b3de2c9399e9aea6347fa360f9c7dbbf296aa
[ "MIT" ]
9
2018-10-31T11:38:18.000Z
2021-11-23T19:20:54.000Z
wasatch/ROI.py
adiravishankara/Wasatch.PY
058b3de2c9399e9aea6347fa360f9c7dbbf296aa
[ "MIT" ]
3
2018-11-01T10:28:53.000Z
2022-03-21T17:40:05.000Z
wasatch/ROI.py
adiravishankara/Wasatch.PY
058b3de2c9399e9aea6347fa360f9c7dbbf296aa
[ "MIT" ]
4
2018-08-03T08:46:08.000Z
2022-03-23T01:09:27.000Z
##
# This class encapsulates a Region Of Interest, which may be either horizontal
# (pixels) or vertical (rows/lines).
class ROI:
    def __init__(self, start, end):
        self.start = start
        self.end = end
        self.len = end - start + 1

    def valid(self):
        return self.start >= 0 and self.start < self.end

    def crop(self, spectrum):
        return spectrum[self.start:self.end+1]

    def contains(self, value):
        return self.start <= value <= self.end
27.111111
79
0.622951
367
0.752049
0
0
0
0
0
0
117
0.239754
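A quick usage sketch for the ROI class in the record above; the import path is inferred from the record's file path, and a plain list stands in for a spectrum array:

from wasatch.ROI import ROI   # module path inferred from wasatch/ROI.py

spectrum = list(range(10))               # stand-in for a captured spectrum
roi = ROI(start=2, end=5)

print(roi.valid())                       # True  (0 <= start < end)
print(roi.len)                           # 4     (end - start + 1)
print(roi.crop(spectrum))                # [2, 3, 4, 5]
print(roi.contains(4), roi.contains(9))  # True False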
be19a958423363abc9e04beed1c7e6d4e8b02233
8,562
py
Python
examples/python/oled_ssd1327.py
whpenner/upm
3168c61d8613da62ecc7598517a1decf533d5fe7
[ "MIT" ]
1
2017-09-22T01:41:30.000Z
2017-09-22T01:41:30.000Z
bsp/intel/peripheral/libupm/examples/python/oled_ssd1327.py
Keneral/ahardware
9a8a025f7c9471444c9e271bbe7f48182741d710
[ "Unlicense" ]
null
null
null
bsp/intel/peripheral/libupm/examples/python/oled_ssd1327.py
Keneral/ahardware
9a8a025f7c9471444c9e271bbe7f48182741d710
[ "Unlicense" ]
1
2018-02-24T19:09:04.000Z
2018-02-24T19:09:04.000Z
#!/usr/bin/python # Author: Zion Orent <[email protected]> # Copyright (c) 2015 Intel Corporation. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Load i2clcd display module import time, signal, sys import pyupm_i2clcd as upmLCD myLCD = upmLCD.SSD1327(0, 0x3C); logoArr = [0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x03, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x01, 0xC0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x07, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x0F, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x80, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3C, 0x0F, 0x80, 0x01, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x0F, 0x80, 0x03, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x07, 0x80, 0x03, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x87, 0xC0, 0x07, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x83, 0xC0, 0x07, 0x83, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC3, 0xC0, 0x07, 0x87, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xE1, 0xE0, 0x07, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0xE0, 0x0F, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF8, 0xF0, 0x0E, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF8, 0x70, 0x1C, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x30, 0x18, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x18, 
0x30, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x88, 0x21, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC4, 0x47, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE0, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x60, 0x00, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xDF, 0xE1, 0x9F, 0xEC, 0x7E, 0xE6, 0x73, 0x9C, 0xE7, 0x39, 0xCE, 0x1C, 0xDF, 0xE1, 0xB9, 0xEC, 0xE7, 0xE0, 0x61, 0xD8, 0x66, 0x1B, 0x86, 0x1C, 0x06, 0x61, 0xB0, 0x6D, 0xC3, 0x7C, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x0F, 0x86, 0x61, 0xB0, 0x6D, 0x83, 0x3E, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x07, 0xC6, 0x61, 0xB0, 0x6D, 0x83, 0xC3, 0x61, 0x18, 0x46, 0x03, 0x86, 0x18, 0x66, 0x61, 0xB0, 0x6D, 0xC3, 0xFE, 0x7F, 0x9F, 0xE7, 0xF9, 0xFE, 0x1F, 0xE6, 0x3F, 0x9F, 0xEC, 0xFE, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xC6, 0x3F, 0x9F, 0xEC, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6C, 0xF3, 0xCF, 0x70, 0x9E, 0x79, 0xE7, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7D, 0x9E, 0x68, 0x20, 0xB2, 0xC8, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x9E, 0x6F, 0x20, 0xB2, 0xF9, 0xE7, 0x80, 0x00, 0x00, 0x00, 0x00, 0x46, 0x9A, 0x61, 0x20, 0xB2, 0xCB, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7C, 0xF3, 0xCF, 0x30, 0x9E, 0x79, 0xE7, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x02, 0x00, 0x00, 0x82, 0x60, 0x00, 0x00, 0xF8, 0x00, 0x00, 0x40, 0x40, 0x02, 0x00, 0x00, 0x83, 0x60, 0x00, 0x00, 0x8C, 0x00, 0x00, 0x40, 0x60, 0xB7, 0x79, 0xE7, 0x81, 0xC7, 0x92, 0x70, 0x89, 0xE7, 0x9E, 0x78, 0x7C, 0xE2, 0xC9, 0x2C, 0x81, 0xCC, 0xD2, 0x40, 0xFB, 0x21, 0xB2, 0x48, 0x40, 0x62, 0xF9, 0x2C, 0x80, 0x8C, 0xD2, 0x40, 0x8B, 0xE7, 0xB0, 0x48, 0x40, 0xE2, 0xC9, 0x2C, 0x80, 0x84, 0xD2, 0x40, 0x8B, 0x2D, 0x92, 0x48, 0x7D, 0xB3, 0x79, 0x27, 0x80, 0x87, 0x9E, 0x40, 0x8D, 0xE7, 0x9E, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] SeeedLogo = upmLCD.uint8Array(len(logoArr)) for x in range(len(logoArr)): SeeedLogo.__setitem__(x, logoArr[x]) # If you don't set the display to be white, the seeed logo will appear jagged myLCD.setGrayLevel(12) myLCD.draw(SeeedLogo, 96 * 96 / 8); for i in range(12): myLCD.setCursor(i, 0) myLCD.setGrayLevel(i) myLCD.write('Hello World') print "Exiting"
45.063158
77
0.68512
0
0
0
0
0
0
0
0
1,260
0.147162
be1bdf6fe279b2f8b2c141f3279c61f47199ae18
898
py
Python
digital_image_processing/algorithms/edge_detection_algorithms/threshold/adaptive_thresholding_methods/__init__.py
juansdev/digital_image_processing
a0fe429c0664d81063dc76502a3e4874eea901a7
[ "MIT" ]
1
2022-03-22T03:37:44.000Z
2022-03-22T03:37:44.000Z
digital_image_processing/algorithms/edge_detection_algorithms/threshold/adaptive_thresholding_methods/__init__.py
juansdev/digital_image_processing
a0fe429c0664d81063dc76502a3e4874eea901a7
[ "MIT" ]
null
null
null
digital_image_processing/algorithms/edge_detection_algorithms/threshold/adaptive_thresholding_methods/__init__.py
juansdev/digital_image_processing
a0fe429c0664d81063dc76502a3e4874eea901a7
[ "MIT" ]
null
null
null
from .bernsen import bernsen_thresholding_method
from .bradley_roth import bradley_thresholding_method
from .contrast import contrast_thresholding_method
from .feng import feng_thresholding_method
from .gaussian import threshold_value_gaussian
from .johannsen import johannsen_thresholding_method
from .kapur import kapur_thresholding_method
from .mean import threshold_value_mean
from .minimum_error import minimum_err_thresholding_method
from .niblack import niblack_thresholding_method
from .nick import nick_thresholding_method
from .otsu import otsu_thresholding_method
from .p_tile import p_tile_thresholding_method
from .pun import pun_thresholding_method
from .rosin import rosin_thresholding_method
from .sauvola import sauvola_thresholding_method
from .singh import singh_thresholding_method
from .two_peaks import two_peaks_thresholding_method
from .wolf import wolf_thresholding_method
44.9
58
0.894209
0
0
0
0
0
0
0
0
0
0
be1d04203f18e6f16b60a723e614122b48a08671
1,097
py
Python
data/train/python/be1d04203f18e6f16b60a723e614122b48a08671celeryconfig.py
harshp8l/deep-learning-lang-detection
2a54293181c1c2b1a2b840ddee4d4d80177efb33
[ "MIT" ]
84
2017-10-25T15:49:21.000Z
2021-11-28T21:25:54.000Z
data/train/python/be1d04203f18e6f16b60a723e614122b48a08671celeryconfig.py
vassalos/deep-learning-lang-detection
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
[ "MIT" ]
5
2018-03-29T11:50:46.000Z
2021-04-26T13:33:18.000Z
data/train/python/be1d04203f18e6f16b60a723e614122b48a08671celeryconfig.py
vassalos/deep-learning-lang-detection
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
[ "MIT" ]
24
2017-11-22T08:31:00.000Z
2022-03-27T01:22:31.000Z
import os

from kombu import Queue, Exchange

## Broker settings.
BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672')
#BROKER_URL = "amqp://guest:guest@localhost:5672/"
#BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379')
#BROKER_HOST = "localhost"
#BROKER_PORT = 27017
#BROKER_TRANSPORT = 'mongodb'
#BROKER_VHOST = 'celery'

CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
    Queue('default', exchange=Exchange('default'), routing_key='default'),
    # Queue('aws_uploads', routing_key='video.uploads'),
)
CELERY_DEFAULT_EXCHANGE = 'default'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_DEFAULT_ROUTING_KEY = 'default'

CELERY_IMPORTS = ('celeryservice.tasks',)

#CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp')

## Using the database to store task state and results.
#CELERY_RESULT_BACKEND = "mongodb"
#CELERY_MONGODB_BACKEND_SETTINGS = {
#    "host": "localhost",
#    "port": 27017,
#    "database": "celery",
#    "taskmeta_collection": "celery_taskmeta",
#}
30.472222
76
0.739289
0
0
0
0
0
0
0
0
761
0.69371
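A hedged sketch of how a Celery application would typically consume a settings module like the one in the record above; the app name mirrors the CELERY_IMPORTS value but is otherwise illustrative:

# Illustrative only: loads the uppercase settings from celeryconfig.py by module name.
from celery import Celery

app = Celery("celeryservice")
app.config_from_object("celeryconfig")   # picks up BROKER_URL, CELERY_QUEUES, ...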
be1d72eb89ee80a827a9a1150e2c759579770b36
21,106
py
Python
timesheet.py
dgollub/timesheet-google-thingy
3ffab402444dba520ff3416b2327f6d2ceeeac39
[ "MIT" ]
null
null
null
timesheet.py
dgollub/timesheet-google-thingy
3ffab402444dba520ff3416b2327f6d2ceeeac39
[ "MIT" ]
null
null
null
timesheet.py
dgollub/timesheet-google-thingy
3ffab402444dba520ff3416b2327f6d2ceeeac39
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # from __future__ import print_function import csv import os import re import sys import arrow from gsheets import Sheets CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) DEBUG = os.environ.get('DEBUG', "0") == "1" AS_CSV = os.environ.get('CSV', "0") == "1" COL_DATE = 0 COL_WEEKDAY = 1 COL_TIME_START = 2 COL_TIME_END = 3 COL_LUNCH = 4 COL_TIME = 5 # includes lunch COL_TIME_FIXED = 6 # does not include lunch COL_MOVE = 7 COL_WORK_FROM_HOME = 8 COL_NOTES = 9 COL_TASKS_START = 10 SPECIAL_VALUES = ["sick", "ab", "off", "wfh", "hol"] SATURDAY = 5 SUNDAY = 6 def calc(hour, half_it=False, split_char = ":"): parts = str(hour).split(split_char) try: local_hours = int(parts[0]) local_minutes = int(parts[1]) if half_it: local_hours = local_hours / 2 local_minutes = local_minutes / 2 return local_hours, local_minutes except: if len(parts) == 1: try: return int(parts[0]), 0 except: return 0, 0 def get_client_secret_filenames(): filename = os.path.join(CURRENT_PATH, "client-secrets.json") cachefile = os.path.join(CURRENT_PATH, "client-secrets-cache.json") if not os.path.exists(filename): filename = os.path.expanduser(os.path.join("~", "client-secrets.json")) cachefile = os.path.expanduser(os.path.join("~", "client-secrets-cache.json")) if not os.path.exists(filename): raise Exception("Please provide a client-secret.json file, as described here: https://github.com/xflr6/gsheets#quickstart") return filename, cachefile def load_first_sheet_rows(api, timesheet_url, date=arrow.now().format('YYYYMMDD')): print("Opening timesheet for %s ..." % (date)) sheets = api.get(timesheet_url) sheet = sheets.sheets[0] print(u"Timesheet [%s] sheet [%s] opened. Accessing cell data ..." % (sheets.title or "???", sheet.title or "???")) rows = sheet.values() return rows def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name): now = arrow.now() today = now.format('YYYYMMDD') try: other_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD') except arrow.parser.ParserError: other_date = today use_date = other_date rows = load_first_sheet_rows(api, timesheet_url, use_date) timesheet = get_timesheet_for_date(rows, use_date, user_full_name) if timesheet: print("\n\n") print("Timesheet for %s" % (use_date)) print(timesheet) print("\n") else: print("No entry found for %s" % use_date) def get_timesheet_for_date(rows, date, user_full_name): # find the row with the first column that has today's date in it result_rows = [row for row in rows if row and str(row[COL_DATE]) == date] if result_rows is None or not result_rows: return None if len(result_rows) != 1: print("More than one entry (%d) found for date %s! Please fix your sheet!" 
% (len(result_rows), date)) return None found_row = result_rows[0] found_index = rows.index(found_row) start_val = found_row[COL_TIME_START] end_val = found_row[COL_TIME_END] duration_val = found_row[COL_TIME_FIXED] max_cols = len(found_row) if not start_val: if start_val in SPECIAL_VALUES: print("You forgot to add your start time.") return None if not end_val: if end_val in SPECIAL_VALUES: print("You forgot to add your end time.") return None #if max_cols >= COL_NOTES: # print("No notes/tasks entered yet.") # return None def parse_hours(val): try: return arrow.get(val, "HH:mm") except arrow.parser.ParserError: return arrow.get(val, "H:mm") start = parse_hours(start_val).format("HH:mm") end = parse_hours(end_val).format("HH:mm") duration = str(duration_val) notes_str = found_row[COL_NOTES] notes = notes_str.split('\n') # check the previous Friday entry (if today is not Friday), to see what work from home # days were were selected weekday = (found_row[COL_WEEKDAY] or "").lower() check_start_index = found_index if weekday.startswith("fr") else found_index - 7 check_row = found_row while (check_start_index < found_index): check_row = rows[check_start_index] if (len(check_row) > COL_WEEKDAY and check_row[COL_WEEKDAY] or "").lower().startswith("fr"): break check_start_index += 1 is_same_day = None if check_start_index != found_index: # print("HA! GOT PREVS FRIDAY.") is_same_day = False else: # print("SAME DAY") is_same_day = True wfh = u"" if len(check_row)-1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME] wfh = wfh.replace("Mon", "Monday") wfh = wfh.replace("Tue", "Tuesday") wfh = wfh.replace("Wed", "Wednesday") wfh = wfh.replace("Thu", "Thursday") wfh = wfh.replace("Fri", "Friday") wfh = wfh.replace(", ", ",").replace(",", " and ") wfh_extra = "Next week" if is_same_day else "This week" wfh_info = """%s %s""" % (wfh_extra, wfh) if wfh != "" else "all days" # 2021-01-04 just make this the default for now wfh_info = "at all times, unless mentioned otherwise below" # regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\])) # text: SCAN-4167 As a developer, I want to update AIScanRobo every week [1h] # 3 groups: # SCAN-4167 # As a developer, I want to update AIScanRobo every week [ # 1h r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))") total_time_minutes_from_tasks = 0 tasks = [] for idx in range(COL_TASKS_START, max_cols): task = found_row[idx].strip() if task: t = task.split('\n')[0] if '\n' in task else task try: g = r.match(t).groups() except Exception as ex: print("ERROR: %s - %s" % (t, str(ex))) continue if DEBUG: print("task: %s" % (t)) print("groups: %s" % len(g)) [task_number, task_details, task_duration] = g hours, half_hours = calc(task_duration.replace("h", ""), split_char=".") minutes = (hours * 60) + (6 * half_hours) total_time_minutes_from_tasks += minutes other_lines = task.split('\n')[1:] tasks.append("%s %s\n%s" % (task_number.strip(), task_details[:-2].strip(), '\n'.join(other_lines))) def format_tasks(tasks): if not tasks: return '' result = 'Tasks:\n' for task in tasks: if '\n' in task: sub_tasks = task.split('\n') if len(sub_tasks) > 1: result += '\n* ' + sub_tasks[0] # main task for sub_task in sub_tasks[1:]: # actual sub tasks result += '\n\t' + sub_task result += '\n' else: result += '\n* ' + task else: result += '\n* ' + task return result def format_notes(notes): if not notes or (len(notes) == 1 and not notes[0]): return '' result = 'Additional Notes:\n' for note in notes: result += '\n* ' + note return result total_hours = str(int(total_time_minutes_from_tasks / 
60)).zfill(2) total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2) total_duration = "%s:%s" % (total_hours, total_minutes) test_duration = duration if len(test_duration) <= 4: test_duration = "0%s" % duration if total_duration != test_duration: print("") print("") print("The task times do not add up! Tasks vs time entered: %s != %s" % (total_duration, test_duration)) print("") print("") # Time: %(start)s - %(end)s (%(duration)s hours total [%(total_hours)s:%(total_minutes)s]) msg = """ [Daily Report] %(date)s WFH: %(wfh_info)s Hi, Daily Report for Date: %(date)s %(tasks)s %(notes)s Kind regards, %(user_full_name)s """.strip() % { "date": date, "user_full_name": user_full_name, "start": start, "end": end, "duration": duration, "wfh_info": wfh_info, "tasks": format_tasks(tasks) if tasks else "", "notes": format_notes(notes) if notes else "", "total_hours": total_hours, "total_minutes": total_minutes, } print("Total time for all tasks (%s): %s - %s:%s" % (len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes)) return msg def _load_sheet_data(api, timesheet_url, arg_date=None): try: date = arrow.get(arg_date, 'YYYYMM') except Exception: # pylint: disable=W0703 now = arrow.now() date = now.format('YYYYMM') rows = load_first_sheet_rows(api, timesheet_url, date) date_str = str(date.format('YYYYMM')) return (rows, date_str) def export_csv(api, timesheet_url, arg_date): rows, date = _load_sheet_data(api, timesheet_url, arg_date) filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)] if filtered is None or not filtered: return None csv_filename = os.path.join(os.getcwd(), "%s.csv" % (arg_date)) print("") print("Found (%d) entries for date %s!" % (len(filtered), date)) print("Writing to %s" % (csv_filename)) with open(csv_filename, mode='w') as f: f = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) # f.writerow(['John Smith', 'Accounting', 'November']) f.writerow(["username", "date", "task", "duration", "work_type", "details"]) def w(task, duration_minutes, details = ""): work_type = "Meeting" if "meeting" in details.lower() else "Development" # Needed CSV columns # username|date|task|duration|work_type|details f.writerow(["daniel", arrow.get(str(date), 'YYYYMMDD').format('YYYY.MM.DD'), task, "%dm" % (duration_minutes), work_type, details]) # regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\])) # text: SCAN-4167 As a developer, I want to update AIScanRobo every week [1h] # 3 groups: # SCAN-4167 # As a developer, I want to update AIScanRobo every week [ # 1h r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))") for row in filtered: max_cols = len(row) time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None date = row[COL_DATE] if max_cols >= COL_DATE else None if time_start is None or time_end is None or date is None: continue tasks = [] for idx in range(COL_TASKS_START, max_cols): task = row[idx].strip() if task: tasks.append(task) if len(tasks) == 0: print("%s: no tasks found! %s" % (date, time_start)) continue print("%s: %d tasks found!" 
% (date, len(tasks))) for task in tasks: t = task.split('\n')[0] if '\n' in task else task try: g = r.match(t).groups() except Exception as ex: print("ERROR: %s - %s" % (t, str(ex))) continue if DEBUG: print("task: %s" % (t)) print("groups: %s" % len(g)) [task_number, task_details, duration] = g hours, half_hours = calc(duration.replace("h", ""), split_char=".") minutes = (hours * 60) + (6 * half_hours) if DEBUG: print("time: %s, %s $ %s $ %s" % (hours, half_hours, duration, minutes)) details = "%s %s" % (task_number, task_details[:-1].strip()) w(task_number, minutes, details.strip()) print("") print("CSV output to: %s" % (csv_filename)) def calc_daily_hours_for_month(api, timesheet_url, arg_date): rows, date = _load_sheet_data(api, timesheet_url, arg_date) filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)] if filtered is None or not filtered: return None print("") print("Found (%d) entries for date %s!" % (len(filtered), date)) minutes = 0 days = 0 for row in filtered: max_cols = len(row) time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None date = row[COL_DATE] if max_cols >= COL_DATE else None worked_at = row[COL_MOVE] if max_cols >= COL_MOVE else None notes = row[COL_NOTES] if max_cols >= COL_NOTES else "" if time_start is None or time_end is None or date is None: continue start_hours, start_minutes = calc(time_start) end_hours, end_minutes = calc(time_end) if start_hours == 0: print("%s: Day off because of %s" % (date, "whatever" if time_start == 0 else time_start)) continue extra_info = "" the_date = arrow.get(str(date), 'YYYYMMDD') if the_date.weekday() in [SATURDAY, SUNDAY]: extra_info += " - Weekend work" half_day = 'half' in row[COL_WORK_FROM_HOME] if half_day: extra_info += " - half day PTO" if worked_at in ['o', 'O'] or "OFFICE" in notes.upper(): extra_info += " - Commute to office" minutes_day = abs(end_hours - start_hours) * 60 minutes_day += end_minutes - start_minutes minutes += minutes_day hours_day = int(minutes_day / 60) hours_day_without_lunch = hours_day - 1 minutes_day = minutes_day % 60 total_time_for_date = str(hours_day).zfill(2) + ':' + str(minutes_day).zfill(2) days += 1 no_lunch = str(hours_day_without_lunch).zfill(2) + ':' + str(minutes_day).zfill(2) print("%s: %s to %s = %s (without lunch: %s)%s" % (date, str(time_start).zfill(2), str(time_end).zfill(2), total_time_for_date, no_lunch, extra_info)) hours = str(minutes / 60).zfill(2) minutes = str(minutes % 60).zfill(2) lunch_hours = str(int(float(hours)) - days).zfill(2) print("") print("Total days worked: %s" % str(days)) print("Total hours: %s:%s (with 1 hour lunch: %s:%s)" % (hours, minutes, lunch_hours, minutes)) print("") def calc_stats(api, timesheet_url, arg_date=None): rows, date = _load_sheet_data(api, timesheet_url, arg_date) # find the rows for the given month filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)] if filtered is None or not filtered: return None if not AS_CSV: print("") print("Found (%d) entries for date %s!" 
% (len(filtered), date)) dates, hours = [], [] half_days = {} first = None last = None for row in filtered: max_cols = len(row) time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None tasks = [] for idx in range(COL_TASKS_START, max_cols): task = row[idx].strip() if task: tasks.append(task) day_type = row[COL_TIME_START] if max_cols >= COL_TIME_START else None date = row[COL_DATE] if max_cols >= COL_DATE else None if day_type is None: continue if day_type in SPECIAL_VALUES: time = day_type hours.append(time) dates.append(date) continue elif not tasks: continue # If it was a half day, meaning I took half a day off, then only count half the time half_day = 'half' in row[COL_WORK_FROM_HOME] if half_day: half_days[date] = time hours.append(time) dates.append(date) if first is None: first = row else: last = row total_hours, total_minutes, total_time = 0, 0, "" for index, hour in enumerate(hours): date = dates[index] local_hours, local_minutes = calc(hour, date in half_days) total_hours += local_hours total_minutes += local_minutes if total_minutes >= 60: total_hours += (total_minutes / 60) total_minutes = total_minutes % 60 total_time = "%d:%d hours:minutes" % (total_hours, total_minutes) expected = 0 actual_h, actual_m = 0, 0 if not AS_CSV: print("*" * 50) print("") print("Valid hours entries: %s\t[required vs actual]" % len(hours)) deduct_work_hours = 0 work_hours = 0 work_minutes = 0 days = 0 expected_hours_accumulated_total = 0 for index, worked_date in enumerate(dates): days += 1 if hours[index] in SPECIAL_VALUES: if not AS_CSV: print(" %s: Off, because %s" % (worked_date, hours[index])) else: pass else: half_day = worked_date in half_days # each workday has 8 hours of work, but on half days it is only half of 8, aka 4. work_hours_for_the_day = 8 if not half_day else 4 expected_hours_accumulated_total += 8 - (8 - work_hours_for_the_day) expected_minutes_accumulated_total = expected_hours_accumulated_total * 60 # hours[index] is the actual time worked, e.g. 
6:30 means 6 hours and 30 minutes local_h, local_m = calc(hours[index]) work_hours += local_h work_minutes += local_m actual_h = work_hours # 330 minutes = 6 hours and 30 minutes actual_h += int(work_minutes / 60) actual_m = work_minutes % 60 if AS_CSV: print("%s;%s;" % (worked_date, hours[index])) else: print(" %s: %s\t[%s:00 vs %s:%s] %s" % (worked_date, hours[index], expected_hours_accumulated_total, str(actual_h).zfill(2), str(actual_m).zfill(2), "Half day" if half_day else "")) if not AS_CSV: print("") print("First:", "<first> not found" if first is None else first[COL_DATE]) print("Last:", "<last> not found" if last is None else last[COL_DATE]) print("") print("Total time in %s: %s" % (date, total_time)) print("") print("*" * 50) def main(): # print("Checking environment variable TIMESHEET_URL for spreadsheet URL...") timesheet_url = os.environ.get('TIMESHEET_URL', "").strip() if not timesheet_url: raise Exception("Please set the TIMESHEET_URL environment variable accordingly.") # print("Checking environment variable USER_FULL_NAME for spreadsheet URL...") user_full_name = os.environ.get('USER_FULL_NAME', "").strip() if not user_full_name: print("Warning: USER_FULL_NAME environment variable not set!") user_full_name = "Herman Toothrot" print("") print("Usage: python timesheet.py [command|date] [date]") print("Example: python timesheet.py stats 202011") print("Example: python timesheet.py 20201130") print("") print("Available commands:") print("- stats: show summed up hours and minutes for the given/current month") print(" use \"CSV=1 python timesheet.py stats\" to format the output") print(" as CSV") print("- daily: same as stats, except ready to email to HR") print("- csv: task breakdown for the month and time spend on each task") print("") print("""Tip: use "DEBUG=1 timesheet <parameter>" to enable debug output""") print("") print("Trying to load client-secrets.json file ...") secrets_file, cache_file = get_client_secret_filenames() sheets = Sheets.from_files(secrets_file, cache_file, no_webserver=False) print("Success.") date = None if len(sys.argv) < 3 else sys.argv[2].strip() arg = "read today" if len(sys.argv) < 2 else sys.argv[1].strip() if arg == "stats": calc_stats(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) elif arg == "daily": calc_daily_hours_for_month(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) elif arg == "csv": export_csv(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) else: date_to_use = "read today" if arg == '' else arg load_sheet_and_read_data(sheets, timesheet_url, date_to_use, user_full_name) print("Done.") if __name__ == "__main__": main()
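For reference, the task-line parsing used throughout this script can be exercised in isolation. The regex below is the same one the script compiles; parse_duration is a hypothetical helper written only for this sketch, and the tenths-of-an-hour reading (6 minutes per digit after the dot) is inferred from the `(hours * 60) + (6 * half_hours)` arithmetic above, not stated anywhere else in the source.

import re

TASK_RE = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")

def parse_duration(raw):
    # "1.5h" -> 90: whole hours plus one tenth of an hour (6 min) per digit after the dot
    hours, _, tenths = raw.replace("h", "").partition(".")
    return int(hours) * 60 + 6 * int(tenths or 0)

line = "SCAN-4167 As a developer, I want to update AIScanRobo every week [1.5h]"
task_id, details, duration = TASK_RE.match(line).groups()
print(task_id.strip())            # SCAN-4167
print(details.strip(" ["))        # As a developer, I want to update AIScanRobo every week
print(parse_duration(duration))   # 90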
34.6
158
0.588032
0
0
0
0
0
0
0
0
4,750
0.225054
be1da4c3a9cd8b6f92a68b6f9d9dd0277f9d55ce
7,578
py
Python
league/game.py
Orpheon/All-in
016901953904250226f388422318ef2f739bf82e
[ "MIT" ]
null
null
null
league/game.py
Orpheon/All-in
016901953904250226f388422318ef2f739bf82e
[ "MIT" ]
null
null
null
league/game.py
Orpheon/All-in
016901953904250226f388422318ef2f739bf82e
[ "MIT" ]
null
null
null
import numpy as np import pickle import treys import constants FULL_DECK = np.array(treys.Deck.GetFullDeck()) class GameEngine: def __init__(self, BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND, logger): self.BATCH_SIZE = BATCH_SIZE self.INITIAL_CAPITAL = INITIAL_CAPITAL self.SMALL_BLIND = SMALL_BLIND self.BIG_BLIND = BIG_BLIND self.logger = logger self.N_PLAYERS = 6 def generate_cards(self): cards = np.tile(np.arange(52), (self.BATCH_SIZE, 1)) for i in range(self.BATCH_SIZE): cards[i, :] = FULL_DECK[np.random.permutation(cards[i, :])] community_cards = cards[:, :5] hole_cards = np.reshape(cards[:, 5:5 + 2 * self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS, 2)) return community_cards, hole_cards def run_game(self, players): if len(players) != self.N_PLAYERS: raise ValueError('Only {} players allowed'.format(self.N_PLAYERS)) community_cards, hole_cards = self.generate_cards() folded = np.zeros((self.BATCH_SIZE, len(players)), dtype=bool) prev_round_investment = np.zeros((self.BATCH_SIZE, len(players)), dtype=int) for player in players: player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS) # Pre-flop bets, _ = self.run_round(players, prev_round_investment, folded, constants.PRE_FLOP, hole_cards, community_cards[:, :0]) prev_round_investment += bets # Flop bets, _ = self.run_round(players, prev_round_investment, folded, constants.FLOP, hole_cards, community_cards[:, :3]) prev_round_investment += bets # Turn bets, _ = self.run_round(players, prev_round_investment, folded, constants.TURN, hole_cards, community_cards[:, :4]) prev_round_investment += bets # River bets, end_state = self.run_round(players, prev_round_investment, folded, constants.RIVER, hole_cards, community_cards) prev_round_investment += bets # Showdown pool = np.sum(prev_round_investment, axis=1) total_winnings = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float) hand_scores = self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded)) ranks = np.argsort(hand_scores, axis=1) sorted_hands = np.take_along_axis(hand_scores, indices=ranks, axis=1) # Get everyone who has the best hand and among which pots will be split participants = hand_scores == sorted_hands[:, 0][:, None] # Get the number of times each pot will be split n_splits_per_game = participants.sum(axis=1) # Split and distribute the money gains = pool / n_splits_per_game total_winnings += participants * gains[:, None] total_winnings -= prev_round_investment self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings, [str(p) for p in players], folded, hole_cards)) self.logger.save_to_file() for player_idx, player in enumerate(players): round, current_bets, min_raise, prev_round_investment, folded, last_raiser = end_state player.end_trajectory(player_idx, round, current_bets, min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :], community_cards, total_winnings[:, player_idx]) return total_winnings def run_round(self, players, prev_round_investment, folded, round, hole_cards, community_cards): """ :param players: [Player] :param prev_round_investment: np.ndarray(batchsize, n_players) = int :param folded: np.ndarray(batchsize, n_players) = bool :param round: int ∈ {0..3} :param hole_cards: np.ndarray(batchsize, n_players, 2) = treys.Card :param community_cards: np.ndarray(batchsize, n_players, {0,3,4,5}) = treys.Card :return: current_bets: np.ndarray(batchsize, n_players)=int {0-200} """ current_bets = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int) max_bets = np.zeros(self.BATCH_SIZE, 
dtype=int) min_raise = np.zeros(self.BATCH_SIZE, dtype=int) min_raise[:] = self.BIG_BLIND last_raiser = np.zeros(self.BATCH_SIZE, dtype=int) player_order = list(enumerate(players)) round_countdown = np.zeros(self.BATCH_SIZE, dtype=int) round_countdown[:] = self.N_PLAYERS if round == constants.PRE_FLOP: current_bets[:, 0] = self.SMALL_BLIND current_bets[:, 1] = self.BIG_BLIND max_bets[:] = self.BIG_BLIND player_order = player_order[2:] + player_order[:2] while True: running_games = np.nonzero(round_countdown > 0)[0] for player_idx, player in player_order: actions, amounts = player.act(player_idx, round, round_countdown > 0, current_bets, min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :], community_cards) # Disabled when not necessary because it bloats the log size (by ~500 kB or so, which triples the size) # self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx, actions, amounts, round_countdown, folded[:, player_idx])) # People who have already folded continue to fold actions[folded[:, player_idx] == 1] = constants.FOLD # People who have gone all-in continue to be all-in actions[prev_round_investment[:, player_idx] + current_bets[:, player_idx] == self.INITIAL_CAPITAL] = constants.CALL ########### # CALLING # ########### calls = np.where(np.logical_and(round_countdown > 0, actions == constants.CALL))[0] if calls.size > 0: investment = np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx], max_bets[calls]) # Reset the bets and countdown current_bets[calls, player_idx] = investment ########### # RAISING # ########### raises = np.where(np.logical_and(round_countdown > 0, actions == constants.RAISE))[0] if raises.size > 0: # print("True raises", raises, amounts[raises]) investment = np.maximum(current_bets[raises, player_idx] + amounts[raises], max_bets[raises] + min_raise[raises]) min_raise[raises] = investment - max_bets[raises] max_bets[raises] = investment # Reset the bets and countdown current_bets[raises, player_idx] = np.minimum(investment, self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx]) round_countdown[raises] = self.N_PLAYERS last_raiser[raises] = player_idx ########### # FOLDING # ########### folded[np.where(np.logical_and(round_countdown > 0, actions == constants.FOLD))[0], player_idx] = 1 round_countdown[running_games] -= 1 #TODO: if all folded stops game, improves performance but breaks tests # test is not broken, is there another reason? round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1] = 0 if np.max(round_countdown[running_games]) <= 0: return current_bets, (round, current_bets, min_raise, prev_round_investment, folded, last_raiser) def evaluate_hands(self, community_cards, hole_cards, contenders): evaluator = treys.Evaluator() # 7463 = 1 lower than the lowest score a hand can have (scores are descending to 1) results = np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463, dtype=int) for game_idx,community in enumerate(community_cards): for player_idx,hole in enumerate(hole_cards[game_idx]): if contenders[game_idx, player_idx]: results[game_idx, player_idx] = evaluator.evaluate(community.tolist(), hole.tolist()) return results
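The showdown block above can be illustrated with a tiny hand-written batch. The numbers here are made up for the sketch; 7463 is the sentinel evaluate_hands uses for non-contending players, and lower treys scores are better.

import numpy as np

prev_round_investment = np.array([[100, 100, 40, 0]])   # chips paid in by 4 players, 1 game
hand_scores = np.array([[2000, 2000, 5000, 7463]])      # players 0 and 1 tie for the best hand

pool = np.sum(prev_round_investment, axis=1)            # [240]
best = np.sort(hand_scores, axis=1)[:, 0][:, None]      # [[2000]]
participants = hand_scores == best                      # who shares the pot
gains = pool / participants.sum(axis=1)                 # 120 chips each
total_winnings = participants * gains[:, None] - prev_round_investment
print(total_winnings)                                   # [[ 20.  20. -40.   0.]]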
44.05814
133
0.684613
7,465
0.984828
0
0
0
0
0
0
1,381
0.18219
be1dddb28d3c0ea4aa8ef940a579e9c73af88093
2,487
py
Python
cms/admin/views.py
miloprice/django-cms
c6f548f0983a7488609e07a57552b47675d8d78e
[ "BSD-3-Clause" ]
null
null
null
cms/admin/views.py
miloprice/django-cms
c6f548f0983a7488609e07a57552b47675d8d78e
[ "BSD-3-Clause" ]
null
null
null
cms/admin/views.py
miloprice/django-cms
c6f548f0983a7488609e07a57552b47675d8d78e
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
from cms.models import Page, Title, CMSPlugin, Placeholder
from cms.utils import get_language_from_request
from django.http import Http404
from django.shortcuts import get_object_or_404


def revert_plugins(request, version_id, obj):
    from reversion.models import Version
    version = get_object_or_404(Version, pk=version_id)
    revs = [related_version.object_version for related_version in version.revision.version_set.all()]
    cms_plugin_list = []
    placeholders = {}
    plugin_list = []
    titles = []
    others = []
    page = obj
    lang = get_language_from_request(request)
    for rev in revs:
        obj = rev.object
        if obj.__class__ == Placeholder:
            placeholders[obj.pk] = obj
        if obj.__class__ == CMSPlugin:
            cms_plugin_list.append(obj)
        elif hasattr(obj, 'cmsplugin_ptr_id'):
            plugin_list.append(obj)
        elif obj.__class__ == Page:
            pass
            #page = obj #Page.objects.get(pk=obj.pk)
        elif obj.__class__ == Title:
            titles.append(obj)
        else:
            others.append(rev)
    if not page.has_change_permission(request):
        raise Http404
    current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page))
    for pk, placeholder in placeholders.items():
        # admin has already created the placeholders; get them instead
        try:
            placeholders[pk] = page.placeholders.get(slot=placeholder.slot)
        except Placeholder.DoesNotExist:
            placeholders[pk].save()
            page.placeholders.add(placeholders[pk])
    for plugin in cms_plugin_list:
        # connect plugins to the correct placeholder
        plugin.placeholder = placeholders[plugin.placeholder_id]
        plugin.save(no_signals=True)
    for plugin in cms_plugin_list:
        plugin.save()
        for p in plugin_list:
            if int(p.cmsplugin_ptr_id) == int(plugin.pk):
                plugin.set_base_attr(p)
                p.save()
        for old in current_plugins:
            if old.pk == plugin.pk:
                plugin.save()
                current_plugins.remove(old)
    for title in titles:
        title.page = page
        try:
            title.save()
        except Exception:
            # saving can collide with an existing title for this page/language;
            # reuse its primary key and save again
            title.pk = Title.objects.get(page=page, language=title.language).pk
            title.save()
    for other in others:
        other.object.save()
    for plugin in current_plugins:
        plugin.delete()
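The function above does three things per revision: bucket the restored objects by type, reconcile restored placeholders with the ones the page already has (matched by slot), and reattach plugins to the reconciled placeholders. A stripped-down sketch of that flow with plain classes instead of Django models (illustrative only, not cms API):

class Placeholder:
    def __init__(self, pk, slot):
        self.pk, self.slot = pk, slot

class CMSPlugin:
    def __init__(self, pk, placeholder_id):
        self.pk, self.placeholder_id = pk, placeholder_id

def group_restored(objs):
    placeholders, plugins, others = {}, [], []
    for obj in objs:
        if isinstance(obj, Placeholder):
            placeholders[obj.pk] = obj
        elif isinstance(obj, CMSPlugin):
            plugins.append(obj)
        else:
            others.append(obj)
    return placeholders, plugins, others

def reattach(plugins, restored_placeholders, page_placeholders_by_slot):
    # prefer the placeholder the page already has for the same slot,
    # mirroring the try/except Placeholder.DoesNotExist block above
    for pk, ph in restored_placeholders.items():
        restored_placeholders[pk] = page_placeholders_by_slot.get(ph.slot, ph)
    for plugin in plugins:
        plugin.placeholder = restored_placeholders[plugin.placeholder_id]

restored = [Placeholder(1, "content"), CMSPlugin(10, 1)]
placeholders, plugins, _ = group_restored(restored)
reattach(plugins, placeholders, {"content": Placeholder(99, "content")})
print(plugins[0].placeholder.pk)   # 99 -- the page's own placeholder wins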
36.573529
101
0.62686
0
0
0
0
0
0
0
0
188
0.075593
be1f1730e3c83173cbfa65bc65d2316eb598bfbe
4,127
py
Python
delete.py
lvwuyunlifan/crop
7392d007a8271ff384c5c66ed5717afbc4172b4d
[ "Apache-2.0" ]
null
null
null
delete.py
lvwuyunlifan/crop
7392d007a8271ff384c5c66ed5717afbc4172b4d
[ "Apache-2.0" ]
null
null
null
delete.py
lvwuyunlifan/crop
7392d007a8271ff384c5c66ed5717afbc4172b4d
[ "Apache-2.0" ]
null
null
null
import os
from PIL import Image, ImageFilter
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# import seaborn as sns
import pandas as pd
import numpy as np
import random


train_path = './AgriculturalDisease_trainingset/'
valid_path = './AgriculturalDisease_validationset/'


def genImage(gpath, datatype):

    if datatype == 'train':
        gen_number = 0  # count of generated images

        if not os.path.exists(gpath+'delete'):
            os.makedirs(gpath+'delete')

        label = pd.read_csv(gpath + 'label.csv')
        label_gen_dict = {'img_path':[], 'label':[]}  # labels for the generated images
        for i in range(61):
            li = label[label['label'] == i]
            imagenum = li['label'].count()
            print('Class %d: %d images in total' % (i, imagenum))
            imagelist = np.array(li['img_path']).tolist()

            img_path_gen, label_gen = [], []
            # for imagefile in imagelist:
            for aa in range(len(imagelist)):
                if aa <= 40:
                    print(aa)
                    path, imagename = os.path.split(imagelist[aa])

                    im = Image.open(imagelist[aa])
                    im = im.convert('RGB')
                    im_detail = im.transpose(Image.ROTATE_180)
                    # im_detail = im.filter(ImageFilter.DETAIL)  # detail enhancement

                    img_path_gen.append(gpath + 'delete/' + 'idetail_' + imagename)
                    label_gen.extend([int(i)])
                    im_detail.save(gpath + 'delete/' + 'idetail_' + imagename)
                    gen_number += 1

            label_dict = {'img_path': img_path_gen, 'label': label_gen}
            label_gen_dict['img_path'].extend(img_path_gen)
            label_gen_dict['label'].extend(label_gen)
            label_gen_pd = pd.DataFrame(label_dict)
            # label = label.append(label_gen_pd)  # append the generated labels to the original labels
            # label['label'] = label[['label']].astype('int64')  # convert to int64
            # print(label)

        label_gen_p = pd.DataFrame(label_gen_dict)
        label_gen_p.to_csv(gpath + 'label_delete.csv', index=False)

        # label_gen_p = pd.DataFrame(label_gen_dict)
        # label_gen_p.to_csv(gpath + 'label_gen.csv', index=False)

        print('Training set: %d images generated in total' % gen_number)

    if datatype == 'valid':
        gen_number = 0

        if not os.path.exists(gpath+'delete'):
            os.makedirs(gpath+'delete')

        label = pd.read_csv(gpath + 'label.csv')
        label_gen_dict = {'img_path':[], 'label':[]}
        for i in range(61):
            li = label[label['label'] == i]
            imagenum = li['label'].count()
            print('Class %d: %d images in total' % (i, imagenum))
            imagelist = np.array(li['img_path']).tolist()

            img_path_gen, label_gen = [], []
            # for imagefile in imagelist:
            for aa in range(len(imagelist)):
                if aa <= 20:
                    print(aa)
                    path, imagename = os.path.split(imagelist[aa])

                    im = Image.open(imagelist[aa])
                    im = im.convert('RGB')
                    im_detail = im.transpose(Image.ROTATE_180)
                    # im_detail = im.filter(ImageFilter.DETAIL)  # detail enhancement

                    img_path_gen.append(gpath + 'delete/' + 'idetail_' + imagename)
                    label_gen.extend([int(i)])
                    im_detail.save(gpath + 'delete/' + 'idetail_' + imagename)
                    gen_number += 1

            label_dict = {'img_path': img_path_gen, 'label': label_gen}
            label_gen_dict['img_path'].extend(img_path_gen)
            label_gen_dict['label'].extend(label_gen)
            label_gen_pd = pd.DataFrame(label_dict)
            # label = label.append(label_gen_pd)  # append the generated labels to the original labels
            # label['label'] = label[['label']].astype('int64')  # convert to int64
            # print(label)

        label_gen_p = pd.DataFrame(label_gen_dict)
        label_gen_p.to_csv(gpath + 'label_delete.csv', index=False)

        print('Validation set: %d images generated in total' % gen_number)


if __name__ == '__main__':

    genImage(train_path, 'train')
    genImage(valid_path, 'valid')
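The per-image work inside both branches is identical apart from the cut-off (40 vs 20 images per class). A hedged sketch of that inner step, with a generated dummy image standing in for a real crop photo so it runs end to end (the output paths here are placeholders, not the script's):

import os
from PIL import Image
import pandas as pd

def augment_one(src_path, out_dir, label):
    # load, force RGB, rotate 180 degrees, save under a prefixed name,
    # and return the row that goes into the generated-label CSV
    os.makedirs(out_dir, exist_ok=True)
    imagename = os.path.basename(src_path)
    im = Image.open(src_path).convert('RGB')
    out_path = os.path.join(out_dir, 'idetail_' + imagename)
    im.transpose(Image.ROTATE_180).save(out_path)
    return {'img_path': out_path, 'label': int(label)}

Image.new('RGB', (64, 64), 'green').save('example.jpg')   # stand-in input image
rows = [augment_one('example.jpg', './delete_demo', 0)]
pd.DataFrame(rows).to_csv('label_delete_demo.csv', index=False)
print(rows)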
35.886957
83
0.557063
0
0
0
0
0
0
0
0
1,258
0.292218
be1f5618419f3d6206980e4841ac306ca5a5ac13
854
py
Python
数据分析/matplotlib/03.demo.py
likedeke/python-spider-study
09bee3cbe833234a86efcc28d62ace000e2fbb4b
[ "Apache-2.0" ]
1
2021-08-20T11:47:51.000Z
2021-08-20T11:47:51.000Z
数据分析/matplotlib/03.demo.py
likedeke/python-spider-study
09bee3cbe833234a86efcc28d62ace000e2fbb4b
[ "Apache-2.0" ]
null
null
null
数据分析/matplotlib/03.demo.py
likedeke/python-spider-study
09bee3cbe833234a86efcc28d62ace000e2fbb4b
[ "Apache-2.0" ]
null
null
null
# - - - - - - - - - - -
# @author  like
# @since   2021-02-23 11:08
# @email   [email protected]

# Temperature changes from 10:00 to 12:00

from matplotlib import pyplot as plt
from matplotlib import rc
from matplotlib import font_manager
import random

x = range(0, 120)
y = [random.randint(20, 35) for i in range(120)]

plt.figure(figsize=(20, 8), dpi=80)

plt.plot(x, y)

# Chinese font
chFont = font_manager.FontProperties(family="SimHei")  # SimHei
# chFont = font_manager.FontProperties(fname="C:/Windows/Fonts/SIMHEI.TTF")

# Tick settings
step = 10
xLabels = ["10点,{}分".format(i) for i in range(60)]
xLabels += ["11点,{}分".format(i) for i in range(60)]
plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontproperties=chFont)

# Add axis labels and title
plt.xlabel("时间", fontproperties=chFont)
plt.ylabel("温度 单位(℃)", fontproperties=chFont)
plt.title("10点到12点每分钟的气温变化", fontproperties=chFont)

plt.show()
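Passing fontproperties on every call works, but matplotlib also lets the font be set once for the whole figure. A short sketch of both options, assuming SimHei is installed on the machine (which the script itself assumes):

from matplotlib import pyplot as plt, font_manager

# Option A (as above): a FontProperties object passed per call.
ch_font = font_manager.FontProperties(family="SimHei")

# Option B: set it globally so ticks, labels and titles all pick it up.
plt.rcParams["font.sans-serif"] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False   # keep the minus sign renderable with CJK fonts

plt.plot(range(10), range(10))
plt.title("10点到12点每分钟的气温变化", fontproperties=ch_font)
plt.savefig("font_demo.png")   # or plt.show()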
23.722222
80
0.696721
0
0
0
0
0
0
0
0
361
0.379202
be1f96521bb4c93e3fbc514880ddde1a151dfa0d
1,351
py
Python
testing/vcs/test_vcs_isoline_labels.py
xylar/cdat
8a5080cb18febfde365efc96147e25f51494a2bf
[ "BSD-3-Clause" ]
62
2018-03-30T15:46:56.000Z
2021-12-08T23:30:24.000Z
testing/vcs/test_vcs_isoline_labels.py
xylar/cdat
8a5080cb18febfde365efc96147e25f51494a2bf
[ "BSD-3-Clause" ]
114
2018-03-21T01:12:43.000Z
2021-07-05T12:29:54.000Z
testing/vcs/test_vcs_isoline_labels.py
CDAT/uvcdat
5133560c0c049b5c93ee321ba0af494253b44f91
[ "BSD-3-Clause" ]
14
2018-06-06T02:42:47.000Z
2021-11-26T03:27:00.000Z
import os, sys, cdms2, vcs, vcs.testing.regression as regression dataset = cdms2.open(os.path.join(vcs.sample_data,"clt.nc")) data = dataset("clt") canvas = regression.init() isoline = canvas.createisoline() isoline.label="y" texts=[] colors = [] for i in range(10): text = canvas.createtext() text.color = 50 + 12 * i text.height = 12 colors.append(100 + 12 * i) if i%2 == 0: texts.append(text.name) else: texts.append(text) isoline.text = texts # First test using isoline.text[...].color canvas.plot(data, isoline, bg=1) baseline = os.path.splitext(sys.argv[1]) baselineImage = "%s%s"%baseline ret = regression.run_wo_terminate(canvas, "test_vcs_isoline_labels.png", baselineImage) # Now set isoline.linecolors and test again. canvas.clear() isoline.linecolors = colors canvas.plot(data, isoline, bg=1) baselineImage = "%s%d%s"%(baseline[0], 2, baseline[1]) testImage = os.path.abspath("test_vcs_isoline_labels2.png") ret += regression.run_wo_terminate(canvas, testImage, baselineImage) # Now set isoline.textcolors and test again. canvas.clear() isoline.textcolors = colors canvas.plot(data, isoline, bg=1) baselineImage = "%s%d%s"%(baseline[0], 3, baseline[1]) testImage = os.path.abspath("test_vcs_isoline_labels3.png") ret += regression.run_wo_terminate(canvas, testImage, baselineImage) sys.exit(ret)
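The test repeats the same plot/compare/clear cycle three times; if it grows further, the cycle could be factored into a helper. A sketch under the same assumptions as the test itself (vcs, cdms2 and the sample clt.nc available); it only reuses calls already present above and is not part of the suite:

import os, sys, cdms2, vcs, vcs.testing.regression as regression

def plot_and_compare(canvas, data, gm, test_png, baseline_png):
    # one cycle of the pattern above: clear, plot off-screen, diff against the baseline
    canvas.clear()
    canvas.plot(data, gm, bg=1)
    return regression.run_wo_terminate(canvas, test_png, baseline_png)

if __name__ == "__main__":
    canvas = regression.init()
    data = cdms2.open(os.path.join(vcs.sample_data, "clt.nc"))("clt")
    iso = canvas.createisoline()
    sys.exit(plot_and_compare(canvas, data, iso, "demo.png", sys.argv[1]))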
29.369565
87
0.721688
0
0
0
0
0
0
0
0
257
0.190229
be204f98e2c8943df601cdf5f75bb96f08fc6392
34,671
py
Python
src/Python_version/ICE_py36.py
ds-utilities/ICE
9461bbb8d6c7b3d3b32eac8ee29bd4ae3ccb286f
[ "MIT" ]
2
2019-08-05T08:26:38.000Z
2020-05-16T14:10:00.000Z
src/Python_version/ICE_py36.py
postyear/ICE
9461bbb8d6c7b3d3b32eac8ee29bd4ae3ccb286f
[ "MIT" ]
null
null
null
src/Python_version/ICE_py36.py
postyear/ICE
9461bbb8d6c7b3d3b32eac8ee29bd4ae3ccb286f
[ "MIT" ]
2
2020-05-16T14:10:01.000Z
2021-02-09T20:05:46.000Z
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Created on Mon Mar 5 05:47:03 2018 @author: zg """ import numpy as np #from scipy import io import scipy.io #import pickle from sklearn.model_selection import StratifiedKFold #import sklearn from scipy.sparse import spdiags from scipy.spatial import distance #import matplotlib.pyplot as plt from sklearn.ensemble import BaggingClassifier from sklearn import svm #from sklearn import metrics from sklearn.metrics import roc_auc_score from sklearn import tree import copy import numpy.matlib from sklearn.exceptions import NotFittedError #import FuzzyRwrBagging as frb #from joblib import Parallel, delayed #import multiprocessing def RWR(A, nSteps, laziness, p0 = None): ''' % the random walk algorithm. % A is the input net matrix, with the diag to be 0. % nSteps: how many steps to walk % laziness: the probablity to go back. % p0: the initial probability. usually it is a zero matrix with the diag to % be 1. % % for example, A could be: % A = [0,2,2,0,0,0,0;... % 2,0,1,1,0,0,0;... % 2,1,0,0,1,0,0;... % 0,1,0,0,0,1,1;... % 0,0,1,0,0,0,0;... % 0,0,0,1,0,0,1;... % 0,0,0,1,0,1,0] % % if nSteps is 1000 and laziness is 0.3, p0 is default, the result is: % [0.449, 0.207, 0.220, 0.064, 0.154, 0.034, 0.034;... % 0.207, 0.425, 0.167, 0.132, 0.117, 0.071, 0.071;... % 0.220, 0.167, 0.463, 0.052, 0.324, 0.028, 0.028;... % 0.048, 0.099, 0.039, 0.431, 0.027, 0.232, 0.232;... % 0.038, 0.029, 0.081, 0.009, 0.356, 0.004, 0.004;... % 0.017, 0.035, 0.014, 0.154, 0.009, 0.425, 0.203;... % 0.017, 0.035, 0.014, 0.154, 0.009, 0.203, 0.425] % % Each column represents the propability for each node. each element in the % column means the probability to go to that node. % This algorithm will converge. For example, for the above matrix, nSteps = % 100, 1000 or 10000, will give the same result. ''' n = len(A) if p0 == None: p0 = np.eye(n) ''' % In the example above, spdiags(sum(A)'.^(-1), 0, n, n) will be % 0.2500 0 0 0 0 0 0 % 0 0.2500 0 0 0 0 0 % 0 0 0.2500 0 0 0 0 % 0 0 0 0.3333 0 0 0 % 0 0 0 0 1.0000 0 0 % 0 0 0 0 0 0.5000 0 % 0 0 0 0 0 0 0.5000 % W will be: % 0 0.5000 0.5000 0 0 0 0 % 0.5000 0 0.2500 0.3333 0 0 0 % 0.5000 0.2500 0 0 1.0000 0 0 % 0 0.2500 0 0 0 0.5000 0.5000 % 0 0 0.2500 0 0 0 0 % 0 0 0 0.3333 0 0 0.5000 % 0 0 0 0.3333 0 0.5000 0 ''' #W = A * spdiags(sum(A)'.^(-1), 0, n, n); #W = spdiags(np.power(sum(np.float64(A)) , -1).T , 0, n, n).toarray() W = A.dot( spdiags(np.power(sum(np.float64(A)) , -1)[np.newaxis], \ 0, n, n).toarray() ) p = p0 pl2norm = np.inf unchanged = 0 for i in range(1, nSteps+1): if i % 100 == 0: print(' done rwr ' + str(i-1) ) pnew = (1-laziness) * W.dot(p) + laziness * p0 l2norm = max(np.sqrt(sum((pnew - p) ** 2) ) ) p = pnew if l2norm < np.finfo(float).eps: break else: if l2norm == pl2norm: unchanged = unchanged +1 if unchanged > 10: break else: unchanged = 0 pl2norm = l2norm return p # test RWR() ''' A = np.array([[0,2,2,0,0,0,0],\ [2,0,1,1,0,0,0],\ [2,1,0,0,1,0,0],\ [0,1,0,0,0,1,1],\ [0,0,1,0,0,0,0],\ [0,0,0,1,0,0,1],\ [0,0,0,1,0,1,0]]) nSteps = 1000 lazi = 0.3 RWR(A, nSteps, lazi, None) ''' # test #dst = distance.euclidean(A) # corrent, the same as in Matlab def f_sim_2_aRankNet(sim, k=3): ''' % Convert the similarity matrix to a network graph where each node % has k edges to other nodes (aRank). ''' # delete the diagnal values. 
# sim = sim-diag(diag(sim) ); np.fill_diagonal(sim, 0) # [~, I] = sort(sim-diag(diag(sim) ) ); I = np.argsort(sim, kind='mergesort') + 1 # [~, I2] = sort(I); I2 = (np.argsort(I, kind='mergesort').T + 1).T # for every column, just keep the top k edges. #aRankNet = (I2 >length(sim)-k); aRankNet = I2 > (len(sim) - k) # make it a diagonal matrix # aRankNet = max(aRankNet, aRankNet'); aRankNet = np.logical_or(aRankNet, aRankNet.T) # remove the diagonal 1s. # aRankNet = aRankNet-diag(diag(aRankNet) ); np.fill_diagonal(aRankNet, False) return aRankNet # test #sim = np.array([[0, 0.5566, 0.6448, 0.3289], \ # [0.5566, 0, -0.0842, -0.0170], \ # [0.6448, -0.0842, 0, 0.8405], \ # [0.3289, -0.0170, 0.8405, 0]]) # #f_sim_2_aRankNet(sim,1) #f_sim_2_aRankNet(sim,2) #f_sim_2_aRankNet(sim,3) # #array([[False, True, True, False], # [ True, False, False, False], # [ True, False, False, True], # [False, False, True, False]]) # #array([[False, True, True, True], # [ True, False, False, False], # [ True, False, False, True], # [ True, False, True, False]]) # #array([[False, True, True, True], # [ True, False, False, True], # [ True, False, False, True], # [ True, True, True, False]]) def f_find_centers_rwMat(rw_mat, k): ''' % on the rw_mat matrix, find some nodes as the centroids for soft % clustering. If we just random pickup some nodes as centroids, that is % not good for fuzzy clusters. % k is the number of centroids. ''' ixs = [] # 1. find the most connected center node as the first centroid. a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col # % most connected node. ix = np.argmax(a) ixs.append(ix) # % 2. iteratively find the rest nodes for i in range(1, k): tmp = rw_mat[:, ixs] b = np.sum(tmp, axis=1) b[ixs] = np.inf # % find the farthest node ix = np.argmin(b) ixs.append(ix) return ixs # test #tmp = f_find_centers_rwMat(rw_mat, 10) def getCutoff(rw_mat, avgNeighborsSize): tmp = rw_mat.flatten('F') a = np.flip(np.sort(tmp), 0) len1 = len(rw_mat) #cutoffs = [] all_neibs = int( avgNeighborsSize * len1 ) print( all_neibs) ct = a[all_neibs] return ct #test #>>> a = np.array([[1,2], [3,4]]) #>>> a.flatten() #array([1, 2, 3, 4]) #>>> a.flatten('F') #array([1, 3, 2, 4]) ''' a = np.array( range(0,100) ) b = np.matlib.repmat(a, 100, 1) ct = getCutoff(b, 70) ''' def f_len_of_each_ele(c1): #% Assume c1 is a 1-dimension cell array, and each element is a 1d double #% array. This function counts the length of each double array. lens = np.zeros(len(c1)) for i in range(0, len(c1)): lens[i] = len(c1[i]) return lens def f_eu_dist(X): ''' calculate the euclidean distance between instances ''' sim = np.zeros(( len(X), len(X) )) for i in range(0, len(X)): for j in range(i+1, len(X)): tmp = distance.euclidean(X[i], X[j]) sim[i][j] = tmp sim[j][i] = tmp sim = -sim np.fill_diagonal(sim, 0) return sim #test #sim = f_eu_dist(X) def f_eu_dist2(X1, X2): ''' calculate the euclidean distance between instances from two datasets ''' sim = np.zeros(( len(X1), len(X2) )) for i in range(0, len(X1) ): for j in range(0, len(X2) ): tmp = distance.euclidean(X1[i], X2[j]) sim[i][j] = tmp sim = -sim return sim #test #sim = f_eu_dist2(X_tr, X_te) def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None): # X: data # k: number of clusters ''' The return variable clus stores the instance indices for each cluster. However, this data structure is not easy to find for a instance, which are the clusters it belongs to, thus we also need to convert clus to a true-false matrix. 
''' if each_clus_sz == None: # on average, how many clusters does one inst belongs to. #overlap_factor = 2; # the estimated size of each cluster. default is half the number of # instances. each_clus_sz=len(X)/3 print('RWR-based fuzzy clustering starts...') print(' NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) ) # sim = squareform(pdist(X)); # sim = -sim; sim = np.zeros((len(X), len(X) ) ) for i in range(0, len(X)): for j in range(i+1, len(X)): tmp = distance.euclidean(X[i], X[j]) sim[i][j] = tmp sim[j][i] = tmp sim = -sim print(' done calculating the Euclidean distance matrix') # --------------------------------------------------------------- aRank_k_neighbors = np.ceil(np.log10(len(sim)) ) ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors) print(' done calculating the A-rank KNN graph') # % -------- RWR -------- nSteps = 1000 lazi = 0.3 rw = RWR(ori_graph, nSteps, lazi) # remove probability of returning start node np.fill_diagonal(rw, 0) rw_mat = rw print(' done RWR') # --------------------------------------------------------------- ixs_centers = f_find_centers_rwMat(rw_mat, k) ct = getCutoff(rw_mat, each_clus_sz) rw_net = rw_mat > ct # % set the diagnal to 1 np.fill_diagonal(rw_net, True) clus = [] for i in range(0, k): tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten() clus.append(tmp) # --------------------------------------------------------------- # % sort the clusters lens = f_len_of_each_ele(clus) ix = np.argsort(lens)[::-1] clus_ordered = [clus[i] for i in ix] print(' center inst. index of each cluster: ') ixs_centers = np.array(ixs_centers) print(ixs_centers[ix]) print(' size of each cluster: ') print(lens[ix]) print(' done RWR clustering') return clus_ordered #test #clus = f_fuzzy_rwr_clusters(X, 100) # pass def f_clus_to_tfs(clus, n_inst): #% convert the cluster information from cell array to mat. But for each #% instance, the rank of clusters information will be lost - you won't know #% what is the top 1/2/3 cluster it belongs to. #% #% clus e.g: #% 1x5 cell #% 1x195 double 1x193 double 1x169 double 1x161 double 1x62 double #% #% tfs e.g: #% 295x5 double #% 1 0 0 0 0 #% 1 1 1 1 0 #% 1 1 1 0 0 #% 1 1 0 0 0 #% 1 1 1 1 0 #% ... #% 1 1 1 1 1 #% 1 0 0 0 0 #% 1 1 1 0 0 tfs = np.zeros((n_inst, len(clus)), dtype=bool) for i in range(0, len(clus)): tfs[clus[i], i] = True return tfs # test #tfs = f_clus_to_tfs(clus, len(X)) # pass def f_tfs_2_instClus(tfs): ''' convert the boolean table representation of clustering result to for each instance, what clusters it belongs to. 
''' inst_clus = [] for i in range(0, len(tfs)): row = list( np.where(tfs[i, :] ) [0] ) inst_clus.append(row) return inst_clus # test #inst_clus = f_tfs_2_instClus(tfs) #def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te): # #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \ # bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \ # random_state=None, n_estimators = 100 ) # bagging.fit(X_tr, y_tr) # # y_pred = bagging.predict_proba(X_te) # y_pred = y_pred[:, 1].flatten() # # auc = roc_auc_score(y_te.flatten(), y_pred) # # return [y_pred, auc] # test ''' X_tr = X y_tr = y X_te = X y_te = y [y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te) ''' #def f_bg_tr_te(X_tr, y_tr, X_te, y_te, BaseBagging): # ''' # corresponds to f_weka_bg_svm_tr_te() in Matlab version # ''' # #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \ # bagging = BaggingClassifier(BaseBagging, \ # random_state=None, n_estimators = 100 ) # bagging.fit(X_tr, y_tr) # # y_pred = bagging.predict_proba(X_te) # y_pred = y_pred[:, 1].flatten() # # auc = roc_auc_score(y_te.flatten(), y_pred) # # return [y_pred, auc] def f_tr(X_tr, y_tr, model): model_inner = copy.deepcopy(model) model_inner.fit(X_tr, y_tr) return model_inner def f_te(X_te, model): y_pred = model.predict_proba(X_te) y_pred = y_pred[:, 1].flatten() return y_pred def f_tr_te(X_tr, y_tr, X_te, model): ''' corresponds to f_weka_bg_svm_tr_te() in Matlab version ''' #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \ #bagging = BaggingClassifier(BaseBagging, \ # random_state=None, n_estimators = 100 ) model_inner = copy.deepcopy(model) model_inner.fit(X_tr, y_tr) y_pred = model_inner.predict_proba(X_te) y_pred = y_pred[:, 1].flatten() #auc = roc_auc_score(y_te.flatten(), y_pred) return y_pred def f_k_fo(X, y, model, k_fold=10): ''' corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version ''' y = y.flatten() y_pred = np.zeros(y.size) skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True) skf.get_n_splits(X, y) for train_index, test_index in skf.split(X, y): #print("TRAIN: ", train_index, " TEST: ", test_index) X_tr, X_te = X[train_index], X[test_index] #y_tr, y_te = y[train_index], y[test_index] y_tr = y[train_index] if np.unique(y_tr).size == 1: y_pred_fo = np.zeros( len(test_index) ) #print len(X_te) #print len(test_index) #print y_pred_fo y_pred_fo.fill(np.unique(y_tr)[0] ) #print y_pred_fo else: y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model) y_pred[test_index] = y_pred_fo #auc = roc_auc_score(y.flatten(), y_pred) return y_pred # test #pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/' ##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer ##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y'] #X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma #y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y'] # #model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \ # random_state=None, n_estimators = 100 ) #y_pred = f_k_fo(X, y, model, k_fold=10) # #print roc_auc_score(y.flatten(), y_pred) # the easy dataset mesothelioma get 1.0 CV result. # breast cancer get 0.599 # all results are correct. def f_quantileNorm(templete, target): ''' Templete is the standard, change the target to the values in the templete. Target may have a very different range than the templete. templete and target should be 1d n by 1 array. 
f_my_quantileNorm() ''' ix_target = np.argsort(target, kind='mergesort') ix_templete = np.argsort(templete, kind='mergesort') target[ix_target] = templete[ix_templete] new = target return new # test #templete = X[:, 0] #target = X[:, 1] #new = f_quantileNorm(templete, target) #def f_bg_k_fo_3(X, y, k_fold=10): # ''' # corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version # corresponds to f_k_fo() # ''' # y_pred = np.zeros((y.size, 1)) # # skf = StratifiedKFold(n_splits=k_fold) # skf.get_n_splits(X, y) # # for train_index, test_index in skf.split(X, y): # #print("TRAIN:", train_index, "TEST:", test_index) # X_tr, X_te = X[train_index], X[test_index] # y_tr, y_te = y[train_index], y[test_index] def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner): ''' % using each cluster data to predict the whole instances, while self % prediction using 10-fold CV. corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version ''' n_clusters = len(clus) y_pred_multi = np.zeros((y.size, n_clusters) ) models = [] for j in range(0, n_clusters): # for each cluster Xj = X[clus[j].flatten(), :] yj = y[clus[j].flatten() ] model_a_clust = copy.deepcopy(model) print(' Cluster '+str(j)+' started...') #if len(yj) > 10: if len(yj) > 15 and np.unique(yj).size != 1: # ------------------ for self ------------------ #if np.unique(yj).size == 1: # y_pred = np.zeros(yj.size) # y_pred.fill(np.unique(yj)[0]) #else: try: y_pred = f_k_fo(Xj, yj, model, fo_inner) # quantileNorm templete = y_pred_whole[clus[j].flatten()] target = y_pred y_pred = f_quantileNorm(templete, target) # copy the normed prediction to the whole data. y_pred_multi[clus[j].flatten(), j] = y_pred print(' c-'+str(j)+' done predicting local instances') # ------------------ for other ----------------- ix_other = set(range(0, y.size)) - set(clus[j].flatten()) ix_other = list(ix_other) #print ix_other X_other = X[ix_other , :] #y_other = y[ix_other ] # predict #y_pred = f_tr_te(Xj, yj, X_other, model) #if np.unique(yj).size != 1: model_a_clust.fit(Xj, yj) y_pred = model_a_clust.predict_proba(X_other) y_pred = y_pred[:, 1].flatten() # quantileNorm templete = y_pred_whole[ix_other] target = y_pred y_pred = f_quantileNorm(templete, target) #else: # y_pred = np.zeros(X_other.size) # y_pred.fill(np.unique(yj)[0]) # copy to the whole array y_pred_multi[ix_other, j] = y_pred print(' c-'+str(j)+' done predicting remote instances') except ValueError as e: print(e) print(' skip this cluster') y_pred = np.zeros(y.size) y_pred.fill(np.nan) y_pred_multi[:, j] = y_pred else: if len(yj) <= 15: print (' '+str(len(yj))+' insts in cluster, <= 15, skip...') y_pred = np.zeros(y.size) y_pred.fill(np.nan) y_pred_multi[:, j] = y_pred if np.unique(yj).size == 1: print (' warning, #unique class label(s) == 1') y_pred = np.zeros(y.size) y_pred.fill(np.unique(yj)[0]) y_pred_multi[:, j] = y_pred model_a_clust = np.unique(yj)[0] models.append(model_a_clust) return [y_pred_multi, models] # test #[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model) #def f_dec_tab_4_bg_svm(X, y, clus): # ''' # Calculate the decision table # % This version changed from the cluster-cluster dec_mat to instance-cluster # % dec_mat. This solution will avoid the case that if one cluster decision # % is wrong leading entrie cluster prediction is wrong, which is the reason # % of instability. However, we cannot use a systematic evaluation criteria # % such as AUC, I will try using the predicted prob at first. 
# # % This version 3 adds the support for fuzzy clustering - one instance may # % belongs to more than one cluster. # % This updated version also outputs the predicted values of y. # % support more than 3 clusters # % normalization take place in y_pred_self and y_pred_other, thus do not # % need normalization when predict y_pred_ICE. # % ixsp is another cluster form. # # corresponds to f_dec_tab_4_bg_svm() in Matlab version # ''' # #n_clusters = len(clus) # ## dec_mat stores the prediction error. # #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred # # # ## k_fold of inner cross-validation # #fo_inner = 10 # # --------------------------- WHOLE ------------------------- # # # --------------------------- SELF ------------------------- def f_err_mat(X, y, clus, model): ''' Calculate the decision table corresponds to f_dec_tab_4_bg_svm() in Matlab version ''' n_clusters = len(clus) # err_mat stores the prediction error. pred_prob_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred # col 0 to col n_clusters-1 store the predictions by each cluster # the last col stores the pred by whole data #models = [] # k_fold of inner cross-validation fo_inner = 5 # --------------------------- WHOLE ------------------------- # Predict each cluster using the whole data. model_whole = copy.deepcopy(model) y_pred_whole = f_k_fo(X, y, model_whole, fo_inner) model_whole.fit(X, y) # fit a model using all data rather than only a fold pred_prob_mat[:, n_clusters] = y_pred_whole print (' Done evaluation using whole instances') print (' Start to evaluate each cluster ') # --------------------------- SELF ------------------------- # predict the whole instances using each cluster data, while self # prediction using 10-fold CV. [y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \ y_pred_whole, model, fo_inner) print (' Done evaluation using each cluster') models.append(model_whole) pred_prob_mat[:, 0:n_clusters] = y_pred_multi # make a tmp array a stores y tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1) err_mat = abs(pred_prob_mat - tmp ) print (' Done calculating error table and fitting ICE models') return [err_mat, models] """ #mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\ # '3_scripts/2017_4_4/data/names.mat')['names'] #mat = io.loadmat('/Users/zg/Desktop/a.mat')['names'] #test pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/' X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y'] #X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma #y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y'] n_clus = 3 clus = f_fuzzy_rwr_clusters(X, n_clus) tfs = f_clus_to_tfs(clus, len(X)) y = y.astype(float) #model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \ #model = BaggingClassifier(base_estimator = svm.LinearSVR(), \ #model = BaggingClassifier(base_estimator = svm.LinearSVC(), \ model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \ random_state=None, n_estimators = 100 ) [err_mat, models] = f_err_mat(X, y, clus, model) """ def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5): ''' Convert the err table to decision table. ''' dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool) # dec_ixs: for each instance, which clusters should be used. 
dec_ixs = [] inst_clus = f_tfs_2_instClus(tfs) for i in range(0, len(err_mat)): # Matlab code: #dec_row = dec_mat(cur_nb_ix, :); #dec_row(:, end ) = dec_row(:, end ) - adv_whole; #dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self; row = np.copy( err_mat[i, :] ) #print row row[-1] = row[-1] - adv_whole inst_i_clus = inst_clus[i] if len(inst_i_clus) > 0: row[inst_i_clus] = row[inst_i_clus] - adv_self #print row ix_good_clus = list( np.where( row < row[-1] ) [0] ) #print ix_good_clus if len(ix_good_clus) > 0: dec_mat[i, ix_good_clus] = True dec_ixs.append(ix_good_clus) else: dec_ixs.append([]) return [dec_mat, dec_ixs] #[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs) def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True): ''' Use the training data to predict the testing data. Use whole training data to predict Use each cluster of training data to predict the testing data. ''' y_pred_all = np.zeros(( len(X_te), len(clus) + 1 )) # the first col is the prediction using the whole data model_whole = models[-1] y_pred_all[:, 0] = f_te(X_te, model_whole) #y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model) #print 'whole model good ' # start from the second col, the result is by each cluster for i in range(0, len(clus)): #Xi = X_tr[clus[i].flatten(), :] #yi = y_tr[clus[i].flatten() ] model_i = models[i] #model_a_clust = copy.deepcopy(model) try: y_pred_te = f_te(X_te, model_i) except : if model_i == 0: y_pred_te = np.zeros(len(X_te)) elif model_i == 1: y_pred_te = np.ones(len(X_te)) else: y_pred_te = np.zeros(len(X_te)) y_pred_te.fill(np.nan) #except NotFittedError as e: # print(repr(e)) # y_pred_te = np.zeros(len(X_te)) # y_pred_te.fill(np.nan) #print 'model '+str(i)+' good ' #y_pred_te = f_tr_te(Xi, yi, X_te, model) if doNorm == True: templete = y_pred_all[:, 0] target = y_pred_te y_pred = f_quantileNorm(templete, target) else: y_pred = y_pred_te y_pred_all[:, i+1] = y_pred return y_pred_all # test #y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model) def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5): ''' ''' # rwr based fuzzy clustering clus = f_fuzzy_rwr_clusters(X_tr, n_clus) #print clus[0] tfs = f_clus_to_tfs(clus, len(X_tr)) # train models and calculate the error-dicision tables y_tr = y_tr.astype(float) #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \ # random_state=None, n_estimators = 100 ) [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model) [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s) print (' Done calucating decision table') return [clus, models, dec_ixs] #def_deal_miss_v_1(d): ''' deal with missing values by replacing them by mean. 
''' def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5): ''' This version use the err mat to re-clustering ''' # rwr based fuzzy clustering clus = f_fuzzy_rwr_clusters(X_tr, n_clus) #print clus[0] tfs = f_clus_to_tfs(clus, len(X_tr)) # train models and calculate the error-dicision tables y_tr = y_tr.astype(float) #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \ # random_state=None, n_estimators = 100 ) [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model) # ******************** re-clustering ******************** n_iter = 2 for i in range(0, n_iter): clus = f_fuzzy_rwr_clusters(err_mat, n_clus) tfs = f_clus_to_tfs(clus, len(X_tr)) [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model) # ******************************************************* [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s) print (' Done calucating decision table') return [clus, models, dec_ixs] def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1): ''' clus and inst_clus contains the same information that clus is the instances ids for each cluster, while inst_clus stores that for each instance, which cluster(s) it belongs to. dec_ixs stores the good cluster(s) for each instance, which may include even a remote cluster. each instance in dec_ixs does not contain the whole set of instances. ''' # the first col is the prediction using the whole data # start from the second col, the result is by each cluster y_pred_all = f_ICE_tr_te_all_clus(X_tr, X_te, clus, models) y_pred_ICE = np.zeros( len(X_te) ) neighbour_mat = f_eu_dist2(X_tr, X_te) # ---------- for each testing instance ---------- #n_partials = np.zeros( len(X_te) ) #n_wholes = np.zeros( len(X_te) ) for j in range(0, len(X_te) ): # for each testing instance # find the top 10 neighbors for each test instance neighbour_col = neighbour_mat[:, j].flatten() ix = np.argsort(neighbour_col ) ix = ix[::-1] ix_top_neighbors = ix[0:N] #print 'testing inst ' + str(j) #print ' ix of top neighbors:' #print ix_top_neighbors # ---------- find all neighbors' picks ---------- clus_ids_to_use = [] nei_labels = [] for cur_nb in range(0, N): # for each neighbour # find each neighbour's pick cur_nb_ix = ix_top_neighbors[cur_nb] clus_id_to_use = list( dec_ixs[cur_nb_ix] ) clus_ids_to_use = clus_ids_to_use + clus_id_to_use # also find neighbor's label. maybe will be used later as KNN pred # instead of using whole to pred. nei_labels = nei_labels + list( y_tr[cur_nb_ix] ) #print ' clus_ids_to_use:' #print clus_ids_to_use # cluster id + 1 to make the ix fit the col id in y_pred_all a = clus_ids_to_use a = list( np.array(a) + 1 ) clus_ids_to_use = a # number of partial models used n_partial = len(clus_ids_to_use) # number of whole models used, based on parameters alpha, beta and N. 
n_whole = int( round( alpha*n_partial + beta*N ) ) clus_ids_to_use = clus_ids_to_use + [0] * n_whole #print ' clus_ids_to_use:' #print clus_ids_to_use #print nei_labels y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use]) print ('Done predicting testing instances.') return y_pred_ICE # test # pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/' # pa = '/Users/zg/Dropbox/bio/ICE_2018/' # pa = './' pa = 'C:/Users/zg/Dropbox/bio/ICE_2018/' n_clus = 100 w = 0.4 s = 0.5 N = 5 alpha = 1 beta = 1 k_fold = 10 aucs_ICE = [] aucs_whole = [] # f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt' #f_res = pa + 'data/res_ICE_bg_svm_py.txt' f_res = pa + 'data/res_ICE_SVM_py.txt' f = open(f_res, 'w') #for j in range(1, 50): for j in range(1, 49): try: X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X'] # 30:breast cancer y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y'] #X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer #y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y'] #X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress #y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y'] #imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto') #plt.show() #sim = np.corrcoef(X) #np.fill_diagonal(sim, 0) #n_clus = 100 #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \ # random_state=None, n_estimators = 100 ) model = svm.SVC(kernel='linear', probability = True) skf = StratifiedKFold(n_splits=k_fold) skf.get_n_splits(X, y) y_preds_ICE = np.zeros( y.size ) y_preds_whole = np.zeros( y.size ) fold_i = 1 for train_index, test_index in skf.split(X, y): # print("TRAIN:", train_index, "TEST:", test_index) X_tr, X_te = X[train_index], X[test_index] y_tr, y_te = y[train_index], y[test_index] [clus, models, dec_ixs] = f_ICE_fit(X_tr, y_tr, n_clus, model, w, s) #[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s) y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta) y_preds_ICE[test_index] = y_pred_ICE y_pred_whole = f_tr_te(X_tr, y_tr, X_te, model) y_preds_whole[test_index] = y_pred_whole print( j) print( 'fold ' + str(fold_i) + ' finished') fold_i = fold_i + 1 auc_ICE = roc_auc_score(y.flatten(), y_preds_ICE.flatten() ) auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() ) print (auc_ICE, auc_whole) aucs_ICE.append(auc_ICE) aucs_whole.append(auc_whole) f.write(str(j) + '\t' + str(auc_ICE) + ' \t ' + str(auc_whole) + '\n') except: continue
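As a cross-check on the RWR routine documented at the top of this file, the same update p <- (1 - laziness)·W·p + laziness·p0 with a column-normalised W can be written in a few lines of plain numpy. This is an independent sketch, not a drop-in replacement; the test matrix is the one from the RWR docstring.

import numpy as np

def rwr_reference(A, n_steps=1000, laziness=0.3):
    A = np.asarray(A, dtype=float)
    W = A / A.sum(axis=0)                # column-normalise: each column sums to 1
    p = p0 = np.eye(len(A))
    for _ in range(n_steps):
        p_new = (1 - laziness) * W @ p + laziness * p0
        if np.abs(p_new - p).max() < 1e-12:
            break
        p = p_new
    return p

A = np.array([[0,2,2,0,0,0,0],
              [2,0,1,1,0,0,0],
              [2,1,0,0,1,0,0],
              [0,1,0,0,0,1,1],
              [0,0,1,0,0,0,0],
              [0,0,0,1,0,0,1],
              [0,0,0,1,0,1,0]])
print(np.round(rwr_reference(A)[:, 0], 3))   # compare with the first column in the docstring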
31.348101
93
0.551123
0
0
0
0
0
0
0
0
18,959
0.546826
be20c61ee255e8ce67c5713e68e8dff144cc5ef4
44,105
py
Python
xc/common/utils/prjxray_routing_import.py
FireFox317/symbiflow-arch-defs
f0e7b4212544e1d55da776fb7a2ff79117e01454
[ "ISC" ]
1
2020-09-23T17:57:07.000Z
2020-09-23T17:57:07.000Z
xc/common/utils/prjxray_routing_import.py
tcal-x/symbiflow-arch-defs
1e513ac778371608c51fa86a98e54279e3c74752
[ "ISC" ]
null
null
null
xc/common/utils/prjxray_routing_import.py
tcal-x/symbiflow-arch-defs
1e513ac778371608c51fa86a98e54279e3c74752
[ "ISC" ]
null
null
null
#!/usr/bin/env python3 """ Imports 7-series routing fabric to the rr graph. For ROI configurations, this also connects the synthetic IO tiles to the routing node specified. Rough structure: Add rr_nodes for CHANX and CHANY from the database. IPIN and OPIN rr_nodes should already be present from the input rr_graph. Create a mapping between database graph_nodes and IPIN, OPIN, CHANX and CHANY rr_node ids in the rr_graph. Add rr_edge for each row in the graph_edge table. Import channel XML node from connection database and serialize output to rr_graph XML. """ import argparse import os.path from hilbertcurve.hilbertcurve import HilbertCurve import math import prjxray.db from prjxray.roi import Roi import prjxray.grid as grid from lib.rr_graph import graph2 from lib.rr_graph import tracks from lib.connection_database import get_wire_pkey, get_track_model import lib.rr_graph_capnp.graph2 as capnp_graph2 from prjxray_constant_site_pins import feature_when_routed from prjxray_tile_import import remove_vpr_tile_prefix import simplejson as json from lib import progressbar_utils import datetime import re import functools import pickle import sqlite3 now = datetime.datetime.now HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+') CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)') CASCOUT_REGEX = re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)') CONNECTION_BOX_FILTER = re.compile('([^0-9]+)[0-9]*') BUFG_CLK_IN_REGEX = re.compile('CLK_HROW_CK_IN_[LR][0-9]+') BUFG_CLK_OUT_REGEX = re.compile('CLK_HROW_R_CK_GCLK[0-9]+') CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+') HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)') IOI_OCLK = re.compile('IOI_OCLK_([01])') # Regex for [LR]IOI_SING tiles IOI_SITE_PIPS = ['OLOGIC', 'ILOGIC', 'IDELAY', 'OCLK_', 'OCLKM_'] IOI_SING_REGEX = re.compile( r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\.IOI_)({})([01])(.*)'.format( "|".join(IOI_SITE_PIPS) ) ) def reduce_connection_box(box): """ Reduce the number of connection boxes by merging some. Examples: >>> reduce_connection_box('IMUX0') 'IMUX' >>> reduce_connection_box('IMUX1') 'IMUX' >>> reduce_connection_box('IMUX10') 'IMUX' >>> reduce_connection_box('BRAM_ADDR') 'IMUX' >>> reduce_connection_box('A_L10') 'A' >>> reduce_connection_box('B') 'B' >>> reduce_connection_box('B_L') 'B' """ box = CONNECTION_BOX_FILTER.match(box).group(1) if 'BRAM_ADDR' in box: box = 'IMUX' if box.endswith('_L'): box = box.replace('_L', '') return box REBUF_NODES = {} REBUF_SOURCES = {} def get_clk_hrow_and_rebuf_tiles_sorted(cur): """ Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_T and REBUF tiles. returns them in a list sorted according to their Y coordinates. """ cur.execute( """ SELECT name FROM phy_tile WHERE name LIKE "CLK_HROW_BOT_R_%" OR name LIKE "CLK_HROW_TOP_R_%" OR name LIKE "CLK_BUFG_REBUF_%" ORDER BY grid_y DESC; """ ) return [t[0] for t in cur.fetchall()] def populate_bufg_rebuf_map(conn): global REBUF_NODES REBUF_NODES = {} global REBUF_SOURCES REBUF_SOURCES = {} rebuf_wire_regexp = re.compile( 'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)' ) cur = conn.cursor() # Find CLK_HROW_TOP_R, CLK_HROW_TOP_R and REBUF tiles. rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur) # Append None on both ends of the list to simplify the code below. rebuf_and_hrow_tiles = [None] + rebuf_and_hrow_tiles + [None] def maybe_get_clk_hrow(i): """ Returns a name of CLK_HROW tile only if its there on the list. 
""" tile = rebuf_and_hrow_tiles[i] if tile is not None and tile.startswith("CLK_HROW"): return tile return None # Assign each REBUF tile its above and below CLK_HROW tile. Note that in # VPR coords terms. "above" and "below" mean the opposite... rebuf_to_hrow_map = {} for i, tile_name in enumerate(rebuf_and_hrow_tiles): if tile_name is not None and tile_name.startswith("CLK_BUFG_REBUF"): rebuf_to_hrow_map[tile_name] = { "above": maybe_get_clk_hrow(i - 1), "below": maybe_get_clk_hrow(i + 1), } # Find nodes touching rebuf wires. cur.execute( """ WITH rebuf_wires(wire_in_tile_pkey) AS ( SELECT pkey FROM wire_in_tile WHERE name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_BOT" OR name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_TOP" ), rebuf_nodes(node_pkey) AS ( SELECT DISTINCT node_pkey FROM wire WHERE wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires) ) SELECT rebuf_nodes.node_pkey, phy_tile.name, wire_in_tile.name FROM rebuf_nodes INNER JOIN wire ON wire.node_pkey = rebuf_nodes.node_pkey INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey WHERE wire.wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires) ORDER BY rebuf_nodes.node_pkey;""" ) for node_pkey, rebuf_tile, rebuf_wire_name in cur: if node_pkey not in REBUF_NODES: REBUF_NODES[node_pkey] = [] m = rebuf_wire_regexp.fullmatch(rebuf_wire_name) if m.group(2) == 'TOP': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1)) ) hrow_tile = rebuf_to_hrow_map[rebuf_tile]["below"] if hrow_tile is not None: REBUF_NODES[node_pkey].append( "{}.CLK_HROW_R_CK_GCLK{}_ACTIVE".format( hrow_tile, m.group(1) ) ) elif m.group(2) == 'BOT': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_ABOVE'.format(rebuf_tile, m.group(1)) ) hrow_tile = rebuf_to_hrow_map[rebuf_tile]["above"] if hrow_tile is not None: REBUF_NODES[node_pkey].append( "{}.CLK_HROW_R_CK_GCLK{}_ACTIVE".format( hrow_tile, m.group(1) ) ) else: assert False, (rebuf_tile, rebuf_wire_name) for node_pkey in REBUF_NODES: cur.execute( """ SELECT phy_tile.name, wire_in_tile.name FROM wire INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey WHERE wire.node_pkey = ?;""", (node_pkey, ) ) for tile, wire_name in cur: REBUF_SOURCES[(tile, wire_name)] = node_pkey HCLK_CMT_TILES = {} def populate_hclk_cmt_tiles(db): global HCLK_CMT_TILES HCLK_CMT_TILES = {} grid = db.grid() _, x_max, _, _ = grid.dims() for tile in grid.tiles(): gridinfo = grid.gridinfo_at_tilename(tile) if gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']: continue hclk_x, hclk_y = grid.loc_of_tilename(tile) hclk_cmt_x = hclk_x hclk_cmt_y = hclk_y while hclk_cmt_x > 0: hclk_cmt_x -= 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT': HCLK_CMT_TILES[tile, 'L'] = grid.tilename_at_loc( (hclk_cmt_x, hclk_cmt_y) ) break hclk_cmt_x = hclk_x while hclk_cmt_x < x_max: hclk_cmt_x += 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT_L': HCLK_CMT_TILES[tile, 'R'] = grid.tilename_at_loc( (hclk_cmt_x, hclk_cmt_y) ) break def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number): if (hclk_tile, lr) not in HCLK_CMT_TILES: return [] hclk_cmt_tile = HCLK_CMT_TILES[(hclk_tile, lr)] return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)] def check_feature(feature): """ Check if enabling this feature requires other features to be enabled. 
Some pips imply other features. Example: .HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10 implies: .ENABLE_BUFFER.HCLK_CK_BUFHCLK10 """ # IOI_SING tiles have bits in common with the IOI tiles. # # The difference is that the TOP IOI_SING tile shares bits with # the bottom half of a normal IOI tile, while the BOTTOM IOI_SING # shares bits with the top half of a normal IOI TILE. # # The following, is to change the edge feature to accomodate this # need, as the IOI_SING tiles have the same wire, and pip names # despite they are found on the TOP or BOTTOM of an IOI column m = IOI_SING_REGEX.fullmatch(feature) if m: # Each clock region spans a total of 50 IOBs. # The IOI_SING are found on top or bottom of the whole # IOI/IOB column. The Y coordinate identified with the # second capture group is dived by 50 to get the relative # position of the IOI_SING within the clock region column is_bottom_sing = int(m.group(2)) % 50 == 0 # This is the value to attach to the source pip name that # changes based on which IOI_SING is selected (top or bottom) # # Example: IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1 src_value = '1' if is_bottom_sing else '0' # This is the value to attach to the IOI_SITE_PIPS names # in the destination wire of the pip # # Example: IOI_OLOGIC0 -> IOI_OLOGIC1 dst_value = '0' if is_bottom_sing else '1' unchanged_feature = "{}{}{}{}".format( m.group(1), m.group(2), m.group(3), m.group(4) ) src_wire = m.group(6).replace('_SING', '') for pip in ['IMUX', 'LOGIC_OUTS', 'CTRL', 'FAN', 'BYP']: if pip in src_wire: src_wire = src_wire.replace('_0', '_{}'.format(src_value)) if 'IOI_OCLK' in src_wire: src_wire = src_wire.replace('_0', '_{}'.format(dst_value)) changed_feature = "{}{}".format(dst_value, src_wire) feature = "{}{}".format(unchanged_feature, changed_feature) feature_path = feature.split('.') # IOB_DIFFO_OUT0->IOB_DIFFO_IN1 # # When this PIP is active the IOB operates in the differential output mode. # There is no feature assosciated with that PIP in the prjxray db but there # is a tile-wide feature named "DIFF_OUT". # # The "DIFF_OUT" cannot be set in the architecture as it is defined one # level up in the hierarchy (its tile-wide, not site-wide). So here we # map the PIP's feature to "DIFF_OUT" if feature_path[2] == "IOB_DIFFO_OUT0" and \ feature_path[1] == "IOB_DIFFO_IN1": return '{}.OUT_DIFF'.format(feature_path[0]) # IOB_PADOUT0->IOB_DIFFI_IN1 # IOB_PADOUT1->IOB_DIFFI_IN0 # # These connections are hard wires that connect IOB33M and IOB33S sites. # They are used in differential input mode. # # Vivado does not report this connection as a PIP but in the prjxray db it # is a pip. Instead of making it a pseudo-pip we simply reject fasm # features here. 
if feature_path[2] == "IOB_PADOUT0" and feature_path[1] == "IOB_DIFFI_IN1": return '' if feature_path[2] == "IOB_PADOUT1" and feature_path[1] == "IOB_DIFFI_IN0": return '' # REBUF stuff rebuf_key = (feature_path[0], feature_path[1]) if rebuf_key in REBUF_SOURCES: return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]]) m = IOI_OCLK.fullmatch(feature_path[1]) if m: enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format( feature_path[0], m.group(1), feature_path[-1] ) return ' '.join((feature, enable_oclkm_feature)) if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]): enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_buffer_feature)) # BUFHCE sites are now routed through, without the need of placing them, therefore, # when the relative pip is traversed, the correct fasm feature needs to be added. # The relevant features are: # - IN_USE: to enable the BUFHCE site # - ZINV_CE: to disable the inverter on CE input which is connected to VCC. # This sets the CE signal to constant 1 m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1]) if m: x_loc_str = m.group(1) if 'L' in x_loc_str: x_loc = 0 elif 'R' in x_loc_str: x_loc = 1 else: assert False, "Impossible to determine X location of BUFHCE" y_loc = m.group(2) bufhce_loc = 'BUFHCE_X{}Y{}'.format(x_loc, y_loc) enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format( feature_path[0], bufhce_loc ) enable_bufhce_zinv_ce = '{}.BUFHCE.{}.ZINV_CE=1\'b1'.format( feature_path[0], bufhce_loc ) return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce)) if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_feature)) if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_feature)) if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]): features = [feature] features.append( '{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1]) ) features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1])) return ' '.join(features) m = HCLK_OUT.fullmatch(feature_path[-1]) if m: return ' '.join( [feature] + find_hclk_cmt_hclk_feature( feature_path[0], m.group(1), m.group(2) ) ) m = CASCOUT_REGEX.fullmatch(feature_path[-2]) if m: enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format( feature_path[0], m.group(1) ) return ' '.join((feature, enable_cascout)) parts = feature.split('.') wire_feature = feature_when_routed(parts[1]) if wire_feature is not None: return '{} {}.{}'.format(feature, parts[0], wire_feature) return feature # CLBLL_L.CLBLL_LL_A1[0] -> (CLBLL_L, CLBLL_LL_A1) PIN_NAME_TO_PARTS = re.compile(r'^([^\.]+)\.([^\]]+)\[0\]$') def set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay ): """ Assign a connection box to an IPIN node. """ node_dict = graph.nodes[node_idx]._asdict() node_dict['connection_box'] = graph2.ConnectionBox( x=grid_x, y=grid_y, id=box_id, site_pin_delay=site_pin_delay, ) graph.nodes[node_idx] = graph2.Node(**node_dict) def update_connection_box( conn, graph, graph_node_pkey, node_idx, connection_box_map ): """ Update connection box of IPIN node if needed. 
""" cur = conn.cursor() cur.execute( """ SELECT connection_box_wire_pkey FROM graph_node WHERE pkey = ?""", (graph_node_pkey, ) ) connection_box_wire_pkey = cur.fetchone()[0] if connection_box_wire_pkey is not None: cur.execute( """ SELECT grid_x, grid_y FROM phy_tile WHERE pkey = ( SELECT phy_tile_pkey FROM wire WHERE pkey = ? )""", (connection_box_wire_pkey, ) ) grid_x, grid_y = cur.fetchone() cur.execute( "SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?", (connection_box_wire_pkey, ) ) wire_in_tile_pkey = cur.fetchone()[0] box_id = connection_box_map[wire_in_tile_pkey] cur.execute( """ SELECT switch.intrinsic_delay FROM switch WHERE pkey = ( SELECT site_pin_switch_pkey FROM wire_in_tile WHERE pkey = ( SELECT wire_in_tile_pkey FROM wire WHERE pkey = ( SELECT site_wire_pkey FROM node WHERE pkey = ( SELECT node_pkey FROM graph_node WHERE pkey = ? ) ) ) )""", (graph_node_pkey, ) ) site_pin_delay = cur.fetchone()[0] set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay ) def create_get_tile_and_site_as_tile_pkey(cur): tiles = {} for tile_pkey, site_as_tile_pkey, grid_x, grid_y in cur.execute(""" SELECT pkey, site_as_tile_pkey, grid_x, grid_y FROM tile;"""): tiles[(grid_x, grid_y)] = (tile_pkey, site_as_tile_pkey) def get_tile_and_site_as_tile_pkey(x, y): return tiles[(x, y)] return get_tile_and_site_as_tile_pkey def create_get_site_as_tile_wire(cur): @functools.lru_cache(maxsize=0) def get_site_from_site_as_tile(site_as_tile_pkey): cur.execute( """ SELECT site.site_type_pkey, site_as_tile.site_pkey FROM site_as_tile INNER JOIN site ON site.pkey = site_as_tile.site_pkey WHERE site_as_tile.pkey = ?""", (site_as_tile_pkey, ) ) results = cur.fetchall() assert len(results) == 1, site_as_tile_pkey return results[0] @functools.lru_cache(maxsize=0) def get_site_as_tile_wire(site_as_tile_pkey, pin): site_type_pkey, site_pkey = get_site_from_site_as_tile( site_as_tile_pkey ) cur.execute( """ SELECT pkey FROM wire_in_tile WHERE site_pin_pkey = ( SELECT pkey FROM site_pin WHERE site_type_pkey = ? AND name = ? ) AND site_pkey = ? ;""", (site_type_pkey, pin, site_pkey) ) results = cur.fetchall() assert len(results) == 1 wire_in_tile_pkey = results[0][0] return wire_in_tile_pkey return get_site_as_tile_wire def import_graph_nodes(conn, graph, node_mapping, connection_box_map): cur = conn.cursor() get_tile_and_site_as_tile_pkey = create_get_tile_and_site_as_tile_pkey(cur) get_site_as_tile_wire = create_get_site_as_tile_wire(cur) for node_idx, node in enumerate(graph.nodes): if node.type not in (graph2.NodeType.IPIN, graph2.NodeType.OPIN): continue gridloc = graph.loc_map[(node.loc.x_low, node.loc.y_low)] pin_name = graph.pin_ptc_to_name_map[ (gridloc.block_type_id, node.loc.ptc)] # Synthetic blocks are handled below. if pin_name.startswith('SYN-'): set_connection_box( graph, node_idx, node.loc.x_low, node.loc.y_low, box_id=graph.maybe_add_connection_box('IMUX'), site_pin_delay=0., ) continue m = PIN_NAME_TO_PARTS.match(pin_name) assert m is not None, pin_name tile_type = m.group(1) tile_type = remove_vpr_tile_prefix(tile_type) pin = m.group(2) tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey( node.loc.x_low, node.loc.y_low ) if site_as_tile_pkey is not None: wire_in_tile_pkey = get_site_as_tile_wire(site_as_tile_pkey, pin) else: cur.execute( """ SELECT pkey FROM wire_in_tile WHERE name = ? AND phy_tile_type_pkey IN ( SELECT tile_type_pkey FROM phy_tile WHERE pkey IN ( SELECT phy_tile_pkey FROM tile_map WHERE tile_pkey = ? 
) );""", (pin, tile_pkey) ) results = cur.fetchall() assert len(results) == 1 wire_in_tile_pkey = results[0][0] tile_pkey, _ = get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1]) cur.execute( """ SELECT top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey FROM wire WHERE wire_in_tile_pkey = ? AND tile_pkey = ?;""", (wire_in_tile_pkey, tile_pkey) ) result = cur.fetchone() assert result is not None, (wire_in_tile_pkey, tile_pkey) ( top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey ) = result side = node.loc.side if side == tracks.Direction.LEFT: assert left_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[left_graph_node_pkey] = node.id update_connection_box( conn, graph, left_graph_node_pkey, node_idx, connection_box_map ) elif side == tracks.Direction.RIGHT: assert right_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[right_graph_node_pkey] = node.id update_connection_box( conn, graph, right_graph_node_pkey, node_idx, connection_box_map ) elif side == tracks.Direction.TOP: assert top_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[top_graph_node_pkey] = node.id update_connection_box( conn, graph, top_graph_node_pkey, node_idx, connection_box_map ) elif side == tracks.Direction.BOTTOM: assert bottom_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[bottom_graph_node_pkey] = node.id update_connection_box( conn, graph, bottom_graph_node_pkey, node_idx, connection_box_map ) else: assert False, side def import_tracks(conn, alive_tracks, node_mapping, graph, default_segment_id): cur = conn.cursor() cur2 = conn.cursor() for (graph_node_pkey, track_pkey, graph_node_type, x_low, x_high, y_low, y_high, ptc, capacitance, resistance) in progressbar_utils.progressbar(cur.execute(""" SELECT pkey, track_pkey, graph_node_type, x_low, x_high, y_low, y_high, ptc, capacitance, resistance FROM graph_node WHERE track_pkey IS NOT NULL;""")): if track_pkey not in alive_tracks: continue cur2.execute( """ SELECT name FROM segment WHERE pkey = ( SELECT segment_pkey FROM track WHERE pkey = ? )""", (track_pkey, ) ) result = cur2.fetchone() if result is not None: segment_name = result[0] segment_id = graph.get_segment_id_from_name(segment_name) else: segment_id = default_segment_id node_type = graph2.NodeType(graph_node_type) if node_type == graph2.NodeType.CHANX: direction = 'X' x_low = max(x_low, 1) elif node_type == graph2.NodeType.CHANY: direction = 'Y' y_low = max(y_low, 1) else: assert False, node_type canonical_loc = None cur2.execute( """ SELECT grid_x, grid_y FROM phy_tile WHERE pkey = ( SELECT canon_phy_tile_pkey FROM track WHERE pkey = ? 
)""", (track_pkey, ) ) result = cur2.fetchone() if result: canonical_loc = graph2.CanonicalLoc(x=result[0], y=result[1]) track = tracks.Track( direction=direction, x_low=x_low, x_high=x_high, y_low=y_low, y_high=y_high, ) assert graph_node_pkey not in node_mapping node_mapping[graph_node_pkey] = graph.add_track( track=track, segment_id=segment_id, ptc=ptc, timing=graph2.NodeTiming( r=resistance, c=capacitance, ), canonical_loc=canonical_loc ) def create_track_rr_graph( conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id ): cur = conn.cursor() cur.execute("""SELECT count(*) FROM track;""") (num_channels, ) = cur.fetchone() print('{} Import alive tracks'.format(now())) alive_tracks = set() for (track_pkey, ) in cur.execute("SELECT pkey FROM track WHERE alive = 1;"): alive_tracks.add(track_pkey) print('{} Importing alive tracks'.format(now())) import_tracks(conn, alive_tracks, node_mapping, graph, segment_id) print('original {} final {}'.format(num_channels, len(alive_tracks))) def add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles): cur = conn.cursor() delayless_switch = graph.get_switch_id('__vpr_delayless_switch__') for tile_name, synth_tile in synth_tiles['tiles'].items(): num_inpad = len( list( filter( lambda t: t['port_type'] == 'output', synth_tile['pins'] ) ) ) num_outpad = len( list( filter( lambda t: t['port_type'] == 'input', synth_tile['pins'] ) ) ) for pin in synth_tile['pins']: if pin['port_type'] in ['input', 'output']: wire_pkey = get_wire_pkey(conn, tile_name, pin['wire']) cur.execute( """ SELECT track_pkey FROM node WHERE pkey = ( SELECT node_pkey FROM wire WHERE pkey = ? );""", (wire_pkey, ) ) (track_pkey, ) = cur.fetchone() assert track_pkey is not None, ( tile_name, pin['wire'], wire_pkey ) elif pin['port_type'] == 'VCC': cur.execute('SELECT vcc_track_pkey FROM constant_sources') (track_pkey, ) = cur.fetchone() elif pin['port_type'] == 'GND': cur.execute('SELECT gnd_track_pkey FROM constant_sources') (track_pkey, ) = cur.fetchone() else: assert False, pin['port_type'] tracks_model, track_nodes = get_track_model(conn, track_pkey) option = list( tracks_model.get_tracks_for_wire_at_coord( tuple(synth_tile['loc']) ).values() ) assert len(option) > 0, (pin, len(option)) if pin['port_type'] == 'input': tile_type = synth_tile['tile_name'] wire = 'outpad' elif pin['port_type'] == 'output': tile_type = synth_tile['tile_name'] wire = 'inpad' elif pin['port_type'] == 'VCC': tile_type = 'SYN-VCC' wire = 'VCC' elif pin['port_type'] == 'GND': tile_type = 'SYN-GND' wire = 'GND' else: assert False, pin track_node = track_nodes[option[0]] assert track_node in node_mapping, (track_node, track_pkey) if wire == 'inpad' and num_inpad > 1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, pin['z_loc'], wire ) elif wire == 'outpad' and num_outpad > 1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, (pin['z_loc'] - num_inpad), wire ) else: pin_name = graph.create_pin_name_from_tile_type_and_pin( tile_type, wire ) pin_node = graph.get_nodes_for_pin( tuple(synth_tile['loc']), pin_name ) if pin['port_type'] == 'input': graph.add_edge( src_node=node_mapping[track_node], sink_node=pin_node[0][0], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) elif pin['port_type'] in ['VCC', 'GND', 'output']: graph.add_edge( src_node=pin_node[0][0], sink_node=node_mapping[track_node], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) else: assert False, pin def 
get_switch_name(conn, graph, switch_name_map, switch_pkey): assert switch_pkey is not None if switch_pkey not in switch_name_map: cur = conn.cursor() cur.execute( """SELECT name FROM switch WHERE pkey = ?;""", (switch_pkey, ) ) (switch_name, ) = cur.fetchone() switch_id = graph.get_switch_id(switch_name) switch_name_map[switch_pkey] = switch_id else: switch_id = switch_name_map[switch_pkey] return switch_id def create_get_tile_name(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None) def get_tile_name(tile_pkey): cur.execute( """ SELECT name FROM phy_tile WHERE pkey = ?; """, (tile_pkey, ) ) return cur.fetchone()[0] return get_tile_name def create_get_pip_wire_names(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None) def get_pip_wire_names(pip_pkey): cur.execute( """SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey FROM pip_in_tile WHERE pkey = ?;""", (pip_pkey, ) ) src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone() cur.execute( """SELECT name FROM wire_in_tile WHERE pkey = ?;""", (src_wire_in_tile_pkey, ) ) (src_net, ) = cur.fetchone() cur.execute( """SELECT name FROM wire_in_tile WHERE pkey = ?;""", (dest_wire_in_tile_pkey, ) ) (dest_net, ) = cur.fetchone() return (src_net, dest_net) return get_pip_wire_names def get_number_graph_edges(conn, graph, node_mapping): num_edges = len(graph.edges) print('{} Counting edges.'.format(now())) cur = conn.cursor() cur.execute("SELECT count() FROM graph_edge;" "") for src_graph_node, dest_graph_node in cur.execute(""" SELECT src_graph_node_pkey, dest_graph_node_pkey FROM graph_edge; """): if src_graph_node not in node_mapping: continue if dest_graph_node not in node_mapping: continue num_edges += 1 return num_edges def import_graph_edges(conn, graph, node_mapping): # First yield existing edges print('{} Importing existing edges.'.format(now())) for edge in graph.edges: yield (edge.src_node, edge.sink_node, edge.switch_id, None) # Then yield edges from database. 
cur = conn.cursor() cur.execute("SELECT count() FROM graph_edge;" "") (num_edges, ) = cur.fetchone() get_tile_name = create_get_tile_name(conn) get_pip_wire_names = create_get_pip_wire_names(conn) switch_name_map = {} print('{} Importing edges from database.'.format(now())) with progressbar_utils.ProgressBar(max_value=num_edges) as bar: for idx, (src_graph_node, dest_graph_node, switch_pkey, phy_tile_pkey, pip_pkey, backward) in enumerate(cur.execute(""" SELECT src_graph_node_pkey, dest_graph_node_pkey, switch_pkey, phy_tile_pkey, pip_in_tile_pkey, backward FROM graph_edge; """)): if src_graph_node not in node_mapping: continue if dest_graph_node not in node_mapping: continue if pip_pkey is not None: tile_name = get_tile_name(phy_tile_pkey) src_net, dest_net = get_pip_wire_names(pip_pkey) if not backward: pip_name = '{}.{}.{}'.format(tile_name, dest_net, src_net) else: pip_name = '{}.{}.{}'.format(tile_name, src_net, dest_net) else: pip_name = None switch_id = get_switch_name( conn, graph, switch_name_map, switch_pkey ) src_node = node_mapping[src_graph_node] sink_node = node_mapping[dest_graph_node] if pip_name is not None: feature = check_feature(pip_name) if feature: yield ( src_node, sink_node, switch_id, (('fasm_features', feature), ) ) else: yield (src_node, sink_node, switch_id, ()) else: yield (src_node, sink_node, switch_id, ()) if idx % 1024 == 0: bar.update(idx) def create_channels(conn): cur = conn.cursor() cur.execute( """ SELECT chan_width_max, x_min, x_max, y_min, y_max FROM channel;""" ) chan_width_max, x_min, x_max, y_min, y_max = cur.fetchone() cur.execute('SELECT idx, info FROM x_list;') x_list = [] for idx, info in cur: x_list.append(graph2.ChannelList(idx, info)) cur.execute('SELECT idx, info FROM y_list;') y_list = [] for idx, info in cur: y_list.append(graph2.ChannelList(idx, info)) return graph2.Channels( chan_width_max=chan_width_max, x_min=x_min, y_min=y_min, x_max=x_max, y_max=y_max, x_list=x_list, y_list=y_list, ) def create_connection_boxes(conn, graph): """ Assign connection box ids for all connection box types. """ cur = conn.cursor() cur.execute( """ SELECT pkey, tile_type_pkey, name FROM wire_in_tile WHERE pkey IN ( SELECT DISTINCT wire_in_tile_pkey FROM wire WHERE pkey IN ( SELECT connection_box_wire_pkey FROM graph_node WHERE connection_box_wire_pkey IS NOT NULL ) );""" ) connection_box_map = {} for wire_in_tile_pkey, tile_type_pkey, wire_name in cur: connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box( reduce_connection_box(wire_name) ) return connection_box_map def yield_nodes(nodes): with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar: for idx, node in enumerate(nodes): yield node if idx % 1024 == 0: bar.update(idx) def phy_grid_dims(conn): """ Returns physical grid dimensions. """ cur = conn.cursor() cur.execute("SELECT grid_x FROM phy_tile ORDER BY grid_x DESC LIMIT 1;") x_max = cur.fetchone()[0] cur.execute("SELECT grid_y FROM phy_tile ORDER BY grid_y DESC LIMIT 1;") y_max = cur.fetchone()[0] return x_max + 1, y_max + 1 def find_constant_network(graph): """ Find VCC and GND tiles and create synth_tiles input. All arches should have these synthetic tiles, search the input rr graph for the SYN-GND and SYN-VCC tiles. 
""" block_types = {} for block_type in graph.block_types: block_types[block_type.name] = block_type.id assert 'SYN-GND' in block_types assert 'SYN-VCC' in block_types gnd_block_id = block_types['SYN-GND'] vcc_block_id = block_types['SYN-VCC'] gnd_loc = None vcc_loc = None for grid_loc in graph.grid: if gnd_block_id == grid_loc.block_type_id: assert gnd_loc is None gnd_loc = (grid_loc.x, grid_loc.y) if vcc_block_id == grid_loc.block_type_id: assert vcc_loc is None vcc_loc = (grid_loc.x, grid_loc.y) assert gnd_loc is not None assert vcc_loc is not None synth_tiles = { 'tiles': { "VCC": { 'loc': vcc_loc, 'pins': [ { 'wire': 'VCC', 'pad': 'VCC', 'port_type': 'VCC', 'is_clock': False, }, ], }, "GND": { 'loc': gnd_loc, 'pins': [ { 'wire': 'GND', 'pad': 'GND', 'port_type': 'GND', 'is_clock': False, }, ], }, } } return synth_tiles def create_node_remap(nodes, channels_obj): N = 2 p = math.ceil(math.log2(max(channels_obj.x_max, channels_obj.y_max))) point_map = {} for node in nodes: x = node.loc.x_low y = node.loc.y_low if (x, y) not in point_map: point_map[(x, y)] = [] point_map[(x, y)].append(node.id) hilbert_curve = HilbertCurve(p, N) idx = 0 id_map = {} for h in range(hilbert_curve.max_h + 1): coord = tuple(hilbert_curve.coordinates_from_distance(h)) if coord not in point_map: continue for old_id in point_map[coord]: id_map[old_id] = idx idx += 1 del point_map[coord] return lambda x: id_map[x] def main(): parser = argparse.ArgumentParser() parser.add_argument( '--db_root', required=True, help='Project X-Ray Database' ) parser.add_argument('--part', required=True, help='FPGA part') parser.add_argument( '--read_rr_graph', required=True, help='Input rr_graph file' ) parser.add_argument( '--write_rr_graph', required=True, help='Output rr_graph file' ) parser.add_argument( '--write_rr_node_map', required=True, help='Output map of graph_node_pkey to rr inode file' ) parser.add_argument( '--connection_database', help='Database of fabric connectivity', required=True ) parser.add_argument( '--synth_tiles', help='If using an ROI, synthetic tile defintion from prjxray-arch-import' ) parser.add_argument( '--graph_limit', help='Limit grid to specified dimensions in x_min,y_min,x_max,y_max', ) parser.add_argument( '--vpr_capnp_schema_dir', help='Directory container VPR schema files', ) print('{} Starting routing import'.format(now())) args = parser.parse_args() db = prjxray.db.Database(args.db_root, args.part) populate_hclk_cmt_tiles(db) synth_tiles = None if args.synth_tiles: use_roi = True with open(args.synth_tiles) as f: synth_tiles = json.load(f) roi = Roi( db=db, x1=synth_tiles['info']['GRID_X_MIN'], y1=synth_tiles['info']['GRID_Y_MIN'], x2=synth_tiles['info']['GRID_X_MAX'], y2=synth_tiles['info']['GRID_Y_MAX'], ) print('{} generating routing graph for ROI.'.format(now())) elif args.graph_limit: use_roi = True x_min, y_min, x_max, y_max = map(int, args.graph_limit.split(',')) roi = Roi( db=db, x1=x_min, y1=y_min, x2=x_max, y2=y_max, ) else: use_roi = False roi = None synth_tiles = None capnp_graph = capnp_graph2.Graph( rr_graph_schema_fname=os.path.join( args.vpr_capnp_schema_dir, 'rr_graph_uxsdcxx.capnp' ), input_file_name=args.read_rr_graph, progressbar=progressbar_utils.progressbar, output_file_name=args.write_rr_graph, ) graph = capnp_graph.graph if synth_tiles is None: synth_tiles = find_constant_network(graph) with sqlite3.connect("file:{}?mode=ro".format(args.connection_database), uri=True) as conn: populate_bufg_rebuf_map(conn) cur = conn.cursor() for name, internal_capacitance, 
drive_resistance, intrinsic_delay, penalty_cost, \ switch_type in cur.execute(""" SELECT name, internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, switch_type FROM switch;"""): # Add back missing switchs, which were unused in arch xml, and so # were not emitted in rrgraph XML. # # TODO: This can be removed once # https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/354 # is fixed. try: graph.get_switch_id(name) continue except KeyError: capnp_graph.add_switch( graph2.Switch( id=None, name=name, type=graph2.SwitchType[switch_type.upper()], timing=graph2.SwitchTiming( r=drive_resistance, c_in=0.0, c_out=0.0, c_internal=internal_capacitance, t_del=intrinsic_delay, p_cost=penalty_cost, ), sizing=graph2.SwitchSizing( mux_trans_size=0, buf_size=0, ), ) ) # Mapping of graph_node.pkey to rr node id. node_mapping = {} print('{} Creating connection box list'.format(now())) connection_box_map = create_connection_boxes(conn, graph) # Match site pins rr nodes with graph_node's in the connection_database. print('{} Importing graph nodes'.format(now())) import_graph_nodes(conn, graph, node_mapping, connection_box_map) # Walk all track graph nodes and add them. print('{} Creating tracks'.format(now())) segment_id = graph.get_segment_id_from_name('dummy') create_track_rr_graph( conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id ) # Set of (src, sink, switch_id) tuples that pip edges have been sent to # VPR. VPR cannot handle duplicate paths with the same switch id. print('{} Adding synthetic edges'.format(now())) add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles) print('{} Creating channels.'.format(now())) channels_obj = create_channels(conn) node_remap = create_node_remap(capnp_graph.graph.nodes, channels_obj) x_dim, y_dim = phy_grid_dims(conn) connection_box_obj = graph.create_connection_box_object( x_dim=x_dim, y_dim=y_dim ) num_edges = get_number_graph_edges(conn, graph, node_mapping) print('{} Serializing to disk.'.format(now())) capnp_graph.serialize_to_capnp( channels_obj=channels_obj, connection_box_obj=connection_box_obj, num_nodes=len(capnp_graph.graph.nodes), nodes_obj=yield_nodes(capnp_graph.graph.nodes), num_edges=num_edges, edges_obj=import_graph_edges(conn, graph, node_mapping), node_remap=node_remap, ) for k in node_mapping: node_mapping[k] = node_remap(node_mapping[k]) print('{} Writing node map.'.format(now())) with open(args.write_rr_node_map, 'wb') as f: pickle.dump(node_mapping, f) print('{} Done writing node map.'.format(now())) if __name__ == '__main__': main()
30.375344
90
0.590114
0
0
2,481
0.056252
1,969
0.044643
0
0
12,494
0.283279
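The routing importer above reads the prjxray connection database through a read-only sqlite3 URI before serializing the rr graph. A minimal sketch of that access pattern, using the track and graph_node tables the script itself queries; the database path is an assumption and depends on your build output:

import sqlite3

# Hypothetical path to a prjxray connection database produced earlier in the flow.
DB_PATH = "channels.db"

# Open read-only via a URI, the same way the importer opens the database.
with sqlite3.connect("file:{}?mode=ro".format(DB_PATH), uri=True) as conn:
    cur = conn.cursor()

    # Count tracks that survived pruning (the importer only maps these).
    cur.execute("SELECT count(*) FROM track WHERE alive = 1;")
    (alive_tracks, ) = cur.fetchone()

    # Count routing graph nodes that are bound to a track (CHANX/CHANY candidates).
    cur.execute("SELECT count(*) FROM graph_node WHERE track_pkey IS NOT NULL;")
    (routing_nodes, ) = cur.fetchone()

    print("alive tracks:", alive_tracks)
    print("graph nodes bound to tracks:", routing_nodes)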
be20fd972c9533d7359e606c8ff9c31f5c519ad2
17,854
py
Python
testing/onQuest/longClusters/m67/OLD-analyseEBLSSTm67.py
andrewbowen19/ClusterEclipsingBinaries
e554cb6bb613e0d3703314e50fcf5289f50bf572
[ "MIT" ]
null
null
null
testing/onQuest/longClusters/m67/OLD-analyseEBLSSTm67.py
andrewbowen19/ClusterEclipsingBinaries
e554cb6bb613e0d3703314e50fcf5289f50bf572
[ "MIT" ]
null
null
null
testing/onQuest/longClusters/m67/OLD-analyseEBLSSTm67.py
andrewbowen19/ClusterEclipsingBinaries
e554cb6bb613e0d3703314e50fcf5289f50bf572
[ "MIT" ]
null
null
null
######################### ######################### # Need to account for limit in input period ######################### ######################### # Baseline M67 long script -- NO crowding # New script copied from quest - want to take p and ecc from each population (all, obs, rec) and put them into separate file # Doing this so we don't have to run analyse each time # Can write separate script for p-ecc plots # Quest paths in this version of script import pandas as pd import numpy as np import os from astropy.coordinates import SkyCoord from astropy import units, constants from astropy.modeling import models, fitting import scipy.stats from scipy.integrate import quad #for Quest import matplotlib matplotlib.use('Agg') doIndividualPlots = True from matplotlib import pyplot as plt def file_len(fname): i = 0 with open(fname) as f: for i, l in enumerate(f): pass return i + 1 def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass): Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.) return Phs.decompose().to(units.day) #similar to field, but limiting by the hard-soft boundary def fitRagfb(): x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html y = [0.20, 0.35, 0.50, 0.70, 0.75] init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.) fitter = fitting.LevMarLSQFitter() fit = fitter(init, x, y) return fit def RagNormal(x, cdf = False): mean = 5.03 std = 2.28 if (cdf): return scipy.stats.norm.cdf(x,mean,std) return scipy.stats.norm.pdf(x,mean,std) def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_','all']): c1 = '#5687A6' #Dali Blue (Andrew's AAS Poster) c2 = '#A62B1F' #Dai Red c3 = '#BF8A26' #Dali Beige fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change to include cdf with ax1, ax2 histAll = np.insert(histAll,0,0) histObs = np.insert(histObs,0,0) for f in filters: histRec[f] = np.insert(histRec[f],0,0) #PDF ax1.step(bin_edges, histAll/np.sum(histAll), color=c1) ax1.step(bin_edges, histObs/np.sum(histObs), color=c2) for f in filters: lw = 1 if (f == 'all'): lw = 0.5 ax1.step(bin_edges, histRec[f]/np.sum(histRec[f]), color=c3, linewidth=lw) ax1.set_ylabel('PDF') ax1.set_yscale('log') ax1.set_title('Globular Clusters - Baseline', fontsize = 16) ax1.set_xlabel(xtitle) #CDF #cdfAll = [] #cdfObs = [] #cdfRec = dict() #for f in filters: # cdfRec[f] = [] # for i in range(len(histAll)): # cdfAll.append(np.sum(histAll[:i])/np.sum(histAll)) # for i in range(len(histObs)): # cdfObs.append(np.sum(histObs[:i])/np.sum(histObs)) # for f in filters: # for i in range(len(histRec[f])): # cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f])) #ax2.step(bin_edges, cdfAll, color=c1) #ax2.step(bin_edges, cdfObs, color=c2) #for f in filters: # lw = 1 # if (f == 'all'): # lw = 0.5 # ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw) #ax2.set_ylabel('CDF') #ax2.set_xlabel(xtitle) fig.subplots_adjust(hspace=0) fig.savefig('./plots/' + fname+'.pdf',format='pdf', bbox_inches = 'tight') #write to a text file with open('./eblsst_files/' + fname+'.csv','w') as fl: outline = 'binEdges,histAll,histObs' for f in filters: outline += ','+f+'histRec' outline += '\n' fl.write(outline) for i in range(len(bin_edges)): outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i]) for f in filters: outline += ','+str(histRec[f][i]) outline += '\n' fl.write(outline) if __name__ == "__main__": 
filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all'] #get the Raghavan binary fraction fit fbFit= fitRagfb() print(fbFit) #to normalize intAll, err = quad(RagNormal, -20, 20) intCut, err = quad(RagNormal, -20, np.log10(365*10.)) intNorm = intCut/intAll #cutoff in percent error for "recovered" Pcut = 0.1 #assumed mean stellar mass mMean = 0.5 #minimum number of lines to consider in file Nlim = 3 if (doIndividualPlots): fmass, axmass = plt.subplots() fqrat, axqrat = plt.subplots() fecc, axecc = plt.subplots() flper, axlper = plt.subplots() fdist, axdist = plt.subplots() fmag, axmag = plt.subplots() frad, axrad = plt.subplots() #bins for all the histograms Nbins = 25 mbins = np.arange(0,10, 0.1, dtype='float') qbins = np.arange(0,1, 0.1, dtype='float') ebins = np.arange(0, 1.05, 0.05, dtype='float') lpbins = np.arange(-2, 10, 0.5, dtype='float') dbins = np.arange(0, 40, 1, dtype='float') magbins = np.arange(11, 25, 1, dtype='float') rbins = np.arange(0, 100, 0.2, dtype='float') #blanks for the histograms #All m1hAll = np.zeros_like(mbins)[1:] qhAll = np.zeros_like(qbins)[1:] ehAll = np.zeros_like(ebins)[1:] lphAll = np.zeros_like(lpbins)[1:] dhAll = np.zeros_like(dbins)[1:] maghAll = np.zeros_like(magbins)[1:] rhAll = np.zeros_like(rbins)[1:] #Observable m1hObs = np.zeros_like(mbins)[1:] qhObs = np.zeros_like(qbins)[1:] ehObs = np.zeros_like(ebins)[1:] lphObs = np.zeros_like(lpbins)[1:] dhObs = np.zeros_like(dbins)[1:] maghObs = np.zeros_like(magbins)[1:] rhObs = np.zeros_like(rbins)[1:] #Recovered m1hRec = dict() qhRec = dict() ehRec = dict() lphRec = dict() dhRec = dict() maghRec = dict() rhRec = dict() for f in filters: m1hRec[f] = np.zeros_like(mbins)[1:] qhRec[f] = np.zeros_like(qbins)[1:] ehRec[f] = np.zeros_like(ebins)[1:] lphRec[f] = np.zeros_like(lpbins)[1:] dhRec[f] = np.zeros_like(dbins)[1:] maghRec[f] = np.zeros_like(magbins)[1:] rhRec[f] = np.zeros_like(rbins)[1:] RA = [] Dec = [] recFrac = [] recN = [] rawN = [] obsN = [] fileN = [] fileObsN = [] fileRecN = [] allNPrsa = [] obsNPrsa = [] recNPrsa = [] # Lists for period and eccentricity for Andrew's circularization plots eccAll = [] eccObs = [] eccRec = [] pAll = [] pObs = [] pRec = [] # Using prsa dataframes for these lists because of period cutoff at 1000 days # Dataframes to write to files later; 3 files for each sub-population - append everything to these peccAll = pd.DataFrame(columns = ['e', 'p']) peccObs = pd.DataFrame(columns = ['e', 'p']) peccRec = pd.DataFrame(columns = ['e', 'p']) #Read in all the data and make the histograms d = "./input_files/" files = os.listdir(d) IDs = [] for i, f in enumerate(files): print(round(i/len(files),4), f) fl = file_len(d+f) if (fl >= 4): #read in the header header = pd.read_csv(d+f, nrows=1) ###################### #NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms ##################### Nmult = header['clusterMass'][0]/mMean #Nmult = 1. RA.append(header['OpSimRA']) Dec.append(header['OpSimDec']) #read in rest of the file data = pd.read_csv(d+f, header = 2).fillna(-999) rF = 0. rN = 0. Nrec = 0. Nobs = 0. raN = 0. obN = 0. fiN = 0. fioN = 0. firN = 0. NallPrsa = 0. NobsPrsa = 0. NrecPrsa = 0. Nall = len(data.index)/intNorm ###is this correct? (and the only place I need to normalize?) 
prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)] # Appending for Andrew eccAll.append(prsa['e'].values) pAll.append(prsa['p'].values) NallPrsa = len(prsa.index) if (Nall >= Nlim): #create histograms #All m1hAll0, m1b = np.histogram(data["m1"], bins=mbins) qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins) ehAll0, eb = np.histogram(data["e"], bins=ebins) lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins) dhAll0, db = np.histogram(data["d"], bins=dbins) maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins) rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins) if (doIndividualPlots): axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1) axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1) axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1) axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1) axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1) axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1) axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1) #account for the binary fraction, as a function of mass dm1 = np.diff(m1b) m1val = m1b[:-1] + dm1/2. fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val)) #account for the hard-soft boundary Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value fb *= RagNormal(np.log10(Phs), cdf = True) print("fb, Phs = ", fb, Phs) Nmult *= fb m1hAll += m1hAll0/Nall*Nmult qhAll += qhAll0/Nall*Nmult ehAll += ehAll0/Nall*Nmult lphAll += lphAll0/Nall*Nmult dhAll += dhAll0/Nall*Nmult maghAll += maghAll0/Nall*Nmult rhAll += rhAll0/Nall*Nmult #Obs obs = data.loc[data['LSM_PERIOD'] != -999] Nobs = len(obs.index) prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)] NobsPrsa = len(prsaObs.index) # Appending for Andrew's files eccObs.append(prsaObs['e'].values) pObs.append(prsaObs['p'].values) if (Nobs >= Nlim): m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins) qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins) ehObs0, eb = np.histogram(obs["e"], bins=ebins) lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins) dhObs0, db = np.histogram(obs["d"], bins=dbins) maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins) rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins) m1hObs += m1hObs0/Nall*Nmult qhObs += qhObs0/Nall*Nmult ehObs += ehObs0/Nall*Nmult lphObs += lphObs0/Nall*Nmult dhObs += dhObs0/Nall*Nmult maghObs += maghObs0/Nall*Nmult rhObs += rhObs0/Nall*Nmult #Rec recCombined = pd.DataFrame() prsaRecCombined = pd.DataFrame() for filt in filters: key = filt+'LSS_PERIOD' if (filt == 'all'): key = 'LSM_PERIOD' fullP = abs(data[key] - data['p'])/data['p'] halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p']) twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p']) rec = data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))] prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] >15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))] Nrec = len(rec.index) #I'd like to account for all filters here to have more accurate numbers recCombined = recCombined.append(rec) prsaRecCombined = prsaRecCombined.append(prsaRec) # 
Going to use prsaRecCombined for ecc-p plots to account for all filters eccRec.append(prsaRec['e'].values) pRec.append(prsaRec['p'].values) if (filt == 'all'): recCombined.drop_duplicates(inplace=True) prsaRecCombined.drop_duplicates(inplace=True) if (Nrec >= Nlim): m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins) qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins) ehRec0, eb = np.histogram(rec["e"], bins=ebins) lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins) dhRec0, db = np.histogram(rec["d"], bins=dbins) maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins) rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins) m1hRec[filt] += m1hRec0/Nall*Nmult qhRec[filt] += qhRec0/Nall*Nmult ehRec[filt] += ehRec0/Nall*Nmult lphRec[filt] += lphRec0/Nall*Nmult dhRec[filt] += dhRec0/Nall*Nmult maghRec[filt] += maghRec0/Nall*Nmult rhRec[filt] += rhRec0/Nall*Nmult #for the mollweide if (filt == 'all'): Nrec = len(recCombined.index) rF = Nrec/Nall rN = Nrec/Nall*Nmult raN = Nmult obN = Nobs/Nall*Nmult fiN = Nall fioN = Nobs firN = Nrec NrecPrsa = len(prsaRecCombined.index) NrecPrsa = NrecPrsa/Nall*Nmult NobsPrsa = NobsPrsa/Nall*Nmult NallPrsa = NallPrsa/Nall*Nmult recFrac.append(rF) recN.append(rN) rawN.append(raN) obsN.append(obN) fileN.append(fiN) fileObsN.append(fioN) fileRecN.append(firN) allNPrsa.append(NallPrsa) obsNPrsa.append(NobsPrsa) recNPrsa.append(NrecPrsa) #print(np.sum(lphRec), np.sum(recN), np.sum(lphRec)/np.sum(recN), np.sum(lphRec0), Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN)) # Concatenating p and ecc lists eccAll = np.concatenate(eccAll) eccObs = np.concatenate(eccObs) eccRec = np.concatenate(eccRec) pAll = np.concatenate(pAll) pObs = np.concatenate(pObs) pRec = np.concatenate(pRec) # print('Ecc lists:', eccAll, eccObs, eccRec) # print('P lists:', pAll, pObs, pRec) # Appending lists with all the p/ecc values to our dataframes # All dataframe peccAll['e'] = eccAll peccAll['p'] = pAll # Observable dataframe peccObs['e'] = eccObs peccObs['p'] = pObs # Recovered dataframe peccRec['e'] = eccRec peccRec['p'] = pRec # print('Final Dataframes:', peccAll, peccObs, peccRec) # print(peccRec.columns) # 3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding) peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p']) peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header = ['e', 'p']) peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header = ['e', 'p']) #plot and save the histograms saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist') saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist') saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist') saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist') saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist') saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist') saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist') #make the mollweide coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs') lGal = coords.galactic.l.wrap_at(180.*units.degree).degree bGal = coords.galactic.b.wrap_at(180.*units.degree).degree RAwrap = coords.ra.wrap_at(180.*units.degree).degree Decwrap = coords.dec.wrap_at(180.*units.degree).degree f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5)) ax.grid(True) #ax.set_xlabel(r"$l$",fontsize=16) #ax.set_ylabel(r"$b$",fontsize=16) #mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., 
c=np.log10(np.array(recFrac)*100.), cmap='viridis_r', s = 4) ax.set_xlabel("RA",fontsize=16) ax.set_ylabel("Dec",fontsize=16) mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4) cbar = f.colorbar(mlw, shrink=0.7) cbar.set_label(r'% recovered') f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight') f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5)) ax.grid(True) #ax.set_xlabel(r"$l$",fontsize=16) #ax.set_ylabel(r"$b$",fontsize=16) #mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4) ax.set_xlabel("RA",fontsize=16) ax.set_ylabel("Dec",fontsize=16) mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4) cbar = f.colorbar(mlw, shrink=0.7) cbar.set_label(r'log10(N) recovered') f.savefig('./plots/' + 'mollweide_N.pdf',format='pdf', bbox_inches = 'tight') if (doIndividualPlots): fmass.savefig('./plots/' + 'massPDFall.pdf',format='pdf', bbox_inches = 'tight') fqrat.savefig('./plots/' + 'qPDFall.pdf',format='pdf', bbox_inches = 'tight') fecc.savefig('./plots/' + 'eccPDFall.pdf',format='pdf', bbox_inches = 'tight') flper.savefig('./plots/' + 'lperPDFall.pdf',format='pdf', bbox_inches = 'tight') fdist.savefig('./plots/' + 'distPDFall.pdf',format='pdf', bbox_inches = 'tight') fmag.savefig('./plots/' + 'magPDFall.pdf',format='pdf', bbox_inches = 'tight') frad.savefig('./plots/' + 'radPDFall.pdf',format='pdf', bbox_inches = 'tight') print("###################") print("number of binaries in input files (raw, log):",np.sum(fileN), np.log10(np.sum(fileN))) print("number of binaries in tested with gatspy (raw, log):",np.sum(fileObsN), np.log10(np.sum(fileObsN))) print("number of binaries in recovered with gatspy (raw, log):",np.sum(fileRecN), np.log10(np.sum(fileRecN))) print("recovered/observable*100 with gatspy:",np.sum(fileRecN)/np.sum(fileObsN)*100.) print("###################") print("total in sample (raw, log):",np.sum(rawN), np.log10(np.sum(rawN))) print("total observable (raw, log):",np.sum(obsN), np.log10(np.sum(obsN))) print("total recovered (raw, log):",np.sum(recN), np.log10(np.sum(recN))) print("recovered/observable*100:",np.sum(recN)/np.sum(obsN)*100.) print("###################") print("total in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(allNPrsa), np.log10(np.sum(allNPrsa))) print("total observable in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(obsNPrsa), np.log10(np.sum(obsNPrsa))) print("total recovered in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(recNPrsa), np.log10(np.sum(recNPrsa))) print("Prsa 15.8<r<19.5 P<1000d rec/obs*100:",np.sum(recNPrsa)/np.sum(obsNPrsa)*100.)
35.284585
213
0.641089
0
0
0
0
0
0
0
0
5,571
0.312031
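The analysis above scales each cluster's binary fraction by the part of the Raghavan log-normal period distribution that falls below the cluster's hard-soft boundary period. A standalone sketch of that step, reusing the same expression as getPhs and RagNormal; the 10 km/s velocity dispersion is purely illustrative:

import numpy as np
import scipy.stats
from astropy import units, constants

def hard_soft_period(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass):
    # Same expression as getPhs() above: binaries wider than this period are
    # "soft" with respect to encounters at velocity dispersion sigma.
    Phs = (np.pi*constants.G/np.sqrt(2.)
           * (m1*m2/m3)**(3./2.) * (m1 + m2)**(-0.5) * sigma**(-3.))
    return Phs.decompose().to(units.day)

sigma = 10*units.km/units.s  # illustrative cluster velocity dispersion
Phs = hard_soft_period(sigma)

# Fraction of the Raghavan log-normal period distribution
# (mean log10(P/day) = 5.03, sigma = 2.28) that lies below the boundary,
# i.e. the factor applied to fb in the loop above.
frac_hard = scipy.stats.norm.cdf(np.log10(Phs.value), 5.03, 2.28)
print(Phs, frac_hard)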
be21dcede1ec1af84c0ccb9e8297bd042d23271a
1,712
py
Python
CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
13
2015-11-30T15:49:45.000Z
2022-02-08T16:11:30.000Z
CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
640
2015-02-11T18:55:47.000Z
2022-03-31T14:12:23.000Z
CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
51
2015-08-11T21:01:40.000Z
2022-03-30T07:31:34.000Z
import FWCore.ParameterSet.Config as cms import os process = cms.Process("summary") process.MessageLogger = cms.Service( "MessageLogger", debugModules = cms.untracked.vstring( "*" ), cout = cms.untracked.PSet( threshold = cms.untracked.string( "DEBUG" ) ), destinations = cms.untracked.vstring( "cout" ) ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.source = cms.Source("EmptySource", numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load("CondCore.CondDB.CondDB_cfi") process.load("CondTools.BeamSpot.BeamSpotRcdPrinter_cfi") ### 2018 Prompt process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt" process.BeamSpotRcdPrinter.startIOV = 1350646955507767 process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.BeamSpotRcdPrinter.output = "summary2018_Prompt.txt" ### 2017 ReReco #process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline" #process.BeamSpotRcdPrinter.startIOV = 1275820035276801 #process.BeamSpotRcdPrinter.endIOV = 1316235677532161 ### 2018 ABC ReReco #process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline" #process.BeamSpotRcdPrinter.startIOV = 1354018504835073 #process.BeamSpotRcdPrinter.endIOV = 1374668707594734 ### 2018D Prompt #process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt" #process.BeamSpotRcdPrinter.startIOV = 1377280047710242 #process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.p = cms.Path(process.BeamSpotRcdPrinter)
38.044444
110
0.733645
0
0
0
0
0
0
0
0
812
0.474299
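The commented blocks in the configuration above list alternative tags and IOV ranges. A sketch of the same configuration pointed at the 2017 ReReco window instead, built only from statements that already appear in the file; the output filename is an assumption:

import FWCore.ParameterSet.Config as cms

process = cms.Process("summary")

process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(1))
process.source = cms.Source("EmptySource",
                            numberEventsInRun=cms.untracked.uint32(1),
                            firstRun=cms.untracked.uint32(1))

process.load("CondCore.CondDB.CondDB_cfi")
process.load("CondTools.BeamSpot.BeamSpotRcdPrinter_cfi")

### 2017 ReReco tag and IOV range, taken from the commented block above
process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline"
process.BeamSpotRcdPrinter.startIOV = 1275820035276801
process.BeamSpotRcdPrinter.endIOV = 1316235677532161
process.BeamSpotRcdPrinter.output = "summary2017_ReReco.txt"  # assumed output name

process.p = cms.Path(process.BeamSpotRcdPrinter)

As with the original, this would be run with cmsRun inside a CMSSW environment.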
be237e880ccb11dff8fac9488a75005cce1dd897
381
py
Python
django/authentication/api/urls.py
NAVANEETHA-BS/Django-Reactjs-Redux-Register-login-logout-Homepage--Project
f29ed189b988a2d46d76b3c58cf77d1ed58ca64d
[ "MIT" ]
2
2021-05-13T18:02:00.000Z
2022-03-30T19:53:38.000Z
django/authentication/api/urls.py
NAVANEETHA-BS/Django-Reactjs-Redux-Register-login-logout-Homepage--Project
f29ed189b988a2d46d76b3c58cf77d1ed58ca64d
[ "MIT" ]
null
null
null
django/authentication/api/urls.py
NAVANEETHA-BS/Django-Reactjs-Redux-Register-login-logout-Homepage--Project
f29ed189b988a2d46d76b3c58cf77d1ed58ca64d
[ "MIT" ]
null
null
null
from django.urls import path from rest_framework_simplejwt.views import ( TokenObtainPairView, TokenRefreshView, TokenVerifyView ) urlpatterns = [ path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'), path('refresh/', TokenRefreshView.as_view(), name='token_refresh'), path('verify/', TokenVerifyView.as_view(), name='token_verify'), ]
29.307692
77
0.734908
0
0
0
0
0
0
0
0
76
0.199475
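The three routes above expose the stock rest_framework_simplejwt token views. A client-side sketch of how they are typically exercised; the base URL prefix and the credentials are assumptions, since the include() prefix for this urls.py lives elsewhere in the project:

import requests

# Assumed prefix; adjust to however this urls.py is include()'d in the project.
BASE = "http://localhost:8000/api/token/"

# Obtain an access/refresh pair (placeholder credentials).
resp = requests.post(BASE + "obtain/",
                     data={"username": "alice", "password": "secret"})
tokens = resp.json()  # {"access": "...", "refresh": "..."} on success

# Verify the access token.
requests.post(BASE + "verify/", data={"token": tokens["access"]})

# Get a fresh access token once the old one expires.
requests.post(BASE + "refresh/", data={"refresh": tokens["refresh"]})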
be23b9cced5e521037b8711e7bde05f5d17925f0
7,257
py
Python
yue/core/explorer/ftpsource.py
nsetzer/YueMusicPlayer
feaf6fe5c046b1a7f6b7774d4e86a2fbb1e431cf
[ "MIT" ]
null
null
null
yue/core/explorer/ftpsource.py
nsetzer/YueMusicPlayer
feaf6fe5c046b1a7f6b7774d4e86a2fbb1e431cf
[ "MIT" ]
null
null
null
yue/core/explorer/ftpsource.py
nsetzer/YueMusicPlayer
feaf6fe5c046b1a7f6b7774d4e86a2fbb1e431cf
[ "MIT" ]
1
2019-03-06T14:29:27.000Z
2019-03-06T14:29:27.000Z
from ftplib import FTP,error_perm, all_errors import posixpath from io import BytesIO,SEEK_SET from .source import DataSource import sys import re reftp = re.compile('(ssh|ftp)\:\/\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\/(.*)') def parseFTPurl( url ): m = reftp.match( url ) if m: g = m.groups() result = { "mode" : g[0], "username" : g[2] or "", "password" : g[3] or "", "hostname" : g[4] or "", "port" : int(g[5][1:]) if g[5] else 0, "path" : g[6] or "/", } if result['port'] == 0: if result['mode'] == ssh: result['port'] = 22 else: result['port'] = 21 # ftp port default return result raise ValueError("invalid: %s"%url) def utf8_fix(s): return ''.join([ a if ord(a)<128 else "%02X"%ord(a) for a in s]) class FTPWriter(object): """docstring for FTPWriter""" def __init__(self, ftp, path): super(FTPWriter, self).__init__() self.ftp = ftp self.path = path self.file = BytesIO() def write(self,data): return self.file.write(data) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self): return self.file.tell() def close(self): self.file.seek(0) text = "STOR " + utf8_fix(self.path) self.ftp.storbinary(text, self.file) def __enter__(self): return self def __exit__(self,typ,val,tb): if typ is None: self.close() class FTPReader(object): """docstring for FTPWriter""" def __init__(self, ftp, path): super(FTPReader, self).__init__() self.ftp = ftp self.path = path self.file = BytesIO() # open the file text = "RETR " + utf8_fix(self.path) self.ftp.retrbinary(text, self.file.write) self.file.seek(0) def read(self,n=None): return self.file.read(n) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self): return self.file.tell() def close(self): self.file.close() def __enter__(self): return self def __exit__(self,typ,val,tb): if typ is None: self.close() class FTPSource(DataSource): """ there is some sort of problem with utf-8/latin-1 and ftplib storbinary must accepts a STRING, since it builds a cmd and add the CRLF to the input argument using the plus operator. the command fails when given unicode text (ord > 127) and also fails whenm given a byte string. """ # TODO: turn this into a directory generator # which first loads the directory, then loops over # loaded items. # TODO: on windows we need a way to view available # drive letters def __init__(self, host, port, username="", password=""): super(FTPSource, self).__init__() self.ftp = FTP() self.ftp.connect(host,port) self.ftp.login(username,password) self.hostname = "%s:%d"%(host,port) def root(self): return "/" def close(self): try: self.ftp.quit() except all_errors as e: sys.stderr.write("Error Closing FTP connection\n") sys.stderr.write("%s\n"%e) super().close() def fix(self, path): return utf8_fix(path) def join(self,*args): return posixpath.join(*args) def breakpath(self,path): return [ x for x in path.replace("/","\\").split("\\") if x ] def relpath(self,path,base): return posixpath.relpath(path,base) def normpath(self,path,root=None): if root and not path.startswith("/"): path = posixpath.join(root,path) return posixpath.normpath( path ) def listdir(self,path): return self.ftp.nlst(path) def parent(self,path): # TODO: if path is C:\\ return empty string ? 
# empty string returns drives p,_ = posixpath.split(path) return p def move(self,oldpath,newpath): self.ftp.rename(oldpath,newpath) def delete(self,path): # todo support removing directory rmdir() path = utf8_fix(path) if self.exists( path ): if self.isdir(path): try: self.ftp.rmd(path) except Exception as e: print("ftp delete error: %s"%e) else: try: self.ftp.delete(path) except Exception as e: print("ftp delete error: %s"%e) def open(self,path,mode): if mode=="wb": return FTPWriter(self.ftp,path) elif mode=="rb": return FTPReader(self.ftp,path) raise NotImplementedError(mode) def exists(self,path): path = utf8_fix(path) p,n=posixpath.split(path) lst = set(self.listdir(p)) return n in lst def isdir(self,path): path = utf8_fix(path) try: return self.ftp.size(path) is None except error_perm: # TODO: to think about more later, # under my use-case, I'm only asking if a path is a directory # if I Already think it exists. Under the current FTP impl # ftp.size() fails for various reasons unless the file exists # and is an accessable file. I can infer that a failure to # determine the size means that the path is a directory, # but this does not hold true under other use cases. # I can't cache listdir calls, but if I could, then I could # use that to determine if the file exists return True#self.exists( path ) def mkdir(self,path): # this is a really ugly quick and dirty solution path = utf8_fix(path) if not self.exists(path): p = self.parent( path ) try: if not self.exists(p): self.ftp.mkd( p ) self.ftp.mkd(path) except Exception as e: print("ftp mkd error: %s"%e) def split(self,path): return posixpath.split(path) def splitext(self,path): return posixpath.splitext(path) def stat(self,path): try: size = self.ftp.size(path) except error_perm: size = None result = { "isDir" : size is None, "isLink": False, "mtime" : 0, "ctime" : 0, "size" : size or 0, "name" : self.split(path)[1], "mode" : 0 } return result def stat_fast(self,path): # not fast for thus file system :( try: size = self.ftp.size(path) except error_perm: size = None result = { "name" : self.split(path)[1], "size" : size or 0, "isDir" : size is None, "isLink" : False, } return result def chmod(self,path,mode): print("chmod not implemented") def getExportPath(self,path): return self.hostname+path
27.384906
83
0.539893
6,339
0.873501
0
0
0
0
0
0
1,685
0.23219
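parseFTPurl and FTPSource above are meant to be used together: the URL is parsed into mode, credentials, host, port and path, and the parts are handed to the source. A usage sketch, assuming the module is importable as yue.core.explorer.ftpsource and using a placeholder host and credentials:

from yue.core.explorer.ftpsource import parseFTPurl, FTPSource

# Placeholder URL; parseFTPurl splits it into mode/credentials/host/port/path.
cfg = parseFTPurl("ftp://user:pass@ftp.example.com:21/pub/readme.txt")
# -> {'mode': 'ftp', 'username': 'user', 'password': 'pass',
#     'hostname': 'ftp.example.com', 'port': 21, 'path': 'pub/readme.txt'}
# Note that the default-port branch above compares result['mode'] against the
# bare name ssh, which looks like it was meant to be the string 'ssh'.

src = FTPSource(cfg["hostname"], cfg["port"], cfg["username"], cfg["password"])
try:
    # FTPReader buffers the whole remote file in memory before returning it.
    with src.open("/" + cfg["path"], "rb") as f:
        data = f.read()
finally:
    src.close()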
be23cbbbbbb53c2c62b109846cda81e757eb1b58
14,527
py
Python
tests/engine/knowledge_base.py
roshanmaskey/plaso
637856f578eb4bc81f62b97d7f483f69314e7f47
[ "Apache-2.0" ]
1,253
2015-01-02T13:58:02.000Z
2022-03-31T08:43:39.000Z
tests/engine/knowledge_base.py
roshanmaskey/plaso
637856f578eb4bc81f62b97d7f483f69314e7f47
[ "Apache-2.0" ]
3,388
2015-01-02T11:17:58.000Z
2022-03-30T10:21:45.000Z
tests/engine/knowledge_base.py
roshanmaskey/plaso
637856f578eb4bc81f62b97d7f483f69314e7f47
[ "Apache-2.0" ]
376
2015-01-20T07:04:54.000Z
2022-03-04T23:53:00.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Tests for the knowledge base.""" import unittest from plaso.containers import artifacts from plaso.engine import knowledge_base from tests import test_lib as shared_test_lib class KnowledgeBaseTest(shared_test_lib.BaseTestCase): """Tests for the knowledge base.""" # pylint: disable=protected-access _MACOS_PATHS = [ '/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions', ('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/' 'apdfllckaahabafndbhieahigkjlhalf'), '/private/var/log/system.log', '/Users/frank/Library/Application Data/Google/Chrome/Default', '/Users/hans/Library/Application Data/Google/Chrome/Default', ('/Users/frank/Library/Application Data/Google/Chrome/Default/' 'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'), '/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions'] _MACOS_USERS = [ {'name': 'root', 'path': '/var/root', 'sid': '0'}, {'name': 'frank', 'path': '/Users/frank', 'sid': '4052'}, {'name': 'hans', 'path': '/Users/hans', 'sid': '4352'}, {'name': 'dude', 'path': '/Users/dude', 'sid': '1123'}] _WINDOWS_PATHS = [ 'C:\\Users\\Dude\\SomeFolder\\Chrome\\Default\\Extensions', ('C:\\Users\\Dude\\SomeNoneStandardFolder\\Chrome\\Default\\Extensions\\' 'hmjkmjkepdijhoojdojkdfohbdgmmhki'), ('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\' 'blpcfgokakmgnkcojhhkbfbldkacnbeo'), 'C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions', ('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\' 'icppfcnhkcmnfdhfhphakoifcfokfdhg'), 'C:\\Windows\\System32', 'C:\\Stuff/with path separator\\Folder'] _WINDOWS_USERS = [ {'name': 'dude', 'path': 'C:\\Users\\dude', 'sid': 'S-1'}, {'name': 'frank', 'path': 'C:\\Users\\frank', 'sid': 'S-2'}] def _SetUserAccounts(self, knowledge_base_object, users): """Sets the user accounts in the knowledge base. Args: knowledge_base_object (KnowledgeBase): knowledge base. users (list[dict[str,str])): users. 
""" for user in users: identifier = user.get('sid', user.get('uid', None)) if not identifier: continue user_account = artifacts.UserAccountArtifact( identifier=identifier, user_directory=user.get('path', None), username=user.get('name', None)) knowledge_base_object.AddUserAccount(user_account) def testCodepageProperty(self): """Tests the codepage property.""" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.codepage, 'cp1252') def testHostnameProperty(self): """Tests the hostname property.""" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.hostname, '') def testOperatingSystemProperty(self): """Tests the operating_system property.""" knowledge_base_object = knowledge_base.KnowledgeBase() operating_system = knowledge_base_object.GetValue('operating_system') self.assertIsNone(operating_system) knowledge_base_object.SetValue('operating_system', 'Windows') operating_system = knowledge_base_object.GetValue('operating_system') self.assertEqual(operating_system, 'Windows') def testTimezoneProperty(self): """Tests the timezone property.""" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.timezone.zone, 'UTC') def testUserAccountsProperty(self): """Tests the user accounts property.""" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(len(knowledge_base_object.user_accounts), 0) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertEqual(len(knowledge_base_object.user_accounts), 1) def testYearProperty(self): """Tests the year property.""" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.year, 0) def testAddUserAccount(self): """Tests the AddUserAccount function.""" knowledge_base_object = knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) with self.assertRaises(KeyError): knowledge_base_object.AddUserAccount(user_account) def testAddEnvironmentVariable(self): """Tests the AddEnvironmentVariable function.""" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) with self.assertRaises(KeyError): knowledge_base_object.AddEnvironmentVariable(environment_variable) def testGetEnvironmentVariable(self): """Tests the GetEnvironmentVariable functions.""" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'SystemRoot') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'sYsTeMrOoT') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'Bogus') self.assertIsNone(test_environment_variable) def testGetEnvironmentVariables(self): """Tests the GetEnvironmentVariables function.""" knowledge_base_object = knowledge_base.KnowledgeBase() 
environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='WinDir', value='C:\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variables = knowledge_base_object.GetEnvironmentVariables() self.assertEqual(len(environment_variables), 2) def testGetHostname(self): """Tests the GetHostname function.""" knowledge_base_object = knowledge_base.KnowledgeBase() hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, '') # TODO: add tests for GetMountPoint. def testGetSourceConfigurationArtifacts(self): """Tests the GetSourceConfigurationArtifacts function.""" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) source_configurations = ( knowledge_base_object.GetSourceConfigurationArtifacts()) self.assertEqual(len(source_configurations), 1) self.assertIsNotNone(source_configurations[0]) system_configuration = source_configurations[0].system_configuration self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') def testGetSystemConfigurationArtifact(self): """Tests the _GetSystemConfigurationArtifact function.""" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) system_configuration = ( knowledge_base_object._GetSystemConfigurationArtifact()) self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') # TODO: add tests for GetTextPrepend. 
def testGetUsernameByIdentifier(self): """Tests the GetUsernameByIdentifier function.""" knowledge_base_object = knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) usename = knowledge_base_object.GetUsernameByIdentifier('1000') self.assertEqual(usename, 'testuser') usename = knowledge_base_object.GetUsernameByIdentifier(1000) self.assertEqual(usename, '') usename = knowledge_base_object.GetUsernameByIdentifier('1001') self.assertEqual(usename, '') def testGetUsernameForPath(self): """Tests the GetUsernameForPath function.""" knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS) username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[4]) self.assertEqual(username, 'hans') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertIsNone(username) knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS) username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[2]) self.assertEqual(username, 'frank') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[2]) self.assertIsNone(username) def testGetSetValue(self): """Tests the Get and SetValue functions.""" knowledge_base_object = knowledge_base.KnowledgeBase() expected_value = 'test value' knowledge_base_object.SetValue('Test', expected_value) value = knowledge_base_object.GetValue('Test') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('tEsT') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('Bogus') self.assertIsNone(value) def testHasUserAccounts(self): """Tests the HasUserAccounts function.""" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertFalse(knowledge_base_object.HasUserAccounts()) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertTrue(knowledge_base_object.HasUserAccounts()) def testReadSystemConfigurationArtifact(self): """Tests the ReadSystemConfigurationArtifact function.""" knowledge_base_object = knowledge_base.KnowledgeBase() system_configuration = artifacts.SystemConfigurationArtifact() system_configuration.hostname = artifacts.HostnameArtifact( name='myhost.mydomain') user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') system_configuration.user_accounts.append(user_account) knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration) hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, 'myhost.mydomain') def testSetActiveSession(self): """Tests the SetActiveSession function.""" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a') self.assertEqual( knowledge_base_object._active_session, 'ddda05bedf324cbd99fa8c24b8a0037a') knowledge_base_object.SetActiveSession( knowledge_base_object._DEFAULT_ACTIVE_SESSION) self.assertEqual( knowledge_base_object._active_session, 
knowledge_base_object._DEFAULT_ACTIVE_SESSION) def testSetCodepage(self): """Tests the SetCodepage function.""" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetCodepage('cp1252') with self.assertRaises(ValueError): knowledge_base_object.SetCodepage('bogus') def testSetHostname(self): """Tests the SetHostname function.""" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) # TODO: add tests for SetMountPoint. # TODO: add tests for SetTextPrepend. def testSetTimeZone(self): """Tests the SetTimeZone function.""" knowledge_base_object = knowledge_base.KnowledgeBase() time_zone_artifact = artifacts.TimeZoneArtifact( localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112', name='Eastern Standard Time') knowledge_base_object.AddAvailableTimeZone(time_zone_artifact) # Set an IANA time zone name. knowledge_base_object.SetTimeZone('Europe/Zurich') self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich') # Set a Windows time zone name. knowledge_base_object.SetTimeZone('Eastern Standard Time') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a localized Windows time zone name. knowledge_base_object.SetTimeZone('Eastern (standaardtijd)') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a MUI form Windows time zone name. knowledge_base_object.SetTimeZone('@tzres.dll,-112') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') with self.assertRaises(ValueError): knowledge_base_object.SetTimeZone('Bogus') if __name__ == '__main__': unittest.main()
37.153453
79
0.748537
14,250
0.980932
0
0
0
0
0
0
3,773
0.259723
be247dcc0b3afb4ed9e9527cdfcf9da7e14edb83
2,244
py
Python
Problems/Dynamic Programming/140. Word Break II.py
BYJRK/LeetCode-Solutions
008467e1717309066a519acb8623d2f84071b64a
[ "MIT" ]
null
null
null
Problems/Dynamic Programming/140. Word Break II.py
BYJRK/LeetCode-Solutions
008467e1717309066a519acb8623d2f84071b64a
[ "MIT" ]
null
null
null
Problems/Dynamic Programming/140. Word Break II.py
BYJRK/LeetCode-Solutions
008467e1717309066a519acb8623d2f84071b64a
[ "MIT" ]
null
null
null
# https://leetcode.com/problems/word-break-ii/ from typing import List class Solution: def wordBreak(self, s: str, wordDict: List[str]) -> List[str]: # Quick check: if s contains a letter that none of the words contain, exit immediately set1 = set(s) set2 = set(''.join(wordDict)) if not set1.issubset(set2): return [] # dp[i] means: the ways in which the substring s[:i] can be segmented # [[]] marks the start # [None] means not visited yet, or no way to segment it # [['a', 'b'], ['ab']] means there are currently two ways to build this substring dp = [None] * (len(s) + 1) dp[0] = [[]] for i in range(len(s) + 1): # Skip if the current substring cannot be segmented if dp[i] is None: continue tmp = s[i:] for w in wordDict: idx = len(w) + i if idx > len(s): continue if tmp.startswith(w): if dp[idx] is None: dp[idx] = [] # Append every existing segmentation to the new position, adding the current word to the end of each for dic in dp[i]: dp[idx].append(dic + [w]) if dp[-1] is None: return [] return [' '.join(res) for res in dp[-1]] def wordBreak_dfs(self, s: str, wordDict: List[str]) -> List[str]: def dfs(s: str, memo={}): if s in memo: return memo[s] if len(s) == 0: return [[]] res = [] for w in wordDict: if s.startswith(w): tmp = s[len(w):] combos = dfs(tmp, memo) for combo in combos: res.append([w] + combo) memo[s] = res return res return dfs(s) s = Solution() print(s.wordBreak_dfs('catsanddog', ["cat", "cats", "and", "sand", "dog"])) print(s.wordBreak_dfs('pineapplepenapple', [ "apple", "pen", "applepen", "pine", "pineapple"])) # text = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" # words = ["a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa", # "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"] # print(s.wordBreak(text, words))
29.142857
162
0.483512
1,958
0.769049
0
0
0
0
0
0
982
0.385703
be260edf2b0780a31f443fdc8e024043c1398df0
30,595
py
Python
neutron/tests/unit/db/test_migration.py
banhr/neutron
4b3e73648327ce9f4d3437986a8663372f577f1b
[ "Apache-2.0" ]
1
2018-07-04T07:59:31.000Z
2018-07-04T07:59:31.000Z
neutron/tests/unit/db/test_migration.py
weiqiLee/neutron
ddc72ebd41a0e7804b33a21583d3add008191229
[ "Apache-2.0" ]
null
null
null
neutron/tests/unit/db/test_migration.py
weiqiLee/neutron
ddc72ebd41a0e7804b33a21583d3add008191229
[ "Apache-2.0" ]
1
2018-08-28T17:13:16.000Z
2018-08-28T17:13:16.000Z
# Copyright 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os import re import sys import textwrap from alembic.autogenerate import api as alembic_ag_api from alembic import config as alembic_config from alembic.operations import ops as alembic_ops from alembic import script as alembic_script import fixtures import mock from neutron_lib.utils import helpers from oslo_utils import fileutils import pkg_resources import sqlalchemy as sa from testtools import matchers from neutron.conf.db import migration_cli from neutron.db import migration from neutron.db.migration import autogen from neutron.db.migration import cli from neutron.tests import base from neutron.tests import tools from neutron.tests.unit import testlib_api class FakeConfig(object): service = '' class FakeRevision(object): path = 'fakepath' def __init__(self, labels=None, down_revision=None, is_branch_point=False): if not labels: labels = set() self.branch_labels = labels self.down_revision = down_revision self.is_branch_point = is_branch_point self.revision = helpers.get_random_string(10) self.module = mock.MagicMock() class MigrationEntrypointsMemento(fixtures.Fixture): '''Create a copy of the migration entrypoints map so it can be restored during test cleanup. 
''' def _setUp(self): self.ep_backup = {} for proj, ep in migration_cli.migration_entrypoints.items(): self.ep_backup[proj] = copy.copy(ep) self.addCleanup(self.restore) def restore(self): migration_cli.migration_entrypoints = self.ep_backup class TestDbMigration(base.BaseTestCase): def setUp(self): super(TestDbMigration, self).setUp() mock.patch('alembic.op.get_bind').start() self.mock_alembic_is_offline = mock.patch( 'alembic.context.is_offline_mode', return_value=False).start() self.mock_alembic_is_offline.return_value = False self.mock_sa_inspector = mock.patch( 'sqlalchemy.engine.reflection.Inspector').start() def _prepare_mocked_sqlalchemy_inspector(self): mock_inspector = mock.MagicMock() mock_inspector.get_table_names.return_value = ['foo', 'bar'] mock_inspector.get_columns.return_value = [{'name': 'foo_column'}, {'name': 'bar_column'}] self.mock_sa_inspector.from_engine.return_value = mock_inspector def test_schema_has_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_table('foo')) def test_schema_has_table_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError, migration.schema_has_table, 'foo') def test_schema_has_column_missing_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertFalse(migration.schema_has_column('meh', 'meh')) def test_schema_has_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_column('foo', 'foo_column')) def test_schema_has_column_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError, migration.schema_has_column, 'foo', 'foo_col') def test_schema_has_column_missing_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertFalse(migration.schema_has_column( 'foo', column_name='meh')) class TestCli(base.BaseTestCase): def setUp(self): super(TestCli, self).setUp() self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command') self.do_alembic_cmd = self.do_alembic_cmd_p.start() self.mock_alembic_err = mock.patch('alembic.util.err').start() self.mock_alembic_warn = mock.patch('alembic.util.warn').start() self.mock_alembic_err.side_effect = SystemExit def mocked_root_dir(cfg): return os.path.join('/fake/dir', cli._get_project_base(cfg)) mock_root = mock.patch.object(cli, '_get_package_root_dir').start() mock_root.side_effect = mocked_root_dir # Avoid creating fake directories mock.patch('oslo_utils.fileutils.ensure_tree').start() # Set up some configs and entrypoints for tests to chew on self.configs = [] self.projects = ('neutron', 'networking-foo', 'neutron-fwaas') ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini') self.useFixture(MigrationEntrypointsMemento()) migration_cli.migration_entrypoints = {} for project in self.projects: config = alembic_config.Config(ini) config.set_main_option('neutron_project', project) module_name = project.replace('-', '_') + '.db.migration' attrs = ('alembic_migrations',) script_location = ':'.join([module_name, attrs[0]]) config.set_main_option('script_location', script_location) self.configs.append(config) entrypoint = pkg_resources.EntryPoint(project, module_name, attrs=attrs) migration_cli.migration_entrypoints[project] = entrypoint def _main_test_helper(self, argv, func_name, exp_kwargs=[{}]): with mock.patch.object(sys, 'argv', argv),\ mock.patch.object(cli, 'run_sanity_checks'),\ mock.patch.object(cli, 'validate_revisions'): cli.main() def _append_version_path(args): args = copy.copy(args) 
if 'autogenerate' in args and not args['autogenerate']: args['version_path'] = mock.ANY return args self.do_alembic_cmd.assert_has_calls( [mock.call(mock.ANY, func_name, **_append_version_path(kwargs)) for kwargs in exp_kwargs] ) def test_stamp(self): self._main_test_helper( ['prog', 'stamp', 'foo'], 'stamp', [{'revision': 'foo', 'sql': False}] ) self._main_test_helper( ['prog', 'stamp', 'foo', '--sql'], 'stamp', [{'revision': 'foo', 'sql': True}] ) def _validate_cmd(self, cmd): self._main_test_helper( ['prog', cmd], cmd, [{'verbose': False}]) self._main_test_helper( ['prog', cmd, '--verbose'], cmd, [{'verbose': True}]) def test_branches(self): self._validate_cmd('branches') def test_current(self): self._validate_cmd('current') def test_history(self): self._validate_cmd('history') def test_heads(self): self._validate_cmd('heads') def test_check_migration(self): with mock.patch.object(cli, 'validate_head_files') as validate: self._main_test_helper(['prog', 'check_migration'], 'branches') self.assertEqual(len(self.projects), validate.call_count) def _test_database_sync_revision(self, separate_branches=True): with mock.patch.object(cli, 'update_head_files') as update: if separate_branches: mock.patch('os.path.exists').start() expected_kwargs = [{ 'message': 'message', 'sql': False, 'autogenerate': True, }] self._main_test_helper( ['prog', 'revision', '--autogenerate', '-m', 'message'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() expected_kwargs = [{ 'message': 'message', 'sql': True, 'autogenerate': False, 'head': cli._get_branch_head(branch) } for branch in cli.MIGRATION_BRANCHES] for kwarg in expected_kwargs: kwarg['autogenerate'] = False kwarg['sql'] = True self._main_test_helper( ['prog', 'revision', '--sql', '-m', 'message'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() expected_kwargs = [{ 'message': 'message', 'sql': False, 'autogenerate': False, 'head': 'expand@head' }] self._main_test_helper( ['prog', 'revision', '-m', 'message', '--expand'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() for kwarg in expected_kwargs: kwarg['head'] = 'contract@head' self._main_test_helper( ['prog', 'revision', '-m', 'message', '--contract'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) def test_database_sync_revision(self): self._test_database_sync_revision() def test_database_sync_revision_no_branches(self): # Test that old branchless approach is still supported self._test_database_sync_revision(separate_branches=False) def test_upgrade_revision(self): self._main_test_helper( ['prog', 'upgrade', '--sql', 'head'], 'upgrade', [{'desc': None, 'revision': 'heads', 'sql': True}] ) def test_upgrade_delta(self): self._main_test_helper( ['prog', 'upgrade', '--delta', '3'], 'upgrade', [{'desc': None, 'revision': '+3', 'sql': False}] ) def test_upgrade_revision_delta(self): self._main_test_helper( ['prog', 'upgrade', 'kilo', '--delta', '3'], 'upgrade', [{'desc': None, 'revision': 'kilo+3', 'sql': False}] ) def test_upgrade_expand(self): self._main_test_helper( ['prog', 'upgrade', '--expand'], 'upgrade', [{'desc': cli.EXPAND_BRANCH, 'revision': 'expand@head', 'sql': False}] ) def test_upgrade_expand_contract_are_mutually_exclusive(self): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--expand --contract'], 'upgrade') def 
_test_upgrade_conflicts_with_revision(self, mode): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--%s revision1' % mode], 'upgrade') def _test_upgrade_conflicts_with_delta(self, mode): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--%s +3' % mode], 'upgrade') def _test_revision_autogenerate_conflicts_with_branch(self, branch): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'revision', '--autogenerate', '--%s' % branch], 'revision') def test_revision_autogenerate_conflicts_with_expand(self): self._test_revision_autogenerate_conflicts_with_branch( cli.EXPAND_BRANCH) def test_revision_autogenerate_conflicts_with_contract(self): self._test_revision_autogenerate_conflicts_with_branch( cli.CONTRACT_BRANCH) def test_upgrade_expand_conflicts_with_revision(self): self._test_upgrade_conflicts_with_revision('expand') def test_upgrade_contract_conflicts_with_revision(self): self._test_upgrade_conflicts_with_revision('contract') def test_upgrade_expand_conflicts_with_delta(self): self._test_upgrade_conflicts_with_delta('expand') def test_upgrade_contract_conflicts_with_delta(self): self._test_upgrade_conflicts_with_delta('contract') def test_upgrade_contract(self): self._main_test_helper( ['prog', 'upgrade', '--contract'], 'upgrade', [{'desc': cli.CONTRACT_BRANCH, 'revision': 'contract@head', 'sql': False}] ) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_upgrade_milestone_expand_before_contract(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)] e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs + e_revs self._main_test_helper( ['prog', '--subproject', 'neutron', 'upgrade', 'liberty'], 'upgrade', [{'desc': cli.EXPAND_BRANCH, 'revision': e_revs[3].revision, 'sql': False}, {'desc': cli.CONTRACT_BRANCH, 'revision': c_revs[1].revision, 'sql': False}] ) def assert_command_fails(self, command): # Avoid cluttering stdout with argparse error messages mock.patch('argparse.ArgumentParser._print_message').start() with mock.patch.object(sys, 'argv', command), mock.patch.object( cli, 'run_sanity_checks'): self.assertRaises(SystemExit, cli.main) def test_downgrade_fails(self): self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno']) def test_upgrade_negative_relative_revision_fails(self): self.assert_command_fails(['prog', 'upgrade', '-2']) def test_upgrade_negative_delta_fails(self): self.assert_command_fails(['prog', 'upgrade', '--delta', '-2']) def test_upgrade_rejects_delta_with_relative_revision(self): self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3']) def _test_validate_head_files_helper(self, heads, contract_head='', expand_head=''): fake_config = self.configs[0] head_files_not_exist = (contract_head == expand_head == '') with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\ mock.patch('os.path.exists') as os_mock: if head_files_not_exist: os_mock.return_value = False else: os_mock.return_value = True fc.return_value.get_heads.return_value = heads revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ mock_open_con = self.useFixture( tools.OpenFixture(cli._get_contract_head_file_path( fake_config), 
contract_head + '\n')).mock_open mock_open_ex = self.useFixture( tools.OpenFixture(cli._get_expand_head_file_path( fake_config), expand_head + '\n')).mock_open if contract_head in heads and expand_head in heads: cli.validate_head_files(fake_config) elif head_files_not_exist: cli.validate_head_files(fake_config) self.assertTrue(self.mock_alembic_warn.called) else: self.assertRaises( SystemExit, cli.validate_head_files, fake_config ) self.assertTrue(self.mock_alembic_err.called) if contract_head in heads and expand_head in heads: mock_open_ex.assert_called_with( cli._get_expand_head_file_path(fake_config)) mock_open_con.assert_called_with( cli._get_contract_head_file_path(fake_config)) if not head_files_not_exist: fc.assert_called_once_with(fake_config) def test_validate_head_files_success(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='a', expand_head='b') def test_validate_head_files_missing_file(self): self._test_validate_head_files_helper(['a', 'b']) def test_validate_head_files_wrong_contents(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='c', expand_head='d') @mock.patch.object(fileutils, 'delete_if_exists') def test_update_head_files_success(self, *mocks): heads = ['a', 'b'] mock_open_con = self.useFixture( tools.OpenFixture(cli._get_contract_head_file_path( self.configs[0]))).mock_open mock_open_ex = self.useFixture( tools.OpenFixture(cli._get_expand_head_file_path( self.configs[0]))).mock_open with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: fc.return_value.get_heads.return_value = heads revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ cli.update_head_files(self.configs[0]) mock_open_con.return_value.write.assert_called_with( heads[0] + '\n') mock_open_ex.return_value.write.assert_called_with(heads[1] + '\n') old_head_file = cli._get_head_file_path( self.configs[0]) old_heads_file = cli._get_heads_file_path( self.configs[0]) delete_if_exists = mocks[0] self.assertIn(mock.call(old_head_file), delete_if_exists.call_args_list) self.assertIn(mock.call(old_heads_file), delete_if_exists.call_args_list) def test_get_project_base(self): config = alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d') proj_base = cli._get_project_base(config) self.assertEqual('a', proj_base) def test_get_root_versions_dir(self): config = alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d') versions_dir = cli._get_root_versions_dir(config) self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir) def test_get_subproject_script_location(self): foo_ep = cli._get_subproject_script_location('networking-foo') expected = 'networking_foo.db.migration:alembic_migrations' self.assertEqual(expected, foo_ep) def test_get_subproject_script_location_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_script_location, 'not-installed') def test_get_subproject_base_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_base, 'not-installed') def test__compare_labels_ok(self): labels = {'label1', 'label2'} fake_revision = FakeRevision(labels) cli._compare_labels(fake_revision, {'label1', 'label2'}) def test__compare_labels_fail_unexpected_labels(self): labels = {'label1', 'label2', 'label3'} fake_revision = FakeRevision(labels) self.assertRaises( SystemExit, cli._compare_labels, fake_revision, {'label1', 'label2'}) @mock.patch.object(cli, 
'_compare_labels') def test__validate_single_revision_labels_branchless_fail_different_labels( self, compare_mock): fake_down_revision = FakeRevision() fake_revision = FakeRevision(down_revision=fake_down_revision) script_dir = mock.Mock() script_dir.get_revision.return_value = fake_down_revision cli._validate_single_revision_labels(script_dir, fake_revision, label=None) expected_labels = set() compare_mock.assert_has_calls( [mock.call(revision, expected_labels) for revision in (fake_revision, fake_down_revision)] ) @mock.patch.object(cli, '_compare_labels') def test__validate_single_revision_labels_branches_fail_different_labels( self, compare_mock): fake_down_revision = FakeRevision() fake_revision = FakeRevision(down_revision=fake_down_revision) script_dir = mock.Mock() script_dir.get_revision.return_value = fake_down_revision cli._validate_single_revision_labels( script_dir, fake_revision, label='fakebranch') expected_labels = {'fakebranch'} compare_mock.assert_has_calls( [mock.call(revision, expected_labels) for revision in (fake_revision, fake_down_revision)] ) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branches(self, validate_mock): script_dir = mock.Mock() fake_revision = FakeRevision() branch = cli.MIGRATION_BRANCHES[0] fake_revision.path = os.path.join('/fake/path', branch) cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with( script_dir, fake_revision, label=branch) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branchless_migrations( self, validate_mock): script_dir = mock.Mock() fake_revision = FakeRevision() cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with(script_dir, fake_revision) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_walks_thru_all_revisions( self, walk_mock, validate_mock): revisions = [FakeRevision() for i in range(10)] walk_mock.return_value = revisions cli.validate_revisions(self.configs[0]) validate_mock.assert_has_calls( [mock.call(mock.ANY, revision) for revision in revisions] ) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_fails_on_multiple_branch_points( self, walk_mock, validate_mock): revisions = [FakeRevision(is_branch_point=True) for i in range(2)] walk_mock.return_value = revisions self.assertRaises( SystemExit, cli.validate_revisions, self.configs[0]) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__get_branch_points(self, walk_mock): revisions = [FakeRevision(is_branch_point=tools.get_random_boolean) for i in range(50)] walk_mock.return_value = revisions script_dir = alembic_script.ScriptDirectory.from_config( self.configs[0]) self.assertEqual(set(rev for rev in revisions if rev.is_branch_point), set(cli._get_branch_points(script_dir))) @mock.patch.object(cli, '_get_version_branch_path') def test_autogen_process_directives(self, get_version_branch_path): get_version_branch_path.side_effect = lambda cfg, release, branch: ( "/foo/expand" if branch == 'expand' else "/foo/contract") migration_script = alembic_ops.MigrationScript( 'eced083f5df', # these directives will be split into separate # expand/contract scripts alembic_ops.UpgradeOps( ops=[ alembic_ops.CreateTableOp( 'organization', [ sa.Column('id', sa.Integer(), primary_key=True), sa.Column('name', sa.String(50), nullable=False) ] ), 
alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.AddColumnOp( 'user', sa.Column('organization_id', sa.Integer()) ), alembic_ops.CreateForeignKeyOp( 'org_fk', 'user', 'organization', ['organization_id'], ['id'] ), alembic_ops.DropConstraintOp( 'user', 'uq_user_org' ), alembic_ops.DropColumnOp( 'user', 'organization_name' ) ] ) ] ), # these will be discarded alembic_ops.DowngradeOps( ops=[ alembic_ops.AddColumnOp( 'user', sa.Column( 'organization_name', sa.String(50), nullable=True) ), alembic_ops.CreateUniqueConstraintOp( 'uq_user_org', 'user', ['user_name', 'organization_name'] ), alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.DropConstraintOp('org_fk', 'user'), alembic_ops.DropColumnOp('user', 'organization_id') ] ), alembic_ops.DropTableOp('organization') ] ), message='create the organization table and ' 'replace user.organization_name' ) directives = [migration_script] autogen.process_revision_directives( mock.Mock(), mock.Mock(), directives ) expand = directives[0] contract = directives[1] self.assertEqual("/foo/expand", expand.version_path) self.assertEqual("/foo/contract", contract.version_path) self.assertTrue(expand.downgrade_ops.is_empty()) self.assertTrue(contract.downgrade_ops.is_empty()) def _get_regex(s): s = textwrap.dedent(s) s = re.escape(s) # alembic 0.8.9 added additional leading '# ' before comments return s.replace('\\#\\#\\#\\ ', '(# )?### ') expected_regex = ("""\ ### commands auto generated by Alembic - please adjust! ### op.create_table('organization', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=50), nullable=False), sa.PrimaryKeyConstraint('id') ) op.add_column('user', """ """sa.Column('organization_id', sa.Integer(), nullable=True)) op.create_foreign_key('org_fk', 'user', """ """'organization', ['organization_id'], ['id']) ### end Alembic commands ###""") self.assertThat( alembic_ag_api.render_python_code(expand.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) expected_regex = ("""\ ### commands auto generated by Alembic - please adjust! 
### op.drop_constraint('user', 'uq_user_org', type_=None) op.drop_column('user', 'organization_name') ### end Alembic commands ###""") self.assertThat( alembic_ag_api.render_python_code(contract.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_one_branch(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.CONTRACT_BRANCH) self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.EXPAND_BRANCH) self.assertEqual(0, len(m)) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_two_branches(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)] e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs + e_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(2, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_branchless(self, walk_mock): revisions = [FakeRevision() for r in range(5)] revisions[2].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = revisions m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) class TestSafetyChecks(base.BaseTestCase): def test_validate_revisions(self, *mocks): cli.validate_revisions(cli.get_neutron_config())
40.469577
79
0.610557
29,237
0.955614
0
0
12,368
0.404249
0
0
5,402
0.176565
be26276b9a7545ff4607b3e77287b80155ccbf7d
959
py
Python
withdrawal/floor_ceiling.py
hoostus/prime-harvesting
6606b94ea7859fbf217dbea4ace856e3fa4d154e
[ "BlueOak-1.0.0", "Apache-2.0" ]
23
2016-09-07T06:13:37.000Z
2022-02-17T23:49:03.000Z
withdrawal/floor_ceiling.py
hoostus/prime-harvesting
6606b94ea7859fbf217dbea4ace856e3fa4d154e
[ "BlueOak-1.0.0", "Apache-2.0" ]
null
null
null
withdrawal/floor_ceiling.py
hoostus/prime-harvesting
6606b94ea7859fbf217dbea4ace856e3fa4d154e
[ "BlueOak-1.0.0", "Apache-2.0" ]
12
2016-06-30T17:27:39.000Z
2021-12-12T07:54:27.000Z
from decimal import Decimal from .abc import WithdrawalStrategy # Bengen's Floor-to-Ceiling, as described in McClung's Living Off Your Money class FloorCeiling(WithdrawalStrategy): def __init__(self, portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25): super().__init__(portfolio, harvest_strategy) self.floor = Decimal(floor) self.ceiling = Decimal(ceiling) self.rate = Decimal(rate) def start(self): amount = self.rate * self.portfolio.value self.initial_amount = amount return amount def next(self): amount = self.rate * self.portfolio.value initial_amount_inflation_adjusted = self.initial_amount * self.cumulative_inflation floor = initial_amount_inflation_adjusted * self.floor ceiling = initial_amount_inflation_adjusted * self.ceiling amount = max(amount, floor) amount = min(amount, ceiling) return amount
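# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# FloorCeiling needs a portfolio and harvest strategy from the surrounding package, so
# the helper below only demonstrates the floor/ceiling clamping rule with hypothetical
# numbers: an initial withdrawal of 50,000, 10% cumulative inflation and a proposed 5%
# withdrawal from a 900,000 portfolio. The function name is invented for this sketch.
def _floor_ceiling_example():
    initial_amount = Decimal('50000')
    cumulative_inflation = Decimal('1.10')
    proposed = Decimal('0.05') * Decimal('900000')    # 45,000 from the current portfolio

    adjusted = initial_amount * cumulative_inflation  # 55,000 in today's money
    floor = adjusted * Decimal('0.9')                 # withdraw no less than 49,500
    ceiling = adjusted * Decimal('1.25')              # withdraw no more than 68,750

    return min(max(proposed, floor), ceiling)         # 49,500 -- the floor kicks in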
30.935484
91
0.693431
815
0.849844
0
0
0
0
0
0
76
0.079249
be2647506be1ffc3fcefa8eacc15a737776b73ab
8,288
py
Python
20190426/6_BME280_WiFi/bme280.py
rcolistete/MicroPython_MiniCurso_ProjOrientado
c82affe833587141c4c05ee08ea84b095bfe845f
[ "MIT" ]
null
null
null
20190426/6_BME280_WiFi/bme280.py
rcolistete/MicroPython_MiniCurso_ProjOrientado
c82affe833587141c4c05ee08ea84b095bfe845f
[ "MIT" ]
null
null
null
20190426/6_BME280_WiFi/bme280.py
rcolistete/MicroPython_MiniCurso_ProjOrientado
c82affe833587141c4c05ee08ea84b095bfe845f
[ "MIT" ]
null
null
null
""" MicroPython driver for Bosh BME280 temperature, pressure and humidity I2C sensor: https://www.bosch-sensortec.com/bst/products/all_products/bme280 Authors: Nelio Goncalves Godoi, Roberto Colistete Jr Version: 3.1.2 @ 2018/04 License: MIT License (https://opensource.org/licenses/MIT) """ import time from ustruct import unpack, unpack_from from array import array # BME280 default address BME280_I2CADDR = 0x76 # BME280_I2CADDR = 0x77 OSAMPLE_0 = 0 OSAMPLE_1 = 1 OSAMPLE_2 = 2 OSAMPLE_4 = 3 OSAMPLE_8 = 4 OSAMPLE_16 = 5 BME280_REGISTER_STATUS = 0xF3 BME280_REGISTER_CONTROL_HUM = 0xF2 BME280_REGISTER_CONTROL = 0xF4 BME280_REGISTER_CONTROL_IIR = 0xF5 FILTER_OFF = 0 FILTER_2 = 1 FILTER_4 = 2 FILTER_8 = 3 FILTER_16 = 4 CELSIUS = 'C' FAHRENHEIT = 'F' KELVIN = 'K' class BME280(object): def __init__(self, temperature_mode=OSAMPLE_2, pressure_mode=OSAMPLE_16, humidity_mode=OSAMPLE_1, temperature_scale=CELSIUS, iir=FILTER_16, address=BME280_I2CADDR, i2c=None): osamples = [ OSAMPLE_0, OSAMPLE_1, OSAMPLE_2, OSAMPLE_4, OSAMPLE_8, OSAMPLE_16] msg_error = 'Unexpected {} operating mode value {0}.' if temperature_mode not in osamples: raise ValueError(msg_error.format("temperature", temperature_mode)) self.temperature_mode = temperature_mode if pressure_mode not in osamples: raise ValueError(msg_error.format("pressure", pressure_mode)) self.pressure_mode = pressure_mode if humidity_mode not in osamples: raise ValueError(msg_error.format("humidity", humidity_mode)) self.humidity_mode = humidity_mode msg_error = 'Unexpected low pass IIR filter setting value {0}.' if iir not in [FILTER_OFF, FILTER_2, FILTER_4, FILTER_8, FILTER_16]: raise ValueError(msg_error.format(iir)) self.iir = iir msg_error = 'Unexpected temperature scale value {0}.' if temperature_scale not in [CELSIUS, FAHRENHEIT, KELVIN]: raise ValueError(msg_error.format(temperature_scale)) self.temperature_scale = temperature_scale del msg_error self.address = address if i2c is None: raise ValueError('An I2C object is required.') self.i2c = i2c dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26) dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7) self.dig_T1, self.dig_T2, self.dig_T3, self.dig_P1, \ self.dig_P2, self.dig_P3, self.dig_P4, self.dig_P5, \ self.dig_P6, self.dig_P7, self.dig_P8, self.dig_P9, \ _, self.dig_H1 = unpack("<HhhHhhhhhhhhBB", dig_88_a1) self.dig_H2, self.dig_H3 = unpack("<hB", dig_e1_e7) e4_sign = unpack_from("<b", dig_e1_e7, 3)[0] self.dig_H4 = (e4_sign << 4) | (dig_e1_e7[4] & 0xF) e6_sign = unpack_from("<b", dig_e1_e7, 5)[0] self.dig_H5 = (e6_sign << 4) | (dig_e1_e7[4] >> 4) self.dig_H6 = unpack_from("<b", dig_e1_e7, 6)[0] self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL, bytearray([0x24])) time.sleep(0.002) self.t_fine = 0 self._l1_barray = bytearray(1) self._l8_barray = bytearray(8) self._l3_resultarray = array("i", [0, 0, 0]) self._l1_barray[0] = self.iir << 2 self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL_IIR, self._l1_barray) time.sleep(0.002) self._l1_barray[0] = self.humidity_mode self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL_HUM, self._l1_barray) def read_raw_data(self, result): self._l1_barray[0] = ( self.pressure_mode << 5 | self.temperature_mode << 2 | 1) self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL, self._l1_barray) osamples_1_16 = [ OSAMPLE_1, OSAMPLE_2, OSAMPLE_4, OSAMPLE_8, OSAMPLE_16] sleep_time = 1250 if self.temperature_mode in osamples_1_16: sleep_time += 2300*(1 << self.temperature_mode) if self.pressure_mode in osamples_1_16: sleep_time += 575 
+ (2300*(1 << self.pressure_mode)) if self.humidity_mode in osamples_1_16: sleep_time += 575 + (2300*(1 << self.humidity_mode)) time.sleep_us(sleep_time) while (unpack('<H', self.i2c.readfrom_mem( self.address, BME280_REGISTER_STATUS, 2))[0] & 0x08): time.sleep(0.001) self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray) readout = self._l8_barray raw_press = ((readout[0] << 16) | (readout[1] << 8) | readout[2]) >> 4 raw_temp = ((readout[3] << 16) | (readout[4] << 8) | readout[5]) >> 4 raw_hum = (readout[6] << 8) | readout[7] result[0] = raw_temp result[1] = raw_press result[2] = raw_hum def read_compensated_data(self, result=None): """ Get raw data and compensa the same """ self.read_raw_data(self._l3_resultarray) raw_temp, raw_press, raw_hum = self._l3_resultarray var1 = ((raw_temp >> 3) - (self.dig_T1 << 1)) * (self.dig_T2 >> 11) var2 = (raw_temp >> 4) - self.dig_T1 var2 = var2 * ((raw_temp >> 4) - self.dig_T1) var2 = ((var2 >> 12) * self.dig_T3) >> 14 self.t_fine = var1 + var2 temp = (self.t_fine * 5 + 128) >> 8 var1 = self.t_fine - 128000 var2 = var1 * var1 * self.dig_P6 var2 = var2 + ((var1 * self.dig_P5) << 17) var2 = var2 + (self.dig_P4 << 35) var1 = (((var1 * var1 * self.dig_P3) >> 8) + ((var1 * self.dig_P2) << 12)) var1 = (((1 << 47) + var1) * self.dig_P1) >> 33 if var1 == 0: pressure = 0 else: p = 1048576 - raw_press p = (((p << 31) - var2) * 3125) // var1 var1 = (self.dig_P9 * (p >> 13) * (p >> 13)) >> 25 var2 = (self.dig_P8 * p) >> 19 pressure = ((p + var1 + var2) >> 8) + (self.dig_P7 << 4) h = self.t_fine - 76800 h = (((((raw_hum << 14) - (self.dig_H4 << 20) - (self.dig_H5 * h)) + 16384) >> 15) * (((((((h * self.dig_H6) >> 10) * (((h * self.dig_H3) >> 11) + 32768)) >> 10) + 2097152) * self.dig_H2 + 8192) >> 14)) h = h - (((((h >> 15) * (h >> 15)) >> 7) * self.dig_H1) >> 4) h = 0 if h < 0 else h h = 419430400 if h > 419430400 else h humidity = h >> 12 if result: result[0] = temp result[1] = pressure result[2] = humidity return result return array("i", (temp, pressure, humidity)) @property def values(self): temp, pres, humi = self.read_compensated_data() temp = temp/100 if self.temperature_scale == 'F': temp = 32 + (temp*1.8) elif self.temperature_scale == 'K': temp = temp + 273.15 pres = pres/256 humi = humi/1024 return (temp, pres, humi) @property def formated_values(self): t, p, h = self.values temp = "{} "+self.temperature_scale return (temp.format(t), "{} Pa".format(p), "{} %".format(h)) @property def temperature(self): t, _, _ = self.values return t @property def pressure(self): _, p, _ = self.values return p @property def pressure_precision(self): _, p, _ = self.read_compensated_data() pi = float(p // 256) pd = (p % 256)/256 return (pi, pd) @property def humidity(self): _, _, h = self.values return h def altitude(self, pressure_sea_level=1013.25): pi, pd = self.pressure_precision() return 44330*(1-((float(pi+pd)/100)/pressure_sea_level)**(1/5.255))
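# --- Hedged usage sketch (added for illustration, not part of the original driver) ---
# Reading the sensor from MicroPython on an ESP32-style board. The I2C bus id, pin
# numbers and frequency below are assumptions about the wiring, not values taken from
# this repository; adjust them to match your board and module address.
if __name__ == '__main__':
    from machine import I2C, Pin

    i2c = I2C(0, scl=Pin(22), sda=Pin(21), freq=100000)
    sensor = BME280(i2c=i2c, address=BME280_I2CADDR)
    print(sensor.values)           # (temperature, pressure in Pa, relative humidity in %)
    print(sensor.formated_values)  # e.g. ('24.1 C', '101325.0 Pa', '45.2 %')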
33.554656
81
0.558518
7,512
0.906371
0
0
945
0.11402
0
0
651
0.078547
be2674ce54565aac0c872fd9c167bb04e3da2fda
9,749
py
Python
airflow/contrib/secrets/hashicorp_vault.py
colpal/airfloss
1857cf309b69d4c2d60e9bb67f731eb01d0ecda1
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
airflow/contrib/secrets/hashicorp_vault.py
colpal/airfloss
1857cf309b69d4c2d60e9bb67f731eb01d0ecda1
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
7
2020-10-05T18:20:16.000Z
2022-02-01T00:54:35.000Z
airflow/contrib/secrets/hashicorp_vault.py
colpal/airfloss
1857cf309b69d4c2d60e9bb67f731eb01d0ecda1
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
1
2020-10-21T03:22:43.000Z
2020-10-21T03:22:43.000Z
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Objects relating to sourcing connections & variables from Hashicorp Vault """ from typing import Optional import hvac from cached_property import cached_property from hvac.exceptions import InvalidPath, VaultError from airflow.exceptions import AirflowException from airflow.secrets import BaseSecretsBackend from airflow.utils.log.logging_mixin import LoggingMixin class VaultBackend(BaseSecretsBackend, LoggingMixin): """ Retrieves Connections and Variables from Hashicorp Vault Configurable via ``airflow.cfg`` as follows: .. code-block:: ini [secrets] backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend backend_kwargs = { "connections_path": "connections", "url": "http://127.0.0.1:8200", "mount_point": "airflow" } For example, if your keys are under ``connections`` path in ``airflow`` mount_point, this would be accessible if you provide ``{"connections_path": "connections"}`` and request conn_id ``smtp_default``. :param connections_path: Specifies the path of the secret to read to get Connections. (default: 'connections') :type connections_path: str :param variables_path: Specifies the path of the secret to read to get Variables. (default: 'variables') :type variables_path: str :param config_path: Specifies the path of the secret to read Airflow Configurations (default: 'configs'). :type config_path: str :param url: Base URL for the Vault instance being addressed. :type url: str :param auth_type: Authentication Type for Vault (one of 'token', 'ldap', 'userpass', 'approle', 'github', 'gcp', 'kubernetes'). Default is ``token``. :type auth_type: str :param mount_point: The "path" the secret engine was mounted on. (Default: ``secret``) :type mount_point: str :param token: Authentication token to include in requests sent to Vault. 
(for ``token`` and ``github`` auth_type) :type token: str :param kv_engine_version: Select the version of the engine to run (``1`` or ``2``, default: ``2``) :type kv_engine_version: int :param username: Username for Authentication (for ``ldap`` and ``userpass`` auth_type) :type username: str :param password: Password for Authentication (for ``ldap`` and ``userpass`` auth_type) :type password: str :param role_id: Role ID for Authentication (for ``approle`` auth_type) :type role_id: str :param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type) :type kubernetes_role: str :param kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes`` auth_type, deafult: ``/var/run/secrets/kubernetes.io/serviceaccount/token``) :type kubernetes_jwt_path: str :param secret_id: Secret ID for Authentication (for ``approle`` auth_type) :type secret_id: str :param gcp_key_path: Path to GCP Credential JSON file (for ``gcp`` auth_type) :type gcp_key_path: str :param gcp_scopes: Comma-separated string containing GCP scopes (for ``gcp`` auth_type) :type gcp_scopes: str """ def __init__( # pylint: disable=too-many-arguments self, connections_path='connections', # type: str variables_path='variables', # type: str config_path='config', # type: str url=None, # type: Optional[str] auth_type='token', # type: str mount_point='secret', # type: str kv_engine_version=2, # type: int token=None, # type: Optional[str] username=None, # type: Optional[str] password=None, # type: Optional[str] role_id=None, # type: Optional[str] kubernetes_role=None, # type: Optional[str] kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type: str secret_id=None, # type: Optional[str] gcp_key_path=None, # type: Optional[str] gcp_scopes=None, # type: Optional[str] **kwargs ): super(VaultBackend, self).__init__() self.connections_path = connections_path.rstrip('/') if variables_path != None: self.variables_path = variables_path.rstrip('/') else: self.variables_path = variables_path self.config_path = config_path.rstrip('/') self.url = url self.auth_type = auth_type self.kwargs = kwargs self.token = token self.username = username self.password = password self.role_id = role_id self.kubernetes_role = kubernetes_role self.kubernetes_jwt_path = kubernetes_jwt_path self.secret_id = secret_id self.mount_point = mount_point self.kv_engine_version = kv_engine_version self.gcp_key_path = gcp_key_path self.gcp_scopes = gcp_scopes @cached_property def client(self): # type: () -> hvac.Client """ Return an authenticated Hashicorp Vault client """ _client = hvac.Client(url=self.url, **self.kwargs) if self.auth_type == "token": if not self.token: raise VaultError("token cannot be None for auth_type='token'") _client.token = self.token elif self.auth_type == "ldap": _client.auth.ldap.login( username=self.username, password=self.password) elif self.auth_type == "userpass": _client.auth_userpass(username=self.username, password=self.password) elif self.auth_type == "approle": _client.auth_approle(role_id=self.role_id, secret_id=self.secret_id) elif self.auth_type == "kubernetes": if not self.kubernetes_role: raise VaultError("kubernetes_role cannot be None for auth_type='kubernetes'") with open(self.kubernetes_jwt_path) as f: jwt = f.read() _client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt) elif self.auth_type == "github": _client.auth.github.login(token=self.token) elif self.auth_type == "gcp": from airflow.contrib.utils.gcp_credentials_provider import ( get_credentials_and_project_id, _get_scopes ) 
scopes = _get_scopes(self.gcp_scopes) credentials, _ = get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes) _client.auth.gcp.configure(credentials=credentials) else: raise AirflowException("Authentication type '{}' not supported".format(self.auth_type)) if _client.is_authenticated(): return _client else: raise VaultError("Vault Authentication Error!") def get_conn_uri(self, conn_id): # type: (str) -> Optional[str] """ Get secret value from Vault. Store the secret in the form of URI :param conn_id: connection id :type conn_id: str """ response = self._get_secret(self.connections_path, conn_id) return response.get("conn_uri") if response else None def get_variable(self, key): # type: (str) -> Optional[str] """ Get Airflow Variable :param key: Variable Key :return: Variable Value """ if self.variables_path == None: return None else: response = self._get_secret(self.variables_path, key) return response.get("value") if response else None def _get_secret(self, path_prefix, secret_id): # type: (str, str) -> Optional[dict] """ Get secret value from Vault. :param path_prefix: Prefix for the Path to get Secret :type path_prefix: str :param secret_id: Secret Key :type secret_id: str """ secret_path = self.build_path(path_prefix, secret_id) try: if self.kv_engine_version == 1: response = self.client.secrets.kv.v1.read_secret( path=secret_path, mount_point=self.mount_point ) else: response = self.client.secrets.kv.v2.read_secret_version( path=secret_path, mount_point=self.mount_point) except InvalidPath: self.log.info("Secret %s not found in Path: %s", secret_id, secret_path) return None return_data = response["data"] if self.kv_engine_version == 1 else response["data"]["data"] return return_data def get_config(self, key): # type: (str) -> Optional[str] """ Get Airflow Configuration :param key: Configuration Option Key :type key: str :rtype: str :return: Configuration Option Value retrieved from the vault """ response = self._get_secret(self.config_path, key) return response.get("value") if response else None
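# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# In Airflow this backend is normally enabled through the [secrets] section of
# airflow.cfg as shown in the class docstring. It can also be exercised directly; the
# URL, token, mount point and key names below are placeholders for a local dev Vault,
# not values taken from this codebase.
if __name__ == '__main__':
    backend = VaultBackend(
        connections_path='connections',
        variables_path='variables',
        url='http://127.0.0.1:8200',
        auth_type='token',
        token='dev-only-token',
        mount_point='airflow',
    )
    print(backend.get_conn_uri('smtp_default'))
    print(backend.get_variable('some_variable_key'))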
40.452282
102
0.647656
8,587
0.880808
0
0
1,897
0.194584
0
0
5,188
0.532157
be27d0cf506bd514ef2b8fd412eba196789b1b66
6,347
py
Python
Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py
AdamCoscia/eve-trajectory-mining
134f142a5665f66fbf92aada8dd6252fab64ddff
[ "MIT" ]
null
null
null
Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py
AdamCoscia/eve-trajectory-mining
134f142a5665f66fbf92aada8dd6252fab64ddff
[ "MIT" ]
null
null
null
Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py
AdamCoscia/eve-trajectory-mining
134f142a5665f66fbf92aada8dd6252fab64ddff
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Computes distance between killmails by text similarity. Edit Distance Metrics - Levenshtein Distance - Damerau-Levenshtein Distance - Jaro Distance - Jaro-Winkler Distance - Match Rating Approach Comparison - Hamming Distance Vector Distance Metrics - Jaccard Similarity - Cosine Distance Written By: Adam Coscia Updated On: 11/09/2019 """ # Start timing import time start = time.time() total = 0 def lap(msg): """Records time elapsed.""" global start, total elapsed = (time.time() - start) - total total = time.time() - start if elapsed > 3600: print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}') elif elapsed > 60: if total > 3600: print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}') else: print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}') else: if total > 3600: print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}') elif total > 60: print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}') else: print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}') lap("Importing modules...") from ast import literal_eval from functools import reduce import os import sys import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel def get_long_text_cosine_distance(los1, los2): """Calculates cosine distance between two killmails' item lists. 1. Converts collection of long text items to raw document representation. 2. Converts the collection of raw documents to a matrix of TF-IDF features using TfidfVectorizer (combines vector counting and TF-IDF calculator). 3. Computes cosine similarity between feature vectors. Uses linear kernel since TF-IDF matrix will be normalized already. Arguments: los1: First document, a list of raw strings. los2: Second document, a list of raw strings. Returns: cosine distance as a value between 0-1, with 1 being identical. """ if type(los1) == float or type(los2) == float: return 0 if len(los1) == 0 or len(los2) == 0: return 0 doc1 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los1]) # Create bag of words doc2 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los2]) # Create bag of words tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance return cos_dist def get_short_text_cosine_distance(los1, los2): """Calculates cosine distance between two killmails' item lists. 1. Converts collection of short text items to raw document representation. 2. Converts the collection of raw documents to a matrix of TF-IDF features using TfidfVectorizer (combines vector counting and TF-IDF calculator). 3. Computes cosine similarity between feature vectors. Uses linear kernel since TF-IDF matrix will be normalized already. Arguments: los1: First document, a list of raw strings. los2: Second document, a list of raw strings. Returns: cosine distance as a value between 0-1, with 1 being identical and 0 being complete different. 
""" if type(los1) == float or type(los2) == float: return 0 if len(los1) == 0 or len(los2) == 0: return 0 doc1 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los1]) # Create bag of words doc2 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los2]) # Create bag of words tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance return cos_dist # Load CSV from local file lap("Loading CSV data from local file...") df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8') df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill']) df = df.dropna() # Convert items column to correct data type lap("Converting 'item' column value types...") df['items'] = df['items'].apply(literal_eval) # Group DataFrame by character_id and compute distance series for each group lap("Computing cosine distances and change in kd by grouping character_id's...") groupby = df.groupby('character_id') # group dataframe by character_id num_groups = len(groupby) # get number of groups count = 0 # current group number out of number of groups groups = [] # list to append modified group dataframes to for name, gp in groupby: # Order the observations and prepare the dataframe gp = (gp.sort_values(by=['killmail_id']) .reset_index() .drop('index', axis=1)) # Generate change in kills over change in deaths and change in kd ratio kills1 = gp['k_count'] kills2 = gp['k_count'].shift() deaths1 = gp['d_count'] deaths2 = gp['d_count'].shift() idx = len(gp.columns) gp.insert(idx, 'del_kdratio', (kills2 - kills1) / (deaths2 - deaths1)) gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift()) # Generate pairs of observations sequentially to compare pairs = [] items1 = gp['items'] items2 = gp['items'].shift() for i in range(1, len(gp)): # Start from 1 to avoid adding nan pair los1 = items1.iloc[i] los2 = items2.iloc[i] pairs.append((los2, los1)) # Generate distance series using pairs list and different metrics # start distance series with nan due to starting range at 1 cos_dist_lt = [np.nan] # cosine distance b/w long text BoW cos_dist_st = [np.nan] # cosine distance b/w short text BoW for pair in pairs: cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1])) cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1])) idx = len(gp.columns) gp.insert(idx, 'cos_dist_lt', cos_dist_lt) gp.insert(idx, 'cos_dist_st', cos_dist_st) groups.append(gp) # Record progress count += 1 print(f"Progress {count/num_groups:2.1%}", end="\r") lap("Concatenating resulting groups and writing to file...") df_res = pd.concat(groups) df_res.to_csv(f'data/useable_victims_distancesAndKD.csv') lap("Exit")
37.556213
92
0.669293
0
0
0
0
0
0
0
0
3,564
0.561525
be28146fdfcf8ed2a16239294869650841f46a74
1,181
py
Python
src/chess/utils.py
Dalkio/custom-alphazero
e24ee8c646a37bf9509b99ca6c96d3f6e69ee4db
[ "MIT" ]
null
null
null
src/chess/utils.py
Dalkio/custom-alphazero
e24ee8c646a37bf9509b99ca6c96d3f6e69ee4db
[ "MIT" ]
6
2020-08-13T13:02:58.000Z
2022-02-10T02:21:49.000Z
src/chess/utils.py
Dalkio/custom-alphazero
e24ee8c646a37bf9509b99ca6c96d3f6e69ee4db
[ "MIT" ]
null
null
null
import numpy as np from itertools import product from typing import List from src.config import ConfigChess from src.chess.board import Board from src.chess.move import Move def get_all_possible_moves() -> List[Move]: all_possible_moves = set() array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype("int8") for i, j, piece in product( range(ConfigChess.board_size), range(ConfigChess.board_size), ["Q", "N"] ): array[i][j] = Board.piece_symbol_to_int(piece) all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[i][j] = 0 # underpromotion moves array[1, :] = Board.piece_symbol_to_int("P") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[0, :] = Board.piece_symbol_to_int("p") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) # no need to add castling moves: they have already be added with queen moves under UCI notation return sorted(list(all_possible_moves))
36.90625
99
0.686706
0
0
0
0
0
0
0
0
135
0.11431
be2868ed0261dc37f256c2a99990b52d127544a4
1,845
py
Python
multirotor.py
christymarc/mfac
29449a0c79e618059fa6f67ae7ab76711543c513
[ "MIT" ]
null
null
null
multirotor.py
christymarc/mfac
29449a0c79e618059fa6f67ae7ab76711543c513
[ "MIT" ]
null
null
null
multirotor.py
christymarc/mfac
29449a0c79e618059fa6f67ae7ab76711543c513
[ "MIT" ]
1
2022-03-01T05:00:02.000Z
2022-03-01T05:00:02.000Z
from random import gauss class MultiRotor: """Simple vertical dynamics for a multirotor vehicle.""" GRAVITY = -9.81 def __init__( self, altitude=10, velocity=0, mass=1.54, emc=10.0, dt=0.05, noise=0.1 ): """ Args: altitude (float): initial altitude of the vehicle velocity (float): initial velocity of the vehicle mass (float): mass of the vehicle emc (float): electromechanical constant for the vehicle dt (float): simulation time step noise (float): standard deviation of normally distributed simulation noise """ self.y0 = altitude self.y1 = velocity self.mass = mass self.emc = emc self.dt = dt self.noise = noise def step(self, effort): """Advance the multirotor simulation and apply motor forces. Args: effort (float): related to the upward thrust of the vehicle, it must be >= 0 Return: The current state (altitude, velocity) of the vehicle. """ effort = max(0, effort) scaled_effort = self.emc / self.mass * effort net_acceleration = MultiRotor.GRAVITY - 0.75 * self.y1 + scaled_effort # Don't let the vehcicle fall through the ground if self.y0 <= 0 and net_acceleration < 0: y0dot = 0 y1dot = 0 else: y0dot = self.y1 y1dot = net_acceleration self.y0 += y0dot * self.dt self.y1 += y1dot * self.dt self.y0 += gauss(0, self.noise) return self.y0, self.y1 def get_altitude(self): """Return the current altitude.""" return self.y0 def get_delta_time(self): """Return the simulation time step.""" return self.dt
27.132353
86
0.566938
1,817
0.984824
0
0
0
0
0
0
852
0.461789
be286e006cd7ef8775677a3d599b4cc9bc55f723
6,329
py
Python
stpmex/client.py
cuenca-mx/stpmex-python
93f630cd05cea927b32f5aeb5f9b958c4ee91af9
[ "MIT" ]
37
2019-01-06T02:52:38.000Z
2022-03-17T21:19:48.000Z
stpmex/client.py
cuenca-mx/stpmex-python
93f630cd05cea927b32f5aeb5f9b958c4ee91af9
[ "MIT" ]
204
2018-09-05T22:55:33.000Z
2022-03-31T23:21:13.000Z
stpmex/client.py
cuenca-mx/stpmex-python
93f630cd05cea927b32f5aeb5f9b958c4ee91af9
[ "MIT" ]
20
2018-09-17T15:29:51.000Z
2022-02-03T06:29:32.000Z
import re from typing import Any, ClassVar, Dict, List, NoReturn, Union from cryptography.exceptions import UnsupportedAlgorithm from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization from requests import Response, Session from .exc import ( AccountDoesNotExist, BankCodeClabeMismatch, ClaveRastreoAlreadyInUse, DuplicatedAccount, InvalidAccountType, InvalidAmount, InvalidField, InvalidInstitution, InvalidPassphrase, InvalidRfcOrCurp, InvalidTrackingKey, MandatoryField, NoOrdenesEncontradas, NoServiceResponse, PldRejected, SameAccount, SignatureValidationError, StpmexException, ) from .resources import CuentaFisica, Orden, Resource, Saldo from .version import __version__ as client_version DEMO_HOST = 'https://demo.stpmex.com:7024' PROD_HOST = 'https://prod.stpmex.com' class Client: base_url: str soap_url: str session: Session # resources cuentas: ClassVar = CuentaFisica ordenes: ClassVar = Orden saldos: ClassVar = Saldo def __init__( self, empresa: str, priv_key: str, priv_key_passphrase: str, demo: bool = False, base_url: str = None, soap_url: str = None, timeout: tuple = None, ): self.timeout = timeout self.session = Session() self.session.headers['User-Agent'] = f'stpmex-python/{client_version}' if demo: host_url = DEMO_HOST self.session.verify = False else: host_url = PROD_HOST self.session.verify = True self.base_url = base_url or f'{host_url}/speiws/rest' self.soap_url = ( soap_url or f'{host_url}/spei/webservices/SpeiConsultaServices' ) try: self.pkey = serialization.load_pem_private_key( priv_key.encode('utf-8'), priv_key_passphrase.encode('ascii'), default_backend(), ) except (ValueError, TypeError, UnsupportedAlgorithm): raise InvalidPassphrase Resource.empresa = empresa Resource._client = self def post( self, endpoint: str, data: Dict[str, Any] ) -> Union[Dict[str, Any], List[Any]]: return self.request('post', endpoint, data) def put( self, endpoint: str, data: Dict[str, Any] ) -> Union[Dict[str, Any], List[Any]]: return self.request('put', endpoint, data) def delete( self, endpoint: str, data: Dict[str, Any] ) -> Union[Dict[str, Any], List[Any]]: return self.request('delete', endpoint, data) def request( self, method: str, endpoint: str, data: Dict[str, Any], **kwargs: Any ) -> Union[Dict[str, Any], List[Any]]: url = self.base_url + endpoint response = self.session.request( method, url, json=data, timeout=self.timeout, **kwargs, ) self._check_response(response) resultado = response.json() if 'resultado' in resultado: # Some responses are enveloped resultado = resultado['resultado'] return resultado @staticmethod def _check_response(response: Response) -> None: if not response.ok: response.raise_for_status() resp = response.json() if isinstance(resp, dict): try: _raise_description_error_exc(resp) except KeyError: ... try: assert resp['descripcion'] _raise_description_exc(resp) except (AssertionError, KeyError): ... 
response.raise_for_status() def _raise_description_error_exc(resp: Dict) -> NoReturn: id = resp['resultado']['id'] error = resp['resultado']['descripcionError'] if id == 0 and error == 'No se recibió respuesta del servicio': raise NoServiceResponse(**resp['resultado']) elif id == 0 and error == 'Error validando la firma': raise SignatureValidationError(**resp['resultado']) elif id == 0 and re.match(r'El campo .+ es obligatorio', error): raise MandatoryField(**resp['resultado']) elif id == -1 and re.match( r'La clave de rastreo .+ ya fue utilizada', error ): raise ClaveRastreoAlreadyInUse(**resp['resultado']) elif id == -7 and re.match(r'La cuenta .+ no existe', error): raise AccountDoesNotExist(**resp['resultado']) elif id == -9 and re.match(r'La Institucion \d+ no es valida', error): raise InvalidInstitution(**resp['resultado']) elif id == -11 and re.match(r'El tipo de cuenta \d+ es invalido', error): raise InvalidAccountType(**resp['resultado']) elif id == -20 and re.match(r'El monto {.+} no es válido', error): raise InvalidAmount(**resp['resultado']) elif id == -22 and 'no coincide para la institucion operante' in error: raise BankCodeClabeMismatch(**resp['resultado']) elif id == -24 and re.match(r'Cuenta {\d+} - {MISMA_CUENTA}', error): raise SameAccount(**resp['resultado']) elif id == -34 and 'Clave rastreo invalida' in error: raise InvalidTrackingKey(**resp['resultado']) elif id == -100 and error.startswith('No se encontr'): raise NoOrdenesEncontradas elif id == -200 and 'Se rechaza por PLD' in error: raise PldRejected(**resp['resultado']) else: raise StpmexException(**resp['resultado']) def _raise_description_exc(resp: Dict) -> NoReturn: id = resp['id'] desc = resp['descripcion'] if id == 0 and 'Cuenta en revisión' in desc: # STP regresa esta respuesta cuando se registra # una cuenta. No se levanta excepción porque # todas las cuentas pasan por este status. ... elif id == 1 and desc == 'rfc/curp invalido': raise InvalidRfcOrCurp(**resp) elif id == 1 and re.match(r'El campo \w+ es invalido', desc): raise InvalidField(**resp) elif id == 3 and desc == 'Cuenta Duplicada': raise DuplicatedAccount(**resp) elif id == 5 and re.match(r'El campo .* obligatorio \w+', desc): raise MandatoryField(**resp) else: raise StpmexException(**resp)
34.026882
78
0.618739
2,871
0.45334
0
0
525
0.082899
0
0
1,132
0.178746
be288cac85f4b858cc1c87f0fce298bec6844670
4,770
py
Python
aql/tests/types/aql_test_list_types.py
menify/sandbox
32166c71044f0d5b414335b2b6559adc571f568c
[ "MIT" ]
null
null
null
aql/tests/types/aql_test_list_types.py
menify/sandbox
32166c71044f0d5b414335b2b6559adc571f568c
[ "MIT" ]
null
null
null
aql/tests/types/aql_test_list_types.py
menify/sandbox
32166c71044f0d5b414335b2b6559adc571f568c
[ "MIT" ]
null
null
null
import sys import os.path import timeit sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..') )) from aql_tests import skip, AqlTestCase, runLocalTests from aql.util_types import UniqueList, SplitListType, List, ValueListType #//===========================================================================// class TestListTypes( AqlTestCase ): def test_unique_list(self): ul = UniqueList( [1,2,3,2,1,3] ); ul.selfTest() self.assertEqual( ul, [2,3,1]) self.assertEqual( list(ul), [1,2,3]) ul = UniqueList() ul.append( 1 ); ul.selfTest() ul.append( 3 ); ul.selfTest() ul.append( 1 ); ul.selfTest() ul.append( 2 ); ul.selfTest() ul.append( 3 ); ul.selfTest() ul.append( 1 ); ul.selfTest() self.assertEqual( list(ul), [1,3,2]) ul.append_front( 2 ); ul.selfTest() self.assertEqual( list(ul), [2,1,3]) ul.extend( [4,1,2,2,5] ); ul.selfTest() self.assertEqual( list(ul), [2,1,3,4,5]) ul.extend_front( [1,2,2,3,1,1,5,5] ); ul.selfTest() self.assertEqual( list(ul), [1,2,3,5,4]) self.assertEqual( list(ul), [1,2,3,5,4]) ul.remove( 1 ); ul.selfTest() self.assertEqual( list(ul), [2,3,5,4]) ul.remove( 5 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) ul.remove( 55 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) self.assertEqual( ul.pop(), 4 ); ul.selfTest() self.assertEqual( ul.pop_front(), 2 ); ul.selfTest() self.assertEqual( ul.pop_front(), 3 ); ul.selfTest() ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest() self.assertEqual( list(ul), [1,2,3,4,5]) ul -= [2,2,2,4,33]; ul.selfTest() self.assertEqual( list(ul), [1,3,5]) self.assertEqual( ul[0], 1) self.assertEqual( ul[2], 5) self.assertEqual( ul[1], 3) self.assertIn( 1, ul) self.assertEqual( list(reversed(ul)), [5,3,1]) ul.reverse(); ul.selfTest() self.assertEqual( ul, [5,3,1] ) ul.reverse(); ul.selfTest() self.assertEqual( str(ul), "[1, 3, 5]" ) self.assertEqual( ul, UniqueList([1, 3, 5]) ) self.assertEqual( ul, UniqueList(ul) ) self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) ) self.assertLess( UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] ) #//===========================================================================// def test_splitlist(self): l = SplitListType( List, ", \t\n\r" )("1,2, 3,,, \n\r\t4") self.assertEqual( l, ['1','2','3','4'] ) self.assertEqual( l, "1,2,3,4" ) self.assertEqual( l, "1 2 3 4" ) self.assertEqual( str(l), "1,2,3,4" ) l += "7, 8" self.assertEqual( l, ['1','2','3','4','7','8'] ) l -= "2, 3" self.assertEqual( l, ['1','4','7','8'] ) l -= "5" self.assertEqual( l, ['1','4','7','8'] ) l.extend_front( "10,12" ) self.assertEqual( l, ['10','12','1','4','7','8'] ) l.extend( "0,-1" ) self.assertEqual( l, ['10','12','1','4','7','8', '0', '-1'] ) #//===========================================================================// def test_valuelist(self): l = SplitListType( ValueListType( List, int ), ", \t\n\r" )("1,2, 3,,, \n\r\t4") self.assertEqual( l, [1,2,3,4] ) self.assertEqual( l, "1,2,3,4" ) self.assertEqual( l, "1 2 3 4" ) self.assertEqual( str(l), "1,2,3,4" ) l += [7, 8] self.assertEqual( l, ['1','2','3','4','7','8'] ) l += 78 self.assertEqual( l, ['1','2','3','4','7','8', 78] ) l -= 78 self.assertEqual( l, ['1','2','3','4','7','8'] ) l -= "2, 3" self.assertEqual( l, ['1','4','7','8'] ) l -= "5" self.assertEqual( l, ['1','4','7','8'] ) l.extend_front( "10,12" ) self.assertEqual( l, ['10','12','1','4','7','8'] ) l.extend( "0,-1" ) self.assertEqual( l, [10,12,1,4,7,8,0,-1] ) l[0] = "5" self.assertEqual( l, [5,12,1,4,7,8,0,-1] ) 
#//===========================================================================// def test_list(self): l = List([1,2,3,4]) self.assertEqual( l, [1,2,3,4] ) l += [7, 8] self.assertEqual( l, [1,2,3,4,7,8] ) l += 78 self.assertEqual( l, [1,2,3,4,7,8,78] ) l -= 78 self.assertEqual( l, [1,2,3,4,7,8] ) l -= [2, 3] self.assertEqual( l, [1,4,7,8] ) l -= 5 self.assertEqual( l, [1,4,7,8] ) l.extend_front( [10,12] ) self.assertEqual( l, [10,12,1,4,7,8] ) l.extend( [0,-1] ) self.assertEqual( l, [10,12,1,4,7,8, 0, -1] ) #//===========================================================================// if __name__ == "__main__": runLocalTests()
27.413793
88
0.469182
4,297
0.900839
0
0
0
0
0
0
789
0.165409
be2a32ef4dd37c381a36c7a58f2812962caeb4d5
502
py
Python
logger_application/logger.py
swatishayna/OnlineEDAAutomation
a1bfe8b1dee51a4872529a98f6e1136922329e3e
[ "MIT" ]
1
2022-03-24T20:26:44.000Z
2022-03-24T20:26:44.000Z
logger_application/logger.py
surajaiswal13/OnlineEDAAutomation
a1bfe8b1dee51a4872529a98f6e1136922329e3e
[ "MIT" ]
null
null
null
logger_application/logger.py
surajaiswal13/OnlineEDAAutomation
a1bfe8b1dee51a4872529a98f6e1136922329e3e
[ "MIT" ]
2
2022-02-08T16:35:32.000Z
2022-03-04T06:56:54.000Z
from datetime import datetime from src.utils import uploaded_file import os class App_Logger: def __init__(self): pass def log(self, file_object, email, log_message, log_writer_id): self.now = datetime.now() self.date = self.now.date() self.current_time = self.now.strftime("%H:%M:%S") file_object.write( email+ "_eda_" + log_writer_id + "\t\t" +str(self.date) + "/" + str(self.current_time) + "\t\t" +email+ "\t\t" +log_message +"\n")
27.888889
143
0.621514
423
0.842629
0
0
0
0
0
0
42
0.083665
be2a7a241325332e4117c63de7ba8c5d1c491871
332
py
Python
metasync/params.py
dstarikov/metavault
1933cc6cd828ee9c594a45a78238a9a319de0143
[ "MIT" ]
1
2019-05-28T15:59:35.000Z
2019-05-28T15:59:35.000Z
metasync/params.py
dstarikov/metavault
1933cc6cd828ee9c594a45a78238a9a319de0143
[ "MIT" ]
null
null
null
metasync/params.py
dstarikov/metavault
1933cc6cd828ee9c594a45a78238a9a319de0143
[ "MIT" ]
null
null
null
# config params KB = 1024 MB = 1024*KB GB = 1024*MB # name of meta root dir META_DIR = ".metasync" # batching time for daemon SYNC_WAIT = 3 # blob size BLOB_UNIT = 32*MB # Increase of Paxos proposal number PAXOS_PNUM_INC = 10 # authentication directory import os AUTH_DIR = os.path.join(os.path.expanduser("~"), ".metasync")
15.090909
61
0.713855
0
0
0
0
0
0
0
0
162
0.487952
be2be4ab8f891e1d119f0c6cbe7bc4c566727644
547
py
Python
py/tests/test_valid_parentheses.py
Dragonway/LeetCode
53ed9e9bcc1ed6955b013e0d37d2a684c2ec7135
[ "MIT" ]
null
null
null
py/tests/test_valid_parentheses.py
Dragonway/LeetCode
53ed9e9bcc1ed6955b013e0d37d2a684c2ec7135
[ "MIT" ]
null
null
null
py/tests/test_valid_parentheses.py
Dragonway/LeetCode
53ed9e9bcc1ed6955b013e0d37d2a684c2ec7135
[ "MIT" ]
null
null
null
import unittest from py.tests.utils import test from py import valid_parentheses as vp class TestValidParentheses(unittest.TestCase): @test(vp.Solution.is_valid) def test_valid_parentheses(self) -> None: test("()", result=True) test("()[]{}", result=True) test("(]", result=False) test("([)]", result=False) test("{[]}", result=True) test("", result=True) test(")()", result=False) test("(())((())))", result=False)
30.388889
46
0.521024
457
0.835466
0
0
405
0.740402
0
0
48
0.087751
be2c413f1972d5571cb52206e64c8dffe9762a99
2,503
py
Python
hitnet/hitnet.py
AchintyaSrivastava/HITNET-Stereo-Depth-estimation
90654dafc8c8bdf5c17079d3cb8bf7ad6d3da166
[ "MIT" ]
38
2021-09-05T13:59:11.000Z
2022-03-28T14:18:30.000Z
hitnet/hitnet.py
AchintyaSrivastava/HITNET-Stereo-Depth-estimation
90654dafc8c8bdf5c17079d3cb8bf7ad6d3da166
[ "MIT" ]
3
2021-11-25T08:21:01.000Z
2022-03-07T08:22:11.000Z
hitnet/hitnet.py
AchintyaSrivastava/HITNET-Stereo-Depth-estimation
90654dafc8c8bdf5c17079d3cb8bf7ad6d3da166
[ "MIT" ]
5
2021-09-05T23:15:10.000Z
2022-02-10T08:32:00.000Z
import tensorflow as tf import numpy as np import time import cv2 from hitnet.utils_hitnet import * drivingStereo_config = CameraConfig(0.546, 1000) class HitNet(): def __init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config): self.fps = 0 self.timeLastPrediction = time.time() self.frameCounter = 0 self.camera_config = camera_config # Initialize model self.model = self.initialize_model(model_path, model_type) def __call__(self, left_img, right_img): return self.estimate_disparity(left_img, right_img) def initialize_model(self, model_path, model_type): self.model_type = model_type with tf.io.gfile.GFile(model_path, "rb") as f: graph_def = tf.compat.v1.GraphDef() loaded = graph_def.ParseFromString(f.read()) # Wrap frozen graph to ConcreteFunctions if self.model_type == ModelType.flyingthings: model = wrap_frozen_graph(graph_def=graph_def, inputs="input:0", outputs=["reference_output_disparity:0","secondary_output_disparity:0"]) else: model = wrap_frozen_graph(graph_def=graph_def, inputs="input:0", outputs="reference_output_disparity:0") return model def estimate_disparity(self, left_img, right_img): input_tensor = self.prepare_input(left_img, right_img) # Perform inference on the image if self.model_type == ModelType.flyingthings: left_disparity, right_disparity = self.inference(input_tensor) self.disparity_map = left_disparity else: self.disparity_map = self.inference(input_tensor) return self.disparity_map def get_depth(self): return self.camera_config.f*self.camera_config.baseline/self.disparity_map def prepare_input(self, left_img, right_img): if (self.model_type == ModelType.eth3d): # Shape (1, None, None, 2) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY) left_img = np.expand_dims(left_img,2) right_img = np.expand_dims(right_img,2) combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0 else: # Shape (1, None, None, 6) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB) combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0 return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32) def inference(self, input_tensor): output = self.model(input_tensor) return np.squeeze(output)
25.804124
96
0.742709
2,339
0.934479
0
0
0
0
0
0
254
0.101478
be2cf6688bc9f36adc898b8d1394b2bd6f967ed1
854
py
Python
fobi_custom/plugins/form_elements/fields/intercept/household_tenure/fobi_form_elements.py
datamade/just-spaces
cc2b7d1518e5da65a403413d39a309fa3e2ac122
[ "MIT" ]
6
2019-04-09T06:52:31.000Z
2021-08-31T04:31:59.000Z
fobi_custom/plugins/form_elements/fields/intercept/household_tenure/fobi_form_elements.py
datamade/just-spaces
cc2b7d1518e5da65a403413d39a309fa3e2ac122
[ "MIT" ]
176
2019-01-11T21:05:50.000Z
2021-03-16T17:04:13.000Z
fobi_custom/plugins/form_elements/fields/intercept/household_tenure/fobi_form_elements.py
datamade/just-spaces
cc2b7d1518e5da65a403413d39a309fa3e2ac122
[ "MIT" ]
1
2019-05-10T15:30:25.000Z
2019-05-10T15:30:25.000Z
from django import forms from fobi.base import FormFieldPlugin, form_element_plugin_registry from .forms import HouseholdTenureForm class HouseholdTenurePlugin(FormFieldPlugin): """HouseholdTenurePlugin.""" uid = "household_tenure" name = "What year did you move into your current address?" form = HouseholdTenureForm group = "Intercept" # Group to which the plugin belongs to def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs): field_kwargs = { 'required': self.data.required, 'label': self.data.label, 'widget': forms.widgets.NumberInput(attrs={}), } return [(self.data.name, forms.IntegerField, field_kwargs)] form_element_plugin_registry.register(HouseholdTenurePlugin)
29.448276
70
0.686183
654
0.765808
0
0
0
0
0
0
171
0.200234
076c3b7d76dce4361980237fd24f6e7d24b9f302
368
py
Python
utils/scripts/OOOlevelGen/src/sprites/__init__.py
fullscreennl/monkeyswipe
c56192e202674dd5ab18023f6cf14cf51e95fbd0
[ "MIT" ]
null
null
null
utils/scripts/OOOlevelGen/src/sprites/__init__.py
fullscreennl/monkeyswipe
c56192e202674dd5ab18023f6cf14cf51e95fbd0
[ "MIT" ]
null
null
null
utils/scripts/OOOlevelGen/src/sprites/__init__.py
fullscreennl/monkeyswipe
c56192e202674dd5ab18023f6cf14cf51e95fbd0
[ "MIT" ]
null
null
null
__all__ = ['EnemyBucketWithStar', 'Nut', 'Beam', 'Enemy', 'Friend', 'Hero', 'Launcher', 'Rotor', 'SpikeyBuddy', 'Star', 'Wizard', 'EnemyEquipedRotor', 'CyclingEnemyObject', 'Joints', 'Bomb', 'Contacts']
21.647059
33
0.366848
0
0
0
0
0
0
0
0
160
0.434783
076ca6ec3c064417c645687635c5d40cf01c07b7
29,159
py
Python
code/trainer.py
mazzaAnt/StackGAN-v2
dcf696f34bc8e360179eec9e7f2e9e66eec8b9a0
[ "MIT" ]
1
2019-02-04T20:45:51.000Z
2019-02-04T20:45:51.000Z
code/trainer.py
mazzaAnt/StackGAN-v2
dcf696f34bc8e360179eec9e7f2e9e66eec8b9a0
[ "MIT" ]
null
null
null
code/trainer.py
mazzaAnt/StackGAN-v2
dcf696f34bc8e360179eec9e7f2e9e66eec8b9a0
[ "MIT" ]
null
null
null
from __future__ import print_function from six.moves import range import torchvision.transforms as transforms import torch.backends.cudnn as cudnn import torch import torch.nn as nn from torch.autograd import Variable import torch.optim as optim import torchvision.utils as vutils import numpy as np import os import time from PIL import Image, ImageFont, ImageDraw from copy import deepcopy from miscc.config import cfg from miscc.utils import mkdir_p from CaptionDatasets import * from tensorboard import summary from tensorboard import FileWriter from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3 # ################## Shared functions ################### def compute_mean_covariance(img): batch_size = img.size(0) channel_num = img.size(1) height = img.size(2) width = img.size(3) num_pixels = height * width # batch_size * channel_num * 1 * 1 mu = img.mean(2, keepdim=True).mean(3, keepdim=True) # batch_size * channel_num * num_pixels img_hat = img - mu.expand_as(img) img_hat = img_hat.view(batch_size, channel_num, num_pixels) # batch_size * num_pixels * channel_num img_hat_transpose = img_hat.transpose(1, 2) # batch_size * channel_num * channel_num covariance = torch.bmm(img_hat, img_hat_transpose) covariance = covariance / num_pixels return mu, covariance def KL_loss(mu, logvar): # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar) KLD = torch.mean(KLD_element).mul_(-0.5) return KLD def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.orthogonal(m.weight.data, 1.0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: nn.init.orthogonal(m.weight.data, 1.0) if m.bias is not None: m.bias.data.fill_(0.0) def load_params(model, new_param): for p, new_p in zip(model.parameters(), new_param): p.data.copy_(new_p) def copy_G_params(model): flatten = deepcopy(list(p.data for p in model.parameters())) return flatten def compute_inception_score(predictions, num_splits=1): # print('predictions', predictions.shape) scores = [] for i in range(num_splits): istart = i * predictions.shape[0] // num_splits iend = (i + 1) * predictions.shape[0] // num_splits part = predictions[istart:iend, :] kl = part * \ (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0))) kl = np.mean(np.sum(kl, 1)) scores.append(np.exp(kl)) return np.mean(scores), np.std(scores) def negative_log_posterior_probability(predictions, num_splits=1): # print('predictions', predictions.shape) scores = [] for i in range(num_splits): istart = i * predictions.shape[0] // num_splits iend = (i + 1) * predictions.shape[0] // num_splits part = predictions[istart:iend, :] result = -1. 
* np.log(np.max(part, 1)) result = np.mean(result) scores.append(result) return np.mean(scores), np.std(scores) def load_network(gpus): netG = G_NET() netG.apply(weights_init) netG = torch.nn.DataParallel(netG, device_ids=gpus) print(netG) netsD = [] if cfg.TREE.BRANCH_NUM > 0: netsD.append(D_NET64()) if cfg.TREE.BRANCH_NUM > 1: netsD.append(D_NET128()) if cfg.TREE.BRANCH_NUM > 2: netsD.append(D_NET256()) if cfg.TREE.BRANCH_NUM > 3: netsD.append(D_NET512()) if cfg.TREE.BRANCH_NUM > 4: netsD.append(D_NET1024()) # TODO: if cfg.TREE.BRANCH_NUM > 5: for i in range(len(netsD)): netsD[i].apply(weights_init) netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus) # print(netsD[i]) print('# of netsD', len(netsD)) count = 0 if cfg.TRAIN.NET_G != '': state_dict = torch.load(cfg.TRAIN.NET_G) netG.load_state_dict(state_dict) print('Load ', cfg.TRAIN.NET_G) istart = cfg.TRAIN.NET_G.rfind('_') + 1 iend = cfg.TRAIN.NET_G.rfind('.') count = cfg.TRAIN.NET_G[istart:iend] count = int(count) + 1 if cfg.TRAIN.NET_D != '': for i in range(len(netsD)): print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i)) state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i)) netsD[i].load_state_dict(state_dict) inception_model = INCEPTION_V3() if cfg.CUDA: netG.cuda() for i in range(len(netsD)): netsD[i].cuda() inception_model = inception_model.cuda() inception_model.eval() return netG, netsD, len(netsD), inception_model, count def define_optimizers(netG, netsD): optimizersD = [] num_Ds = len(netsD) for i in range(num_Ds): opt = optim.Adam(netsD[i].parameters(), lr=cfg.TRAIN.DISCRIMINATOR_LR, betas=(0.5, 0.999)) optimizersD.append(opt) # G_opt_paras = [] # for p in netG.parameters(): # if p.requires_grad: # G_opt_paras.append(p) optimizerG = optim.Adam(netG.parameters(), lr=cfg.TRAIN.GENERATOR_LR, betas=(0.5, 0.999)) return optimizerG, optimizersD def save_model(netG, avg_param_G, netsD, epoch, model_dir): load_params(netG, avg_param_G) torch.save( netG.state_dict(), '%s/netG_%d.pth' % (model_dir, epoch)) for i in range(len(netsD)): netD = netsD[i] torch.save( netD.state_dict(), '%s/netD%d.pth' % (model_dir, i)) print('Save G/Ds models.') def save_real(imgs_tcpu, image_dir): num = cfg.TRAIN.VIS_COUNT # The range of real_img (i.e., self.imgs_tcpu[i][0:num]) # is changed to [0, 1] by function vutils.save_image real_img = imgs_tcpu[-1][0:num] vutils.save_image( real_img, '%s/real_samples.png' % (image_dir), normalize=True) real_img_set = vutils.make_grid(real_img).numpy() real_img_set = np.transpose(real_img_set, (1, 2, 0)) real_img_set = real_img_set * 255 real_img_set = real_img_set.astype(np.uint8) sup_real_img = summary.image('real_img', real_img_set) def save_img_results(imgs_tcpu, fake_imgs, num_imgs, count, image_dir, summary_writer): num = cfg.TRAIN.VIS_COUNT # The range of real_img (i.e., self.imgs_tcpu[i][0:num]) # is changed to [0, 1] by function vutils.save_image real_img = imgs_tcpu[-1][0:num] vutils.save_image( real_img, '%s/real_samples.png' % (image_dir), normalize=True) real_img_set = vutils.make_grid(real_img).numpy() real_img_set = np.transpose(real_img_set, (1, 2, 0)) real_img_set = real_img_set * 255 real_img_set = real_img_set.astype(np.uint8) sup_real_img = summary.image('real_img', real_img_set) summary_writer.add_summary(sup_real_img, count) for i in range(num_imgs): fake_img = fake_imgs[i][0:num] # The range of fake_img.data (i.e., self.fake_imgs[i][0:num]) # is still [-1. 1]... 
vutils.save_image( fake_img.data, '%s/count_%09d_fake_samples_%d.png' % (image_dir, count, i), normalize=True) fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy() fake_img_set = np.transpose(fake_img_set, (1, 2, 0)) fake_img_set = (fake_img_set + 1) * 255 / 2 fake_img_set = fake_img_set.astype(np.uint8) sup_fake_img = summary.image('fake_img%d' % i, fake_img_set) summary_writer.add_summary(sup_fake_img, count) summary_writer.flush() # ################# Text to image task############################ # class condGANTrainer(object): def __init__(self, output_dir, data_loader, imsize): if cfg.TRAIN.FLAG: self.model_dir = os.path.join(output_dir, 'Model') self.image_dir = os.path.join(output_dir, 'Image') self.log_dir = os.path.join(output_dir, 'Log') mkdir_p(self.model_dir) mkdir_p(self.image_dir) mkdir_p(self.log_dir) self.summary_writer = FileWriter(self.log_dir) s_gpus = cfg.GPU_ID.split(',') self.gpus = [int(ix) for ix in s_gpus] self.num_gpus = len(self.gpus) torch.cuda.set_device(self.gpus[0]) cudnn.benchmark = True self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus self.max_epoch = cfg.TRAIN.MAX_EPOCH self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL self.data_loader = data_loader self.num_batches = len(self.data_loader) def prepare_data(self, data): imgs, w_imgs, t_embedding, _ = data real_vimgs, wrong_vimgs = [], [] if cfg.CUDA: vembedding = Variable(t_embedding).cuda() else: vembedding = Variable(t_embedding) for i in range(self.num_Ds): if cfg.CUDA: real_vimgs.append(Variable(imgs[i]).cuda()) wrong_vimgs.append(Variable(w_imgs[i]).cuda()) else: real_vimgs.append(Variable(imgs[i])) wrong_vimgs.append(Variable(w_imgs[i])) return imgs, real_vimgs, wrong_vimgs, vembedding def train_Dnet(self, idx, count): flag = count % 100 batch_size = self.real_imgs[0].size(0) criterion, mu = self.criterion, self.mu netD, optD = self.netsD[idx], self.optimizersD[idx] real_imgs = self.real_imgs[idx] wrong_imgs = self.wrong_imgs[idx] fake_imgs = self.fake_imgs[idx] # netD.zero_grad() # Forward real_labels = self.real_labels[:batch_size] fake_labels = self.fake_labels[:batch_size] # for real real_logits = netD(real_imgs, mu.detach()) wrong_logits = netD(wrong_imgs, mu.detach()) fake_logits = netD(fake_imgs.detach(), mu.detach()) # errD_real = criterion(real_logits[0], real_labels) errD_wrong = criterion(wrong_logits[0], fake_labels) errD_fake = criterion(fake_logits[0], fake_labels) if len(real_logits) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0: errD_real_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \ criterion(real_logits[1], real_labels) errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \ criterion(wrong_logits[1], real_labels) errD_fake_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \ criterion(fake_logits[1], fake_labels) # errD_real = errD_real + errD_real_uncond errD_wrong = errD_wrong + errD_wrong_uncond errD_fake = errD_fake + errD_fake_uncond # errD = errD_real + errD_wrong + errD_fake else: errD = errD_real + 0.5 * (errD_wrong + errD_fake) # backward errD.backward() # update parameters optD.step() # log if flag == 0: summary_D = summary.scalar('D_loss%d' % idx, errD.item()) self.summary_writer.add_summary(summary_D, count) return errD def train_Gnet(self, count): self.netG.zero_grad() errG_total = 0 flag = count % 100 batch_size = self.real_imgs[0].size(0) criterion, mu, logvar = self.criterion, self.mu, self.logvar real_labels = self.real_labels[:batch_size] for i in range(self.num_Ds): outputs = self.netsD[i](self.fake_imgs[i], mu) errG = criterion(outputs[0], real_labels) if len(outputs) > 1 
and cfg.TRAIN.COEFF.UNCOND_LOSS > 0: errG_patch = cfg.TRAIN.COEFF.UNCOND_LOSS *\ criterion(outputs[1], real_labels) errG = errG + errG_patch errG_total = errG_total + errG if flag == 0: summary_D = summary.scalar('G_loss%d' % i, errG.item()) self.summary_writer.add_summary(summary_D, count) # Compute color consistency losses if cfg.TRAIN.COEFF.COLOR_LOSS > 0: if self.num_Ds > 1: mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-1]) mu2, covariance2 = \ compute_mean_covariance(self.fake_imgs[-2].detach()) like_mu2 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2) like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \ nn.MSELoss()(covariance1, covariance2) errG_total = errG_total + like_mu2 + like_cov2 if flag == 0: sum_mu = summary.scalar('G_like_mu2', like_mu2.item()) self.summary_writer.add_summary(sum_mu, count) sum_cov = summary.scalar('G_like_cov2', like_cov2.item()) self.summary_writer.add_summary(sum_cov, count) if self.num_Ds > 2: mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-2]) mu2, covariance2 = \ compute_mean_covariance(self.fake_imgs[-3].detach()) like_mu1 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2) like_cov1 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \ nn.MSELoss()(covariance1, covariance2) errG_total = errG_total + like_mu1 + like_cov1 if flag == 0: sum_mu = summary.scalar('G_like_mu1', like_mu1.item()) self.summary_writer.add_summary(sum_mu, count) sum_cov = summary.scalar('G_like_cov1', like_cov1.item()) self.summary_writer.add_summary(sum_cov, count) kl_loss = KL_loss(mu, logvar) * cfg.TRAIN.COEFF.KL errG_total = errG_total + kl_loss # Postpone the backward propagation # errG_total.backward() # self.optimizerG.step() return kl_loss, errG_total def train(self): self.netG, self.netsD, self.num_Ds,\ self.inception_model, start_count = load_network(self.gpus) avg_param_G = copy_G_params(self.netG) self.optimizerG, self.optimizersD = \ define_optimizers(self.netG, self.netsD) self.criterion = nn.BCELoss() self.SATcriterion = nn.CrossEntropyLoss() self.real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1)) self.fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0)) self.gradient_one = torch.FloatTensor([1.0]) self.gradient_half = torch.FloatTensor([0.5]) nz = cfg.GAN.Z_DIM noise = Variable(torch.FloatTensor(self.batch_size, nz)) fixed_noise = Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1)) # Data parameters data_folder = 'birds_output' # folder with data files saved by create_input_files.py data_name = 'CUB_5_cap_per_img_5_min_word_freq' # base name shared by data files normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # Show, Attend, and Tell Dataloader train_loader = torch.utils.data.DataLoader( CaptionDataset(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])), batch_size=self.batch_size, shuffle=True, num_workers=int(cfg.WORKERS), pin_memory=True) if cfg.CUDA: self.criterion.cuda() self.SATcriterion.cuda() # Compute SATloss self.real_labels = self.real_labels.cuda() self.fake_labels = self.fake_labels.cuda() self.gradient_one = self.gradient_one.cuda() self.gradient_half = self.gradient_half.cuda() noise, fixed_noise = noise.cuda(), fixed_noise.cuda() predictions = [] count = start_count start_epoch = start_count // (self.num_batches) for epoch in range(start_epoch, self.max_epoch): start_t = time.time() # for step, data in enumerate(self.data_loader, 0): for step, data in enumerate(zip(self.data_loader, train_loader), 0): data_1 = data[0] _, caps, 
caplens = data[1] data = data_1 ####################################################### # (0) Prepare training data ###################################################### self.imgs_tcpu, self.real_imgs, self.wrong_imgs, \ self.txt_embedding = self.prepare_data(data) # Testing line for real samples if epoch == start_epoch and step == 0: print ('Checking real samples at first...') save_real(self.imgs_tcpu, self.image_dir) ####################################################### # (1) Generate fake images ###################################################### noise.data.normal_(0, 1) self.fake_imgs, self.mu, self.logvar = \ self.netG(noise, self.txt_embedding) # len(self.fake_imgs) = NUM_BRANCHES # self.fake_imgs[0].shape = [batch_size, 3, 64, 64] # self.fake_imgs[1].shape = [batch_size, 3, 128, 128] # self.fake_imgs[2].shape = [batch_size, 3, 256, 256] ####################################################### # (*) Forward fake images to SAT ###################################################### from SATmodels import Encoder, DecoderWithAttention from torch.nn.utils.rnn import pack_padded_sequence fine_tune_encoder = False # Read word map word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json') with open(word_map_file, 'r') as j: word_map = json.load(j) # Define the encoder/decoder structure for SAT model decoder = DecoderWithAttention(attention_dim=512, embed_dim=512, decoder_dim=512, vocab_size=len(word_map), dropout=0.5).cuda() decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()), lr=4e-4) encoder = Encoder().cuda() encoder.fine_tune(fine_tune_encoder) encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()), lr=1e-4) if fine_tune_encoder else None SATloss = 0 # Compute the SAT loss after forwarding the SAT model for idx in range(len(self.fake_imgs)): img = encoder(self.fake_imgs[idx]) scores, caps_sorted, decode_lengths, alphas, sort_ind = decoder(img, caps, caplens) targets = caps_sorted[:, 1:] scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True).cuda() targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True).cuda() SATloss += self.SATcriterion(scores, targets) + 1 * ((1. 
- alphas.sum(dim=1)) ** 2).mean() # Set zero_grad for encoder/decoder decoder_optimizer.zero_grad() if encoder_optimizer is not None: encoder_optimizer.zero_grad() ####################################################### # (2) Update D network ###################################################### errD_total = 0 for i in range(self.num_Ds): errD = self.train_Dnet(i, count) errD_total += errD ####################################################### # (3) Update G network: maximize log(D(G(z))) ###################################################### kl_loss, errG_total = self.train_Gnet(count) for p, avg_p in zip(self.netG.parameters(), avg_param_G): avg_p.mul_(0.999).add_(0.001, p.data) # Combine with G and SAT first, then back propagation errG_total += SATloss errG_total.backward() self.optimizerG.step() ####################################################### # (*) Update SAT network: ###################################################### # Update weights decoder_optimizer.step() if encoder_optimizer is not None: encoder_optimizer.step() ####################################################### # (*) Prediction and Inception score: ###################################################### pred = self.inception_model(self.fake_imgs[-1].detach()) predictions.append(pred.data.cpu().numpy()) if count % 100 == 0: summary_D = summary.scalar('D_loss', errD_total.item()) summary_G = summary.scalar('G_loss', errG_total.item()) summary_KL = summary.scalar('KL_loss', kl_loss.item()) self.summary_writer.add_summary(summary_D, count) self.summary_writer.add_summary(summary_G, count) self.summary_writer.add_summary(summary_KL, count) count += 1 ####################################################### # (*) Save Images/Log/Model per SNAPSHOT_INTERVAL: ###################################################### if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0: save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir) # Save images backup_para = copy_G_params(self.netG) load_params(self.netG, avg_param_G) # self.fake_imgs, _, _ = self.netG(fixed_noise, self.txt_embedding) save_img_results(self.imgs_tcpu, self.fake_imgs, self.num_Ds, count, self.image_dir, self.summary_writer) # load_params(self.netG, backup_para) # Compute inception score if len(predictions) > 500: predictions = np.concatenate(predictions, 0) mean, std = compute_inception_score(predictions, 10) # print('mean:', mean, 'std', std) m_incep = summary.scalar('Inception_mean', mean) self.summary_writer.add_summary(m_incep, count) # mean_nlpp, std_nlpp = negative_log_posterior_probability(predictions, 10) m_nlpp = summary.scalar('NLPP_mean', mean_nlpp) self.summary_writer.add_summary(m_nlpp, count) # predictions = [] end_t = time.time() print('''[%d/%d][%d] Loss_D: %.2f Loss_G: %.2f Loss_KL: %.2f Time: %.2fs ''' # D(real): %.4f D(wrong):%.4f D(fake) %.4f % (epoch, self.max_epoch, self.num_batches, errD_total.item(), errG_total.item(), kl_loss.item(), end_t - start_t)) save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir) self.summary_writer.close() def save_superimages(self, images_list, filenames, save_dir, split_dir, imsize): batch_size = images_list[0].size(0) num_sentences = len(images_list) for i in range(batch_size): s_tmp = '%s/super/%s/%s' %\ (save_dir, split_dir, filenames[i]) folder = s_tmp[:s_tmp.rfind('/')] if not os.path.isdir(folder): print('Make a new folder: ', folder) mkdir_p(folder) # savename = '%s_%d.png' % (s_tmp, imsize) super_img = [] for j in range(num_sentences): img = images_list[j][i] # print(img.size()) img = 
img.view(1, 3, imsize, imsize) # print(img.size()) super_img.append(img) # break super_img = torch.cat(super_img, 0) vutils.save_image(super_img, savename, nrow=10, normalize=True) def save_singleimages(self, images, filenames, save_dir, split_dir, sentenceID, imsize): for i in range(images.size(0)): s_tmp = '%s/single_samples/%s/%s' %\ (save_dir, split_dir, filenames[i]) folder = s_tmp[:s_tmp.rfind('/')] if not os.path.isdir(folder): print('Make a new folder: ', folder) mkdir_p(folder) fullpath = '%s_%d_sentence%d.png' % (s_tmp, imsize, sentenceID) # range from [-1, 1] to [0, 255] img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte() ndarr = img.permute(1, 2, 0).data.cpu().numpy() im = Image.fromarray(ndarr) im.save(fullpath) def evaluate(self, split_dir): if cfg.TRAIN.NET_G == '': print('Error: the path for morels is not found!') else: # Build and load the generator if split_dir == 'test': split_dir = 'valid' netG = G_NET() netG.apply(weights_init) netG = torch.nn.DataParallel(netG, device_ids=self.gpus) print(netG) # state_dict = torch.load(cfg.TRAIN.NET_G) state_dict = \ torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage) netG.load_state_dict(state_dict) print('Load ', cfg.TRAIN.NET_G) # the path to save generated images s_tmp = cfg.TRAIN.NET_G istart = s_tmp.rfind('_') + 1 iend = s_tmp.rfind('.') iteration = int(s_tmp[istart:iend]) s_tmp = s_tmp[:s_tmp.rfind('/')] save_dir = '%s/iteration%d' % (s_tmp, iteration) nz = cfg.GAN.Z_DIM noise = Variable(torch.FloatTensor(self.batch_size, nz)) if cfg.CUDA: netG.cuda() noise = noise.cuda() # switch to evaluate mode netG.eval() for step, data in enumerate(self.data_loader, 0): imgs, t_embeddings, filenames = data if cfg.CUDA: t_embeddings = Variable(t_embeddings).cuda() else: t_embeddings = Variable(t_embeddings) # print(t_embeddings[:, 0, :], t_embeddings.size(1)) embedding_dim = t_embeddings.size(1) batch_size = imgs[0].size(0) noise.data.resize_(batch_size, nz) noise.data.normal_(0, 1) fake_img_list = [] for i in range(embedding_dim): fake_imgs, _, _ = netG(noise, t_embeddings[:, i, :]) if cfg.TEST.B_EXAMPLE: # fake_img_list.append(fake_imgs[0].data.cpu()) # fake_img_list.append(fake_imgs[1].data.cpu()) fake_img_list.append(fake_imgs[2].data.cpu()) else: self.save_singleimages(fake_imgs[-1], filenames, save_dir, split_dir, i, 256) # self.save_singleimages(fake_imgs[-2], filenames, # save_dir, split_dir, i, 128) # self.save_singleimages(fake_imgs[-3], filenames, # save_dir, split_dir, i, 64) # break if cfg.TEST.B_EXAMPLE: # self.save_superimages(fake_img_list, filenames, # save_dir, split_dir, 64) # self.save_superimages(fake_img_list, filenames, # save_dir, split_dir, 128) self.save_superimages(fake_img_list, filenames, save_dir, split_dir, 256)
41.360284
116
0.540245
21,291
0.730169
0
0
0
0
0
0
4,528
0.155287
076cc2a993643184f8804f5d69cb1769c80c9cee
5,654
py
Python
spletni_vmesnik.py
LeaHolc/recepcija
bff9f804e795e45c2da214432042c0ae067783b0
[ "MIT" ]
1
2021-11-11T08:20:13.000Z
2021-11-11T08:20:13.000Z
spletni_vmesnik.py
LeaHolc/recepcija
bff9f804e795e45c2da214432042c0ae067783b0
[ "MIT" ]
null
null
null
spletni_vmesnik.py
LeaHolc/recepcija
bff9f804e795e45c2da214432042c0ae067783b0
[ "MIT" ]
null
null
null
from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file import bottle import controller from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna import datetime as dt @bottle.get('/') def root(): redirect('/domov') @bottle.get('/domov') def index(): parcele = dobi_parcele_za_prikaz(dt.date.today()) return template("domov", parcele=parcele, hide_header_back=True) @bottle.get("/parcela/<id_parcele>") def parcela(id_parcele): 'Preverimo stanje parcele' rez, gostje = dobi_info_parcele(id_parcele, dt.date.today()) if rez is not None: stanje = "Parcela je trenutno zasedena" else: stanje = "Parcela je trenutno na voljo" return template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje) @bottle.get("/naredi-rezervacijo/<id_parcele>") def nova_rezervacija(id_parcele=None): print(id_parcele) today = dt.date.today() tomorrow = today + dt.timedelta(days=1) return template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow) @bottle.post("/naredi-rezervacijo") def naredi_novo_rezervacijo(): " V modelu naredi novo rezervacijo in ji doda prvega gosta" # Preberemo lastnosti iz forme ime = request.forms.ime#get("") priimek = request.forms.priimek#get("") emso = request.forms.emso#get("") drzava = request.forms.drzava#get("") id_parcele = request.forms.id_parcele#get("") od = request.forms.zacetek#get("") do = request.forms.konec#get("") print(ime, priimek) try: datum_od = dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date() except Exception as e: print(e) print("Napaka pri pretvorbi datumov") return redirect("/naredi-rezervacijo") rezervacija = naredi_rezervacijo(id_parcele) dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { "EMSO":emso, "ime":ime, "priimek":priimek, "drzava":drzava, }, datum_od, datum_do) return redirect(f"/parcela/{id_parcele}") @bottle.get("/dodaj-gosta/<id_rezervacije>") def get_dodaj_gosta_na_rezervacijo(id_rezervacije): today = dt.date.today() tomorrow = today + dt.timedelta(days=1) rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka") return template("dodajanje_gosta", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow) @bottle.post("/dodaj-gosta-na-rezervacijo") def post_dodaj_gosta_na_rezervacijo(): " V modelu rezervaciji doda gosta" # Preberemo lastnosti iz forme ime = request.forms.ime priimek = request.forms.priimek emso = request.forms.emso#get("") drzava = request.forms.drzava#get("") id_rezervacije = request.forms.rez#get("") od = request.forms.zacetek#get("") do = request.forms.konec#get("") try: datum_od = dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date() except Exception as e: print(e) print("Napaka pri pretvorbi datumov") return redirect("/dodaj-gosta") rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka") dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { "EMSO":emso, "ime":ime, "priimek":priimek, "drzava":drzava, },datum_od,datum_do) print(id_rezervacije) return redirect(f"/parcela/{rezervacija.id_parcele}") @bottle.get("/predracun/<id_rezervacije>") def predracun(id_rezervacije): rezervacija = 
dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka") today = dt.date.today() gostje = rezervacija.gostje sestevek, postavke = dobi_postavke_racuna(rezervacija) slovar_cen = {} slovar_kolicin = {} for gost in gostje: slovar_kolicin[gost] = len(gost.nocitve) slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f') return template("racun", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime("%d/%m/%Y"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.get("/zakljuci/<id_rezervacije>") def racun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka") today = dt.date.today() gostje = rezervacija.gostje sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today()) slovar_cen = {} slovar_kolicin = {} for gost in gostje: slovar_kolicin[gost] = len(gost.nocitve) slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f') return template("racun", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime("%d/%m/%Y"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.error(404) def napaka404(a): return template("error", sporocilo="Stran ne obstaja!", naslov="404") @bottle.error(500) def napaka500(a): return template("error", sporocilo="Napaka streznika!", naslov="500") bottle.run(reloader=True, debug=True)
37.693333
196
0.703926
0
0
0
0
5,208
0.921118
0
0
1,087
0.192253
076da057376eccf60a978162dbf694687eba8ff6
1,233
py
Python
espnet/nets/pytorch_backend/transducer/initializer.py
magictron/espnet
075cee8d586957241be3e54c47846fbb12a32310
[ "Apache-2.0" ]
2
2020-06-21T11:15:10.000Z
2021-12-03T08:08:45.000Z
espnet/nets/pytorch_backend/transducer/initializer.py
magictron/espnet
075cee8d586957241be3e54c47846fbb12a32310
[ "Apache-2.0" ]
1
2021-03-05T10:43:49.000Z
2021-03-05T10:43:49.000Z
espnet/nets/pytorch_backend/transducer/initializer.py
magictron/espnet
075cee8d586957241be3e54c47846fbb12a32310
[ "Apache-2.0" ]
2
2021-03-30T06:02:08.000Z
2021-08-06T06:59:22.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Parameter initialization for transducer RNN/Transformer parts.""" import six from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one from espnet.nets.pytorch_backend.transformer.initializer import initialize def initializer(model, args): """Initialize transducer model. Args: model (torch.nn.Module): transducer instance args (Namespace): argument Namespace containing options """ if args.dtype != "transformer": if args.etype == "transformer": initialize(model.encoder, args.transformer_init) lecun_normal_init_parameters(model.dec) else: lecun_normal_init_parameters(model) model.dec.embed.weight.data.normal_(0, 1) for l in six.moves.range(len(model.dec.decoder)): set_forget_bias_to_one(model.dec.decoder[l].bias_ih) else: if args.etype == "transformer": initialize(model, args.transformer_init) else: lecun_normal_init_parameters(model.encoder) initialize(model.decoder, args.transformer_init)
31.615385
83
0.697486
0
0
0
0
0
0
0
0
320
0.25953
076e350bd997dc6e64e333caef566c1b62991f65
970
py
Python
evaluate.py
adelmassimo/EM-Algorithm-for-MMPP
23ae031076a464bfba5286cf6b5a1fa5e1cc66b1
[ "MIT" ]
null
null
null
evaluate.py
adelmassimo/EM-Algorithm-for-MMPP
23ae031076a464bfba5286cf6b5a1fa5e1cc66b1
[ "MIT" ]
null
null
null
evaluate.py
adelmassimo/EM-Algorithm-for-MMPP
23ae031076a464bfba5286cf6b5a1fa5e1cc66b1
[ "MIT" ]
null
null
null
import model import numpy as np import datasetReader as df import main # Number of traces loaded T T = 1 # Generate traces traces_factory = df.DatasetFactory() traces_factory.createDataset(T) traces = traces_factory.traces P0 = np.matrix("[ .02 0;" "0 0 0.5;" "0 0 0]") P1 = np.matrix("[0.1 0 0;" "0 0.5 0;" "0 0 0.9]") M = np.matrix("[0.25 0 0;" "0 0.23 0;" "0 0 0.85]") def backward_likelihood(i, trace): N = model.N M = len( trace ) likelihoods = np.ones((N, 1)) if i < M: P = main.randomization(P0, model.uniformization_rate, trace[i][0]) # P = stored_p_values[i, :, :] likelihoods = np.multiply( P.dot( model.P1 ).dot( backward_likelihood(i+1, trace) ), model.M[:, trace[i][1]] ) if likelihoods.sum() != 0: likelihoods = likelihoods / likelihoods.sum() return likelihoods
23.095238
74
0.541237
0
0
0
0
0
0
0
0
167
0.172165
076ea8e320bea4958c4967806ffb3361e0b72568
2,400
py
Python
Imaging/Core/Testing/Python/TestHSVToRGB.py
forestGzh/VTK
bc98327275bd5cfa95c5825f80a2755a458b6da8
[ "BSD-3-Clause" ]
1,755
2015-01-03T06:55:00.000Z
2022-03-29T05:23:26.000Z
Imaging/Core/Testing/Python/TestHSVToRGB.py
forestGzh/VTK
bc98327275bd5cfa95c5825f80a2755a458b6da8
[ "BSD-3-Clause" ]
29
2015-04-23T20:58:30.000Z
2022-03-02T16:16:42.000Z
Imaging/Core/Testing/Python/TestHSVToRGB.py
forestGzh/VTK
bc98327275bd5cfa95c5825f80a2755a458b6da8
[ "BSD-3-Clause" ]
1,044
2015-01-05T22:48:27.000Z
2022-03-31T02:38:26.000Z
#!/usr/bin/env python import vtk from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Use the painter to draw using colors. # This is not a pipeline object. It will support pipeline objects. # Please do not use this object directly. imageCanvas = vtk.vtkImageCanvasSource2D() imageCanvas.SetNumberOfScalarComponents(3) imageCanvas.SetScalarTypeToUnsignedChar() imageCanvas.SetExtent(0,320,0,320,0,0) imageCanvas.SetDrawColor(0,0,0) imageCanvas.FillBox(0,511,0,511) # r, g, b imageCanvas.SetDrawColor(255,0,0) imageCanvas.FillBox(0,50,0,100) imageCanvas.SetDrawColor(128,128,0) imageCanvas.FillBox(50,100,0,100) imageCanvas.SetDrawColor(0,255,0) imageCanvas.FillBox(100,150,0,100) imageCanvas.SetDrawColor(0,128,128) imageCanvas.FillBox(150,200,0,100) imageCanvas.SetDrawColor(0,0,255) imageCanvas.FillBox(200,250,0,100) imageCanvas.SetDrawColor(128,0,128) imageCanvas.FillBox(250,300,0,100) # intensity scale imageCanvas.SetDrawColor(5,5,5) imageCanvas.FillBox(0,50,110,210) imageCanvas.SetDrawColor(55,55,55) imageCanvas.FillBox(50,100,110,210) imageCanvas.SetDrawColor(105,105,105) imageCanvas.FillBox(100,150,110,210) imageCanvas.SetDrawColor(155,155,155) imageCanvas.FillBox(150,200,110,210) imageCanvas.SetDrawColor(205,205,205) imageCanvas.FillBox(200,250,110,210) imageCanvas.SetDrawColor(255,255,255) imageCanvas.FillBox(250,300,110,210) # saturation scale imageCanvas.SetDrawColor(245,0,0) imageCanvas.FillBox(0,50,220,320) imageCanvas.SetDrawColor(213,16,16) imageCanvas.FillBox(50,100,220,320) imageCanvas.SetDrawColor(181,32,32) imageCanvas.FillBox(100,150,220,320) imageCanvas.SetDrawColor(149,48,48) imageCanvas.FillBox(150,200,220,320) imageCanvas.SetDrawColor(117,64,64) imageCanvas.FillBox(200,250,220,320) imageCanvas.SetDrawColor(85,80,80) imageCanvas.FillBox(250,300,220,320) convert = vtk.vtkImageRGBToHSV() convert.SetInputConnection(imageCanvas.GetOutputPort()) convertBack = vtk.vtkImageHSVToRGB() convertBack.SetInputConnection(convert.GetOutputPort()) cast = vtk.vtkImageCast() cast.SetInputConnection(convertBack.GetOutputPort()) cast.SetOutputScalarTypeToFloat() cast.ReleaseDataFlagOff() viewer = vtk.vtkImageViewer() viewer.SetInputConnection(convertBack.GetOutputPort()) #viewer SetInputConnection [imageCanvas GetOutputPort] viewer.SetColorWindow(256) viewer.SetColorLevel(127.5) viewer.SetSize(320,320) viewer.Render() # --- end of script --
34.285714
67
0.814583
0
0
0
0
0
0
0
0
288
0.12
076eec8de4f676b9d586492c7ab7750df189a96a
296
py
Python
kelas_2b/echa.py
barizraihan/belajarpython
57df4c939600dd34a519599d6c78178bfb55063b
[ "MIT" ]
null
null
null
kelas_2b/echa.py
barizraihan/belajarpython
57df4c939600dd34a519599d6c78178bfb55063b
[ "MIT" ]
null
null
null
kelas_2b/echa.py
barizraihan/belajarpython
57df4c939600dd34a519599d6c78178bfb55063b
[ "MIT" ]
null
null
null
import csv class echa: def werehousing(self): with open('kelas_2b/echa.csv', 'r') as csvfile: csv_reader = csv.reader(csvfile, delimiter=',') for row in csv_reader: print("menampilkan data barang:", row[0], row[1], row[2], row[3], row[4])
32.888889
93
0.567568
283
0.956081
0
0
0
0
0
0
51
0.172297
076f84eca9f11a3725b25d5cf7a8fa60fb6dd720
3,399
py
Python
tests/test_handler_surface_distance.py
dyollb/MONAI
9084c452c48095c82c71d4391b3684006e5a3c56
[ "Apache-2.0" ]
2,971
2019-10-16T23:53:16.000Z
2022-03-31T20:58:24.000Z
tests/test_handler_surface_distance.py
dyollb/MONAI
9084c452c48095c82c71d4391b3684006e5a3c56
[ "Apache-2.0" ]
2,851
2020-01-10T16:23:44.000Z
2022-03-31T22:14:53.000Z
tests/test_handler_surface_distance.py
dyollb/MONAI
9084c452c48095c82c71d4391b3684006e5a3c56
[ "Apache-2.0" ]
614
2020-01-14T19:18:01.000Z
2022-03-31T14:06:14.000Z
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Tuple import numpy as np import torch from ignite.engine import Engine from monai.handlers import SurfaceDistance def create_spherical_seg_3d( radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99) ) -> np.ndarray: """ Return a 3D image with a sphere inside. Voxel values will be 1 inside the sphere, and 0 elsewhere. Args: radius: radius of sphere (in terms of number of voxels, can be partial) centre: location of sphere centre. im_shape: shape of image to create See also: :py:meth:`~create_test_image_3d` """ # Create image image = np.zeros(im_shape, dtype=np.int32) spy, spx, spz = np.ogrid[ -centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2] ] circle = (spx * spx + spy * spy + spz * spz) <= radius * radius image[circle] = 1 image[~circle] = 0 return image sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0) # test input a list of channel-first tensor sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)] sampler_sphere_zeros = torch.zeros_like(sampler_sphere) TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt] TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt] TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt] TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros] class TestHandlerSurfaceDistance(unittest.TestCase): # TODO test multi node Surface Distance def test_compute(self): sur_metric = SurfaceDistance(include_background=True) def _val_func(engine, batch): pass engine = Engine(_val_func) sur_metric.attach(engine, "surface_distance") y_pred, y = TEST_SAMPLE_1 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4) y_pred, y = TEST_SAMPLE_2 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4) y_pred, y = TEST_SAMPLE_3 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float("inf")) y_pred, y = TEST_SAMPLE_4 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float("inf")) def test_shape_mismatch(self): sur_metric = SurfaceDistance(include_background=True) with self.assertRaises((AssertionError, ValueError)): y_pred = TEST_SAMPLE_1[0] y = torch.ones((1, 1, 10, 10, 10)) sur_metric.update([y_pred, y]) if __name__ == "__main__": unittest.main()
35.778947
120
0.692262
1,192
0.350691
0
0
0
0
0
0
1,056
0.31068
077018ad315b121efadde62952dbcb47369a343a
2,368
py
Python
benchmarks/eval.py
rom1mouret/anoflows
42381c06b8897e4510e73cda87ea97ea3f4a5579
[ "Apache-2.0" ]
null
null
null
benchmarks/eval.py
rom1mouret/anoflows
42381c06b8897e4510e73cda87ea97ea3f4a5579
[ "Apache-2.0" ]
null
null
null
benchmarks/eval.py
rom1mouret/anoflows
42381c06b8897e4510e73cda87ea97ea3f4a5579
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 import sys import logging import yaml import pandas as pd import numpy as np from collections import defaultdict from sklearn.model_selection import train_test_split from sklearn.ensemble import IsolationForest from sklearn.impute import SimpleImputer from anoflows.hpo import find_best_flows from data_loading import load_data logging.getLogger().setLevel(logging.INFO) if len(sys.argv) == 1: logging.error("YAML data specification missing from the command line arguments") exit(1) spec_file = sys.argv[1] df, spec = load_data(spec_file) max_rows = min(len(df), spec.get("max_rows", 40000)) novelty_detection = spec.get("novelty", True) normal_classes = spec["normal_classes"] precision = defaultdict(list) for rounds in range(spec.get("rounds", 1)): # random sampling df = df.sample(n=max_rows, replace=False) label_col = spec["label_column"] y = df[label_col].values other = df.drop(label_col, inplace=False, axis=1) X = other.values # imputing X = SimpleImputer(copy=False).fit_transform(X) # train/test split X_train, X_test, y_train, y_test = \ train_test_split(X, y, shuffle=False, test_size=0.5) if novelty_detection: keep = np.where(np.isin(y_train, normal_classes))[0] X_train = X_train[keep, :] y_train = y_train[keep] # training #flows, loss = find_best_flows(X_train, device='cpu', n_trials=1) from anoflows.anoflow_bagging import AnoFlowBagging flows = AnoFlowBagging() flows.fit(X_train) iforest = IsolationForest().fit(X_train) # prediction pred = { "anoflows": flows.likelihood(X_test), "iforest": iforest.decision_function(X_test) } # evaluation y_true = np.where(np.isin(y_test, spec["anomaly_classes"]))[0] ref = np.zeros(len(y_test)) ref[y_true] = 1 k = len(y_true) for name, y_pred in pred.items(): anomaly_indices = y_pred.argsort()[:k] prec = ref[anomaly_indices].sum() / k logging.info("%s: %.1f%% (%d anomalies / %d rows)" % (name, 100*prec, k, len(y_test))) precision[name].append(prec) logging.info("* SUMMARY %s", spec_file) for name, prec in precision.items(): prec = 100 * np.array(prec) mean = np.mean(prec) std = np.std(prec) logging.info("%s; mean=%.1f%% std=%.1f%%" % (name, mean, std))
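# --- Editor's hedged sketch (not part of the original benchmarks/eval.py) ---
# Illustrates the precision-at-k evaluation idea used above on synthetic data:
# rank rows by an anomaly score and check how many of the k lowest-scored rows
# are true anomalies. Only scikit-learn and numpy are assumed; the data is made up.
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(0)
X_normal = rng.normal(0.0, 1.0, size=(500, 4))
X_anomal = rng.normal(5.0, 1.0, size=(25, 4))
X = np.vstack([X_normal, X_anomal])
is_anomaly = np.concatenate([np.zeros(500), np.ones(25)])

scores = IsolationForest(random_state=0).fit(X).decision_function(X)
k = int(is_anomaly.sum())
flagged = np.argsort(scores)[:k]          # lowest scores = most anomalous
precision_at_k = is_anomaly[flagged].mean()
print("precision@k = %.2f" % precision_at_k)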
29.234568
94
0.678209
0
0
0
0
0
0
0
0
403
0.170186
07702a9eb4e9374ca232b483bdbecbfbdb1917c5
840
py
Python
pydantic/version.py
jamescurtin/pydantic
4f8f9396906a094626b770fb7cc8eecf03770ffe
[ "MIT" ]
1
2020-02-25T15:28:47.000Z
2020-02-25T15:28:47.000Z
pydantic/version.py
jamescurtin/pydantic
4f8f9396906a094626b770fb7cc8eecf03770ffe
[ "MIT" ]
1
2020-01-17T17:12:45.000Z
2020-01-17T17:12:45.000Z
pydantic/version.py
jamescurtin/pydantic
4f8f9396906a094626b770fb7cc8eecf03770ffe
[ "MIT" ]
1
2020-12-19T18:00:19.000Z
2020-12-19T18:00:19.000Z
__all__ = ['VERSION', 'version_info'] VERSION = '1.4a1' def version_info() -> str: import platform import sys from importlib import import_module from pathlib import Path from .main import compiled optional_deps = [] for p in ('typing-extensions', 'email-validator', 'devtools'): try: import_module(p.replace('-', '_')) except ImportError: continue optional_deps.append(p) info = { 'pydantic version': VERSION, 'pydantic compiled': compiled, 'install path': Path(__file__).resolve().parent, 'python version': sys.version, 'platform': platform.platform(), 'optional deps. installed': optional_deps, } return '\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\n', ' ')) for k, v in info.items())
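# --- Editor's hedged sketch (not part of the original pydantic/version.py) ---
# The same "which optional dependencies are importable" pattern used by
# version_info(), shown standalone; the package names below are arbitrary
# examples, not pydantic's actual optional-dependency list.
from importlib import import_module

def installed(packages):
    found = []
    for name in packages:
        try:
            import_module(name.replace('-', '_'))
        except ImportError:
            continue
        found.append(name)
    return found

if __name__ == '__main__':
    print(installed(['typing-extensions', 'devtools', 'not-a-real-package']))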
27.096774
101
0.589286
0
0
0
0
0
0
0
0
210
0.25
0770f2a922548842dd4151e55d3fc69c6cf5b84c
2,319
py
Python
spire/core/registry.py
siq/spire
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
[ "Linux-OpenIB" ]
null
null
null
spire/core/registry.py
siq/spire
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
[ "Linux-OpenIB" ]
1
2016-09-15T16:19:27.000Z
2016-09-15T16:20:06.000Z
spire/core/registry.py
siq/spire
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
[ "Linux-OpenIB" ]
null
null
null
from scheme import Structure __all__ = ('Configurable', 'Registry') class Configurable(object): """A sentry class which indicates that subclasses can establish a configuration chain.""" class Registry(object): """The unit registry.""" dependencies = {} schemas = {} units = {} @classmethod def is_configurable(cls, obj): return (obj is not Configurable and issubclass(obj, Configurable) and Configurable not in obj.__bases__) @classmethod def purge(cls): cls.schemas = {} cls.units = {} @classmethod def register_dependency(cls, dependency): token = dependency.token if not token: return if token not in cls.dependencies: cls.dependencies[token] = type(dependency) if not dependency.configurable: return configuration = dependency.unit.configuration if token in cls.schemas: structure = cls.schemas[token] if configuration.required and not dependency.optional and not structure.required: structure.required = True else: schema = dependency.construct_schema(generic=True, name=token) if dependency.optional: schema = schema.clone(required=False) cls.schemas[token] = schema @classmethod def register_unit(cls, unit): cls.units[unit.identity] = unit if cls.is_configurable(unit): queue = [(unit, [unit.identity], None)] while queue: subject, tokens, dependency = queue.pop(0) if subject.configuration: token = '/'.join(tokens) if dependency: structure = dependency.construct_schema(name=token) if dependency.token and structure.required: structure = structure.clone(required=False) else: structure = subject.configuration.schema.clone(required=False, name=token) cls.schemas[token] = structure for attr, subdependency in subject.dependencies.iteritems(): queue.append((subdependency.unit, tokens + [attr], subdependency))
34.61194
93
0.581285
2,246
0.968521
0
0
1,994
0.859853
0
0
140
0.060371
07710c963c7c958684d4d5e192f36678ee929e23
231
py
Python
oslo_devsupport/model/__init__.py
berrange/oslo.devsupport
463c5842e95c5f8a7009ab1041f290e3a1050a06
[ "Apache-1.1" ]
null
null
null
oslo_devsupport/model/__init__.py
berrange/oslo.devsupport
463c5842e95c5f8a7009ab1041f290e3a1050a06
[ "Apache-1.1" ]
null
null
null
oslo_devsupport/model/__init__.py
berrange/oslo.devsupport
463c5842e95c5f8a7009ab1041f290e3a1050a06
[ "Apache-1.1" ]
null
null
null
from .command import * from .database import * from .entrypoint import * from .group import * from .http import * from .messaging import * from .method import * from .operation import * from .stack import * from .threads import *
19.25
25
0.735931
0
0
0
0
0
0
0
0
0
0
0771ae571980aa4669298ae5f48b1ac83a19af96
2,953
py
Python
scripts/extract.py
nng555/fairseq
c9730a125825a85f33042e1b9fd1959b8ca829e5
[ "MIT" ]
2
2020-10-05T08:52:01.000Z
2021-03-03T15:26:35.000Z
scripts/extract.py
nng555/fairseq
c9730a125825a85f33042e1b9fd1959b8ca829e5
[ "MIT" ]
null
null
null
scripts/extract.py
nng555/fairseq
c9730a125825a85f33042e1b9fd1959b8ca829e5
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Extracts random constraints from reference files."""

import argparse
import random
import sys

from sacrebleu import extract_ngrams


def get_phrase(words, index, length):
    assert index < len(words) - length + 1
    phr = " ".join(words[index : index + length])
    for i in range(index, index + length):
        words.pop(index)

    return phr


def main(args):

    if args.seed:
        random.seed(args.seed)

    for line in sys.stdin:
        constraints = []

        def add_constraint(constraint):
            constraints.append(constraint)

        source = line.rstrip()
        if "\t" in line:
            source, target = line.split("\t")
            if args.add_sos:
                target = f"<s> {target}"
            if args.add_eos:
                target = f"{target} </s>"

            if len(target.split()) >= args.len:
                words = [target]

                num = args.number

                choices = {}
                for i in range(num):
                    if len(words) == 0:
                        break
                    segmentno = random.choice(range(len(words)))
                    segment = words.pop(segmentno)
                    tokens = segment.split()
                    phrase_index = random.choice(range(len(tokens)))
                    choice = " ".join(
                        tokens[phrase_index : min(len(tokens), phrase_index + args.len)]
                    )
                    for j in range(
                        phrase_index, min(len(tokens), phrase_index + args.len)
                    ):
                        tokens.pop(phrase_index)
                    if phrase_index > 0:
                        words.append(" ".join(tokens[0:phrase_index]))
                    if phrase_index + 1 < len(tokens):
                        words.append(" ".join(tokens[phrase_index:]))
                    choices[target.find(choice)] = choice

                    # mask out with spaces
                    target = target.replace(choice, " " * len(choice), 1)

        for key in sorted(choices.keys()):
            add_constraint(choices[key])

        print(source, *constraints, sep="\t")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases")
    parser.add_argument("--len", "-l", type=int, default=1, help="phrase length")
    parser.add_argument(
        "--add-sos", default=False, action="store_true", help="add <s> token"
    )
    parser.add_argument(
        "--add-eos", default=False, action="store_true", help="add </s> token"
    )
    parser.add_argument("--seed", "-s", default=0, type=int)
    args = parser.parse_args()

    main(args)
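# --- Editor's hedged sketch (not part of the original scripts/extract.py) ---
# Mirrors what get_phrase(words, index, length) above does: it returns the joined
# phrase and removes exactly those tokens from the list in place. The word list
# below is an arbitrary example.
words = "the quick brown fox jumps".split()
phrase = " ".join(words[1:1 + 2])   # index=1, length=2, same slicing as get_phrase
del words[1:1 + 2]                  # get_phrase achieves this with repeated pop(index)
print(phrase)   # quick brown
print(words)    # ['the', 'fox', 'jumps']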
31.752688
88
0.529292
0
0
0
0
0
0
0
0
489
0.165594
0773947b769d5f943efc051b2beaf2ee562da724
1,231
py
Python
AppImageBuilder/commands/file.py
gouchi/appimage-builder
40e9851c573179e066af116fb906e9cad8099b59
[ "MIT" ]
null
null
null
AppImageBuilder/commands/file.py
gouchi/appimage-builder
40e9851c573179e066af116fb906e9cad8099b59
[ "MIT" ]
null
null
null
AppImageBuilder/commands/file.py
gouchi/appimage-builder
40e9851c573179e066af116fb906e9cad8099b59
[ "MIT" ]
null
null
null
# Copyright 2020 Alexis Lopez Zubieta # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. import os from .command import Command class FileError(RuntimeError): pass class File(Command): def __init__(self): super().__init__('file') self.log_stdout = False self.log_command = False def query(self, path): self._run(['file', '-b', '--exclude', 'ascii', path]) if self.return_code != 0: raise FileError('\n'.join(self.stderr)) return '\n'.join(self.stdout) def is_executable_elf(self, path): output = self.query(path) result = ('ELF' in output) and ('executable' in output) return result
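# --- Editor's hedged sketch (not part of the original AppImageBuilder module) ---
# The Command base class is not shown above, so this standalone approximation of
# File.query()/is_executable_elf() uses subprocess directly with the same
# `file -b --exclude ascii` invocation; function names here are made up.
import subprocess

def file_query(path):
    proc = subprocess.run(["file", "-b", "--exclude", "ascii", path],
                          capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError(proc.stderr)
    return proc.stdout.strip()

def is_executable_elf(path):
    output = file_query(path)
    return "ELF" in output and "executable" in output

# e.g. is_executable_elf("/bin/ls") is expected to be True on most Linux systems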
31.564103
80
0.685621
557
0.452478
0
0
0
0
0
0
676
0.549147
0775eae440b3ed8a8de73f26dfbbc57343a6323d
6,670
py
Python
text_selection/analyse_zenon_scrape.py
dainst/chronoi-corpus-processing
7f508a7572e1022c4c88d1477db029e6619a1f0c
[ "MIT" ]
null
null
null
text_selection/analyse_zenon_scrape.py
dainst/chronoi-corpus-processing
7f508a7572e1022c4c88d1477db029e6619a1f0c
[ "MIT" ]
null
null
null
text_selection/analyse_zenon_scrape.py
dainst/chronoi-corpus-processing
7f508a7572e1022c4c88d1477db029e6619a1f0c
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import argparse import csv import furl import json import re import sys from collections import defaultdict def filter_records_without_url(records: []) -> []: return [r for r in records if any(r.get("urls"))] def build_furl(url: str) -> furl.furl: try: furl_obj = furl.furl(url) if not furl_obj.host: furl_obj = furl.furl("http://" + url) return furl_obj except ValueError: return furl.furl("https://invalid-url.xyz") def determine_host(url: str) -> str: furl_obj = build_furl(url) return re.sub(r"^www[0-9]*\.", "", furl_obj.host) def build_hosts_to_urls(records: []) -> {str: {str}}: result = defaultdict(set) for record in records: for url in record.get("urls"): host = determine_host(url.get("url")) result[host].add(url.get("url")) return result def print_most_common_url_hosts(hosts_to_urls: {}, n: int): hosts = [h for h in hosts_to_urls.keys() if len(hosts_to_urls[h]) > n] hosts = sorted(hosts, key=lambda h: len(hosts_to_urls[h])) for host in hosts: print("% 6d\t%s" % (len(hosts_to_urls[host]), host)) def print_urls_for_host(hosts_to_urls: {}, host: str): urls = hosts_to_urls.get(host, []) for url in urls: print(url) if not any(urls): print(f"No urls for host: '{host}'", file=sys.stderr) def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2: str): # It should be ok, to only pattern match the hosts here... ids1 = {r.get("id") for r in records if record_has_matching_url(r, pattern1)} ids2 = {r.get("id") for r in records if record_has_matching_url(r, pattern2)} ids_both = ids1.intersection(ids2) for host, number in {pattern1: len(ids1), pattern2: len(ids2), "both": len(ids_both)}.items(): print(f"{host}: {number}") def record_has_matching_url(record: {}, pattern: str) -> bool: return any(record_get_urls_matching(record, pattern)) def record_get_urls_matching(record: {}, pattern: str) -> [{}]: result = [] for url in record.get("urls"): if any(re.findall(pattern, url.get("url"))): result.append(url) return result def record_remove_urls_not_matching(record: {}, pattern: str): record["urls"] = record_get_urls_matching(record, pattern) def earliest_year(year_strings: [str]) -> str: years = [] for year_s in year_strings: try: years.append(int(year_s)) except ValueError: print(f"Not a string that is a year: '{year_s}'", file=sys.stderr) continue return str(sorted(years)[0]) if any(years) else "" def main(args: argparse.Namespace): with open(args.scrape_file, "r") as file: records = json.load(file) records = filter_records_without_url(records) # filter urls by the user-provided filter list if args.desc_filters: with open(args.desc_filters, "r") as file: filters = file.read().splitlines() for record in records: record["urls"] = [url for url in record.get("urls") if url.get("desc") not in filters] records = filter_records_without_url(records) # print unique hosts or urls, then exit if args.print_host_urls or args.print_common_hosts >= 0: hosts_to_urls = build_hosts_to_urls(records) if args.print_common_hosts >= 0: print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts) elif args.print_host_urls: print_urls_for_host(hosts_to_urls, host=args.print_host_urls) exit(0) # check in how many records the two given hosts co-occur, then exit if args.patterns_cooccur: host1, host2 = args.patterns_cooccur.split(",") print_how_often_url_patterns_cooccur(records, host1, host2) exit(0) # do some selection based on a url pattern, remove all non-matching urls from the record if args.select_by_url: pattern = args.select_by_url records = [r 
for r in records if record_has_matching_url(r, pattern)] for record in records: record_remove_urls_not_matching(record, pattern) # sort the records by id, to be extra sure, that we get the same order every time this is called # print each line as a csv column records = sorted(records, key=lambda r: r.get("id")) writer = csv.writer(sys.stdout, delimiter=",", quoting=csv.QUOTE_ALL) for record in records: to_print = [] if args.print_id: to_print.append(record.get("id", "")) if args.print_url: to_print.append(record.get("urls")[0].get("url") if any(record.get("urls")) else "") if args.print_pub_date: to_print.append(earliest_year(record.get("publicationDates", []))) if args.print_languages: to_print.append("|".join(record.get("languages", []))) writer.writerow(to_print) if __name__ == '__main__': parser = argparse.ArgumentParser( description="Process a file with zenon json records and print some information about them.") parser.add_argument("scrape_file", type=str, help="The file that contains the zenon dumps as json.") parser.add_argument("--desc-filters", type=str, help="A file to filter urls by. Excludes urls with 'desc' fields matching a line in the file.") # these are arguments to print some specific information parser.add_argument("--print-common-hosts", type=int, default=-1, help="Print hosts that appear more than n times in the records urls, then exit.") parser.add_argument("--print-host-urls", type=str, help="Print all urls for the host, then exit.") parser.add_argument("--patterns-cooccur", type=str, help="Format: 'pattern1,pattern2', print how often these occur in single records url fields, then exit.") # these are meant to work together select by a url pattern then print information about the records parser.add_argument("--select-by-url", type=str, help="Give a pattern for a url to select records by.") parser.add_argument("--print-url", action="store_true", help="Print the first of each urls for the selected records. (Ignores other urls present on the records if --select-url is given.)") parser.add_argument("--print-pub-date", action="store_true", help="Print the earliest publication year for each of the selected records.") parser.add_argument("--print-id", action="store_true", help="Print the selected records' ids") parser.add_argument("--print-languages", action="store_true", help="Print the selected records' languages") main(parser.parse_args())
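# --- Editor's hedged sketch (not part of the original analyse_zenon_scrape.py) ---
# Minimal illustration of the host-normalisation and grouping logic used above
# (determine_host / build_hosts_to_urls), with made-up example URLs.
import re
from collections import defaultdict
import furl

def host_of(url):
    f = furl.furl(url if "://" in url else "http://" + url)
    return re.sub(r"^www[0-9]*\.", "", f.host or "")

urls = [
    "http://www.example.org/item/1",
    "https://www2.example.org/item/2",
    "http://archive.example.net/x",
]
by_host = defaultdict(set)
for u in urls:
    by_host[host_of(u)].add(u)
print({h: len(us) for h, us in by_host.items()})
# {'example.org': 2, 'archive.example.net': 1}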
40.670732
192
0.669715
0
0
0
0
0
0
0
0
1,900
0.284858
07773d417997f41786d66f2eb9103478a102aad8
2,578
py
Python
src/python/twitter/pants/targets/java_antlr_library.py
wfarner/commons
42988a7a49f012665174538cca53604c7846ee86
[ "Apache-2.0" ]
1
2019-12-20T14:13:27.000Z
2019-12-20T14:13:27.000Z
src/python/twitter/pants/targets/java_antlr_library.py
wfarner/commons
42988a7a49f012665174538cca53604c7846ee86
[ "Apache-2.0" ]
null
null
null
src/python/twitter/pants/targets/java_antlr_library.py
wfarner/commons
42988a7a49f012665174538cca53604c7846ee86
[ "Apache-2.0" ]
1
2019-12-20T14:13:29.000Z
2019-12-20T14:13:29.000Z
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================

__author__ = 'Brian Larson'

from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary


class JavaAntlrLibrary(ExportableJvmLibrary):
  """Defines a target that builds java stubs from an Antlr grammar file."""

  def __init__(self,
               name,
               sources,
               provides = None,
               dependencies = None,
               excludes = None,
               compiler = 'antlr3'):

    """name: The name of this module target, addressable via pants via the portion of the spec
        following the colon
    sources: A list of paths containing the Antlr source files this module's jar is compiled from
    provides: An optional Dependency object indicating the ivy artifact to export
    dependencies: An optional list of Dependency objects specifying the binary (jar) dependencies of
        this module.
    excludes: An optional list of dependency exclude patterns to filter all of this module's
        transitive dependencies against.
    compiler: The name of the compiler used to compile the ANTLR files.
        Currently only supports 'antlr3' and 'antlr4'"""

    ExportableJvmLibrary.__init__(self,
                                  name,
                                  sources,
                                  provides,
                                  dependencies,
                                  excludes)
    self.add_labels('codegen')

    if compiler not in ['antlr3', 'antlr4']:
      raise ValueError("Illegal value for 'compiler': {}".format(compiler))
    self.compiler = compiler

  def _as_jar_dependency(self):
    return ExportableJvmLibrary._as_jar_dependency(self).with_sources()
44.448276
100
0.588053
1,567
0.607836
0
0
0
0
0
0
1,726
0.669511
0777dbaeb86425a933c2accd81e0d8dadd226bab
3,092
py
Python
bigml/tests/create_pca_steps_bck.py
devs-cloud/python_ml
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
[ "Apache-2.0" ]
null
null
null
bigml/tests/create_pca_steps_bck.py
devs-cloud/python_ml
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
[ "Apache-2.0" ]
null
null
null
bigml/tests/create_pca_steps_bck.py
devs-cloud/python_ml
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- #!/usr/bin/env python # # Copyright 2018-2020 BigML # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time import json import os from datetime import datetime, timedelta from world import world from nose.tools import eq_, assert_less from bigml.api import HTTP_CREATED from bigml.api import HTTP_ACCEPTED from bigml.api import FINISHED from bigml.api import FAULTY from bigml.api import get_status from read_pca_steps import i_get_the_pca #@step(r'the pca name is "(.*)"') def i_check_pca_name(step, name): pca_name = world.pca['name'] eq_(name, pca_name) #@step(r'I create a PCA from a dataset$') def i_create_a_pca_from_dataset(step): dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset, {'name': 'new PCA'}) world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location = resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) #@step(r'I create a PCA from a dataset$') def i_create_a_pca_with_params(step, params): params = json.loads(params) dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset, params) world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location = resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) def i_create_a_pca(step): i_create_a_pca_from_dataset(step) #@step(r'I update the PCA name to "(.*)"$') def i_update_pca_name(step, name): resource = world.api.update_pca(world.pca['resource'], {'name': name}) world.status = resource['code'] eq_(world.status, HTTP_ACCEPTED) world.location = resource['location'] world.pca = resource['object'] #@step(r'I wait until the PCA status code is either (\d) or (-\d) less than (\d+)') def wait_until_pca_status_code_is(step, code1, code2, secs): start = datetime.utcnow() delta = int(secs) * world.delta pca_id = world.pca['resource'] i_get_the_pca(step, pca_id) status = get_status(world.pca) while (status['code'] != int(code1) and status['code'] != int(code2)): time.sleep(3) assert_less(datetime.utcnow() - start, timedelta(seconds=delta)) i_get_the_pca(step, pca_id) status = get_status(world.pca) eq_(status['code'], int(code1)) #@step(r'I wait until the PCA is ready less than (\d+)') def the_pca_is_finished_in_less_than(step, secs): wait_until_pca_status_code_is(step, FINISHED, FAULTY, secs)
32.893617
83
0.698254
0
0
0
0
0
0
0
0
1,080
0.349288
077860d7dfef7192b10ddd84d4a9115cb45934f6
290
py
Python
config.py
Pasmikh/quiz_please_bot
2b619b359d8021be57b404525013c53403d6cde1
[ "MIT" ]
null
null
null
config.py
Pasmikh/quiz_please_bot
2b619b359d8021be57b404525013c53403d6cde1
[ "MIT" ]
null
null
null
config.py
Pasmikh/quiz_please_bot
2b619b359d8021be57b404525013c53403d6cde1
[ "MIT" ]
null
null
null
days_of_week = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday']
operation = ''
options = ['Info', 'Check-in/Out', 'Edit games', 'Back']
admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname']
avail_days = []
TOKEN = 'bot_token'
group_id = 'id_of_group_chat'  # placeholder, like TOKEN above; quoted so the module imports cleanly
41.428571
88
0.713793
0
0
0
0
0
0
0
0
167
0.575862
0778705078ff1aa67fe1ad3d2a88bc9581c13e09
2,331
py
Python
Chapter 8/sandwich-maker.py
ostin-r/automate-boring-stuff-solutions
78f0a2981e6520ff2907285e666168a0f35eba02
[ "FTL" ]
4
2021-06-14T10:37:58.000Z
2021-12-30T17:49:17.000Z
Chapter 8/sandwich-maker.py
ostin-r/automate-boring-stuff-solutions
78f0a2981e6520ff2907285e666168a0f35eba02
[ "FTL" ]
null
null
null
Chapter 8/sandwich-maker.py
ostin-r/automate-boring-stuff-solutions
78f0a2981e6520ff2907285e666168a0f35eba02
[ "FTL" ]
1
2021-07-29T15:26:54.000Z
2021-07-29T15:26:54.000Z
'''
Austin Richards 2/20/21

sandwich-maker.py uses pyinputplus to validate
user input for sandwich preferences
'''
import pyinputplus as ip


def get_cost(food_name):
    '''gets the cost of items in sandwich_builder'''
    food_dict = {
        'sourdough':1.75,
        'rye':2.0,
        'wheat':1.50,
        'white':1.25,
        'chicken':2.0,
        'turkey':1.50,
        'ham':2.0,
        'tofu':1.25,
        'cheddar':2.0,
        'swiss':2.5,
        'mozzarella':2.5,
        'yes':0.25,  # toppings return 'yes' in sandwich_builder(), so I made them all cost 0.25
        'no':0       # saying no to a topping costs nothing
    }

    return food_dict[food_name]


def sandwich_builder():
    print('Enter your sandwich preferences below:\n')

    bread_prompt = 'What bread type would you like? (sourdough, rye, wheat, or white)\n'
    bread_type = ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt)

    protein_prompt = 'What type of protein would you like? (chicken, turkey, ham, or tofu)\n'
    protein_type = ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt)

    mayo = ip.inputYesNo(prompt='Would you like mayo?\n')
    mustard = ip.inputYesNo(prompt='Would you like mustard?\n')
    tomato = ip.inputYesNo(prompt='Would you like tomato?\n')
    lettuce = ip.inputYesNo(prompt='Would you like lettuce?\n')

    like_cheese = ip.inputYesNo(prompt='Do you like cheese on your sandwich?\n')

    if like_cheese == 'yes':
        cheese_prompt = 'What kind of cheese would you like? (cheddar, swiss, mozzarella)\n'
        cheese_type = ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt)

        sandwich = []
        cost = 0

        sandwich.extend([bread_type, protein_type, cheese_type, mayo, mustard, tomato, lettuce])

        for item in sandwich:
            cost += get_cost(item)

    else:
        sandwich = []
        cost = 0

        sandwich.extend([bread_type, protein_type, mayo, mustard, tomato, lettuce])

        for item in sandwich:
            cost += get_cost(item)

    how_many_prompt = 'How many sandwiches would you like?\n'
    how_many = ip.inputInt(min=1, prompt=how_many_prompt)

    print('\nFinal cost: ${}'.format(round(cost * how_many * 1.06, 2)))


sandwich_builder()
33.3
96
0.62248
0
0
0
0
0
0
0
0
915
0.392535
0778aa1b06b2fda0447a13db0a273ce1b3e6b40f
2,021
py
Python
tests/core/test_headerupdater.py
My-Novel-Management/storybuilderunite
c003d3451e237f574c54a87ea7d4fd8da8e833be
[ "MIT" ]
1
2020-06-18T01:38:55.000Z
2020-06-18T01:38:55.000Z
tests/core/test_headerupdater.py
My-Novel-Management/storybuilder
1f36e56a74dbb55a25d60fce3ce81f3c650f521a
[ "MIT" ]
143
2019-11-13T00:21:11.000Z
2020-08-15T05:47:41.000Z
tests/core/test_headerupdater.py
My-Novel-Management/storybuilderunite
c003d3451e237f574c54a87ea7d4fd8da8e833be
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- ''' HeaderUpdater class test ======================== ''' import unittest from tests.testutils import print_testtitle, validate_with_fail from builder.commands.scode import SCode, SCmd from builder.containers.chapter import Chapter from builder.containers.episode import Episode from builder.containers.scene import Scene from builder.containers.story import Story from builder.core import headerupdater as hd class HeaderUpdaterTest(unittest.TestCase): @classmethod def setUpClass(cls): print_testtitle(hd.__name__, 'HeaderUpdater class') def test_instance(self): tmp = hd.HeaderUpdater() self.assertIsInstance(tmp, hd.HeaderUpdater) def test_title_of(self): data = [ # (src, expect, exp_opt) (True, Story('test',), ('test',), 1), ] def checker(src, expect, exp_opt): tmp = hd.HeaderUpdater()._title_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_TITLE) self.assertEqual(tmp.script, expect) self.assertEqual(tmp.option, exp_opt) validate_with_fail(self, 'title_of', checker, data) def test_outline_of(self): data = [ # (src, expect) (True, Story('test',outline='apple'), ('apple',)), ] def checker(src, expect): tmp = hd.HeaderUpdater()._outline_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT) self.assertEqual(tmp.script, expect) validate_with_fail(self, 'outline_of', checker, data) def test_end_of(self): data = [ # (src, expect) (True, Chapter('test',), SCmd.END_CHAPTER), ] validate_with_fail(self, 'end_of', lambda src, expect: self.assertEqual( hd.HeaderUpdater()._end_of(src).cmd, expect), data)
33.131148
66
0.597724
1,583
0.783276
0
0
97
0.047996
0
0
223
0.110341
0778ae783c1f5257a96e5e0972a23c96938e6782
682
py
Python
dotsDB/test_vlen_datasets.py
aernesto/Lab_DotsDB_Utilities
d8458b4126d80daeb5084234889fc6674158ea0f
[ "MIT" ]
1
2019-03-11T19:12:12.000Z
2019-03-11T19:12:12.000Z
dotsDB/test_vlen_datasets.py
aernesto/Lab_DotsDB_Utilities
d8458b4126d80daeb5084234889fc6674158ea0f
[ "MIT" ]
null
null
null
dotsDB/test_vlen_datasets.py
aernesto/Lab_DotsDB_Utilities
d8458b4126d80daeb5084234889fc6674158ea0f
[ "MIT" ]
1
2019-10-31T20:10:12.000Z
2019-10-31T20:10:12.000Z
import numpy as np import h5py filename = "test_vlen_datasets_np_bool.h5" rows = [np.array([np.True_, np.False_]), np.array([np.True_, np.True_, np.False_])] f = h5py.File(filename, 'x') # create file, fails if exists vlen_data_type = h5py.special_dtype(vlen=np.bool_) dset = f.create_dataset("vlen_matrix", (2,), compression="gzip", compression_opts=9, fletcher32=True, dtype=vlen_data_type) for r in range(len(rows)): dset[r] = rows[r] f.flush() f.close() f = h5py.File(filename, 'r') dsetr = f["vlen_matrix"] for r in range(dsetr.shape[0]): print(dsetr[r])
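# --- Editor's hedged sketch (not part of the original test file) ---
# Same variable-length-row round trip as above, but kept entirely in memory via
# h5py's "core" driver so no file is left on disk. Assumes a reasonably recent h5py;
# the dataset and file names are arbitrary.
import numpy as np
import h5py

rows = [np.array([True, False]), np.array([True, True, False])]
vlen_bool = h5py.special_dtype(vlen=np.bool_)

with h5py.File("in_memory.h5", "w", driver="core", backing_store=False) as f:
    dset = f.create_dataset("vlen_matrix", (len(rows),), dtype=vlen_bool)
    for i, row in enumerate(rows):
        dset[i] = row
    for i in range(dset.shape[0]):
        print(dset[i])   # each row keeps its own length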
22.733333
60
0.590909
0
0
0
0
0
0
0
0
99
0.145161
0779ab4524c7785b80eb2c94fee42447c65c7dbc
8,824
py
Python
utils.py
g4idrijs/CardiacUltrasoundPhaseEstimation
6bd2e157240133b6e306a7ca931d3d3b96647b88
[ "Apache-2.0" ]
1
2020-11-17T16:14:06.000Z
2020-11-17T16:14:06.000Z
utils.py
g4idrijs/CardiacUltrasoundPhaseEstimation
6bd2e157240133b6e306a7ca931d3d3b96647b88
[ "Apache-2.0" ]
null
null
null
utils.py
g4idrijs/CardiacUltrasoundPhaseEstimation
6bd2e157240133b6e306a7ca931d3d3b96647b88
[ "Apache-2.0" ]
1
2020-06-28T09:19:02.000Z
2020-06-28T09:19:02.000Z
import os, time import numpy as np import scipy.signal import scipy.misc import scipy.ndimage.filters import matplotlib.pyplot as plt import PIL from PIL import ImageDraw import angles import cv2 import SimpleITK as sitk def cvShowImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255), resizeAmount=None): if resizeAmount is not None: imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) imDisp = cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB) if len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2) cv2.imshow(strName, imDisp) def cvShowColorImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255), resizeAmount=None): if resizeAmount is not None: imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) if len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2) cv2.imshow(strName, imDisp) def mplotShowImage(imInput): plt.imshow(imInput, cmap=plt.cm.gray) plt.grid(False) plt.xticks(()) plt.yticks(()) def normalizeArray(a): return np.single(0.0 + a - a.min()) / (a.max() - a.min()) def AddTextOnImage(imInput, strText, loc=(2, 2), color=255): imInputPIL = PIL.Image.fromarray(imInput) d = ImageDraw.Draw(imInputPIL) d.text(loc, strText, fill=color) return np.asarray(imInputPIL) def AddTextOnVideo(imVideo, strText, loc=(2, 2)): imVideoOut = np.zeros_like(imVideo) for i in range(imVideo.shape[2]): imVideoOut[:, :, i] = AddTextOnImage(imVideo[:, :, i], strText, loc) return imVideoOut def cvShowVideo(imVideo, strWindowName, waitTime=30, resizeAmount=None): if not isinstance(imVideo, list): imVideo = [imVideo] strWindowName = [strWindowName] # find max number of frames maxFrames = 0 for vid in range(len(imVideo)): if imVideo[vid].shape[-1] > maxFrames: maxFrames = imVideo[vid].shape[2] # display video blnLoop = True fid = 0 while True: for vid in range(len(imVideo)): curVideoFid = fid % imVideo[vid].shape[2] imCur = imVideo[vid][:, :, curVideoFid] # resize image if requested if resizeAmount: imCur = scipy.misc.imresize(imCur, resizeAmount) # show image cvShowImage(imCur, strWindowName[vid], '%d' % (curVideoFid + 1)) # look for "esc" key k = cv2.waitKey(waitTime) & 0xff if blnLoop: if k == 27: break elif k == ord(' '): blnLoop = False else: fid = (fid + 1) % maxFrames else: if k == 27: # escape break elif k == ord(' '): # space blnLoop = True elif k == 81: # left arrow fid = (fid - 1) % maxFrames elif k == 83: # right arrow fid = (fid + 1) % maxFrames for vid in range(len(imVideo)): cv2.destroyWindow(strWindowName[vid]) def normalizeArray(a, bounds=None): if bounds is None: return (0.0 + a - a.min()) / (a.max() - a.min()) else: b = (0.0 + a - bounds[0]) / (bounds[1] - bounds[0]) b[b < 0] = bounds[0] b[b > bounds[1]] = bounds[1] return b def loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None): vidseq = cv2.VideoCapture(dataFilePath) print vidseq, vidseq.isOpened() # print metadata metadata = {} numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT) print '\tFRAME_COUNT = ', numFrames metadata['FRAME_COUNT'] = numFrames frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT) if frameHeight > 0: print '\tFRAME HEIGHT = ', frameHeight metadata['FRAME_HEIGHT'] = frameHeight frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH) if frameWidth > 0: print '\tFRAME WIDTH = ', frameWidth metadata['FRAME_WIDTH'] = frameWidth fps = vidseq.get(cv2.CAP_PROP_FPS) if fps > 0: print '\tFPS = ', fps metadata['FPS'] = fps fmt = 
vidseq.get(cv2.CAP_PROP_FORMAT)
    if fmt > 0:
        print '\tFORMAT = ', fmt
        metadata['FORMAT'] = fmt

    vmode = vidseq.get(cv2.CAP_PROP_MODE)
    if vmode > 0:
        print '\tMODE = ', vmode
        metadata['MODE'] = vmode

    # smooth if wanted
    if sigmaSmooth:
        wSmooth = 4 * sigmaSmooth + 1

    print metadata

    # read video frames
    imInput = []

    fid = 0
    prevPercent = 0
    print '\n'

    while True:
        valid_object, frame = vidseq.read()

        if not valid_object:
            break

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        if resizeAmount:
            frame = scipy.misc.imresize(frame, resizeAmount)

        if sigmaSmooth:
            frame = cv2.GaussianBlur(frame, (wSmooth, wSmooth), 0)

        imInput.append(frame)

        # update progress
        fid += 1
        curPercent = np.floor(100.0 * fid / numFrames)

        if curPercent > prevPercent:
            prevPercent = curPercent
            print '%.2d%%' % curPercent,

    print '\n'

    imInput = np.dstack(imInput)

    vidseq.release()

    return (imInput, metadata)


def writeVideoToFile(imVideo, filename, codec='DIVX', fps=30, isColor=False):
    # start timer
    tStart = time.time()

    # write video
    # fourcc = cv2.FOURCC(*list(codec)) # opencv 2.4
    fourcc = cv2.VideoWriter_fourcc(*list(codec))

    height, width = imVideo.shape[:2]

    writer = cv2.VideoWriter(filename, fourcc, fps=fps,
                             frameSize=(width, height), isColor=isColor)

    print writer.isOpened()

    numFrames = imVideo.shape[-1]

    for fid in range(numFrames):
        if isColor:
            writer.write(imVideo[:, :, :, fid].astype('uint8'))
        else:
            writer.write(imVideo[:, :, fid].astype('uint8'))

    # end timer
    tEnd = time.time()

    print 'Writing video {} took {} seconds'.format(filename, tEnd - tStart)

    # release
    writer.release()


def writeVideoAsTiffStack(imVideo, strFilePrefix):
    # start timer
    tStart = time.time()

    for fid in range(imVideo.shape[2]):
        plt.imsave(strFilePrefix + '.%.3d.tif' % (fid + 1), imVideo[:, :, fid])

    # end timer
    tEnd = time.time()

    print 'Writing video {} took {} seconds'.format(strFilePrefix, tEnd - tStart)


def mplotShowMIP(im, axis, xlabel=None, ylabel=None, title=None):
    plt.imshow(im.max(axis))

    if title:
        plt.title(title)

    if xlabel:
        plt.xlabel(xlabel)

    if ylabel:
        plt.ylabel(ylabel)


def convertFromRFtoBMode(imInputRF):
    return np.abs(scipy.signal.hilbert(imInputRF, axis=0))


def normalizeAngles(angleList, angle_range):
    return np.array(
        [angles.normalize(i, angle_range[0], angle_range[1]) for i in angleList])


def SaveFigToDisk(saveDir, fileName, saveext=('.png', '.eps'), **kwargs):
    for ext in saveext:
        plt.savefig(os.path.join(saveDir, fileName + ext), **kwargs)


def SaveImageToDisk(im, saveDir, fileName, saveext=('.png',)):
    for ext in saveext:
        plt.imsave(os.path.join(saveDir, fileName + ext), im)


def generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame, maxFrame, splineOrder):
    tZoom = np.float(numOutFrames) / (maxFrame - minFrame + 1)

    return scipy.ndimage.interpolation.zoom(
        imInput[:, :, minFrame:maxFrame + 1], (1, 1, tZoom), order=splineOrder)


def ncorr(imA, imB):
    imA = (imA - imA.mean()) / imA.std()
    imB = (imB - imB.mean()) / imB.std()

    return np.mean(imA * imB)


def vis_checkerboard(im1, im2):
    im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1),
                               sitk.GetImageFromArray(im2))

    return sitk.GetArrayFromImage(im_chk)


def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
    @param fig a matplotlib figure
    @return a numpy 3D array of RGBA values
    """
    # draw the renderer
    fig.canvas.draw()

    # Get the RGBA buffer from the figure
    w, h = fig.canvas.get_width_height()
    buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
    buf.shape = (w, h, 4)

    # canvas.tostring_argb give pixmap in ARGB mode.
    # Roll the ALPHA channel to have it in RGBA mode
    buf = np.roll(buf, 3, axis=2)

    return buf
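# --- Editor's hedged sketch (not part of the original utils.py) ---
# Standalone illustration of the spline-resampling idea behind
# generateGatedVideoUsingSplineInterp() above: stretch a (rows, cols, frames)
# stack to a new frame count along the time axis. Uses only numpy and scipy;
# the array sizes are arbitrary.
import numpy as np
import scipy.ndimage

video = np.random.rand(32, 32, 10)             # 10 input frames
num_out_frames = 25
t_zoom = float(num_out_frames) / video.shape[2]
gated = scipy.ndimage.zoom(video, (1, 1, t_zoom), order=3)
print(video.shape, "->", gated.shape)          # (32, 32, 10) -> (32, 32, 25)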
24.241758
79
0.592248
0
0
0
0
0
0
0
0
970
0.109927
077a977fb0ed578109f21b4a8ba0c330e1e23efb
1,441
py
Python
weasyl/emailer.py
akash143143/weasyl
be42a2313e657e97c4a48432379e37b6a3d4a4af
[ "Apache-2.0" ]
null
null
null
weasyl/emailer.py
akash143143/weasyl
be42a2313e657e97c4a48432379e37b6a3d4a4af
[ "Apache-2.0" ]
null
null
null
weasyl/emailer.py
akash143143/weasyl
be42a2313e657e97c4a48432379e37b6a3d4a4af
[ "Apache-2.0" ]
null
null
null
from __future__ import absolute_import import re from email.mime.text import MIMEText from smtplib import SMTP from weasyl import define, macro EMAIL_ADDRESS = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+\Z") def normalize_address(address): """ Converts an e-mail address to a consistent representation. Returns None if the given address is not considered valid. """ address = address.strip() if not EMAIL_ADDRESS.match(address): return None local, domain = address.split("@", 1) return "%s@%s" % (local, domain.lower()) def send(mailto, subject, content): """Send an e-mail. `mailto` must be a normalized e-mail address to send this e-mail to. The system email will be designated as the sender. """ message = MIMEText(content.strip()) message["To"] = mailto message["From"] = macro.MACRO_EMAIL_ADDRESS message["Subject"] = subject # smtp.sendmail() only converts CR and LF (produced by MIMEText and our templates) to CRLF in Python 3. In Python 2, we need this: msg_crlf = re.sub(r"\r\n|[\r\n]", "\r\n", message.as_string()) smtp = SMTP(define.config_read_setting('host', "localhost", section='smtp')) try: smtp.sendmail( from_addr=macro.MACRO_EMAIL_ADDRESS, to_addrs=[mailto], msg=msg_crlf, ) finally: smtp.quit() define.metric('increment', 'emails')
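# --- Editor's hedged sketch (not part of the original weasyl/emailer.py) ---
# Shows what normalize_address() above does on made-up inputs: strip whitespace,
# validate against the same regex, and lower-case only the domain part.
import re

EMAIL_ADDRESS = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+\Z")

def normalize_address_demo(address):
    address = address.strip()
    if not EMAIL_ADDRESS.match(address):
        return None
    local, domain = address.split("@", 1)
    return "%s@%s" % (local, domain.lower())

print(normalize_address_demo("  [email protected] "))   # [email protected]
print(normalize_address_demo("not-an-address"))            # None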
26.685185
134
0.646079
0
0
0
0
0
0
0
0
565
0.392089
077ab159d3a90c5c7c3094919ba408b1a2cadaa4
663
py
Python
tests/test_missing_process.py
ricklupton/sphinx_probs_rdf
bcae27a37162c1a4c4b329af6759a0b5b52cab7a
[ "MIT" ]
1
2021-07-31T10:06:50.000Z
2021-07-31T10:06:50.000Z
tests/test_missing_process.py
ricklupton/sphinx_probs_rdf
bcae27a37162c1a4c4b329af6759a0b5b52cab7a
[ "MIT" ]
1
2021-05-05T18:15:48.000Z
2021-05-05T18:15:48.000Z
tests/test_missing_process.py
ricklupton/sphinx_probs_rdf
bcae27a37162c1a4c4b329af6759a0b5b52cab7a
[ "MIT" ]
null
null
null
import pytest from rdflib import Graph, Namespace, Literal from rdflib.namespace import RDF, RDFS from sphinx_probs_rdf.directives import PROBS SYS = Namespace("http://example.org/system/") @pytest.mark.sphinx( 'probs_rdf', testroot='missing', confoverrides={'probs_rdf_system_prefix': str(SYS)}) def test_builder_reports_warning_for_missing_process(app, status, warning): app.builder.build_all() assert "build succeeded" not in status.getvalue() warnings = warning.getvalue().strip() assert 'WARNING: Requested child "http://example.org/system/Missing" of "http://example.org/system/ErrorMissingProcess" is not a Process' in warnings
36.833333
153
0.764706
0
0
0
0
469
0.707391
0
0
220
0.331825
077afe0d8f015a761ad56ef674705600c184e8fe
1,721
py
Python
analysis_functionarcademix.py
thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis
e56e0e853aca4367ebf99ae18e920b80f39bd133
[ "MIT" ]
null
null
null
analysis_functionarcademix.py
thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis
e56e0e853aca4367ebf99ae18e920b80f39bd133
[ "MIT" ]
null
null
null
analysis_functionarcademix.py
thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis
e56e0e853aca4367ebf99ae18e920b80f39bd133
[ "MIT" ]
null
null
null
#analysis function for three level game
import statistics

import matplotlib.pyplot as plt
import numpy as np


def stat_analysis(c1,c2,c3):
    #ask question for viewing analysis of game
    analysis=input('\nDo you want to see your game analysis? (Yes/No) ')
    if analysis=='Yes':
        levels=['Level 1','Level 2','Level 3']
        #calculating the score of levels
        l1_score= c1*10
        l2_score= c2*10
        l3_score= c3*10
        level_score=[l1_score,l2_score,l3_score]
        #plot bar chart
        plt.bar(levels,level_score,color='blue',edgecolor='black')
        plt.title('Levelwise Scores',fontsize=16)#add title
        plt.xlabel('Levels')#set x-axis label
        plt.ylabel('Scores')#set y-axis label
        plt.show()

        print('\nDescriptive Statistics of Scores:')
        #find mean value
        print('\nMean: ',statistics.mean(level_score))
        #find median value
        print('\nMedian: ',statistics.median(level_score))

        #Mode calculation
        #create numPy array of values with only one mode
        arr_val = np.array(level_score)
        #find unique values in array along with their counts
        vals, uni_val_counts = np.unique(arr_val, return_counts=True)
        #find mode
        mode_value = np.argwhere(uni_val_counts == np.max(uni_val_counts))
        print('\nMode: ',vals[mode_value].flatten().tolist())

        #find variance
        print('\nVariance: ',np.var(level_score))
        #find standard deviation
        print('\nStandard Deviation: ',statistics.stdev(level_score))
        print('\nGood Bye.See you later!!!')
    elif analysis=='No':
        print('\nGood Bye.See you later!!!')
    else:
        print('Invalid value entered')
        stat_analysis(c1,c2,c3)
30.732143
72
0.613016
0
0
0
0
0
0
0
0
699
0.406159
077b64f9f341be6f03c89ac88afd5ce1383da321
2,246
py
Python
Hello_Cone.py
TechnoTanuki/Python_BMP
d6f7e7a4b74f7d6e8761d618c156d37c97726038
[ "MIT" ]
3
2022-02-24T15:46:43.000Z
2022-03-30T13:17:03.000Z
Hello_Cone.py
TechnoTanuki/Python_BMP
d6f7e7a4b74f7d6e8761d618c156d37c97726038
[ "MIT" ]
null
null
null
Hello_Cone.py
TechnoTanuki/Python_BMP
d6f7e7a4b74f7d6e8761d618c156d37c97726038
[ "MIT" ]
null
null
null
notice = """ Cone Demo ----------------------------------- | Copyright 2022 by Joel C. Alcarez | | [[email protected]] | |-----------------------------------| | We make absolutely no warranty | | of any kind, expressed or implied | |-----------------------------------| | This graphics library outputs | | to a bitmap file. | ----------------------------------- """ from Python_BMP.BITMAPlib import( newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP ) import subprocess as proc from os import path def main(): print(notice) imgedt = 'mspaint' # replace with another editor if Unix rootdir = path.dirname(__file__) # get path of this script mx = my = 250 # x=y square bmp file = 'HelloCone.bmp' # some random file name as string bmp = newBMP(mx, my, 24) # RGB bmp cenpt = centercoord(bmp) # helper method to get center of a bitmap cf = getRGBfactors() # color info with presets d, translationvector = 400, [0, 0, 200] # be careful with these variables or object goes offscreen isSolid = True # toggle solid or outline showoutline = False # can show outline even if solid cf = getRGBfactors() # color list color = cf['brightyellow'] # color of solid outlinecolor = 0 # outline color rotation = rotvec3D(25,240,70) # rotation vector (x,y,z) in degrees vcen = (1,0,0) # x y z coords r = 40 # radius of cone zlen = 40 # height of cone deganglestep = 5 # how finely we tile flat surfaces around the cone obj3D = conevertandsurface(vcen, r, zlen, deganglestep)# A solid is defined by vertices and surfaces plot3Dsolid(bmp, obj3D, isSolid, color, showoutline, outlinecolor, rotation, translationvector, d, cenpt) saveBMP(file, bmp) # save file print('Saved to %s in %s\nAll done close %s to finish' % \ (file, rootdir, imgedt)) ret = proc.call([imgedt, file]) if __name__=="__main__": main()
38.724138
109
0.548531
0
0
0
0
0
0
0
0
1,068
0.475512
077c2964f05f1e340c5f354633e006236a1d9021
2,001
py
Python
analysis/training_curve_6D.py
AndrewKirby2/data_synthesis
656858137a348fd5dcb57bcd04bdfece2b9eac1b
[ "MIT" ]
null
null
null
analysis/training_curve_6D.py
AndrewKirby2/data_synthesis
656858137a348fd5dcb57bcd04bdfece2b9eac1b
[ "MIT" ]
null
null
null
analysis/training_curve_6D.py
AndrewKirby2/data_synthesis
656858137a348fd5dcb57bcd04bdfece2b9eac1b
[ "MIT" ]
null
null
null
""" Plot a training curve for the 6D data simulator of CT* """ import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern from sklearn.metrics import mean_squared_error from sklearn.pipeline import Pipeline import sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from GP_machine_learning.GP_machine_learning_functions import * from regular_array_sampling.functions import regular_array_monte_carlo # create array to store results for plotting rmse = np.ones((25, 2)) noise = 0.01 # create array of sampled regular array layouts #cand_points = regular_array_monte_carlo(10000) # create testing points X_test, y_test = create_testing_points_regular(noise) n = 0 n_target = 0 n_train = 0 while n_train < 200: n_target = 100 +100*n # create training points X_train, y_train, n_train = \ create_training_points_irregular(n_target, noise) # fit GP regression and calculate rmse kernel = 1.0 ** 2 * RBF(length_scale=[1., 1., 1., 1., 1., 1.]) \ + WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1]) pipe = Pipeline([('scaler', StandardScaler()), ('gp', GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))]) pipe.fit(X_train, y_train) y_predict = pipe.predict(X_test) mse = mean_squared_error(y_test, y_predict) # report rmse print(n_train, np.sqrt(mse)) rmse[n, 0] = n_train rmse[n, 1] = np.sqrt(mse) n += 1 plt.scatter(rmse[:, 0], rmse[:, 1]) plt.yscale('log') plt.ylim([1e-3, 1e-1]) plt.xlim([0, 200]) plt.title('Training curve RBF - 6D 1% noise - irregular array training - max change halved') plt.ylabel('RMSE') plt.xlabel('Training points') plt.savefig('analysis/GP_machine_learning_plots/\ gp_training_curve_RBF_irregular_training_maxchangehalved_regular_testing.png')
34.5
92
0.733633
0
0
0
0
0
0
0
0
580
0.289855