import multiprocessing as mp
from sys import version_info
from collections import Counter
import matplotlib.pyplot as plt
import mne
from PyQt5.QtCore import (pyqtSlot, QStringListModel, QModelIndex, QSettings,
QEvent, Qt, QObject)
from PyQt5.QtGui import QKeySequence, QDropEvent
from PyQt5.QtWidgets import (QApplication, QMainWindow, QFileDialog, QSplitter,
QMessageBox, QListView, QAction, QLabel, QFrame,
QStatusBar, QToolBar)
from .tfr.backend.avg_epochs_tfr import AvgEpochsTFR
from .tfr.app.avg_epochs_tfr import AvgTFRWindow
from .tfr.backend.epochs_psd import EpochsPSD
from .tfr.app.epochs_psd import EpochsPSDWindow
from .tfr.backend.raw_psd import RawPSD
from .tfr.app.raw_psd import RawPSDWindow
from .utils.error import show_error
from .dialogs.calcdialog import CalcDialog
from .dialogs.filterdialog import FilterDialog
from .dialogs.findeventsdialog import FindEventsDialog
from .dialogs.pickchannelsdialog import PickChannelsDialog
from .dialogs.referencedialog import ReferenceDialog
from .dialogs.montagedialog import MontageDialog
from .dialogs.channelpropertiesdialog import ChannelPropertiesDialog
from .dialogs.runicadialog import RunICADialog
from .dialogs.eventsdialog import EventsDialog
from .widgets.infowidget import InfoWidget
from .dialogs.timefreqdialog import TimeFreqDialog
from .dialogs.psddialog import PSDDialog
from .dialogs.epochingdialog import EpochingDialog
from .dialogs.navepochsdialog import NavEpochsDialog
from .dialogs.resampledialog import ResampleDialog
from .dialogs.evokedstatesdialog import EvokedStatesDialog
from .dialogs.evokedtopodialog import EvokedTopoDialog
from .dialogs.batchdialog import BatchDialog
from .utils.ica_utils import plot_correlation_matrix as plot_cormat
from .utils.ica_utils import (plot_ica_components_with_timeseries,
plot_overlay)
from .model import (SUPPORTED_FORMATS, SUPPORTED_EXPORT_FORMATS,
LabelsNotFoundError, InvalidAnnotationsError)
__version__ = "0.1.0"
MAX_RECENT = 6 # maximum number of recent files
def read_settings():
"""Read application settings.
Returns
-------
settings : dict
The restored settings values are returned in a dictionary for further
processing.
"""
settings = QSettings()
recent = settings.value("recent")
if not recent:
recent = [] # default is empty list
statusbar = settings.value("statusbar")
if statusbar is None: # default is True
statusbar = True
geometry = settings.value("geometry")
state = settings.value("state")
return {"recent": recent, "statusbar": statusbar, "geometry": geometry,
"state": state}
def write_settings(**kwargs):
"""Write application settings."""
settings = QSettings()
for key, value in kwargs.items():
settings.setValue(key, value)
class MainWindow(QMainWindow):
"""MNELAB main window."""
def __init__(self, model):
"""Initialize MNELAB main window.
Parameters
----------
model : mnelab.model.Model instance
The main window needs to connect to a model containing all data
sets. This decouples the GUI from the data (model/view).
"""
super().__init__()
self.model = model # data model
self.setWindowTitle("MNELAB")
# restore settings
settings = read_settings()
self.recent = settings["recent"] # list of recent files
if settings["geometry"]:
self.restoreGeometry(settings["geometry"])
else:
self.setGeometry(300, 300, 1000, 750) # default window size
self.move(QApplication.desktop().screen().rect().center() -
self.rect().center()) # center window
if settings["state"]:
self.restoreState(settings["state"])
self.actions = {} # contains all actions
# initialize menus
file_menu = self.menuBar().addMenu("&File")
self.actions["open_file"] = file_menu.addAction(
"&Open...",
lambda: self.open_file(model.load, "Open raw", SUPPORTED_FORMATS),
QKeySequence.Open)
self.recent_menu = file_menu.addMenu("Open recent")
self.recent_menu.aboutToShow.connect(self._update_recent_menu)
self.recent_menu.triggered.connect(self._load_recent)
if not self.recent:
self.recent_menu.setEnabled(False)
self.actions["close_file"] = file_menu.addAction(
"&Close",
self.model.remove_data,
QKeySequence.Close)
self.actions["close_all"] = file_menu.addAction(
"Close all",
self.close_all)
file_menu.addSeparator()
self.actions["import_bads"] = file_menu.addAction(
"Import bad channels...",
lambda: self.import_file(model.import_bads, "Import bad channels",
"*.csv *.txt"))
self.actions["import_events"] = file_menu.addAction(
"Import events...",
lambda: self.import_file(model.import_events, "Import events",
"*.csv *.mrk"))
self.actions["import_annotations"] = file_menu.addAction(
"Import annotations...",
lambda: self.import_file(model.import_annotations,
"Import annotations", "*.csv *.mrk"))
self.actions["import_ica"] = file_menu.addAction(
"Import &ICA...",
lambda: self.open_file(model.import_ica, "Import ICA",
"*.fif *.fif.gz"))
file_menu.addSeparator()
self.actions["export_data"] = file_menu.addAction(
"Export data...",
lambda: self.export_file(model.export_data, "Export",
SUPPORTED_EXPORT_FORMATS))
self.actions["export_bads"] = file_menu.addAction(
"Export &bad channels...",
lambda: self.export_file(model.export_bads, "Export bad channels",
"*.csv"))
self.actions["export_events"] = file_menu.addAction(
"Export &events...",
lambda: self.export_file(model.export_events, "Export events",
"*.csv"))
self.actions["export_annotations"] = file_menu.addAction(
"Export &annotations...",
lambda: self.export_file(model.export_annotations,
"Export annotations", "*.csv"))
self.actions["export_ica"] = file_menu.addAction(
"Export ICA...",
lambda: self.export_file(model.export_ica,
"Export ICA", "*.fif *.fif.gz"))
self.actions["export_psd"] = file_menu.addAction(
"Export Power Spectrum Density...",
lambda: self.export_file(model.export_psd,
"Export Power Spectrum Density", "*.hdf"))
self.actions["export_tfr"] = file_menu.addAction(
"Export Time-Frequency...",
lambda: self.export_file(model.export_tfr,
"Export Time-Frequency", "*.hdf"))
file_menu.addSeparator()
self.actions["quit"] = file_menu.addAction(
"&Quit", self.close, QKeySequence.Quit)
edit_menu = self.menuBar().addMenu("&Edit")
self.actions["pick_chans"] = edit_menu.addAction(
"Pick &channels...",
self.pick_channels)
self.actions["chan_props"] = edit_menu.addAction(
"Edit channel &properties...",
self.channel_properties)
self.actions["set_montage"] = edit_menu.addAction(
"Edit &montage...", self.set_montage)
self.actions["events"] = edit_menu.addAction(
"Edit &events...", self.edit_events)
plot_menu = self.menuBar().addMenu("&Plot")
self.actions["plot_raw"] = plot_menu.addAction(
"Plot &Data...", self.plot_raw)
self.actions["plot_image"] = plot_menu.addAction(
"Plot data as &Image...", self.plot_image)
self.actions["plot_states"] = plot_menu.addAction(
"Plot &States...", self.plot_states)
self.actions["plot_topomaps"] = plot_menu.addAction(
"Plot &Topomaps...", self.plot_topomaps)
self.actions["plot_montage"] = plot_menu.addAction(
"Plot Current &montage", self.plot_montage)
tools_menu = self.menuBar().addMenu("&Preprocessing")
self.actions["filter"] = tools_menu.addAction(
"&Filter data...", self.filter_data)
self.actions["resample"] = tools_menu.addAction(
"&Downsample...", self.resample)
self.actions["interpolate_bads"] = tools_menu.addAction(
"Interpolate bad channels...", self.interpolate_bads)
self.actions["set_ref"] = tools_menu.addAction(
"&Set reference...", self.set_reference)
ica_menu = self.menuBar().addMenu("&ICA")
self.actions["run_ica"] = ica_menu.addAction(
"Run &ICA...", self.run_ica)
ica_menu.addSeparator()
self.actions["plot_ica_components"] = ica_menu.addAction(
"Plot ICA &components...",
self.plot_ica_components_with_timeseries)
self.actions["plot_ica_sources"] = ica_menu.addAction(
"Plot &ICA sources...", self.plot_ica_sources)
self.actions["plot_correlation_matrix"] = ica_menu.addAction(
"Plot correlation &matrix...", self.plot_correlation_matrix)
self.actions["plot_overlay"] = ica_menu.addAction(
"Plot overlay...", self.plot_ica_overlay)
ica_menu.addSeparator()
self.actions["apply_ica"] = ica_menu.addAction(
"Apply &ICA...", self.apply_ica)
freq_menu = self.menuBar().addMenu("&Frequencies")
self.actions["plot_psd"] = freq_menu.addAction(
"Compute &Power spectral density...", self.plot_psd)
self.actions["plot_tfr"] = freq_menu.addAction(
"Compute &Time-Frequency...", self.plot_tfr)
freq_menu.addSeparator()
self.actions["open_tfr"] = freq_menu.addAction(
"&Open Time-Frequency file...", self.open_tfr)
self.actions["open_psd"] = freq_menu.addAction(
"&Open Power Spectrum Density file...",
self.open_psd)
events_menu = self.menuBar().addMenu("&Events")
self.actions["plot_events"] = events_menu.addAction(
"&Plot events...", self.plot_events)
events_menu.addSeparator()
self.actions["find_events"] = events_menu.addAction(
"Find &events...", self.find_events)
self.actions["add_events"] = events_menu.addAction(
"Setup events as annotation...", self.add_events)
epochs_menu = self.menuBar().addMenu("Epochs")
self.actions["epoch_data"] = epochs_menu.addAction(
"Cut data into epochs...", self.epoch_data)
self.actions["evoke_data"] = epochs_menu.addAction(
"Average epochs...", self.evoke_data)
batch_menu = self.menuBar().addMenu("&Batch")
self.actions["open_batch"] = batch_menu.addAction(
"Open &Batch processing window", self.open_batch)
view_menu = self.menuBar().addMenu("&View")
self.actions["statusbar"] = view_menu.addAction(
"Statusbar", self._toggle_statusbar)
self.actions["statusbar"].setCheckable(True)
help_menu = self.menuBar().addMenu("&Help")
self.actions["about"] = help_menu.addAction("&About", self.show_about)
self.actions["about_qt"] = help_menu.addAction(
"About &Qt", self.show_about_qt)
# actions that are always enabled
self.always_enabled = ["open_file", "about", "about_qt", "quit",
"statusbar", "open_batch", "open_tfr",
"open_psd"]
# set up data model for sidebar (list of open files)
self.names = QStringListModel()
self.names.dataChanged.connect(self._update_names)
splitter = QSplitter()
self.sidebar = QListView()
self.sidebar.setFrameStyle(QFrame.NoFrame)
self.sidebar.setFocusPolicy(Qt.NoFocus)
self.sidebar.setModel(self.names)
self.sidebar.clicked.connect(self._update_data)
splitter.addWidget(self.sidebar)
self.infowidget = InfoWidget()
splitter.addWidget(self.infowidget)
width = splitter.size().width()
        splitter.setSizes([int(width * 0.3), int(width * 0.7)])  # needs ints
self.setCentralWidget(splitter)
self.status_label = QLabel()
self.statusBar().addPermanentWidget(self.status_label)
if settings["statusbar"]:
self.statusBar().show()
self.actions["statusbar"].setChecked(True)
else:
self.statusBar().hide()
self.actions["statusbar"].setChecked(False)
self.setAcceptDrops(True)
self.data_changed()
def data_changed(self):
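        """Synchronize the GUI (sidebar, info widget, actions) with the model."""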
# update sidebar
self.names.setStringList(self.model.names)
self.sidebar.setCurrentIndex(self.names.index(self.model.index))
# update info widget
if self.model.data:
self.infowidget.set_values(self.model.get_info())
else:
self.infowidget.clear()
# update status bar
if self.model.data:
mb = self.model.nbytes / 1024 ** 2
self.status_label.setText("Total Memory: {:.2f} MB".format(mb))
else:
self.status_label.clear()
# toggle actions
if len(self.model) == 0: # disable if no data sets are currently open
enabled = False
else:
enabled = True
for name, action in self.actions.items(): # toggle
if name not in self.always_enabled:
action.setEnabled(enabled)
if self.model.data: # toggle if specific conditions are met
if self.model.current["raw"]:
raw = True
evoked = False
bads = bool(self.model.current["raw"].info["bads"])
annot = self.model.current["raw"].annotations is not None
events = self.model.current["events"] is not None
self.actions["import_annotations"].setEnabled(True)
self.actions["import_events"].setEnabled(True)
self.actions["evoke_data"].setEnabled(False)
self.actions["plot_image"].setEnabled(False)
self.actions["plot_tfr"].setEnabled(False)
else:
raw = False
annot = False
events = False
self.actions["find_events"].setEnabled(False)
self.actions["import_annotations"].setEnabled(False)
self.actions["import_events"].setEnabled(False)
self.actions["plot_image"].setEnabled(True)
if self.model.current["epochs"]:
evoked = False
bads = bool(self.model.current["epochs"].info["bads"])
self.actions["evoke_data"].setEnabled(True)
else:
evoked = True
bads = bool(self.model.current["evoked"].info["bads"])
self.actions["evoke_data"].setEnabled(False)
self.actions["export_bads"].setEnabled(enabled and bads)
self.actions["export_events"].setEnabled(enabled and events)
self.actions["export_annotations"].setEnabled(enabled and annot)
montage = bool(self.model.current["montage"])
self.actions["run_ica"].setEnabled(enabled and montage
and not evoked)
self.actions["plot_montage"].setEnabled(enabled and montage)
self.actions["interpolate_bads"].setEnabled(enabled and montage)
ica = bool(self.model.current["ica"])
self.actions["export_ica"].setEnabled(enabled and ica)
self.actions["plot_events"].setEnabled(raw and events)
self.actions["plot_ica_components"].setEnabled(enabled and ica
and montage)
self.actions["plot_ica_sources"].setEnabled(enabled and ica
and montage)
self.actions["plot_correlation_matrix"].setEnabled(
enabled and ica and montage)
self.actions["plot_overlay"].setEnabled(
enabled and ica and montage)
self.actions["run_ica"].setEnabled(montage and not evoked)
self.actions["apply_ica"].setEnabled(enabled and ica
and montage)
self.actions["events"].setEnabled(enabled and events)
self.actions["epoch_data"].setEnabled(enabled and events)
self.actions["add_events"].setEnabled(enabled and events)
self.actions["plot_states"].setEnabled(montage and evoked)
self.actions["plot_topomaps"].setEnabled(montage and evoked)
self.actions["export_tfr"].setEnabled(
self.model.current["tfr"] is not None)
self.actions["export_psd"].setEnabled(
self.model.current["psd"] is not None)
# add to recent files
if len(self.model) > 0:
self._add_recent(self.model.current["fname"])
def open_file(self, f, text, ffilter):
"""Open file."""
fname = QFileDialog.getOpenFileName(self, text, filter=ffilter)[0]
if fname:
f(fname)
def export_file(self, f, text, ffilter):
"""Export to file."""
# BUG on windows fname = QFileDialog.getSaveFileName(self,
# text, filter=ffilter)[0]
fname = QFileDialog.getSaveFileName(self, text, filter=ffilter)[0]
if fname:
f(fname)
def import_file(self, f, text, ffilter):
"""Import file."""
fname = QFileDialog.getOpenFileName(self, text, filter=ffilter)[0]
if fname:
try:
f(fname)
except LabelsNotFoundError as e:
QMessageBox.critical(self, "Channel labels not found", str(e))
except InvalidAnnotationsError as e:
QMessageBox.critical(self, "Invalid annotations", str(e))
def close_all(self):
"""Close all currently open data sets."""
msg = QMessageBox.question(self, "Close all data sets",
"Close all data sets?")
if msg == QMessageBox.Yes:
while len(self.model) > 0:
self.model.remove_data()
def pick_channels(self):
"""Pick channels in current data set."""
if self.model.current["raw"]:
channels = self.model.current["raw"].info["ch_names"]
elif self.model.current["epochs"]:
channels = self.model.current["epochs"].info["ch_names"]
else:
channels = self.model.current["evoked"].info["ch_names"]
dialog = PickChannelsDialog(self, channels, selected=channels)
if dialog.exec_():
picks = [item.data(0) for item in dialog.channels.selectedItems()]
drops = set(channels) - set(picks)
if drops:
self.auto_duplicate()
self.model.drop_channels(drops)
self.model.history.append(f"data.drop({drops})")
def channel_properties(self):
"""Show channel properties dialog."""
if self.model.current["raw"]:
info = self.model.current["raw"].info
elif self.model.current["epochs"]:
info = self.model.current["epochs"].info
else:
info = self.model.current["evoked"].info
dialog = ChannelPropertiesDialog(self, info)
if dialog.exec_():
dialog.model.sort(0)
bads = []
renamed = {}
types = {}
for i in range(dialog.model.rowCount()):
new_label = dialog.model.item(i, 1).data(Qt.DisplayRole)
old_label = info["ch_names"][i]
if new_label != old_label:
renamed[old_label] = new_label
new_type = dialog.model.item(i, 2).data(Qt.DisplayRole).lower()
types[new_label] = new_type
if dialog.model.item(i, 3).checkState() == Qt.Checked:
bads.append(info["ch_names"][i])
self.model.set_channel_properties(bads, renamed, types)
def set_montage(self):
"""Set montage."""
montages = mne.channels.get_builtin_montages()
# TODO: currently it is not possible to remove an existing montage
dialog = MontageDialog(self, montages,
selected=self.model.current["montage"])
if dialog.exec_():
if dialog.montage_path == '':
name = dialog.montages.selectedItems()[0].data(0)
montage = mne.channels.read_montage(name)
self.model.history.append("montage = mne.channels."
+ ("read_montage({})").format(name))
else:
from .utils.montage import xyz_to_montage
montage = xyz_to_montage(dialog.montage_path)
self.model.history.append("montage = xyz_to_montage({})"
.format(dialog.montage_path))
if self.model.current["raw"]:
ch_names = self.model.current["raw"].info["ch_names"]
elif self.model.current["epochs"]:
ch_names = self.model.current["epochs"].info["ch_names"]
elif self.model.current["evoked"]:
ch_names = self.model.current["evoked"].info["ch_names"]
# check if at least one channel name matches a name in the montage
if set(ch_names) & set(montage.ch_names):
self.model.set_montage(montage)
else:
QMessageBox.critical(self, "No matching channel names",
"Channel names defined in the montage do "
"not match any channel name in the data.")
def edit_events(self):
pos = self.model.current["events"][:, 0].tolist()
desc = self.model.current["events"][:, 2].tolist()
dialog = EventsDialog(self, pos, desc)
if dialog.exec_():
pass
def plot_raw(self):
"""Plot data."""
events = self.model.current["events"]
if self.model.current["raw"]:
nchan = self.model.current["raw"].info["nchan"]
fig = self.model.current["raw"].plot(
events=events, title=self.model.current["name"],
scalings="auto", show=False)
self.model.history.append("raw.plot(n_channels={})".format(nchan))
elif self.model.current["epochs"]:
nchan = self.model.current["epochs"].info["nchan"]
fig = self.model.current["epochs"].plot(
title=self.model.current["name"],
scalings="auto", show=False)
self.model.history.append(
"epochs.plot(n_channels={})".format(nchan))
elif self.model.current["evoked"]:
nchan = self.model.current["evoked"].info["nchan"]
fig = self.model.current["evoked"].plot(show=False, gfp=True,
spatial_colors=True,
selectable=False)
self.model.history.append(
"epochs.plot(n_channels={})".format(nchan))
win = fig.canvas.manager.window
win.setWindowTitle(self.model.current["name"])
win.findChild(QStatusBar).hide()
win.installEventFilter(self) # detect if the figure is closed
# prevent closing the window with the escape key
try:
key_events = fig.canvas.callbacks.callbacks["key_press_event"][8]
except KeyError:
pass
        else:  # this requires MNE >= 0.15
            # disabled: setting close_key raises AttributeError
            # ("'_StrongRef' object has no attribute 'func'")
            # key_events.func.keywords["params"]["close_key"] = None
            pass
fig.show()
def plot_image(self):
if self.model.current["epochs"]:
try:
epochs = self.model.current["epochs"]
dialog = NavEpochsDialog(None, epochs)
dialog.setWindowModality(Qt.WindowModal)
dialog.setWindowTitle(self.model.current["name"])
dialog.exec()
except Exception as e:
print(e)
elif self.model.current["evoked"]:
fig = self.model.current["evoked"].plot_image(show=False)
self.model.history.append("evoked.plot_image()")
win = fig.canvas.manager.window
win.findChild(QStatusBar).hide()
win.setWindowTitle(self.model.current["name"])
win.installEventFilter(self) # detect if the figure is closed
fig.show()
def plot_states(self):
if self.model.current["evoked"]:
dialog = EvokedStatesDialog(None, self.model.current["evoked"])
dialog.setWindowModality(Qt.NonModal)
dialog.setWindowTitle(self.model.current["name"])
dialog.show()
def plot_topomaps(self):
if self.model.current["evoked"]:
dialog = EvokedTopoDialog(None, self.model.current["evoked"])
dialog.setWindowModality(Qt.NonModal)
dialog.setWindowTitle(self.model.current["name"])
dialog.show()
def plot_events(self):
events = self.model.current["events"]
fig = mne.viz.plot_events(events, show=False)
win = fig.canvas.manager.window
win.setWindowModality(Qt.WindowModal)
win.setWindowTitle(self.model.current["name"])
win.findChild(QStatusBar).hide()
win.findChild(QToolBar).hide()
fig.show()
def plot_psd(self):
"""Plot power spectral density (PSD)."""
if self.model.current["raw"]:
raw = self.model.current["raw"]
dialog = PSDDialog(None, raw)
dialog.setWindowModality(Qt.WindowModal)
dialog.setWindowTitle('PSD of ' + self.model.current["name"])
dialog.exec_()
elif self.model.current["epochs"]:
epochs = self.model.current["epochs"]
dialog = PSDDialog(None, epochs)
dialog.setWindowModality(Qt.WindowModal)
dialog.setWindowTitle('PSD of ' + self.model.current["name"])
dialog.exec_()
elif self.model.current["evoked"]:
evoked = self.model.current["evoked"]
dialog = PSDDialog(None, evoked)
dialog.setWindowModality(Qt.WindowModal)
dialog.setWindowTitle('PSD of ' + self.model.current["name"])
dialog.exec_()
        try:
            psd = dialog.psd
        except AttributeError:  # the dialog did not produce a PSD
            psd = None
if psd is not None:
self.model.current["psd"] = psd
self.data_changed()
def open_psd(self):
fname = QFileDialog.getOpenFileName(self, "Open TFR",
filter="*.h5 *.hdf")[0]
try:
psd = EpochsPSD().init_from_hdf(fname)
win = EpochsPSDWindow(psd, parent=None)
win.setWindowTitle(fname)
win.exec()
except Exception as e:
print(e)
try:
psd = RawPSD().init_from_hdf(fname)
win = RawPSDWindow(psd, parent=None)
win.setWindowModality(Qt.WindowModal)
win.setWindowTitle(fname)
win.exec()
except Exception:
pass
def plot_tfr(self):
"""Plot Time-Frequency."""
if self.model.current["epochs"]:
data = self.model.current["epochs"]
elif self.model.current["evoked"]:
data = self.model.current["evoked"]
dialog = TimeFreqDialog(None, data)
dialog.setWindowModality(Qt.WindowModal)
dialog.setWindowTitle('TFR of ' + self.model.current["name"])
dialog.exec_()
        try:
            tfr = dialog.avgTFR
        except AttributeError:  # the dialog did not produce a TFR
            tfr = None
if tfr is not None:
self.model.current["tfr"] = tfr
self.data_changed()
def open_tfr(self):
try:
fname = QFileDialog.getOpenFileName(self, "Open TFR",
filter="*.h5 *.hdf")[0]
avgTFR = AvgEpochsTFR().init_from_hdf(fname)
win = AvgTFRWindow(avgTFR, parent=None)
win.setWindowModality(Qt.WindowModal)
win.setWindowTitle(fname)
win.exec()
except Exception as e:
print(e)
def plot_montage(self):
"""Plot current montage."""
if self.model.current["raw"]:
data = self.model.current["raw"]
elif self.model.current["epochs"]:
data = self.model.current["epochs"]
elif self.model.current["evoked"]:
data = self.model.current["evoked"]
chans = Counter([mne.io.pick.channel_type(data.info, i)
for i in range(data.info["nchan"])])
        fig = plt.figure()
        types = []
        for ch_type in chans.keys():
            if ch_type in ['eeg', 'mag', 'grad']:
                types.append(ch_type)
        for i, ch_type in enumerate(types):
            ax = fig.add_subplot(1, len(types), i + 1)
            ax.set_title('{} ({} channels)'.format(ch_type, chans[ch_type]))
            data.plot_sensors(show_names=True, show=False,
                              ch_type=ch_type, axes=ax, title='')
win = fig.canvas.manager.window
win.resize(len(types) * 600, 600)
win.setWindowTitle(self.model.current["name"])
win.findChild(QStatusBar).hide()
win.findChild(QToolBar).hide()
fig.show()
def plot_ica_components_with_timeseries(self):
if self.model.current["raw"]:
try:
fig = plot_ica_components_with_timeseries(
self.model.current["ica"],
inst=self.model.current["raw"])
except Exception as e:
QMessageBox.critical(self, "Unexpected error ", str(e))
elif self.model.current["epochs"]:
try:
fig = plot_ica_components_with_timeseries(
self.model.current["ica"],
inst=self.model.current["epochs"])
            except Exception:  # fall back to MNE's built-in component plot
                try:
                    fig = self.model.current["ica"].plot_components(
                        inst=self.model.current["epochs"])
except Exception as e:
QMessageBox.critical(self, "Unexpected error ", str(e))
def plot_ica_sources(self):
if self.model.current["raw"]:
fig = (self.model.current["ica"]
.plot_sources(inst=self.model.current["raw"]))
elif self.model.current["epochs"]:
fig = (self.model.current["ica"]
.plot_sources(inst=self.model.current["epochs"]))
win = fig.canvas.manager.window
win.setWindowTitle("ICA Sources")
win.findChild(QStatusBar).hide()
win.installEventFilter(self) # detect if the figure is closed
def plot_correlation_matrix(self):
if self.model.current["raw"]:
try:
plot_cormat(self.model.current["raw"],
self.model.current["ica"])
except ValueError as e:
QMessageBox.critical(
self, "Can't compute correlation with template ", str(e))
except Exception as e:
QMessageBox.critical(
self, "Unexpected error ", str(e))
elif self.model.current["epochs"]:
try:
plot_cormat(self.model.current["epochs"],
self.model.current["ica"])
except ValueError as e:
QMessageBox.critical(
self, "Can't compute correlation with template ", str(e))
except Exception as e:
QMessageBox.critical(
self, "Unexpected error ", str(e))
def plot_ica_overlay(self):
if self.model.current["raw"]:
plot_overlay(self.model.current["ica"],
self.model.current["raw"])
elif self.model.current["epochs"]:
plot_overlay(self.model.current["ica"],
self.model.current["epochs"])
def run_ica(self):
"""Run ICA calculation."""
        try:
            import picard  # noqa: F401 -- probe for the Picard backend
        except ImportError:
            have_picard = False
        else:
            have_picard = True
try:
import sklearn # required for FastICA
except ImportError:
have_sklearn = False
else:
have_sklearn = True
if self.model.current["raw"]:
data = self.model.current["raw"]
inst_type = "raw"
elif self.model.current["epochs"]:
data = self.model.current["epochs"]
inst_type = "epochs"
nchan = len(mne.pick_types(data.info,
meg=True, eeg=True, exclude='bads'))
dialog = RunICADialog(self, nchan, have_picard, have_sklearn)
if dialog.exec_():
calc = CalcDialog(self, "Calculating ICA", "Calculating ICA.")
method = dialog.method.currentText()
exclude_bad_segments = dialog.exclude_bad_segments.isChecked()
decim = int(dialog.decim.text())
n_components = int(dialog.n_components.text())
max_pca_components = int(dialog.max_pca_components.text())
n_pca_components = int(dialog.pca_components.text())
random_state = int(dialog.random_seed.text())
max_iter = int(dialog.max_iter.text())
ica = mne.preprocessing.ICA(
method=dialog.methods[method],
n_components=n_components,
max_pca_components=max_pca_components,
n_pca_components=n_pca_components,
random_state=random_state,
max_iter=max_iter)
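            # fit ICA in a worker process so the GUI stays responsive; the
            # modal dialog is accepted from the callback when fitting is done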
pool = mp.Pool(1)
kwds = {"reject_by_annotation": exclude_bad_segments,
"decim": decim}
res = pool.apply_async(func=ica.fit,
args=(data,),
kwds=kwds, callback=lambda x: calc.accept())
if not calc.exec_():
pool.terminate()
else:
self.auto_duplicate()
self.model.current["ica"] = res.get(timeout=1)
                self.model.history.append(
                    "ica = ICA(method={}, n_components={}, "
                    "max_pca_components={}, n_pca_components={}, "
                    "random_state={}, max_iter={})".format(
                        dialog.methods[method], n_components,
                        max_pca_components, n_pca_components,
                        random_state, max_iter))
                self.model.history.append(
                    "ica.fit(inst={}, decim={}, "
                    "reject_by_annotation={})".format(
                        inst_type, decim, exclude_bad_segments))
self.data_changed()
def apply_ica(self):
"""Set reference."""
self.auto_duplicate()
self.model.apply_ica()
def resample(self):
"""Resample data."""
dialog = ResampleDialog(self)
if dialog.exec_():
sfreq = dialog.sfreq
if sfreq is not None:
self.auto_duplicate()
if self.model.current['raw']:
self.model.current['raw'].resample(sfreq)
elif self.model.current['epochs']:
self.model.current['epochs'].resample(sfreq)
elif self.model.current['evoked']:
self.model.current['evoked'].resample(sfreq)
self.model.current["name"] += " (resampled)"
self.data_changed()
def filter_data(self):
"""Filter data."""
if self.model.current['raw']:
israw = True
else:
israw = False
dialog = FilterDialog(self, israw)
if dialog.exec_():
if dialog.low or dialog.high or dialog.notch_freqs:
self.auto_duplicate()
self.model.filter(dialog.low, dialog.high, dialog.notch_freqs)
def find_events(self):
info = self.model.current["raw"].info
# use first stim channel as default in dialog
default_stim = 0
for i in range(info["nchan"]):
if mne.io.pick.channel_type(info, i) == "stim":
default_stim = i
break
dialog = FindEventsDialog(self, info["ch_names"], default_stim)
if dialog.exec_():
stim_channel = dialog.stimchan.currentText()
consecutive = dialog.consecutive.isChecked()
initial_event = dialog.initial_event.isChecked()
uint_cast = dialog.uint_cast.isChecked()
min_dur = dialog.minduredit.value()
shortest_event = dialog.shortesteventedit.value()
self.model.find_events(stim_channel=stim_channel,
consecutive=consecutive,
initial_event=initial_event,
uint_cast=uint_cast,
min_duration=min_dur,
shortest_event=shortest_event)
def interpolate_bads(self):
"""Interpolate bad channels."""
self.auto_duplicate()
self.model.interpolate_bads()
def add_events(self):
"""Setup the events in the data as a STIM channel."""
self.auto_duplicate()
self.model.add_events()
def epoch_data(self):
"""Cut raw data into epochs."""
dialog = EpochingDialog(self, self.model.current["events"],
self.model.current["raw"])
if dialog.exec_():
selected = [int(item.text()) for item
in dialog.labels.selectedItems()]
try:
tmin = float(dialog.tmin.text())
tmax = float(dialog.tmax.text())
except ValueError as e:
show_error('Unable to compute epochs...', info=str(e))
else:
if dialog.baseline.isChecked():
try:
                    a = float(dialog.a.text())
except ValueError:
a = None
try:
                    b = float(dialog.b.text())
except ValueError:
b = None
baseline = (a, b)
else:
baseline = None
self.auto_duplicate()
self.model.epoch_data(selected, tmin, tmax, baseline)
def evoke_data(self):
"""Compute the mean of epochs."""
self.auto_duplicate()
self.model.evoke_data()
def set_reference(self):
"""Set reference."""
dialog = ReferenceDialog(self)
if dialog.exec_():
self.auto_duplicate()
if dialog.average.isChecked():
self.model.set_reference("average")
else:
ref = [c.strip() for c in dialog.channellist.text().split(",")]
self.model.set_reference(ref)
def open_batch(self):
"""Open batch processing dialog."""
dialog = BatchDialog(self)
dialog.exec_()
def show_about(self):
"""Show About dialog."""
msg_box = QMessageBox(self)
text = ("<h3>MNELAB</h3>"
"<nobr><p>MNELAB is a graphical user interface for MNE.</p>"
"</nobr>")
msg_box.setText(text)
mnelab_url = "github.com/cbrnr/mnelab"
mne_url = "github.com/mne-tools/mne-python"
text = (f'<nobr><p>This program uses MNE version {mne.__version__} '
f'(Python {".".join(str(k) for k in version_info[:3])}).</p>'
f'</nobr>'
f'<nobr><p>MNELAB repository: '
f'<a href=https://{mnelab_url}>{mnelab_url}</a></p></nobr>'
f'<nobr><p>MNE repository: '
f'<a href=https://{mne_url}>{mne_url}</a></p></nobr>'
f'<p>Licensed under the BSD 3-clause license.</p>'
f'<p>Copyright 2017-2019 by <NAME>.</p>')
msg_box.setInformativeText(text)
msg_box.exec_()
def show_about_qt(self):
"""Show About Qt dialog."""
QMessageBox.aboutQt(self, "About Qt")
def auto_duplicate(self):
# if current data is stored in a file create a new data set
if self.model.current["fname"]:
self.model.duplicate_data()
# otherwise ask the user
else:
msg = QMessageBox.question(self, "Overwrite existing data set",
"Overwrite existing data set?")
if msg == QMessageBox.No: # create new data set
self.model.duplicate_data()
def _add_recent(self, fname):
"""Add a file to recent file list.
Parameters
----------
fname : str
File name.
"""
if fname in self.recent: # avoid duplicates
self.recent.remove(fname)
self.recent.insert(0, fname)
while len(self.recent) > MAX_RECENT: # prune list
self.recent.pop()
write_settings(recent=self.recent)
if not self.recent_menu.isEnabled():
self.recent_menu.setEnabled(True)
def _remove_recent(self, fname):
"""Remove file from recent file list.
Parameters
----------
fname : str
File name.
"""
if fname in self.recent:
self.recent.remove(fname)
write_settings(recent=self.recent)
if not self.recent:
self.recent_menu.setEnabled(False)
@pyqtSlot(QModelIndex)
def _update_data(self, selected):
"""Update index and information based on the state of the sidebar.
Parameters
----------
selected : QModelIndex
Index of the selected row.
"""
if selected.row() != self.model.index:
self.model.index = selected.row()
self.data_changed()
@pyqtSlot(QModelIndex, QModelIndex)
def _update_names(self, start, stop):
"""Update names in DataSets after changes in sidebar."""
for index in range(start.row(), stop.row() + 1):
self.model.data[index]["name"] = self.names.stringList()[index]
@pyqtSlot()
def _update_recent_menu(self):
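        """Rebuild the recent files menu from the current list."""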
self.recent_menu.clear()
for recent in self.recent:
self.recent_menu.addAction(recent)
@pyqtSlot(QAction)
def _load_recent(self, action):
self.model.load(action.text())
@pyqtSlot()
def _toggle_statusbar(self):
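        """Toggle statusbar visibility and persist the setting."""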
if self.statusBar().isHidden():
self.statusBar().show()
else:
self.statusBar().hide()
write_settings(statusbar=not self.statusBar().isHidden())
@pyqtSlot(QDropEvent)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.acceptProposedAction()
@pyqtSlot(QDropEvent)
def dropEvent(self, event):
mime = event.mimeData()
if mime.hasUrls():
urls = mime.urls()
for url in urls:
self.model.load(url.toLocalFile())
@pyqtSlot(QEvent)
def closeEvent(self, event):
"""Close application.
Parameters
----------
event : QEvent
Close event.
"""
write_settings(geometry=self.saveGeometry(), state=self.saveState())
if self.model.history:
print("\nCommand History")
print("===============")
print("\n".join(self.model.history))
QApplication.quit()
def eventFilter(self, source, event):
# currently the only source is the raw plot window
if event.type() == QEvent.Close:
self.data_changed()
return QObject.eventFilter(self, source, event)
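# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module; assumptions noted inline):
# a minimal way to launch the window defined above. `Model` comes from
# mnelab.model, per the MainWindow docstring; the `model.view` back-reference
# is an assumed wiring detail and may differ in the real package.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import sys
    from .model import Model  # assumed class name (see MainWindow docstring)
    app = QApplication(sys.argv)
    app.setApplicationName("MNELAB")
    app.setOrganizationName("mnelab")  # needed so QSettings() can persist
    model = Model()
    main_window = MainWindow(model)
    model.view = main_window  # assumed back-reference for GUI update calls
    main_window.show()
    sys.exit(app.exec_())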
from django.contrib import admin
from .models import Answer, AnswerItem, Poll, Question, QuestionItem, Result
class AnswerItemAdmin(admin.ModelAdmin):
model = AnswerItem
filter_horizontal = ("answers",)
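# inline editor for the M2M "through" table behind Result.answers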
class AnswerItemThroughAdmin(admin.TabularInline):
model = Result.answers.through
extra = 0
class AnswerAdmin(admin.ModelAdmin):
pass
class QuestionAdmin(admin.ModelAdmin):
model = QuestionItem
class QuestionItemAdmin(admin.StackedInline):
model = QuestionItem
extra = 0
class PollAdmin(admin.ModelAdmin):
inlines = (QuestionItemAdmin,)
class ResultAdmin(admin.ModelAdmin):
inlines = [AnswerItemThroughAdmin]
list_display = ["id", "poll", "user", "session_id"]
fields = ("poll", "user", "session_id")
admin.site.register(AnswerItem, AnswerItemAdmin)
admin.site.register(Result, ResultAdmin)
admin.site.register(Question, QuestionAdmin)
admin.site.register(Answer, AnswerAdmin)
admin.site.register(Poll, PollAdmin)
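# ---------------------------------------------------------------------------
# Sketch of the model fields these admin classes imply (an assumption, not
# code from the project's models.py): ResultAdmin's list_display/fields need
# poll, user and session_id on Result, and the through-model inline requires
# Result.answers to be a ManyToManyField.
# ---------------------------------------------------------------------------
# class Result(models.Model):
#     poll = models.ForeignKey(Poll, on_delete=models.CASCADE)
#     user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
#                              blank=True, on_delete=models.CASCADE)
#     session_id = models.CharField(max_length=40, blank=True)
#     answers = models.ManyToManyField(AnswerItem)  # edited via the inline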
"""
Created on Mar 10, 2014
@author: jtaghiyar
"""
import os
import subprocess as sub
from kronos_version import kronos_version
from utils import Pipeline
from helpers import make_dir, Configurer
from workflow_manager import WorkFlow, WorkFlowManager
from plumber import Plumber
from string import Template
from tempfile import NamedTemporaryFile as ntf
import logging
logging.basicConfig(format='%(asctime)s %(message)s',
level=logging.DEBUG)
class Factory(object):
"""
create, run, manage pipelines.
"""
def __init__(self, args):
"""initialize."""
self.args = args
self.pipelines = []
self.c = Configurer()
make_dir(self.args.working_dir)
def make_config(self):
"""make a yaml config file."""
file_name = os.path.join(self.args.working_dir, self.args.output_filename + '.yaml')
config_dict = Configurer.make_config_dict(self.args.components)
Configurer.print2yaml(config_dict, file_name)
def init_pipeline(self):
"""initialize a new pipeline."""
## update __SAMPLES__, __SHARED__ and __GENERAL__ sections.
self.c.config_file = self.args.config_file
if self.args.input_samples:
self.c.update_sample_section(self.args.input_samples)
if self.args.setup_file:
self.c.update_shared_section(self.args.setup_file)
## make a copy of the updated config file in the working directory.
updated_cfgfile = os.path.basename(self.args.config_file).split('.yaml')[0]
updated_cfgfile = os.path.join(self.args.working_dir, updated_cfgfile + '_kronos.yaml')
Configurer.print2yaml(self.c.config_dict, updated_cfgfile)
## create a work flow from updated config file
wf = WorkFlow(updated_cfgfile)
samples = wf.get_samples()
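        ## single-sample configs yield one pipeline script; multi-sample
        ## configs get one intermediate pipeline per sample, pasted together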
if not samples:
self._make_intermediate_pipeline(self.args.pipeline_name, updated_cfgfile, None)
else:
for sample_id, sample_dict in samples.iteritems():
new_config_file = self._make_intermediate_config_file(sample_id, sample_dict)
pipeline_name = sample_id + '_' + self.args.pipeline_name
self._make_intermediate_pipeline(pipeline_name, new_config_file, sample_id)
self._paste_pipelines(updated_cfgfile)
def run_pipeline(self):
"""run Kronos-made pipeline with optional initialization."""
if self.args.config_file:
if not self.args.pipeline_name:
bname = os.path.basename(self.args.config_file)
self.args.pipeline_name = os.path.splitext(bname)[0]
self.init_pipeline()
self.args.kronos_pipeline = os.path.join(self.args.working_dir,
self.args.pipeline_name + '.py')
## TODO: check if the -k input has been generated with kronos.
cmd = "{python_installation} {kronos_pipeline} -b {job_scheduler} "
cmd += "-c {components_dir} -d {drmaa_library_path} -j {num_jobs} "
cmd += "-n {num_pipelines} -p {python_installation} -w {working_dir}"
cmd = cmd.format(**vars(self.args))
if self.args.qsub_options:
cmd += " -q '%s'" % (self.args.qsub_options)
if self.args.run_id:
cmd += " -r '%s'" % (self.args.run_id)
if self.args.pipeline_name:
cmd += " -e '%s'" % (self.args.pipeline_name)
if self.args.no_prefix:
cmd += " --no_prefix"
logging.info('running the command: %s' % (cmd))
proc = sub.Popen(cmd, stdout=sub.PIPE, stderr=sub.PIPE, shell=True)
cmdout, cmderr = proc.communicate()
if cmdout:
logging.info(cmdout)
if cmderr:
logging.warning(cmderr)
def update_config(self):
old_config_file = self.args.config_files[0]
new_config_file = self.args.config_files[1]
file_name = os.path.join(self.args.working_dir,
self.args.output_filename + '.yaml')
new_config_dict = Configurer.update_config_files(old_config_file, new_config_file)
Configurer.print2yaml(new_config_dict, file_name)
def _make_intermediate_config_file(self, sample_id, sample_dict):
"""make an intermediate config file from the original config_file."""
intermediate_dir = os.path.join(self.args.working_dir, 'intermediate_config_files')
make_dir(intermediate_dir)
temp_name = os.path.splitext(os.path.basename(self.args.config_file))[0] + '_kronos'
new_file_name = os.path.join(intermediate_dir, sample_id + '_' + temp_name + '.yaml')
new_config_dict = self.c.update_config_dict(sample_dict)
Configurer.print2yaml(new_config_dict, new_file_name)
return new_file_name
def _make_intermediate_pipeline(self, pipeline_name, config_file, sample_id):
"""make an intermediate pipeline script from the intermediate config file."""
intermediate_dir = os.path.join(self.args.working_dir, 'intermediate_pipeline_scripts')
make_dir(intermediate_dir)
p = self._make_pipeline(pipeline_name, config_file, intermediate_dir, sample_id)
self.pipelines.append(p)
def _make_pipeline(self, pipeline_name, config_file, script_dir, sample_id):
p = Pipeline(pipeline_name = pipeline_name,
config_file = config_file,
script_dir = script_dir,
sample_id = sample_id)
p.make_script(sample_id)
return p
def _paste_pipelines(self, config_file):
"""merge intermediate pipelines."""
pipeline_script = os.path.join(self.args.working_dir, self.args.pipeline_name + '.py')
with open(pipeline_script, 'w') as ps:
plumber = Plumber(ps, None)
plumber.paste_pipelines(self.pipelines, config_file)
def test(self):
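        """Run the bundled test scripts with the configured Python."""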
pycmd = self.args.python_installation
tests = list()
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(path, '../test/')
# tests.append(os.path.join(path, 'tester_io_manager.py'))
tests.append(os.path.join(path, 'tester.py'))
for test in tests:
os.system('{0} {1}'.format(pycmd, test))
def make_component(self):
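        """Scaffold a new component package from the bundled templates."""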
output_dir = os.path.abspath(self.args.working_dir)
comp_name = self.args.component_name
comp_path = os.path.join(output_dir, comp_name)
## make the component directory
if os.path.exists(comp_path):
msg = ("There is already a component with name '{0}' "
" in the given path {1}").format(comp_name, comp_path)
raise Exception(msg)
else:
os.mkdir(comp_path)
os.mkdir(os.path.join(comp_path, 'component_seed'))
def _make_new_file_and_replace_comp_name(template_file, comp_name):
with open(template_file, 'r') as tf:
new_filename = os.path.basename(template_file)
new_file = open(os.path.join(comp_path, new_filename), 'w')
t = Template(tf.read())
new_file.write(t.substitute(COMPONENT_NAME=comp_name))
new_file.close()
package_path = os.path.dirname(os.path.realpath(__file__))
templates_path = os.path.join(package_path, '../templates')
component_ui = os.path.join(templates_path, 'component_ui.py')
component_main = os.path.join(templates_path, 'component_main.py')
component_reqs = os.path.join(templates_path, 'component_reqs.py')
component_params = os.path.join(templates_path, 'component_params.py')
_make_new_file_and_replace_comp_name(component_ui, comp_name)
_make_new_file_and_replace_comp_name(component_main, comp_name)
_make_new_file_and_replace_comp_name(component_reqs, comp_name)
_make_new_file_and_replace_comp_name(component_params, comp_name)
## create the __init__.py inside the component package
init_file = open(os.path.join(comp_path, '__init__.py'), 'w')
init_file.close()
def main():
import kronosui
args = kronosui.args
logging.info("<<< kronos_" + kronos_version + " started >>>")
factory = Factory(args)
if args.subparser_name == 'make_config':
logging.info("making a config file ...")
factory.make_config()
elif args.subparser_name == 'init':
logging.info("initializing the pipeline ...")
factory.init_pipeline()
elif args.subparser_name == 'test':
factory.test()
elif args.subparser_name == 'make_component':
logging.info("making a component")
factory.make_component()
elif args.subparser_name == 'update_config':
logging.info("updating config files ...")
factory.update_config()
elif args.subparser_name == 'run':
logging.info("running the pipeline ...")
factory.run_pipeline()
logging.info("<<< kronos_" + kronos_version + " finished >>>")
if __name__ == '__main__':
main()
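# ---------------------------------------------------------------------------
# Programmatic sketch (an assumption; the real entry point is the kronosui
# argument parser): driving Factory directly with a namespace that mimics the
# attributes main() reads. Only attribute names that appear in the code above
# are used; the component names are hypothetical.
# ---------------------------------------------------------------------------
# from argparse import Namespace
# args = Namespace(subparser_name='make_config',
#                  working_dir='work_dir',
#                  output_filename='my_pipeline',
#                  components=['comp1', 'comp2'])
# Factory(args).make_config()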
import unittest
import clitui
class TestDisplayFunctions(unittest.TestCase):
def test_is_pressed(self):
self.assertEqual(clitui.is_pressed("UP", "\x1b[A"), True)
self.assertEqual(clitui.is_pressed("DOWN", "\x1b[B"), True)
self.assertEqual(clitui.is_pressed("RIGHT", "\x1b[C"), True)
self.assertEqual(clitui.is_pressed("LEFT", "\x1b[D"), True)
self.assertEqual(clitui.is_pressed("ESC", "\x1b"), True)
self.assertEqual(clitui.is_pressed("ENTER", "\n"), True)
self.assertEqual(clitui.is_pressed("TAB", "\t"), True)
self.assertEqual(clitui.is_pressed("BAR", " "), True)
self.assertEqual(clitui.is_pressed("Fail", "Fail"), True)
self.assertEqual(clitui.is_pressed("Key", "Fail"), False)
def test_boarder_char_h(self):
self.assertEqual(clitui.display.boarder_char("h", style=0), chr(9552))
self.assertEqual(clitui.display.boarder_char("h", style=1), chr(9472))
self.assertEqual(clitui.display.boarder_char("h", style=2), chr(9473))
def test_boarder_char_v(self):
self.assertEqual(clitui.display.boarder_char("v", style=0), chr(9553))
self.assertEqual(clitui.display.boarder_char("v", style=1), chr(9474))
self.assertEqual(clitui.display.boarder_char("v", style=2), chr(9475))
def test_boarder_char_lt(self):
self.assertEqual(clitui.display.boarder_char("lt", style=0), chr(9571))
self.assertEqual(clitui.display.boarder_char("lt", style=1), chr(9508))
self.assertEqual(clitui.display.boarder_char("lt", style=2), chr(9515))
def test_boarder_char_rt(self):
self.assertEqual(clitui.display.boarder_char("rt", style=0), chr(9568))
self.assertEqual(clitui.display.boarder_char("rt", style=1), chr(9500))
self.assertEqual(clitui.display.boarder_char("rt", style=2), chr(9507))
def test_boarder_char_tt(self):
self.assertEqual(clitui.display.boarder_char("tt", style=0), chr(9574))
self.assertEqual(clitui.display.boarder_char("tt", style=1), chr(9516))
self.assertEqual(clitui.display.boarder_char("tt", style=2), chr(9523))
def test_boarder_char_bt(self):
self.assertEqual(clitui.display.boarder_char("bt", style=0), chr(9577))
self.assertEqual(clitui.display.boarder_char("bt", style=1), chr(9524))
self.assertEqual(clitui.display.boarder_char("bt", style=2), chr(9531))
def test_boarder_char_tlc(self):
self.assertEqual(clitui.display.boarder_char("tlc", style=0), chr(9556))
self.assertEqual(clitui.display.boarder_char("tlc", style=1), chr(9581))
self.assertEqual(clitui.display.boarder_char("tlc", style=2), chr(9487))
def test_boarder_char_trc(self):
self.assertEqual(clitui.display.boarder_char("trc", style=0), chr(9559))
self.assertEqual(clitui.display.boarder_char("trc", style=1), chr(9582))
self.assertEqual(clitui.display.boarder_char("trc", style=2), chr(9491))
def test_boarder_char_blc(self):
self.assertEqual(clitui.display.boarder_char("blc", style=0), chr(9562))
self.assertEqual(clitui.display.boarder_char("blc", style=1), chr(9584))
self.assertEqual(clitui.display.boarder_char("blc", style=2), chr(9495))
def test_boarder_char_brc(self):
self.assertEqual(clitui.display.boarder_char("brc", style=0), chr(9565))
self.assertEqual(clitui.display.boarder_char("brc", style=1), chr(9583))
self.assertEqual(clitui.display.boarder_char("brc", style=2), chr(9499))
def test_boarder_char_ls(self):
self.assertEqual(clitui.display.boarder_char("ls", style=0), "/")
self.assertEqual(clitui.display.boarder_char("ls", style=1), "/")
self.assertEqual(clitui.display.boarder_char("ls", style=2), "/")
def test_boarder_char_rs(self):
self.assertEqual(clitui.display.boarder_char("rs", style=0), "\\")
self.assertEqual(clitui.display.boarder_char("rs", style=1), "\\")
self.assertEqual(clitui.display.boarder_char("rs", style=2), "\\")
def test_boarder_char_plus(self):
self.assertEqual(clitui.display.boarder_char("+", style=0), chr(9580))
self.assertEqual(clitui.display.boarder_char("+", style=1), chr(9532))
self.assertEqual(clitui.display.boarder_char("+", style=2), chr(9547))
def test_gen_points(self):
        points = clitui.display.gen_points(0, 0, 5, 0)
self.assertEqual(
tuple(points), ((0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0)),
)
    def test_circle_points(self):
        # Compare sets, not tuples: set iteration order is not guaranteed,
        # so a tuple(points) comparison would be flaky.
        points = set(clitui.display._create_points(10, 10, 3, 1, 360))
        self.assertEqual(
            points,
            {
                (12, 7),
                (12, 13),
                (8, 12),
                (13, 11),
                (13, 8),
                (7, 10),
                (12, 12),
                (9, 7),
                (8, 8),
                (11, 7),
                (9, 13),
                (11, 13),
                (13, 10),
                (7, 9),
                (7, 12),
                (12, 8),
                (8, 7),
                (10, 7),
                (10, 13),
                (8, 13),
                (13, 9),
                (13, 12),
                (7, 11),
                (7, 8),
            },
        )
if __name__ == "__main__":
unittest.main()
| [
"clitui.display.gen_points",
"clitui.display.boarder_char",
"clitui.is_pressed",
"unittest.main",
"clitui.display._create_points"
] | [((5377, 5392), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5390, 5392), False, 'import unittest\n'), ((4392, 4429), 'clitui.display.gen_points', 'clitui.display.gen_points', (['(0)', '(0)', '(5)', '(0)'], {}), '(0, 0, 5, 0)\n', (4417, 4429), False, 'import clitui\n'), ((135, 168), 'clitui.is_pressed', 'clitui.is_pressed', (['"""UP"""', '"""\x1b[A"""'], {}), "('UP', '\\x1b[A')\n", (152, 168), False, 'import clitui\n'), ((201, 236), 'clitui.is_pressed', 'clitui.is_pressed', (['"""DOWN"""', '"""\x1b[B"""'], {}), "('DOWN', '\\x1b[B')\n", (218, 236), False, 'import clitui\n'), ((269, 305), 'clitui.is_pressed', 'clitui.is_pressed', (['"""RIGHT"""', '"""\x1b[C"""'], {}), "('RIGHT', '\\x1b[C')\n", (286, 305), False, 'import clitui\n'), ((338, 373), 'clitui.is_pressed', 'clitui.is_pressed', (['"""LEFT"""', '"""\x1b[D"""'], {}), "('LEFT', '\\x1b[D')\n", (355, 373), False, 'import clitui\n'), ((406, 438), 'clitui.is_pressed', 'clitui.is_pressed', (['"""ESC"""', '"""\x1b"""'], {}), "('ESC', '\\x1b')\n", (423, 438), False, 'import clitui\n'), ((471, 503), 'clitui.is_pressed', 'clitui.is_pressed', (['"""ENTER"""', '"""\n"""'], {}), "('ENTER', '\\n')\n", (488, 503), False, 'import clitui\n'), ((536, 566), 'clitui.is_pressed', 'clitui.is_pressed', (['"""TAB"""', '"""\t"""'], {}), "('TAB', '\\t')\n", (553, 566), False, 'import clitui\n'), ((599, 628), 'clitui.is_pressed', 'clitui.is_pressed', (['"""BAR"""', '""" """'], {}), "('BAR', ' ')\n", (616, 628), False, 'import clitui\n'), ((661, 694), 'clitui.is_pressed', 'clitui.is_pressed', (['"""Fail"""', '"""Fail"""'], {}), "('Fail', 'Fail')\n", (678, 694), False, 'import clitui\n'), ((727, 759), 'clitui.is_pressed', 'clitui.is_pressed', (['"""Key"""', '"""Fail"""'], {}), "('Key', 'Fail')\n", (744, 759), False, 'import clitui\n'), ((829, 870), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""h"""'], {'style': '(0)'}), "('h', style=0)\n", (856, 870), False, 'import clitui\n'), ((908, 949), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""h"""'], {'style': '(1)'}), "('h', style=1)\n", (935, 949), False, 'import clitui\n'), ((987, 1028), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""h"""'], {'style': '(2)'}), "('h', style=2)\n", (1014, 1028), False, 'import clitui\n'), ((1102, 1143), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""v"""'], {'style': '(0)'}), "('v', style=0)\n", (1129, 1143), False, 'import clitui\n'), ((1181, 1222), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""v"""'], {'style': '(1)'}), "('v', style=1)\n", (1208, 1222), False, 'import clitui\n'), ((1260, 1301), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""v"""'], {'style': '(2)'}), "('v', style=2)\n", (1287, 1301), False, 'import clitui\n'), ((1376, 1418), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""lt"""'], {'style': '(0)'}), "('lt', style=0)\n", (1403, 1418), False, 'import clitui\n'), ((1456, 1498), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""lt"""'], {'style': '(1)'}), "('lt', style=1)\n", (1483, 1498), False, 'import clitui\n'), ((1536, 1578), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""lt"""'], {'style': '(2)'}), "('lt', style=2)\n", (1563, 1578), False, 'import clitui\n'), ((1653, 1695), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""rt"""'], {'style': '(0)'}), "('rt', style=0)\n", (1680, 1695), False, 'import clitui\n'), ((1733, 1775), 
'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""rt"""'], {'style': '(1)'}), "('rt', style=1)\n", (1760, 1775), False, 'import clitui\n'), ((1813, 1855), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""rt"""'], {'style': '(2)'}), "('rt', style=2)\n", (1840, 1855), False, 'import clitui\n'), ((1930, 1972), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""tt"""'], {'style': '(0)'}), "('tt', style=0)\n", (1957, 1972), False, 'import clitui\n'), ((2010, 2052), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""tt"""'], {'style': '(1)'}), "('tt', style=1)\n", (2037, 2052), False, 'import clitui\n'), ((2090, 2132), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""tt"""'], {'style': '(2)'}), "('tt', style=2)\n", (2117, 2132), False, 'import clitui\n'), ((2207, 2249), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""bt"""'], {'style': '(0)'}), "('bt', style=0)\n", (2234, 2249), False, 'import clitui\n'), ((2287, 2329), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""bt"""'], {'style': '(1)'}), "('bt', style=1)\n", (2314, 2329), False, 'import clitui\n'), ((2367, 2409), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""bt"""'], {'style': '(2)'}), "('bt', style=2)\n", (2394, 2409), False, 'import clitui\n'), ((2485, 2528), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""tlc"""'], {'style': '(0)'}), "('tlc', style=0)\n", (2512, 2528), False, 'import clitui\n'), ((2566, 2609), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""tlc"""'], {'style': '(1)'}), "('tlc', style=1)\n", (2593, 2609), False, 'import clitui\n'), ((2647, 2690), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""tlc"""'], {'style': '(2)'}), "('tlc', style=2)\n", (2674, 2690), False, 'import clitui\n'), ((2766, 2809), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""trc"""'], {'style': '(0)'}), "('trc', style=0)\n", (2793, 2809), False, 'import clitui\n'), ((2847, 2890), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""trc"""'], {'style': '(1)'}), "('trc', style=1)\n", (2874, 2890), False, 'import clitui\n'), ((2928, 2971), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""trc"""'], {'style': '(2)'}), "('trc', style=2)\n", (2955, 2971), False, 'import clitui\n'), ((3047, 3090), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""blc"""'], {'style': '(0)'}), "('blc', style=0)\n", (3074, 3090), False, 'import clitui\n'), ((3128, 3171), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""blc"""'], {'style': '(1)'}), "('blc', style=1)\n", (3155, 3171), False, 'import clitui\n'), ((3209, 3252), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""blc"""'], {'style': '(2)'}), "('blc', style=2)\n", (3236, 3252), False, 'import clitui\n'), ((3328, 3371), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""brc"""'], {'style': '(0)'}), "('brc', style=0)\n", (3355, 3371), False, 'import clitui\n'), ((3409, 3452), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""brc"""'], {'style': '(1)'}), "('brc', style=1)\n", (3436, 3452), False, 'import clitui\n'), ((3490, 3533), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""brc"""'], {'style': '(2)'}), "('brc', style=2)\n", (3517, 3533), False, 'import clitui\n'), ((3608, 3650), 'clitui.display.boarder_char', 'clitui.display.boarder_char', 
(['"""ls"""'], {'style': '(0)'}), "('ls', style=0)\n", (3635, 3650), False, 'import clitui\n'), ((3682, 3724), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""ls"""'], {'style': '(1)'}), "('ls', style=1)\n", (3709, 3724), False, 'import clitui\n'), ((3756, 3798), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""ls"""'], {'style': '(2)'}), "('ls', style=2)\n", (3783, 3798), False, 'import clitui\n'), ((3867, 3909), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""rs"""'], {'style': '(0)'}), "('rs', style=0)\n", (3894, 3909), False, 'import clitui\n'), ((3942, 3984), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""rs"""'], {'style': '(1)'}), "('rs', style=1)\n", (3969, 3984), False, 'import clitui\n'), ((4017, 4059), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""rs"""'], {'style': '(2)'}), "('rs', style=2)\n", (4044, 4059), False, 'import clitui\n'), ((4131, 4172), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""+"""'], {'style': '(0)'}), "('+', style=0)\n", (4158, 4172), False, 'import clitui\n'), ((4210, 4251), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""+"""'], {'style': '(1)'}), "('+', style=1)\n", (4237, 4251), False, 'import clitui\n'), ((4289, 4330), 'clitui.display.boarder_char', 'clitui.display.boarder_char', (['"""+"""'], {'style': '(2)'}), "('+', style=2)\n", (4316, 4330), False, 'import clitui\n'), ((4600, 4648), 'clitui.display._create_points', 'clitui.display._create_points', (['(10)', '(10)', '(3)', '(1)', '(360)'], {}), '(10, 10, 3, 1, 360)\n', (4629, 4648), False, 'import clitui\n')] |
# Generated by Django 2.2.5 on 2020-01-14 14:27
import designs.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0019_auto_20200105_2301'),
('designs', '0006_delete_material'),
]
operations = [
migrations.RemoveField(
model_name='design',
name='title',
),
migrations.AddField(
model_name='design',
name='design_number',
field=models.CharField(blank=True, default=designs.models.create_custom_design_code, max_length=40, null=True, verbose_name='番号'),
),
migrations.AddField(
model_name='design',
name='guest',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='design', to='users.Guest'),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((333, 390), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""design"""', 'name': '"""title"""'}), "(model_name='design', name='title')\n", (355, 390), False, 'from django.db import migrations, models\n'), ((541, 669), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': 'designs.models.create_custom_design_code', 'max_length': '(40)', 'null': '(True)', 'verbose_name': '"""番号"""'}), "(blank=True, default=designs.models.\n create_custom_design_code, max_length=40, null=True, verbose_name='番号')\n", (557, 669), False, 'from django.db import migrations, models\n'), ((783, 902), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""design"""', 'to': '"""users.Guest"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='design', to='users.Guest')\n", (800, 902), False, 'from django.db import migrations, models\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import os
import tarfile
import tempfile
import unittest
import numpy as np
import onnx
from onnx import helper
from .test_util import create_input, N
from . import test_rnn
from six.moves.urllib.request import urlretrieve
L = 20
M = 10
S = 5
const2_np = np.random.randn(S, S)
const2_onnx = onnx.helper.make_tensor("const2",
onnx.TensorProto.FLOAT,
(S, S),
const2_np.flatten().astype(float))
# TODO: These Numpy specs will be generally useful to backend implementations,
# so they should get moved out of here at some point
node_tests = [
("test_abs", N("Abs"), np.abs, [(S, S, S)]),
("test_add", N("Add"), np.add, [(S, S, S), (S, S, S)]),
("test_add_bcast", N("Add", broadcast=1), np.add, [(S, M), (M,)]),
("test_constant", N("Constant", value=const2_onnx), lambda: const2_np, []),
# TODO: Are we actually supporting other dot modes? In that case, some fancy
# footwork is necessary...
("test_relu", N("Relu"), lambda x: np.clip(x, 0, np.inf), [(S, S, S)]),
("test_constant_pad",
N("Pad", mode='constant', value=1.2, paddings=[0, 0, 0, 0, 1, 2, 3, 4]),
lambda x: np.pad(x,
pad_width=((0, 0), (0, 0), (1, 2), (3, 4)),
mode='constant',
constant_values=1.2),
[(1, 3, L, M)]),
("test_refelction_pad",
N("Pad", mode='reflect', paddings=[0, 0, 0, 0, 1, 1, 1, 1]),
lambda x: np.pad(x,
pad_width=((0, 0), (0, 0), (1, 1), (1, 1)),
mode='reflect'),
[(1, 3, L, M)]),
("test_edge_pad",
N("Pad", mode='edge', paddings=[0, 0, 0, 0, 1, 1, 1, 1]),
lambda x: np.pad(x,
pad_width=((0, 0), (0, 0), (1, 1), (1, 1)),
mode='edge'),
[(1, 3, L, M)]),
("test_slice",
N("Slice", axes=[0, 1], starts=[0, 0], ends=[3, M]),
lambda x: x[0:3, 0:M], [(L, M, S)]),
("test_slice_neg",
N("Slice", axes=[1], starts=[0], ends=[-1]),
lambda x: x[:, 0:-1], [(L, M, S)]),
("test_slice_default_axes",
N("Slice", starts=[0, 0, 3], ends=[L, M, 4]),
lambda x: x[:, :, 3:4], [(L, M, S)]),
("test_matmul_2d",
N("MatMul"),
np.matmul,
[(M, S), (S, M)]),
("test_matmul_3d",
N("MatMul"),
np.matmul,
[(1, M, S), (1, S, M)]),
("test_matmul_4d",
N("MatMul"),
np.matmul,
[(1, 2, S, M),
(1, 2, M, S)]),
# TODO: Add all the other operators
] + test_rnn.node_tests
model_tests = [
('test_bvlc_alexnet', 'bvlc_alexnet'),
('test_densenet121', 'densenet121'),
('test_inception_v1', 'inception_v1'),
('test_inception_v2', 'inception_v2'),
('test_resnet50', 'resnet50'),
('test_shufflenet', 'shufflenet'),
('test_squeezenet', 'squeezenet'),
('test_vgg16', 'vgg16'),
]
# Running vgg19 on Travis with Python 2 keeps getting OOM!
if not os.environ.get('TRAVIS'):
model_tests.append(('test_vgg19', 'vgg19'))
class BackendTest(object):
def __init__(self, backend, parent_module=None):
class TestsContainer(unittest.TestCase):
def setUp(self):
np.random.seed(seed=0)
self.backend = backend
self._base_case = TestsContainer
self._parent_module = parent_module
# List of test cases to be applied on the parent scope
# Example usage: globals().update(BackendTest(backend).test_cases)
self.test_cases = {}
for nt in node_tests:
self._add_node_test(*nt)
for gt in model_tests:
self._add_model_test(*gt)
# For backward compatibility - create a suite to aggregate them all
self.tests = type(str('OnnxBackendTest'), (self._base_case,), {})
for _, case in sorted(self.test_cases.items()):
for name, func in sorted(case.__dict__.items()):
if name.startswith('test_'):
setattr(self.tests, name, func)
def _get_test_case(self, category):
name = 'OnnxBackend{}Test'.format(category)
if name not in self.test_cases:
self.test_cases[name] = type(str(name), (self._base_case,), {})
if self._parent_module:
self.test_cases[name].__module__ = self._parent_module
return self.test_cases[name]
def _prepare_model(self, model_name):
onnx_home = os.path.expanduser(os.getenv('ONNX_HOME', '~/.onnx'))
models_dir = os.getenv('ONNX_MODELS', os.path.join(onnx_home, 'models'))
model_dir = os.path.join(models_dir, model_name)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
url = 'https://s3.amazonaws.com/download.onnx/models/{}.tar.gz'.format(
model_name)
with tempfile.NamedTemporaryFile(delete=True) as download_file:
print('Start downloading model {} from {}'.format(model_name, url))
urlretrieve(url, download_file.name)
print('Done')
with tarfile.open(download_file.name) as t:
t.extractall(models_dir)
return model_dir
def _add_test(self, test_case, name, test_func):
# We don't prepend the 'test_' prefix to improve greppability
if not name.startswith('test_'):
raise ValueError('Test name must start with test_: {}'.format(name))
s = self._get_test_case(test_case)
if hasattr(s, name):
raise ValueError('Duplicated test name: {}'.format(name))
setattr(s, name, test_func)
def _add_model_test(self, test_name, model_name, device='CPU'):
"""
Add A test for a single ONNX model against a reference implementation.
test_name (string): Eventual name of the test. Must be prefixed
with 'test_'.
model_name (string): The ONNX model's name
inputs (list of ndarrays): inputs to the model
outputs (list of ndarrays): outputs to the model
"""
def run(test_self):
if not self.backend.supports_device(device):
raise unittest.SkipTest(
"Backend doesn't support device {}".format(device))
model_dir = self._prepare_model(model_name)
model_pb_path = os.path.join(model_dir, 'model.pb')
model = onnx.load(model_pb_path)
prepared_model = self.backend.prepare(model, device)
for test_data_npz in glob.glob(os.path.join(model_dir, 'test_data_*.npz')):
test_data = np.load(test_data_npz, encoding='bytes')
inputs = list(test_data['inputs'])
outputs = list(prepared_model.run(inputs))
ref_outputs = test_data['outputs']
test_self.assertEqual(len(ref_outputs), len(outputs))
for i in range(len(outputs)):
np.testing.assert_almost_equal(
ref_outputs[i],
outputs[i],
decimal=4)
self._add_test('Model', test_name, run)
def _add_node_test(self, test_name, node_spec, ref, inputs, device='CPU'):
"""
Add A test for a single ONNX node against a reference implementation.
Arguments:
test_name (string): Eventual name of the test. Must be prefixed
with 'test_'.
node (NodeSpec): The ONNX node's name and attributes to be tested;
inputs and outputs will be inferred from other arguments of this
spec.
ref (lambda): A function from any number of Numpy ndarrays,
to a single ndarray or tuple of ndarrays.
inputs (tuple of ndarrays or size tuples): A specification of
the input to the operator.
"""
def run(test_self):
if not self.backend.supports_device(device):
raise unittest.SkipTest(
"Backend doesn't support device {}".format(device))
# TODO: In some cases we should generate multiple random inputs
# and test (ala Hypothesis)
args = create_input(inputs)
ref_outputs = ref(*args)
if not isinstance(ref_outputs, tuple):
ref_outputs = (ref_outputs,)
input_names = ['input_{}'.format(i) for i in range(len(args))]
output_names = ['output_{}'.format(i) for i in range(len(ref_outputs))]
node_def = helper.make_node(
node_spec.name,
input_names,
output_names,
**node_spec.kwargs)
outputs = self.backend.run_node(node_def, args, device)
test_self.assertEqual(len(ref_outputs), len(outputs))
for i in range(len(output_names)):
np.testing.assert_almost_equal(
ref_outputs[i],
outputs[i],
decimal=4)
self._add_test('Node', test_name, run)
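# Usage sketch (hedged): "my_backend" below is a hypothetical module that
# implements the ONNX backend interface (prepare/run_node/supports_device)
# assumed by BackendTest; in a real test module you would import your own.
#
#   import my_backend
#   globals().update(BackendTest(my_backend, __name__).test_cases)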
| [
"numpy.clip",
"os.path.exists",
"tarfile.open",
"onnx.helper.make_node",
"os.getenv",
"os.makedirs",
"os.path.join",
"os.environ.get",
"numpy.testing.assert_almost_equal",
"onnx.load",
"numpy.random.seed",
"tempfile.NamedTemporaryFile",
"six.moves.urllib.request.urlretrieve",
"numpy.pad",
"numpy.load",
"numpy.random.randn"
] | [((420, 441), 'numpy.random.randn', 'np.random.randn', (['S', 'S'], {}), '(S, S)\n', (435, 441), True, 'import numpy as np\n'), ((3139, 3163), 'os.environ.get', 'os.environ.get', (['"""TRAVIS"""'], {}), "('TRAVIS')\n", (3153, 3163), False, 'import os\n'), ((4770, 4806), 'os.path.join', 'os.path.join', (['models_dir', 'model_name'], {}), '(models_dir, model_name)\n', (4782, 4806), False, 'import os\n'), ((4634, 4667), 'os.getenv', 'os.getenv', (['"""ONNX_HOME"""', '"""~/.onnx"""'], {}), "('ONNX_HOME', '~/.onnx')\n", (4643, 4667), False, 'import os\n'), ((4715, 4748), 'os.path.join', 'os.path.join', (['onnx_home', '"""models"""'], {}), "(onnx_home, 'models')\n", (4727, 4748), False, 'import os\n'), ((4822, 4847), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (4836, 4847), False, 'import os\n'), ((4861, 4883), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (4872, 4883), False, 'import os\n'), ((6529, 6564), 'os.path.join', 'os.path.join', (['model_dir', '"""model.pb"""'], {}), "(model_dir, 'model.pb')\n", (6541, 6564), False, 'import os\n'), ((6585, 6609), 'onnx.load', 'onnx.load', (['model_pb_path'], {}), '(model_pb_path)\n', (6594, 6609), False, 'import onnx\n'), ((8729, 8808), 'onnx.helper.make_node', 'helper.make_node', (['node_spec.name', 'input_names', 'output_names'], {}), '(node_spec.name, input_names, output_names, **node_spec.kwargs)\n', (8745, 8808), False, 'from onnx import helper\n'), ((1231, 1252), 'numpy.clip', 'np.clip', (['x', '(0)', 'np.inf'], {}), '(x, 0, np.inf)\n', (1238, 1252), True, 'import numpy as np\n'), ((1387, 1482), 'numpy.pad', 'np.pad', (['x'], {'pad_width': '((0, 0), (0, 0), (1, 2), (3, 4))', 'mode': '"""constant"""', 'constant_values': '(1.2)'}), "(x, pad_width=((0, 0), (0, 0), (1, 2), (3, 4)), mode='constant',\n constant_values=1.2)\n", (1393, 1482), True, 'import numpy as np\n'), ((1677, 1746), 'numpy.pad', 'np.pad', (['x'], {'pad_width': '((0, 0), (0, 0), (1, 1), (1, 1))', 'mode': '"""reflect"""'}), "(x, pad_width=((0, 0), (0, 0), (1, 1), (1, 1)), mode='reflect')\n", (1683, 1746), True, 'import numpy as np\n'), ((1914, 1980), 'numpy.pad', 'np.pad', (['x'], {'pad_width': '((0, 0), (0, 0), (1, 1), (1, 1))', 'mode': '"""edge"""'}), "(x, pad_width=((0, 0), (0, 0), (1, 1), (1, 1)), mode='edge')\n", (1920, 1980), True, 'import numpy as np\n'), ((3389, 3411), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(0)'}), '(seed=0)\n', (3403, 3411), True, 'import numpy as np\n'), ((5013, 5053), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(True)'}), '(delete=True)\n', (5040, 5053), False, 'import tempfile\n'), ((5172, 5208), 'six.moves.urllib.request.urlretrieve', 'urlretrieve', (['url', 'download_file.name'], {}), '(url, download_file.name)\n', (5183, 5208), False, 'from six.moves.urllib.request import urlretrieve\n'), ((6719, 6761), 'os.path.join', 'os.path.join', (['model_dir', '"""test_data_*.npz"""'], {}), "(model_dir, 'test_data_*.npz')\n", (6731, 6761), False, 'import os\n'), ((6792, 6832), 'numpy.load', 'np.load', (['test_data_npz'], {'encoding': '"""bytes"""'}), "(test_data_npz, encoding='bytes')\n", (6799, 6832), True, 'import numpy as np\n'), ((9071, 9140), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ref_outputs[i]', 'outputs[i]'], {'decimal': '(4)'}), '(ref_outputs[i], outputs[i], decimal=4)\n', (9101, 9140), True, 'import numpy as np\n'), ((5260, 5292), 'tarfile.open', 'tarfile.open', (['download_file.name'], {}), 
'(download_file.name)\n', (5272, 5292), False, 'import tarfile\n'), ((7130, 7199), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ref_outputs[i]', 'outputs[i]'], {'decimal': '(4)'}), '(ref_outputs[i], outputs[i], decimal=4)\n', (7160, 7199), True, 'import numpy as np\n')] |
from .pyoptsolvercpp import IpoptConfig, IpoptSolver
from .pyoptsolvercpp import SnoptConfig, SnoptSolver
from .pyoptsolvercpp import OptProblem, OptResult
from .pyoptsolvercpp import __with_snopt__, __with_ipopt__, __version__
import warnings
import six
import numpy as np
from scipy.optimize import NonlinearConstraint, minimize, Bounds
from scipy.sparse import coo_matrix
try:
from .pyknitro import KnitroSolver
__has_knitro__ = True
except Exception:  # broad on purpose: the Knitro import can fail for reasons other than ImportError
print("Cannot import KnitroSolver, make sure its Python interface is installed")
__has_knitro__ = False
class OptConfig(object):
"""This class is used to keep track of various optimization configurations.
It provides a unified interface for interacting with those solvers.
Some custom options are also supported, but might not work depending on the backend.
"""
shared_options = {'major_iter', 'opt_tol', 'fea_tol', 'print_level', 'deriv_check'}
snopt_options = {'minor_iter', 'iter_limit', 'print_file', 'history'}
ipopt_options = {'print_freq', 'linear_solver', 'exact_hessian', 'fd_jacobian'}
scipy_kws = {'tol'}
scipy_option_kws = {'grad', 'xtol', 'gtol', 'barrier_tol', 'initial_constr_penalty',
'initial_tr_radius', 'initial_barrier_parameter', 'initial_barrier_tolerance',
'factorization_method', 'disp'}
def __init__(self, backend='ipopt', **kw):
if backend not in ['ipopt', 'snopt', 'scipy', 'knitro']:
warnings.warn('Backend should be in ipopt, snopt, scipy, knitro')
if __with_ipopt__ and backend == 'ipopt':
self.backend = 'ipopt'
self.option = IpoptConfig()
elif __with_snopt__ and backend == 'snopt':
self.backend = 'snopt'
self.option = SnoptConfig()
elif backend == 'knitro':
assert __has_knitro__, "To use Knitro, Knitro Python interface must be installed correctly"
self.backend = backend
self.option = {}
else:
self.backend = 'scipy'
self.option = {'options': {'sparse_jacobian': True}, 'tol': 1e-6}
self._process_kw(**kw)
def _process_kw(self, **kws):
is_ipopt = self.backend == 'ipopt'
is_snopt = self.backend == 'snopt'
is_scipy = self.backend == 'scipy'
is_knitro = self.backend == 'knitro'
if is_knitro:
self.option.update(kws)
return
if is_scipy:
options = self.option['options']
for key, val in six.iteritems(kws):
if key == 'major_iter':
if not is_scipy:
self.option.set_major_iter(val)
else:
options['maxiter'] = val
elif key == 'opt_tol':
if not is_scipy:
self.option.set_opt_tol(val)
else:
options['gtol'] = val
elif key == 'fea_tol':
if not is_scipy:
self.option.set_fea_tol(val)
else:
self.option['tol'] = val
elif key == 'print_level':
if not is_scipy:
self.option.set_print_level(val)
else:
options['verbose'] = val
elif key == 'deriv_check':
if not is_scipy:
self.option.enable_deriv_check(val)
elif key == 'minor_iter':
if is_snopt:
self.option.set_minor_iter(val)
elif key == 'iter_limit':
if is_snopt:
self.option.set_iter_limit(val)
elif key == 'print_file':
if is_snopt:
self.option.print_file = val
elif key == 'print_freq':
if is_ipopt:
self.option.set_print_freq(val)
elif key == 'linear_solver':
if is_ipopt:
self.option.set_linear_solver(val)
elif key == 'exact_hessian':
if is_ipopt and val:
self.option.enable_exact_hessian()
elif key == 'fd_jacobian':
if is_ipopt and val:
self.option.enable_fd_jacobian()
elif key == 'history':
if is_snopt and val:
self.option.enable_history()
if is_knitro and val:
self.option['history'] = val
elif key == 'user_callback':
if is_knitro:
self.option['user_callback'] = val
elif key in self.scipy_kws:
self.option[key] = val
elif key in self.scipy_option_kws:
options[key] = val
else: # considering we are adding several types
if isinstance(val, int):
if not is_scipy:
self.option.add_int_option(key, val)
elif isinstance(val, float):
if not is_scipy:
                        self.option.add_float_option(key, val)  # pass the key as well, mirroring add_int_option above
elif isinstance(val, str) or val is None:
if is_snopt:
self.option.add_string_option(key)
elif is_ipopt:
self.option.add_string_option(key, val)
class TrustConstrSolver(object):
"""A wrapper that builds on a already declared problem."""
def __init__(self, problem, option):
assert isinstance(problem, OptProblem)
self.problem = problem
self.option = option
self.g = np.zeros(problem.nG)
self.y = np.zeros(problem.nf)
self.row, self.col = np.zeros((2, problem.nG), dtype=int)
if not problem.ipstyle:
self.Aval = problem.Aval
self.Arow = problem.Arow.astype(int)
self.Acol = problem.Acol.astype(int)
self.obj_a_indice = np.where(problem.Arow == 0)
self.A_csc = coo_matrix((self.Aval, (self.Arow, self.Acol)),
shape=(problem.nf, problem.nx))
def _get_coo(self):
if self.problem.ipstyle:
return coo_matrix((self.g, (self.row, self.col)), shape=(self.problem.nf, self.problem.nx))
else:
return coo_matrix((np.concatenate((self.g, self.Aval)),
(np.concatenate((self.row, self.Arow)),
np.concatenate((self.col, self.Acol)))),
shape=(self.problem.nf, self.problem.nx))
def solve_rand(self):
guess = self.problem.random_gen_x()
return self.solve_guess(guess)
def solve_guess(self, guess):
if self.problem.ipstyle:
def confun(x):
self.problem.eval_constr(x, self.y)
return self.y
def jacfun(x):
self.problem.__jacobian__(x, self.g, self.row, self.col, False)
return self._get_coo()
def costfun(x):
g = np.zeros_like(x)
self.problem.eval_gradient(x, g)
return self.problem.__cost__(x), g
# figure out the jacobian sparsity
self.problem.__jacobian__(guess, self.g, self.row, self.col, True)
else:
def confun(x):
if self.problem.grad:
self.problem.__callg__(x, self.y, self.g, self.row, self.col, False, False)
else:
self.problem.__callf__(x, self.y)
self.y += self.A_csc.dot(x)
return self.y
def jacfun(x):
self.problem.__callg__(x, self.y, self.g, self.row, self.col, False, True)
return self._get_coo()
def costfun(x):
self.problem.__callg__(x, self.y, self.g, self.row, self.col, False, True)
if not hasattr(self, 'cost_row_mask'):
self.cost_row_mask = np.where(self.row == 0)[0]
grad = np.zeros_like(x)
grad[self.col[self.cost_row_mask]] = self.g[self.cost_row_mask]
grad[self.Acol[self.obj_a_indice]] = self.Aval[self.obj_a_indice]
return self.y[0], grad
# figure out the jacobian sparsity
self.problem.__callg__(guess, self.y, self.g, self.row, self.col, True, True)
# jac0 = self._get_coo()
constr = NonlinearConstraint(confun, self.problem.get_lb(), self.problem.get_ub(), jac=jacfun)
bounds = Bounds(self.problem.get_xlb(), self.problem.get_xub())
res = minimize(costfun, guess, method='trust-constr', jac=True, bounds=bounds,
constraints=constr, **self.option)
# use the attr trick to add contents to returned res
setattr(res, 'obj', res.fun)
setattr(res, 'flag', res.status != 0 and res.constr_violation < self.option['tol'])
setattr(res, 'sol', res.x)
setattr(res, 'fval', res.constr)
setattr(res, 'lmd', res.v)
setattr(res, 'xmul', res.v)
return res
class OptSolver(object):
def __init__(self, problem, config):
if config.backend == 'snopt':
self.solver = SnoptSolver(problem, config.option)
elif config.backend == 'ipopt':
self.solver = IpoptSolver(problem, config.option)
elif config.backend == 'knitro':
assert __has_knitro__, "To use Knitro, Knitro Python interface must be installed correctly"
self.solver = KnitroSolver(problem, config.option)
else:
self.solver = TrustConstrSolver(problem, config.option)
def solve_rand(self):
return self.solver.solve_rand()
def solve_guess(self, guess):
return self.solver.solve_guess(guess)
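# Usage sketch (hedged): "MyProblem" is a hypothetical OptProblem subclass;
# the keyword names come from the option tables in OptConfig above.
#
#   config = OptConfig(backend='ipopt', major_iter=200, opt_tol=1e-8, fea_tol=1e-8)
#   solver = OptSolver(MyProblem(), config)
#   result = solver.solve_rand()          # or solver.solve_guess(x0)
#   print(result.flag, result.obj, result.sol)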
| [
"numpy.where",
"scipy.optimize.minimize",
"numpy.zeros",
"numpy.concatenate",
"scipy.sparse.coo_matrix",
"warnings.warn",
"six.iteritems",
"numpy.zeros_like"
] | [((2538, 2556), 'six.iteritems', 'six.iteritems', (['kws'], {}), '(kws)\n', (2551, 2556), False, 'import six\n'), ((5638, 5658), 'numpy.zeros', 'np.zeros', (['problem.nG'], {}), '(problem.nG)\n', (5646, 5658), True, 'import numpy as np\n'), ((5676, 5696), 'numpy.zeros', 'np.zeros', (['problem.nf'], {}), '(problem.nf)\n', (5684, 5696), True, 'import numpy as np\n'), ((5726, 5762), 'numpy.zeros', 'np.zeros', (['(2, problem.nG)'], {'dtype': 'int'}), '((2, problem.nG), dtype=int)\n', (5734, 5762), True, 'import numpy as np\n'), ((8644, 8755), 'scipy.optimize.minimize', 'minimize', (['costfun', 'guess'], {'method': '"""trust-constr"""', 'jac': '(True)', 'bounds': 'bounds', 'constraints': 'constr'}), "(costfun, guess, method='trust-constr', jac=True, bounds=bounds,\n constraints=constr, **self.option)\n", (8652, 8755), False, 'from scipy.optimize import NonlinearConstraint, minimize, Bounds\n'), ((1484, 1549), 'warnings.warn', 'warnings.warn', (['"""Backend should be in ipopt, snopt, scipy, knitro"""'], {}), "('Backend should be in ipopt, snopt, scipy, knitro')\n", (1497, 1549), False, 'import warnings\n'), ((5962, 5989), 'numpy.where', 'np.where', (['(problem.Arow == 0)'], {}), '(problem.Arow == 0)\n', (5970, 5989), True, 'import numpy as np\n'), ((6015, 6094), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(self.Aval, (self.Arow, self.Acol))'], {'shape': '(problem.nf, problem.nx)'}), '((self.Aval, (self.Arow, self.Acol)), shape=(problem.nf, problem.nx))\n', (6025, 6094), False, 'from scipy.sparse import coo_matrix\n'), ((6208, 6297), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(self.g, (self.row, self.col))'], {'shape': '(self.problem.nf, self.problem.nx)'}), '((self.g, (self.row, self.col)), shape=(self.problem.nf, self.\n problem.nx))\n', (6218, 6297), False, 'from scipy.sparse import coo_matrix\n'), ((7074, 7090), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (7087, 7090), True, 'import numpy as np\n'), ((8066, 8082), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (8079, 8082), True, 'import numpy as np\n'), ((6338, 6373), 'numpy.concatenate', 'np.concatenate', (['(self.g, self.Aval)'], {}), '((self.g, self.Aval))\n', (6352, 6373), True, 'import numpy as np\n'), ((6407, 6444), 'numpy.concatenate', 'np.concatenate', (['(self.row, self.Arow)'], {}), '((self.row, self.Arow))\n', (6421, 6444), True, 'import numpy as np\n'), ((6478, 6515), 'numpy.concatenate', 'np.concatenate', (['(self.col, self.Acol)'], {}), '((self.col, self.Acol))\n', (6492, 6515), True, 'import numpy as np\n'), ((8016, 8039), 'numpy.where', 'np.where', (['(self.row == 0)'], {}), '(self.row == 0)\n', (8024, 8039), True, 'import numpy as np\n')] |
import pandas as pd
import plotly.graph_objects as go
SUPPORTED_FORMATS = ['html', 'png', 'webp', 'svg', 'pdf', 'eps']
sns_colorscale = [[0.0, '#3f7f93'],
[0.071, '#5890a1'],
[0.143, '#72a1b0'],
[0.214, '#8cb3bf'],
[0.286, '#a7c5cf'],
[0.357, '#c0d6dd'],
[0.429, '#dae8ec'],
[0.5, '#f2f2f2'],
[0.571, '#f7d7d9'],
[0.643, '#f2bcc0'],
[0.714, '#eda3a9'],
[0.786, '#e8888f'],
[0.857, '#e36e76'],
[0.929, '#de535e'],
[1.0, '#d93a46']]
def save_heatmap_with_datetime(df: pd.DataFrame, output_path: str, dformat: str = 'html'):
fig = go.Figure(data=go.Heatmap(
z=df.T,
x=df.index,
y=df.columns.values,
colorscale='Viridis'))
save_figure(fig, dformat, output_path)
def save_corr_matrix(df: pd.DataFrame, output_path: str, dformat: str = 'html'):
corr = df.corr().values
col_names = df.columns.values
N = len(corr)
corr = [[corr[i][j] if i > j else None for j in range(N)] for i in range(N)]
text = [[f'corr({col_names[i]}, {col_names[j]}) = {corr[i][j]:.2f}' if i > j else '' for j in range(N)] for i in range(N)]
heatmap = go.Heatmap(
z=corr,
x=col_names,
y=col_names,
xgap=1, ygap=1,
colorscale=sns_colorscale,
colorbar_thickness=20,
colorbar_ticklen=3,
hovertext=text,
hoverinfo='text'
)
layout = go.Layout(
title_text='Correlation matrix', title_x=0.5,
xaxis_showgrid=False,
yaxis_showgrid=False,
yaxis_autorange='reversed'
)
fig = go.Figure(data=heatmap, layout=layout)
save_figure(fig, dformat, output_path)
def save_figure(fig: go.Figure, dformat: str, output_path: str):
if dformat == 'html':
fig.write_html(output_path)
elif dformat in SUPPORTED_FORMATS:
fig.write_image(output_path, format=dformat)
else:
        raise NotImplementedError(f'Format {dformat} is not supported')
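
# Usage sketch (hedged): "df" and the output paths are illustrative; static
# formats such as 'png' additionally require a plotly image engine (kaleido).
#
#   df = pd.DataFrame(...)                      # numeric columns
#   save_corr_matrix(df, 'corr.html')           # interactive HTML
#   save_corr_matrix(df, 'corr.png', 'png')     # static image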
"plotly.graph_objects.Figure",
"plotly.graph_objects.Heatmap",
"plotly.graph_objects.Layout"
] | [((1101, 1275), 'plotly.graph_objects.Heatmap', 'go.Heatmap', ([], {'z': 'corr', 'x': 'col_names', 'y': 'col_names', 'xgap': '(1)', 'ygap': '(1)', 'colorscale': 'sns_colorscale', 'colorbar_thickness': '(20)', 'colorbar_ticklen': '(3)', 'hovertext': 'text', 'hoverinfo': '"""text"""'}), "(z=corr, x=col_names, y=col_names, xgap=1, ygap=1, colorscale=\n sns_colorscale, colorbar_thickness=20, colorbar_ticklen=3, hovertext=\n text, hoverinfo='text')\n", (1111, 1275), True, 'import plotly.graph_objects as go\n'), ((1358, 1490), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title_text': '"""Correlation matrix"""', 'title_x': '(0.5)', 'xaxis_showgrid': '(False)', 'yaxis_showgrid': '(False)', 'yaxis_autorange': '"""reversed"""'}), "(title_text='Correlation matrix', title_x=0.5, xaxis_showgrid=\n False, yaxis_showgrid=False, yaxis_autorange='reversed')\n", (1367, 1490), True, 'import plotly.graph_objects as go\n'), ((1535, 1573), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'heatmap', 'layout': 'layout'}), '(data=heatmap, layout=layout)\n', (1544, 1573), True, 'import plotly.graph_objects as go\n'), ((565, 638), 'plotly.graph_objects.Heatmap', 'go.Heatmap', ([], {'z': 'df.T', 'x': 'df.index', 'y': 'df.columns.values', 'colorscale': '"""Viridis"""'}), "(z=df.T, x=df.index, y=df.columns.values, colorscale='Viridis')\n", (575, 638), True, 'import plotly.graph_objects as go\n')] |
from glob import glob
import re
# Use src/Deprecates.jl to replace all deprecated functions
# in entire codebase, excluding src/Deprecates.jl
# First, we build the library:
with open("src/Deprecates.jl", "r") as f:
library = {}
for line in f.read().split("\n"):
# If doesn't start with `@deprecate`, skip:
if not line.startswith("@deprecate"):
continue
# Each line is in the format:
# @deprecate <function> <replacement>
function_name = line.split(" ")[1]
replacement = line.split(" ")[2]
library[function_name] = replacement
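# For example, a line such as
#   @deprecate old_name new_name
# produces library["old_name"] == "new_name" (the names here are illustrative).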
# Now, we replace all deprecated functions in src/*.jl:
for fname in glob("**/*.jl"):
if fname == "src/Deprecates.jl":
continue
with open(fname, "r") as f:
contents = f.read()
for function_name in library:
contents = re.sub(
r"\b" + function_name + r"\b", library[function_name], contents
)
with open(fname, "w") as f:
f.write(contents) | [
"re.sub",
"glob.glob"
] | [((675, 690), 'glob.glob', 'glob', (['"""**/*.jl"""'], {}), "('**/*.jl')\n", (679, 690), False, 'from glob import glob\n'), ((867, 938), 're.sub', 're.sub', (["('\\\\b' + function_name + '\\\\b')", 'library[function_name]', 'contents'], {}), "('\\\\b' + function_name + '\\\\b', library[function_name], contents)\n", (873, 938), False, 'import re\n')] |
#imports random library which is helpful for testing outcomes, contains functions such as shuffle
import random
#imports the operator module
import operator
#imports pyplot for plotting the agents and the environment
import matplotlib.pyplot
#imports the csv library necessary for reading the 'environmental' csv data (in.csv).
import csv
#imports the agentframework file which must be in the same directory to work, by importing this we can import the agent class.
import agentframework
#creates a new empty list for what will be the csv environment data
environment = []
agents = []
num_of_agents = 10
num_of_iterations = 100
f = open('in.csv', newline='')
#Note that the correct directory must be navigated to in the terminal else the full file path will be needed
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader: # A list of rows
    rowlist = []
    for value in row: # A list of values
        rowlist.append(value)
    environment.append(rowlist)
f.close()
# Make the agents.
for i in range(num_of_agents):
agents.append(agentframework.agent(environment))
#check agents to make sure everything is running smoothly
#for i in range(num_of_agents):
#print(agents[i])
# Move the agents.
for j in range(num_of_iterations):
for i in range(num_of_agents):
agents[i].move()
agents[i].eat()
#plots the agents
matplotlib.pyplot.xlim(0, 100)
matplotlib.pyplot.ylim(0, 100)
matplotlib.pyplot.imshow(environment)
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i]._x, agents[i]._y)
matplotlib.pyplot.show()
#Extras!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
f = open("in.csv", "r")
print(f.read())
"""Final thoughts: since environment has been defined in agents __init__(self,environment) we are able to
move the environmental data the same way in which we would move the agents. I expanded the matplot axes
in order to visualise the full breadth of the data"""
""" I think the previous plot code matplotlib.pyplot.imshow(environment)
matplotlib.pyplot.show() plotted the data but in a linear fashion rather than an X/Y grid and that's why the
plot output looked odd"""
| [
"csv.reader",
"agentframework.agent"
] | [((802, 845), 'csv.reader', 'csv.reader', (['f'], {'quoting': 'csv.QUOTE_NONNUMERIC'}), '(f, quoting=csv.QUOTE_NONNUMERIC)\n', (812, 845), False, 'import csv\n'), ((1088, 1121), 'agentframework.agent', 'agentframework.agent', (['environment'], {}), '(environment)\n', (1108, 1121), False, 'import agentframework\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" User Forms """
from flask_wtf import FlaskForm, RecaptchaField
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired
class RegisterForm(FlaskForm):
name = StringField('Имя:', validators=[DataRequired()])
email = EmailField('Адрес эл. почты:', validators=[DataRequired()])
    password = PasswordField('Пароль:', validators=[DataRequired()])
password_again = PasswordField('Повторите пароль:',
validators=[DataRequired()])
recaptcha = RecaptchaField()
submit = SubmitField('Создать аккаунт')
class LoginForm(FlaskForm):
email = EmailField('Адрес эл. почты:', validators=[DataRequired()])
    password = PasswordField('Пароль:', validators=[DataRequired()])
remember_me = BooleanField('Запомнить меня')
submit = SubmitField('Войти')
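
# Usage sketch (hedged): a hypothetical Flask view showing how these forms are
# typically consumed; the route, template name, and "app" object are illustrative.
#
#   @app.route('/register', methods=['GET', 'POST'])
#   def register():
#       form = RegisterForm()
#       if form.validate_on_submit():
#           ...  # create the account here
#       return render_template('register.html', form=form)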
| [
"wtforms.BooleanField",
"flask_wtf.RecaptchaField",
"wtforms.validators.DataRequired",
"wtforms.SubmitField"
] | [((647, 663), 'flask_wtf.RecaptchaField', 'RecaptchaField', ([], {}), '()\n', (661, 663), False, 'from flask_wtf import FlaskForm, RecaptchaField\n'), ((677, 707), 'wtforms.SubmitField', 'SubmitField', (['"""Создать аккаунт"""'], {}), "('Создать аккаунт')\n", (688, 707), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField\n'), ((901, 931), 'wtforms.BooleanField', 'BooleanField', (['"""Запомнить меня"""'], {}), "('Запомнить меня')\n", (913, 931), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField\n'), ((945, 965), 'wtforms.SubmitField', 'SubmitField', (['"""Войти"""'], {}), "('Войти')\n", (956, 965), False, 'from wtforms import StringField, PasswordField, SubmitField, BooleanField\n'), ((352, 366), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (364, 366), False, 'from wtforms.validators import DataRequired\n'), ((433, 447), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (445, 447), False, 'from wtforms.validators import DataRequired\n'), ((494, 508), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (506, 508), False, 'from wtforms.validators import DataRequired\n'), ((614, 628), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (626, 628), False, 'from wtforms.validators import DataRequired\n'), ((805, 819), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (817, 819), False, 'from wtforms.validators import DataRequired\n'), ((866, 880), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (878, 880), False, 'from wtforms.validators import DataRequired\n')] |
#cython: language_level=3
import email.parser
import string
import random
def random_temp(length=10):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
def multipart_formdata(data, temp_folder, compile=True):
    # Reject anything that is not bytes; bytes.decode with "ignore" will not
    # raise UnicodeDecodeError, but a str/None argument raises AttributeError.
    try:
        data.decode("utf-8", "ignore")
    except (UnicodeDecodeError, AttributeError):
        return "Must be Bytes"
if compile == True:
new_data = b""
orign = data.decode("utf-8","ignore")
headers = orign[:orign.find("\n\n")].split("\n")
new_data += "".join(s for s in headers if "Content-Type: multipart/form-data;" in s).encode("utf-8")+b"\n\n"
if new_data.decode("utf-8").endswith("\n\n\n\n"):
new_data = new_data[:-2]
new_data += data[orign.find("\r\n\r\n")+4:]
else:
new_data = data
print(new_data.decode("utf-8"))
msg = email.parser.BytesParser().parsebytes(new_data)
files = {}
for part in msg.get_payload():
name = part.get_param('name', header='content-disposition')
if "filename=\"" in name:
dq = part.get_param('name', header='content-disposition').find("\"",1,len(name))
name = name[:dq]
temp_file = random_temp()
with open(f"{temp_folder}/{temp_file}","wb") as file:
file.write(part.get_payload(decode=True))
files[name] = {"content": part.get_payload(decode=True) , "temp":temp_file,"content_type":part.get_content_type()}
return files
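
# Usage sketch (hedged): the raw request bytes and the "tmp" folder below are
# illustrative; a temp file is written under that folder for each uploaded part.
#
#   raw = open("captured_request.bin", "rb").read()
#   files = multipart_formdata(raw, "tmp", compile=True)
#   for field, info in files.items():
#       print(field, info["temp"], info["content_type"])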
| [
"random.choice"
] | [((166, 188), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (179, 188), False, 'import random\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# django.contrib.gis.db.models re-exports django.db.models, so a single
# import is enough (the plain django.db import was shadowed anyway).
from django.contrib.gis.db import models
# Create your models here.
class firewalls(models.Model):
location = models.LineStringField(srid=3857)
type = models.IntegerField()
descript = models.CharField(max_length=70) | [
"django.contrib.gis.db.models.LineStringField",
"django.contrib.gis.db.models.CharField",
"django.contrib.gis.db.models.IntegerField"
] | [((210, 243), 'django.contrib.gis.db.models.LineStringField', 'models.LineStringField', ([], {'srid': '(3857)'}), '(srid=3857)\n', (232, 243), False, 'from django.contrib.gis.db import models\n'), ((255, 276), 'django.contrib.gis.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (274, 276), False, 'from django.contrib.gis.db import models\n'), ((292, 323), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'max_length': '(70)'}), '(max_length=70)\n', (308, 323), False, 'from django.contrib.gis.db import models\n')] |
import logging
import re
from html import unescape
from urllib.parse import quote
from dvtag.utils import create_request_session
session = create_request_session()
class DoujinVoice():
def __init__(self, rjid: str) -> None:
self.rjid = rjid
self.dl_count = 0
self.url = ""
self.work_name = ""
self.work_image = ""
self.seiyus = []
self.circle = ""
self.sale_date = ""
self._init_metadata()
self._add_metadata()
self._get_cover()
def _add_metadata(self):
html = session.get(self.url).text
try:
pattern = r'<th>声優</th>[\s\S]*?<td>[\s\S]*?(<a[\s\S]*?>[\s\S]*?)</td>'
seiyu_list_html = re.search(pattern, html).group(1)
pattern = r'<a[\s\S]*?>(.*?)<'
for seiyu_html in re.finditer(pattern, seiyu_list_html):
self.seiyus.append(unescape(seiyu_html.group(1)))
except AttributeError as e:
logging.error("Cannot get artists from {}: {}".format(
self.rjid, e))
try:
pattern = r"<th>サークル名</th>[\s\S]*?<a[\s\S]*?>(.*?)<"
circle = re.search(pattern, html).group(1)
self.circle = unescape(circle)
except AttributeError as e:
logging.error("Cannot get circle from {}: {}".format(self.rjid, e))
# get sale date
pattern = r'www\.dlsite\.com/maniax/new/=/year/([0-9]{4})/mon/([0-9]{2})/day/([0-9]{2})/'
match = re.search(pattern, html)
if match:
self.sale_date = "{}-{}-{}".format(match.group(1), match.group(2),
match.group(3))
def _init_metadata(self):
rsp = session.get(
"https://www.dlsite.com/maniax/product/info/ajax?product_id=" +
self.rjid)
try:
json_data = rsp.json()[self.rjid]
self.dl_count = int(json_data["dl_count"])
self.url = json_data["down_url"].replace("download/split",
"work").replace(
"download", "work")
self.work_name = json_data["work_name"]
self.work_image = "https:" + json_data["work_image"]
except ValueError as e:
logging.error(
f"Cannot convert a response to json or convert dl_count to int with RJ-ID {self.rjid}: {e}",
)
except KeyError as e:
logging.error(e)
def _get_cover(self):
"""
Tries to fetch a better cover
"""
try:
search_url = "https://chobit.cc/s/?f_category=vo&q_keyword=" + quote(
self.work_name)
headers = {'cookie': 'showr18=1'}
search_result = session.get(search_url, headers=headers).text
href = re.search(r'work-work-name.*?<a.*href=\"(.*?)\"',
search_result).group(1)
detail_url = "https://chobit.cc" + href
detail = session.get(detail_url, headers=headers).text
self.work_image = re.search(r'albumart="(.*?)"', detail).group(1)
except Exception as e:
logging.warning(
f"Cannot fetch cover from chobit for {self.rjid}: {e}")
| [
"dvtag.utils.create_request_session",
"html.unescape",
"urllib.parse.quote",
"logging.warning",
"re.finditer",
"logging.error",
"re.search"
] | [((141, 165), 'dvtag.utils.create_request_session', 'create_request_session', ([], {}), '()\n', (163, 165), False, 'from dvtag.utils import create_request_session\n'), ((1506, 1530), 're.search', 're.search', (['pattern', 'html'], {}), '(pattern, html)\n', (1515, 1530), False, 'import re\n'), ((834, 871), 're.finditer', 're.finditer', (['pattern', 'seiyu_list_html'], {}), '(pattern, seiyu_list_html)\n', (845, 871), False, 'import re\n'), ((1233, 1249), 'html.unescape', 'unescape', (['circle'], {}), '(circle)\n', (1241, 1249), False, 'from html import unescape\n'), ((2344, 2460), 'logging.error', 'logging.error', (['f"""Cannot convert a response to json or convert dl_count to int with RJ-ID {self.rjid}: {e}"""'], {}), "(\n f'Cannot convert a response to json or convert dl_count to int with RJ-ID {self.rjid}: {e}'\n )\n", (2357, 2460), False, 'import logging\n'), ((2524, 2540), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (2537, 2540), False, 'import logging\n'), ((2718, 2739), 'urllib.parse.quote', 'quote', (['self.work_name'], {}), '(self.work_name)\n', (2723, 2739), False, 'from urllib.parse import quote\n'), ((3244, 3315), 'logging.warning', 'logging.warning', (['f"""Cannot fetch cover from chobit for {self.rjid}: {e}"""'], {}), "(f'Cannot fetch cover from chobit for {self.rjid}: {e}')\n", (3259, 3315), False, 'import logging\n'), ((726, 750), 're.search', 're.search', (['pattern', 'html'], {}), '(pattern, html)\n', (735, 750), False, 'import re\n'), ((1173, 1197), 're.search', 're.search', (['pattern', 'html'], {}), '(pattern, html)\n', (1182, 1197), False, 'import re\n'), ((2898, 2963), 're.search', 're.search', (['"""work-work-name.*?<a.*href=\\\\"(.*?)\\\\\\""""', 'search_result'], {}), '(\'work-work-name.*?<a.*href=\\\\"(.*?)\\\\"\', search_result)\n', (2907, 2963), False, 'import re\n'), ((3153, 3190), 're.search', 're.search', (['"""albumart="(.*?)\\""""', 'detail'], {}), '(\'albumart="(.*?)"\', detail)\n', (3162, 3190), False, 'import re\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
<NAME> http://philadams.net
Grab columns from .csv files. Like cut or awk but without choking on delimiter
escaping.
See README.txt for details.
"""
import sys
import re
import csv
def pluck(rows, fields, inverse=False):
"""
rows: an iterable of iterables
yield each row in `rows`, retaining only those indices of row in `fields`.
if inverse=True, retain only those indices NOT in `fields`.
"""
retain = None
for idx, row in enumerate(rows):
# retain the appropriate columns
if not inverse:
retain = retain or fields
else:
fields = [f if f >= 0 else len(row) + f for f in fields]
retain = retain or [i for i in range(len(row)) if i not in fields]
newrow = [row[i] for i in retain if len(row) > i]
yield newrow
def type_and_index_field(field):
"""coerce field to an int, and, as the UI is one-indexed, decrememnt
field values >= 0 by 1."""
field = int(field)
if field >= 0:
field -= 1
return field
def main(args):
# parse csv data
rows = csv.reader(args.infile,
delimiter=args.delimiter, quotechar=args.quotechar)
# if --names just display col names and exit
if args.col_names:
        names = next(rows)
for idx, name in enumerate(names):
sys.stdout.write('%d. %s\n' % (idx + 1, name))
exit(0)
# skip n rows
for i in range(args.skip):
        next(rows)
# prep fields
if args.fields:
fields = []
for f in args.fields.replace(' ', '').split(','):
if re.findall(r'\d+-\d+', f):
start, stop = map(int, f.split('-'))
for g in range(start, stop + 1):
fields.append(type_and_index_field(g))
else:
fields.append(type_and_index_field(f))
else:
fields = None
# push to stdout
out = csv.writer(sys.stdout, lineterminator='\n')
    if fields is None:
        rowiter = enumerate(rows)
    else:
        rowiter = enumerate(pluck(rows, fields, inverse=args.inverse))
    for idx, row in rowiter:
if args.line_numbers:
row.insert(0, idx)
out.writerow(row)
def cli():
import argparse
# populate and parse command line options
desc = 'Grab columns from csv input.'
desc += '\nhttp://github.com/philadams/pluckr'
parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('infile', nargs='?', default=sys.stdin,
                        type=argparse.FileType('r'), help='input file (.csv)')
parser.add_argument('-f', dest='fields',
help='ordered list of columns to retain; one-indexed')
parser.add_argument('-i', '--inverse', dest='inverse', action='store_true',
help='invert column retention: drop those in -f')
parser.add_argument('-d', '--delimiter', default=',', dest='delimiter',
help='field delimiter when reading infile')
parser.add_argument('-q', '--quotechar', default='"', dest='quotechar',
help='field quotechar when reading infile')
parser.add_argument('-s', '--skip', default=0, dest='skip',
type=int, help='number of rows to skip')
parser.add_argument('-l', '--line-numbers', dest='line_numbers',
action='store_true',
help='prepend line numbers to output')
parser.add_argument('-n', '--names', dest='col_names', action='store_true',
help='print column names; assumes one header row')
args = parser.parse_args()
main(args)
| [
"argparse.FileType",
"argparse.ArgumentParser",
"csv.writer",
"re.findall",
"csv.reader",
"sys.stdout.write"
] | [((1140, 1215), 'csv.reader', 'csv.reader', (['args.infile'], {'delimiter': 'args.delimiter', 'quotechar': 'args.quotechar'}), '(args.infile, delimiter=args.delimiter, quotechar=args.quotechar)\n', (1150, 1215), False, 'import csv\n'), ((1984, 2027), 'csv.writer', 'csv.writer', (['sys.stdout'], {'lineterminator': '"""\n"""'}), "(sys.stdout, lineterminator='\\n')\n", (1994, 2027), False, 'import csv\n'), ((2445, 2486), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (2468, 2486), False, 'import argparse\n'), ((1394, 1440), 'sys.stdout.write', 'sys.stdout.write', (["('%d. %s\\n' % (idx + 1, name))"], {}), "('%d. %s\\n' % (idx + 1, name))\n", (1410, 1440), False, 'import sys\n'), ((1659, 1685), 're.findall', 're.findall', (['"""\\\\d+-\\\\d+"""', 'f'], {}), "('\\\\d+-\\\\d+', f)\n", (1669, 1685), False, 'import re\n'), ((2580, 2603), 'argparse.FileType', 'argparse.FileType', (['"""rU"""'], {}), "('rU')\n", (2597, 2603), False, 'import argparse\n')] |
"""
SAC Controller
==============
"""
# Other imports
import numpy as np
from stable_baselines3 import SAC
# Local imports
from simple_pendulum.controllers.abstract_controller import AbstractController
class SacController(AbstractController):
"""
Controller which acts on a policy which has been learned with sac.
"""
def __init__(self,
model_path,
torque_limit,
use_symmetry=True,
state_representation=2):
"""
Controller which acts on a policy which has been learned with sac.
Parameters
----------
model_path : string
path to the trained model in zip format
torque_limit : float
torque limit of the pendulum. The output of the model will be
scaled with this number
use_symmetry : bool
whether to use the left/right symmetry of the pendulum
"""
self.model = SAC.load(model_path)
self.torque_limit = float(torque_limit)
self.use_symmetry = bool(use_symmetry)
self.state_representation = state_representation
if state_representation == 2:
            # state is [th, vel]
self.low = np.array([-6*2*np.pi, -20])
self.high = np.array([6*2*np.pi, 20])
elif state_representation == 3:
# state is [cos(th), sin(th), vel]
self.low = np.array([-1., -1., -8.])
self.high = np.array([1., 1., 8.])
def get_control_output(self, meas_pos, meas_vel, meas_tau=0, meas_time=0):
"""
The function to compute the control input for the pendulum actuator
Parameters
----------
meas_pos : float
the position of the pendulum [rad]
meas_vel : float
the velocity of the pendulum [rad/s]
        meas_tau : float
            the measured torque of the pendulum [Nm]
(not used)
meas_time : float
            the elapsed time [s]
(not used)
Returns
-------
des_pos : float
the desired position of the pendulum [rad]
(not used, returns None)
des_vel : float
the desired velocity of the pendulum [rad/s]
(not used, returns None)
des_tau : float
the torque supposed to be applied by the actuator [Nm]
"""
pos = float(np.squeeze(meas_pos))
vel = float(np.squeeze(meas_vel))
# map meas pos to [-np.pi, np.pi]
meas_pos_mod = np.mod(pos + np.pi, 2 * np.pi) - np.pi
# observation = np.squeeze(np.array([meas_pos_mod, vel]))
observation = self.get_observation([meas_pos_mod, vel])
if self.use_symmetry:
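            # exploit the left/right symmetry: mirror the observation into one
            # half-plane, query the policy there, then mirror the torque back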
observation[0] *= np.sign(meas_pos_mod)
observation[1] *= np.sign(meas_pos_mod)
des_tau, _states = self.model.predict(observation)
des_tau *= np.sign(meas_pos_mod)
else:
des_tau, _states = self.model.predict(observation)
des_tau *= self.torque_limit
# since this is a pure torque controller,
# set pos_des and vel_des to None
des_pos = None
des_vel = None
return des_pos, des_vel, des_tau
def get_observation(self, state):
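        # clip the velocity to the observation bounds and encode the angle
        # according to the chosen state representation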
st = np.copy(state)
st[1] = np.clip(st[1], self.low[-1], self.high[-1])
if self.state_representation == 2:
observation = np.array([obs for obs in st], dtype=np.float32)
elif self.state_representation == 3:
observation = np.array([np.cos(st[0]),
np.sin(st[0]),
st[1]],
dtype=np.float32)
return observation
| [
"numpy.clip",
"numpy.copy",
"stable_baselines3.SAC.load",
"numpy.squeeze",
"numpy.array",
"numpy.sign",
"numpy.cos",
"numpy.sin",
"numpy.mod"
] | [((969, 989), 'stable_baselines3.SAC.load', 'SAC.load', (['model_path'], {}), '(model_path)\n', (977, 989), False, 'from stable_baselines3 import SAC\n'), ((3321, 3335), 'numpy.copy', 'np.copy', (['state'], {}), '(state)\n', (3328, 3335), True, 'import numpy as np\n'), ((3352, 3395), 'numpy.clip', 'np.clip', (['st[1]', 'self.low[-1]', 'self.high[-1]'], {}), '(st[1], self.low[-1], self.high[-1])\n', (3359, 3395), True, 'import numpy as np\n'), ((1241, 1272), 'numpy.array', 'np.array', (['[-6 * 2 * np.pi, -20]'], {}), '([-6 * 2 * np.pi, -20])\n', (1249, 1272), True, 'import numpy as np\n'), ((1293, 1322), 'numpy.array', 'np.array', (['[6 * 2 * np.pi, 20]'], {}), '([6 * 2 * np.pi, 20])\n', (1301, 1322), True, 'import numpy as np\n'), ((2432, 2452), 'numpy.squeeze', 'np.squeeze', (['meas_pos'], {}), '(meas_pos)\n', (2442, 2452), True, 'import numpy as np\n'), ((2474, 2494), 'numpy.squeeze', 'np.squeeze', (['meas_vel'], {}), '(meas_vel)\n', (2484, 2494), True, 'import numpy as np\n'), ((2562, 2592), 'numpy.mod', 'np.mod', (['(pos + np.pi)', '(2 * np.pi)'], {}), '(pos + np.pi, 2 * np.pi)\n', (2568, 2592), True, 'import numpy as np\n'), ((2792, 2813), 'numpy.sign', 'np.sign', (['meas_pos_mod'], {}), '(meas_pos_mod)\n', (2799, 2813), True, 'import numpy as np\n'), ((2844, 2865), 'numpy.sign', 'np.sign', (['meas_pos_mod'], {}), '(meas_pos_mod)\n', (2851, 2865), True, 'import numpy as np\n'), ((2952, 2973), 'numpy.sign', 'np.sign', (['meas_pos_mod'], {}), '(meas_pos_mod)\n', (2959, 2973), True, 'import numpy as np\n'), ((3465, 3512), 'numpy.array', 'np.array', (['[obs for obs in st]'], {'dtype': 'np.float32'}), '([obs for obs in st], dtype=np.float32)\n', (3473, 3512), True, 'import numpy as np\n'), ((1429, 1457), 'numpy.array', 'np.array', (['[-1.0, -1.0, -8.0]'], {}), '([-1.0, -1.0, -8.0])\n', (1437, 1457), True, 'import numpy as np\n'), ((1479, 1504), 'numpy.array', 'np.array', (['[1.0, 1.0, 8.0]'], {}), '([1.0, 1.0, 8.0])\n', (1487, 1504), True, 'import numpy as np\n'), ((3594, 3607), 'numpy.cos', 'np.cos', (['st[0]'], {}), '(st[0])\n', (3600, 3607), True, 'import numpy as np\n'), ((3645, 3658), 'numpy.sin', 'np.sin', (['st[0]'], {}), '(st[0])\n', (3651, 3658), True, 'import numpy as np\n')] |
import pytest
from mock_trace import MockTrace as mt
from examples.spaghetti import good
def test_swapin():
with pytest.raises(TypeError):
good(42, name="Eastwood")
with (
mt.patch('examples.spaghetti.good') as mgood,
mt.patch('examples.spaghetti.bad') as mbad,
mt.patch('examples.spaghetti.ugly', mocker=lambda _: "NOOP") as mugly,
):
g = mgood(42, name="Eastwood")
assert str(mgood) == "MockTrace(function=examples.spaghetti.good, mocker=good) called 1 times"
assert str(mbad) == "MockTrace(function=examples.spaghetti.bad, mocker=bad) called 1 times"
assert str(mugly) == "MockTrace(function=examples.spaghetti.ugly, mocker=<lambda>) called 2 times"
shape = mt.graph_shape()
print(shape)
assert shape == [
(None, ["examples.spaghetti.good((42,){'name': 'Eastwood'})"]),
("examples.spaghetti.good((42,){'name': 'Eastwood'})", [
'examples.spaghetti.bad((42,))', "examples.spaghetti.ugly(('Eastwood',))"]),
('examples.spaghetti.bad((42,))', ['examples.spaghetti.ugly((42,))']),
]
shape = mt.graph_shape(hide_modules=True)
print(shape)
assert shape == [(None, ["good((42,){'name': 'Eastwood'})"]), ("good((42,){'name': 'Eastwood'})", [
'bad((42,))', "ugly(('Eastwood',))"]), ('bad((42,))', ['ugly((42,))'])]
with mt.patch('examples.spaghetti.ugly', mocker=lambda _: "NOOP") as mugly:
g = good(42, name="Eastwood")
assert str(mugly) == "MockTrace(function=examples.spaghetti.ugly, mocker=<lambda>) called 2 times"
shape = mt.graph_shape(hide_modules=True)
print(shape)
assert shape == [(None, ['ugly((42,))', "ugly(('Eastwood',))"])]
| [
"mock_trace.MockTrace.patch",
"pytest.raises",
"examples.spaghetti.good",
"mock_trace.MockTrace.graph_shape"
] | [((736, 752), 'mock_trace.MockTrace.graph_shape', 'mt.graph_shape', ([], {}), '()\n', (750, 752), True, 'from mock_trace import MockTrace as mt\n'), ((1113, 1146), 'mock_trace.MockTrace.graph_shape', 'mt.graph_shape', ([], {'hide_modules': '(True)'}), '(hide_modules=True)\n', (1127, 1146), True, 'from mock_trace import MockTrace as mt\n'), ((1584, 1617), 'mock_trace.MockTrace.graph_shape', 'mt.graph_shape', ([], {'hide_modules': '(True)'}), '(hide_modules=True)\n', (1598, 1617), True, 'from mock_trace import MockTrace as mt\n'), ((121, 145), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (134, 145), False, 'import pytest\n'), ((155, 180), 'examples.spaghetti.good', 'good', (['(42)'], {'name': '"""Eastwood"""'}), "(42, name='Eastwood')\n", (159, 180), False, 'from examples.spaghetti import good\n'), ((201, 236), 'mock_trace.MockTrace.patch', 'mt.patch', (['"""examples.spaghetti.good"""'], {}), "('examples.spaghetti.good')\n", (209, 236), True, 'from mock_trace import MockTrace as mt\n'), ((255, 289), 'mock_trace.MockTrace.patch', 'mt.patch', (['"""examples.spaghetti.bad"""'], {}), "('examples.spaghetti.bad')\n", (263, 289), True, 'from mock_trace import MockTrace as mt\n'), ((307, 367), 'mock_trace.MockTrace.patch', 'mt.patch', (['"""examples.spaghetti.ugly"""'], {'mocker': "(lambda _: 'NOOP')"}), "('examples.spaghetti.ugly', mocker=lambda _: 'NOOP')\n", (315, 367), True, 'from mock_trace import MockTrace as mt\n'), ((1358, 1418), 'mock_trace.MockTrace.patch', 'mt.patch', (['"""examples.spaghetti.ugly"""'], {'mocker': "(lambda _: 'NOOP')"}), "('examples.spaghetti.ugly', mocker=lambda _: 'NOOP')\n", (1366, 1418), True, 'from mock_trace import MockTrace as mt\n'), ((1441, 1466), 'examples.spaghetti.good', 'good', (['(42)'], {'name': '"""Eastwood"""'}), "(42, name='Eastwood')\n", (1445, 1466), False, 'from examples.spaghetti import good\n')] |
"""
Fuzzing test -
<NAME> <<EMAIL>>
reported by <NAME>
"""
import random
import string
from . import ESP32Alarm, ESP32Warning
class FuzzingESP32:
def __init__(self, config):
self.get_all_fields = config["get_all_fields"]
self.cnt = 0
def set(self, name, value):
"""
Set command wrapper
arguments:
- name the parameter name as a string
- value the value to assign to the variable as any type
convertible to string
returns: an "OK" string in case of success.
"""
print("ESP32Serial-DEBUG: set %s %s" % (name, value))
def set_watchdog(self):
"""
Set the watchdog polling command
returns: an "OK" string in case of success.
"""
return self.set("watchdog_reset", 1)
def get(self, name):
"""
Get command wrapper
arguments:
- name the parameter name as a string
returns: the requested value
"""
print("ESP32Serial-DEBUG: get %s" % name)
if name == "pressure":
value = random.uniform(-1500, 1500)
elif name == "flow":
value = random.uniform(-1500, 1500)
elif name == "battery_charge":
value = random.uniform(-100, 100)
elif name == "tidal":
value = random.uniform(-1500, 1500)
elif name == "peep":
value = random.uniform(-100, 100)
elif name == "temperature":
value = random.uniform(-100, 100)
elif name == "battery_powered":
value = int(random.uniform(0, 1.5))
elif name == "bpm":
value = random.uniform(-100, 100)
elif name == "o2":
value = random.uniform(-100, 100)
elif name == "peak":
value = random.uniform(-100, 100)
elif name == "total_inspired_volume":
value = random.uniform(-100, 100)
elif name == "total_expired_volume":
value = random.uniform(-100, 100)
elif name == "volume_minute":
value = random.uniform(-100, 100)
elif name == "run":
value = '0'
elif name == "alarm":
value = '0'
else:
value = '0'
self.cnt += 1
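        # every 1000th call, return a random alphanumeric string instead of a number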
if self.cnt == 1000:
self.cnt = 0
value = ''.join(
(random.choice(string.ascii_letters + string.digits) for i in range(8)))
# print(str(self.cnt))
print(str(value))
return str(value)
def get_all(self):
"""
Get the observables as listed in the get_all_fields internal
object.
returns: a dict with member keys as written above and values as
strings.
"""
print("ESP32Serial-DEBUG: get all")
values = [self.get(field) for field in self.get_all_fields]
return dict(zip(self.get_all_fields, values))
def get_alarms(self):
"""
Get the alarms from the ESP32
returns: a ESP32Alarm instance describing the possible alarms.
"""
return ESP32Alarm(int(self.get("alarm")))
def get_warnings(self):
"""
Get the warnings from the ESP32
returns: a ESP32Warning instance describing the possible warnings.
"""
return ESP32Warning(int(self.get("warning")))
def reset_alarms(self):
"""
Reset all the raised alarms in ESP32
returns: an "OK" string in case of success.
"""
return self.set("alarm", 0)
def reset_warnings(self):
"""
Reset all the raised warnings in ESP32
returns: an "OK" string in case of success.
"""
return self.set("warning", 0)
def raise_gui_alarm(self):
"""
Raises an alarm in ESP32
returns: an "OK" string in case of success.
"""
return self.set("alarm", 1)
def snooze_hw_alarm(self, alarm_type):
"""
Function to snooze the corresponding alarm in ESP32
arguments:
- alarm_type an integer representing the alarm type. One and
only one.
returns: an "OK" string in case of success.
"""
# yes, the ESP sends alarms as binary-coded struct, but the snooze
# happens by means of the exponent
bitmap = {1 << x: x for x in range(32)}
pos = bitmap[alarm_type]
return self.set("alarm_snooze", pos)
def snooze_gui_alarm(self):
"""
Function to snooze the GUI alarm in ESP32
returns: an "OK" string in case of success.
"""
return self.set("alarm_snooze", 29)
| [
"random.uniform",
"random.choice"
] | [((1142, 1169), 'random.uniform', 'random.uniform', (['(-1500)', '(1500)'], {}), '(-1500, 1500)\n', (1156, 1169), False, 'import random\n'), ((1219, 1246), 'random.uniform', 'random.uniform', (['(-1500)', '(1500)'], {}), '(-1500, 1500)\n', (1233, 1246), False, 'import random\n'), ((1306, 1331), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (1320, 1331), False, 'import random\n'), ((2411, 2462), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (2424, 2462), False, 'import random\n'), ((1382, 1409), 'random.uniform', 'random.uniform', (['(-1500)', '(1500)'], {}), '(-1500, 1500)\n', (1396, 1409), False, 'import random\n'), ((1459, 1484), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (1473, 1484), False, 'import random\n'), ((1541, 1566), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (1555, 1566), False, 'import random\n'), ((1631, 1653), 'random.uniform', 'random.uniform', (['(0)', '(1.5)'], {}), '(0, 1.5)\n', (1645, 1653), False, 'import random\n'), ((1703, 1728), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (1717, 1728), False, 'import random\n'), ((1776, 1801), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (1790, 1801), False, 'import random\n'), ((1851, 1876), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (1865, 1876), False, 'import random\n'), ((1943, 1968), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (1957, 1968), False, 'import random\n'), ((2034, 2059), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (2048, 2059), False, 'import random\n'), ((2118, 2143), 'random.uniform', 'random.uniform', (['(-100)', '(100)'], {}), '(-100, 100)\n', (2132, 2143), False, 'import random\n')] |
from nsls2ptycho.ptycho_gui import main
main()
| [
"nsls2ptycho.ptycho_gui.main"
] | [((41, 47), 'nsls2ptycho.ptycho_gui.main', 'main', ([], {}), '()\n', (45, 47), False, 'from nsls2ptycho.ptycho_gui import main\n')] |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name = "complexity-science",
version = "0.0.6",
author = "<NAME>",
author_email = "<EMAIL>",
description = "A package for complexity science research",
long_description = long_description,
long_description_content_type = "text/markdown",
url = "https://github.com/complexity-science",
packages = setuptools.find_packages(),
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
python_requires = '>3.6',
install_requires = [
"numpy",
"matplotlib",
"pandas"
]
)
| [
"setuptools.find_packages"
] | [((474, 500), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (498, 500), False, 'import setuptools\n')] |
import pandas as pd
from matplotlib import pyplot as plt
globe = pd.read_csv("gapminder.csv", skipinitialspace=True, usecols=['life_exp', 'gdp_cap', 'year'])
life_exp = list(globe.life_exp)
gdp_cap = list(globe.gdp_cap)
year = list(globe.year)
# Basic scatter plot, log scale
plt.scatter(gdp_cap, life_exp)
plt.xscale('log')
# Scatter plot
plt.scatter(gdp_cap, life_exp)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
# Definition of tick_val and tick_lab
tick_val = [1000, 10000, 100000]
tick_lab = ['1k', '10k', '100k']
# Adapt the ticks on the x-axis
plt.xticks(tick_val, tick_lab)
# After customizing, display the plot
plt.show()
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.show"
] | [((66, 162), 'pandas.read_csv', 'pd.read_csv', (['"""gapminder.csv"""'], {'skipinitialspace': '(True)', 'usecols': "['life_exp', 'gdp_cap', 'year']"}), "('gapminder.csv', skipinitialspace=True, usecols=['life_exp',\n 'gdp_cap', 'year'])\n", (77, 162), True, 'import pandas as pd\n'), ((278, 308), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gdp_cap', 'life_exp'], {}), '(gdp_cap, life_exp)\n', (289, 308), True, 'from matplotlib import pyplot as plt\n'), ((309, 326), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (319, 326), True, 'from matplotlib import pyplot as plt\n'), ((343, 373), 'matplotlib.pyplot.scatter', 'plt.scatter', (['gdp_cap', 'life_exp'], {}), '(gdp_cap, life_exp)\n', (354, 373), True, 'from matplotlib import pyplot as plt\n'), ((401, 418), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (411, 418), True, 'from matplotlib import pyplot as plt\n'), ((419, 456), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""GDP per Capita [in USD]"""'], {}), "('GDP per Capita [in USD]')\n", (429, 456), True, 'from matplotlib import pyplot as plt\n'), ((457, 497), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Life Expectancy [in years]"""'], {}), "('Life Expectancy [in years]')\n", (467, 497), True, 'from matplotlib import pyplot as plt\n'), ((498, 536), 'matplotlib.pyplot.title', 'plt.title', (['"""World Development in 2007"""'], {}), "('World Development in 2007')\n", (507, 536), True, 'from matplotlib import pyplot as plt\n'), ((675, 705), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_val', 'tick_lab'], {}), '(tick_val, tick_lab)\n', (685, 705), True, 'from matplotlib import pyplot as plt\n'), ((745, 755), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (753, 755), True, 'from matplotlib import pyplot as plt\n')] |
from setuptools import setup, find_packages
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Education',
'Operating System :: Microsoft :: Windows :: Windows 10',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
]
setup(
name='covid19BD',
version='1.0.0',
description='It lets you interact with my database created by Covid-19 Bot hosted on repl.it for Bangladesh from 15th January 2021 - Current Day.',
long_description=open('README.md').read() + '\n\n' + open('CHANGELOG.txt').read(),
long_description_content_type="text/markdown",
url='',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=classifiers,
keywords=['Covid-19', 'Coronavirus'],
packages=find_packages(),
install_requires=['pyrebase']
)
| [
"setuptools.find_packages"
] | [((771, 786), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (784, 786), False, 'from setuptools import setup, find_packages\n')] |
import contextlib  # used by the redirect_stdout call in __init__ below
import torch
from torch import nn
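
def flat_prod(dims):
    # Assumed helper, absent from this excerpt (MLPShakerBlock, used below,
    # is likewise defined elsewhere in the original module): the product of
    # a possibly nested dims spec, e.g. flat_prod([2, [3, 4]]) == 24.
    if isinstance(dims, int):
        return dims
    out = 1
    for d in dims:
        out *= flat_prod(d)
    return out
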
class MLPShaker(nn.Module):
"""
Maps from (bs, N) to (bs, N)
where N = flat_prod(dims).
"""
def __init__(self, dims, total_repeats, n_repeats=None, verbose=False):
super().__init__()
if n_repeats is None:
n_repeats = total_repeats
self.n_repeats = n_repeats
self.dims = dims
self.flat_dims = [flat_prod(d) for d in self.dims]
total_depth = len(total_repeats)
depth = len(self.n_repeats)
s = '\t'*(total_depth-depth)
print(f'{s}Decomposing (depth={depth}) {self.dims} -> {self.flat_dims}')
print(f'{s}with repeats={self.n_repeats}')
self.mixings = nn.ModuleList([])
print(f'{s}----> {self.n_repeats[0]}x ')
for i_repeat in range(self.n_repeats[0]):
mixing = nn.ModuleList([])
for d in self.dims:
if type(d) is int:
if i_repeat==0:
print(f'{s}Putting Linear size {d}')
mixing.append(nn.Linear(d, d))
else:
if i_repeat==0:
mixing.append(MLPShakerBlock(d, total_repeats, self.n_repeats[1:]))
else:
with contextlib.redirect_stdout(None):
mixing.append(MLPShakerBlock(d, total_repeats, self.n_repeats[1:]))
self.mixings.append(mixing)
print(f'{s}<----')
def forward(self, x):
shape = x.shape
x = x.reshape(-1, *self.flat_dims)
print(f'{shape} --> {x.shape}')
for mixing in self.mixings:
for dim, mix_dim in enumerate(mixing):
dim+=1 # ignore batch size
x = x.movedim(dim, -1)
# print(x.shape, 'dim=', dim)
if type(mix_dim) is nn.Linear:
print(f'Densely mixing with {x.shape[-1]}')
x = mix_dim(x)
x = x.movedim(-1, dim)
print(f'{shape} <-- {x.shape}')
return x.reshape(shape)
| [
"torch.nn.ModuleList",
"torch.nn.Linear"
] | [((729, 746), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (742, 746), False, 'from torch import nn\n'), ((867, 884), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (880, 884), False, 'from torch import nn\n'), ((1083, 1098), 'torch.nn.Linear', 'nn.Linear', (['d', 'd'], {}), '(d, d)\n', (1092, 1098), False, 'from torch import nn\n')] |
"""
vista2 module
"""
from mungoCore import *
from useful import smartopen
def convertStrandToPM(strand):
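    # normalize a strand symbol ('>'/'<' or '+'/'-') to '+'/'-'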
if strand in ['+', '-']:
return strand
else:
return {'>': '+', '<': '-'}[strand]
def convertStrandToVista(strand):
    if strand in ['>', '<']:
        return strand
    else:
        return {'+': '>', '-': '<'}[strand]
class Gene(AbstractFeature):
"""Vista gene feature"""
def __init__(self, name='', start=0, end=0, strand=''):
self.name = name
self.start = int(start)
self.end = int(end)
self.strand = convertStrandToPM(strand)
self.exons = []
self.flipped = False
def add(self, exon):
exon.name = self.name
exon.strand = self.strand
self.exons.append(exon)
def flip(self, L):
self.flipped = not self.flipped
self.start,self.end = L+1-self.end,L+1-self.start
for exon in self.exons:
exon.start,exon.end = L+1-exon.end,L+1-exon.start
self.exons.reverse()
if self.strand=='+':
self.strand = '-'
else:
self.strand = '+'
def __iter__(self):
return iter(self.exons)
def __repr__(self):
output = ['%s %i %i %s' % (self.name, self.start, self.end, self.strand)]
for exon in self.exons:
output.append(' %s' % str(exon))
output.append('\n')
return '\n'.join(output)
def __getitem__(self, i):
return self.exons[i]
def __len__(self):
return len(self.exons)
@staticmethod
def fromTokens(tokens):
return Gene(tokens[3], tokens[1], tokens[2], tokens[0])
class Exon:
def __init__(self, start, end, kind='exon'):
self.start = int(start)
self.end = int(end)
self.kind = kind
def __repr__(self):
return '%i %i %s' % (self.start, self.end, self.kind)
@staticmethod
def fromTokens(tokens):
return Exon(tokens[0], tokens[1], tokens[2])
def load_iter(iFileHandle):
    iFile = smartopen(iFileHandle)
    gene = None
    for line in iFile:
        tokens = line.strip().split()
        if not tokens or line[0]=='#':
            continue
        elif line[0] in ['<', '>']:
            if gene is not None:
                yield gene
            gene = Gene.fromTokens(tokens)
        else:
            gene.add(Exon.fromTokens(tokens))
    if gene is not None:
        # an empty file now yields nothing instead of raising NameError
        yield gene
def load(iFileHandle):
genes = []
for gene in load_iter(iFileHandle):
genes.append(gene)
return genes
def load2(iFileHandle):
iFile = smartopen(iFileHandle)
genes = []
for line in iFile:
tokens = line.strip().split()
if not line or line[0]=='#':
continue
elif line[0] in ['<', '>']:
genes.append(Gene.fromTokens(tokens))
        else:
            genes[-1].add(Exon.fromTokens(tokens))
    return genes
| [
"useful.smartopen"
] | [((2078, 2100), 'useful.smartopen', 'smartopen', (['iFileHandle'], {}), '(iFileHandle)\n', (2087, 2100), False, 'from useful import smartopen\n'), ((2655, 2677), 'useful.smartopen', 'smartopen', (['iFileHandle'], {}), '(iFileHandle)\n', (2664, 2677), False, 'from useful import smartopen\n')] |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - verify account action
@copyright: 2012 <NAME>
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin import user, wikiutil
from MoinMoin.Page import Page
from MoinMoin.widget import html
from MoinMoin.auth import MoinAuth
def execute(pagename, request):
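    # this action is only meaningful when MoinAuth is among the configured auth methods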
found = False
for auth in request.cfg.auth:
if isinstance(auth, MoinAuth):
found = True
break
if not found:
# we will not have linked, so forbid access
request.makeForbidden(403, 'No MoinAuth in auth list')
return
page = Page(request, "FrontPage")
_ = request.getText
if not request.cfg.require_email_verification:
result = _("Verification not configured!")
request.theme.add_msg(result, "error")
return page.send_page()
uid = request.values.get('i', None)
verify = request.values.get('v', None)
# Grab user profile
theuser = user.User(request, id=uid)
# Compare the verification code
if not theuser.valid:
result = _("Unable to verify user account i=%s v=%s") % (uid, verify)
request.theme.add_msg(result, "error")
return page.send_page()
if not theuser.account_verification:
result = _("User account has already been verified!")
request.theme.add_msg(result, "error")
return page.send_page()
if theuser.account_verification != verify:
result = _("Unable to verify user account i=%s v=%s") % (uid, verify)
request.theme.add_msg(result, "error")
return page.send_page()
# All looks sane. Mark verification as done, save data
theuser.account_verification = ""
theuser.save()
loginlink = request.page.url(request, querystr={'action': 'login'})
result = _('User account verified! You can use this account to <a href="%s">login</a> now...' % loginlink)
request.theme.add_msg(result, "dialog")
return page.send_page()
| [
"MoinMoin.user.User",
"MoinMoin.Page.Page"
] | [((615, 641), 'MoinMoin.Page.Page', 'Page', (['request', '"""FrontPage"""'], {}), "(request, 'FrontPage')\n", (619, 641), False, 'from MoinMoin.Page import Page\n'), ((971, 997), 'MoinMoin.user.User', 'user.User', (['request'], {'id': 'uid'}), '(request, id=uid)\n', (980, 997), False, 'from MoinMoin import user, wikiutil\n')] |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import sys
import glob
import matplotlib.gridspec as gridspec
from mpl_toolkits.mplot3d import Axes3D
import os
#Universal constants
Msolar = 1.989e30
G = 6.67e-11
c=3e8
MBH = 4.31e6
pc = 3.0860e16
#Conversion parameter
convert = c**2/(G*MBH*Msolar)
# DM consants
r0_SI = 10 #kpc
r0 = r0_SI * 1000 *pc#* convert
rho0 = 4e7
k = rho0 * r0_SI**3
print('k = ', k)
#Define G(r); note it shadows the gravitational constant G defined above
r = np.linspace(1000*pc,8000*pc,100)
RR = r/r0
kappa = -2*k*np.pi/r
A = 1 + RR
G = (1+RR**2)**(kappa*(1-RR))*A**(2*kappa*A)*np.exp(-2*kappa*A*np.arctan(RR)) - 2/r
Y = r * (1-G) / 2
r1 = 0.1
r = np.linspace(0.01,50,100)
den = 10*r1/((r+r1)*(r**2+r1**2))
#aa = 0.2**2
#Y = r**2 * G + aa
plt.figure()
plt.plot(r,den)
#plt.ylim(-1,0.5)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.arctan"
] | [((467, 505), 'numpy.linspace', 'np.linspace', (['(1000 * pc)', '(8000 * pc)', '(100)'], {}), '(1000 * pc, 8000 * pc, 100)\n', (478, 505), True, 'import numpy as np\n'), ((664, 690), 'numpy.linspace', 'np.linspace', (['(0.01)', '(50)', '(100)'], {}), '(0.01, 50, 100)\n', (675, 690), True, 'import numpy as np\n'), ((758, 770), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (768, 770), True, 'import matplotlib.pyplot as plt\n'), ((771, 787), 'matplotlib.pyplot.plot', 'plt.plot', (['r', 'den'], {}), '(r, den)\n', (779, 787), True, 'import matplotlib.pyplot as plt\n'), ((805, 815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (813, 815), True, 'import matplotlib.pyplot as plt\n'), ((608, 621), 'numpy.arctan', 'np.arctan', (['RR'], {}), '(RR)\n', (617, 621), True, 'import numpy as np\n')] |
import collections
import math
import utils.primes as primes
# Version-specific constants - EDIT THESE VALUES
VERSION_NAME = "Version002 - Prime factorization"
VERSION_DESCRIPTION = """
Determine the smallest number divisible by all the numbers in the given
range using prime factors of each of the numbers in the given range.
"""
def _get_prime_factors(n):
prime_generator = primes.get_prime_generator()
prime = next(prime_generator)
max_divisor = math.ceil(math.sqrt(n))
prime_factors = []
while prime <= max_divisor:
while n % prime == 0:
prime_factors.append(prime)
n //= prime
prime = next(prime_generator)
    if n > 1:
        # any remainder is a prime factor larger than sqrt(n),
        # e.g. the 5 in 10 = 2 * 5
        prime_factors.append(n)
return collections.Counter(prime_factors)
def solution(resources, args):
"""Problem 5 - Version 2
Determine the smallest number divisible by all the numbers in the
given range using prime factors of each of the numbers in the given
range.
Parameters:
args.number The upper range of numbers (1 - args.numbers)
for which to find the smallest number that is
divisible by all of these numbers.
Return:
The smallest number that is divisible by all numbers in the
range 1 to args.number.
"""
prime_factors = collections.defaultdict(int)
for i in range(1, args.number + 1):
factors = _get_prime_factors(i)
for factor in factors:
prime_factors[factor] = max(factors[factor], prime_factors[factor])
retval = 1
for factor in prime_factors:
retval = retval * (factor ** prime_factors[factor])
return retval
if __name__ == "__main__":
errmsg = "Cannot run {} as a standalone script"
raise RuntimeError(errmsg.format(VERSION_NAME))
| [
"collections.Counter",
"math.sqrt",
"utils.primes.get_prime_generator",
"collections.defaultdict"
] | [((384, 412), 'utils.primes.get_prime_generator', 'primes.get_prime_generator', ([], {}), '()\n', (410, 412), True, 'import utils.primes as primes\n'), ((761, 795), 'collections.Counter', 'collections.Counter', (['prime_factors'], {}), '(prime_factors)\n', (780, 795), False, 'import collections\n'), ((1369, 1397), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (1392, 1397), False, 'import collections\n'), ((475, 487), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (484, 487), False, 'import math\n'), ((725, 749), 'collections.Counter', 'collections.Counter', (['[n]'], {}), '([n])\n', (744, 749), False, 'import collections\n')] |
from colorama import init
init()
from colorama import Fore, Back, Style
# I use these imports to produce coloured text output
class WarningMsg():
# I produce warnings about errors in a set way
def __init__(self,
header = "Warning",
body = "Add your warning here",
instruction = "",
asterixNo = 60):
print('\n','*' * asterixNo,
'\n ',header,'\n'
'\n ',body,'\n')
if instruction:
print('\n ',instruction,'\n')
print('*' * asterixNo,'\n')
class Fen():
def __init__(self, fen = '?'):
# these variables to help check elements of the fen
# A) castling
self.recognisedCastling = ['q','k','kq','Q','Qq','Qk','Qkq','K',
'Kq','Kk','Kkq','KQ','KQq','KQk','KQkq']
# used to identify a castling element in a fen string
self.validCastling = ['-','q','k','kq','Q','Qq','Qk','Qkq','K',
'Kq','Kk','Kkq','KQ','KQq','KQk','KQkq']
# used to confirm a valid castling element
# DOES NOT take into account the limitations imposed
# by the actual position
# B) EP square
self.recognisedEP =['a6','b6','c6','d6','e6','f6','g6','h6',
'a3','b3','c3','d3','e3','f3','g3','h3']
# used to identify an ep element in a fen string
self.validEPwtp = ['-','a6','b6','c6','d6','e6','f6','g6','h6']
# valid ep elements if white to play
# DOES NOT take into account the limitations imposed
# by the actual position
self.validEPbtp = ['-','a3','b3','c3','d3','e3','f3','g3','h3']
# valid ep elements if black to play
# DOES NOT take into account the limitations imposed
# by the actual position
self.validEP = self.validEPwtp + self.validEPbtp
# valid ep elements ignoring who is to play
# DOES NOT take into account the limitations imposed
# by the actual position
# C) board
self.validBoardCharacters = '12345678/kqrnbpKQRBNP'
# valid characters for a board element of a fen string
self.validSquares = ('a8','b8','c8','d8','e8','f8','g8','h8',
'a7','b7','c7','d7','e7','f7','g7','h7',
'a6','b6','c6','d6','e6','f6','g6','h6',
'a5','b5','c5','d5','e5','f5','g5','h5',
'a4','b4','c4','d4','e4','f4','g4','h4',
'a3','b3','c3','d3','e3','f3','g3','h3',
'a2','b2','c2','d2','e2','f2','g2','h2',
'a1','b1','c1','d1','e1','f1','g1','h1')
# D) other
self.errors = [] # used in the checkBoard method
self.rank1 = ['a1','b1','c1','d1','e1','f1','g1','h1']
self.rank8 = ['a8','b8','c8','d8','e8','f8','g8','h8']
# processing of the fen starts here
fen = str(fen) # forcing fen argument to a string
fen = fen.strip() # stripping leading/trailing white space
# splitting fen into sub-strings
self.fen = fen
self.fenElements = fen.split(' ')
# processing the sub-strings
# In handling a string input I have made the following assumptions
# 1) the first sub-string is always the board
# 2) the last sub-string is always the move counter IF A DIGIT and fen
# has more at least 2 elements
# 3) the penultimate sub-string is always the half move clock IF A DIGIT
        #    AND the last sub-string is ALSO A DIGIT and the fen has at least
        #    3 elements
        # 4) a toPlay, castling or ep element is recognised wherever it
        #    appears among the sub-strings
self.elementCount = len(self.fenElements)
# Here I am identifying obvious elements
self.toPlay = '?'
self.castling = '?'
self.ep = '?'
self.halfMove = '0'
self.move = '1'
for element in self.fenElements:
# does element look like a ToPlay element?
if len(element) == 1 and element in 'wb':
self.toPlay = self.checkContradiction(existing = self.toPlay,
new = element)
# does element look like a castling element?
elif element in self.recognisedCastling:
self.castling = self.checkContradiction(existing = self.castling,
new = element)
# does element look like a ep element?
elif element in self.recognisedEP:
self.ep = self.checkContradiction(existing = self.ep,
new = element)
countBlank = self.fenElements.count('-')
if countBlank > 1: # implies no castling rights and no ep square
# if castling and ep not set make these '-'
if self.castling == '?':
self.castling = '-'
if self.ep == '?':
self.ep = '-'
elif countBlank == 1: # problem here is which to apply the element
if self.castling != '?' and self.ep != '?':
pass # any '-' will be ignored as castling and ep set
elif self.castling != '?':
# '-' must relate to ep square
self.ep = '-'
elif self.ep != '?':
# '-' must relate to castling right
self.castling = '-'
else: # castling and ep element not set
# it is not clear whether a single '-' relates to
# castling or ep, replace '-' with '?', forcing input
if self.castling == '?' and self.ep == '?':
# force input of castling and ep
self.fenElements = ['?' if i=='-' else i for i in self.fenElements]
self.board = self.checkBoard(board = self.fenElements[0])
if len(self.toPlay) == 1 and self.toPlay in 'wb':
pass # no change required
elif self.toPlay == '?' or self.toPlay == '??':
self.toPlay = self.checkToPlay(toPlay = '?')
else:
self.toPlay = self.checkToPlay(toPlay = self.toPlay)
if self.castling in self.validCastling:
self.castling = self.checkCastling(castling = self.castling, board = self.board)
elif self.castling == '?' or self.castling == '??':
self.castling = self.checkCastling(castling = '?', board = self.board)
else:
self.castling = self.checkCastling(castling = self.castling, board = self.board)
#print(self.ep)
if self.ep in self.validEP:
#print('__init__: valid ep')
self.ep = self.checkEP(ep = self.ep, toPlay = self.toPlay, board = self.board)
#print('post ep type: '+ str(type(self.ep)))
#print('post checkEP: ' + self.ep)
elif self.ep == '?' or self.ep == '??':
#print('__init__: ep is ? or ??')
self.ep = self.checkEP(ep = '?', toPlay = self.toPlay, board = self.board)
#print('post ep type: '+ str(type(self.ep)))
#print('post checkEP: ' + self.ep)
else:
#print('__init__: ep other invalid')
self.ep = self.checkEP(ep = self.ep, toPlay = self.toPlay, board = self.board)
#print('post ep type: '+ str(type(self.ep)))
#print('post checkEP: ' + self.ep)
# if penultimate item on list is a digit
# take this as the halfMove
if self.elementCount > 2: # must have board element plus at least 2 elements
if self.fenElements[-2].isdigit() and self.fenElements[-1].isdigit():
# the penultimate sub-string is not accepted is the last sub-string
# is not also a digit
if self.ep != '-':
self.halfMove = '0'
message = WarningMsg(header = 'provided halfMove: ' + str(self.halfMove),
body = 'was inconsistant with existance of a valid ep square.',
instruction = 'A pawn was just moved so halfMove set to "0" ')
message
else:
self.halfMove = self.fenElements[-2]
else:
# reset value to 0
self.halfMove = '0'
else:
# set to '0'
self.halfMove = '0'
# if last item on list is a digit
# take this as the move
if self.elementCount > 1: # must have board + at least 1 other sub-string
if self.fenElements[-1].isdigit(): # floats rejected as invalid
self.move = self.fenElements[-1]
else:
# reset value to 1
self.move = '1'
else:
#set to '1'
self.move = '1'
def __repr__(self):
return self.fenReconstruct()
# This section is for routines which check fenElements
def checkContradiction(self, existing, new):
# called by __init__
# compares two elements
        # if not contradictory returns the new element
# if contradictory returns '??'
if existing == '?': #value unset
return new
elif existing == '??':
# contradictory elements in fen
return existing # leave '??' caution in place
else: # check for contradictions
if existing == new:
# make no change to existing value
return new
else: #contradictory elements in fen
return '??'
print('Warning! checkContradiction. This should never print!')
def checkBoard(self, board):
# called by __init__
# this is a series of tests on the board to see if it valid
# responsible for checking of board
# returns a valid and checked board
newBoard = ''
# to force while loop to run
self.errors.append('first time flag')
while not self.errors == []:
# to remove flag
if 'first time flag' in self.errors:
self.errors.pop(self.errors.index('first time flag'))
# if the number of squares in the board is not 64 then some of
# the subsequent methods will not work
boardString = self.boardToString(board = board, checkBoard = True)
# the boardToString method returns a string of 64 characters
# but also appends errors to self.errors if the original board
# was too short or too long'
# The appending of self.error only occurs if the boardToString
# method is called from here: hence ' = True'
for char in board:
if not char in self.validBoardCharacters:
error = ' There are invalid characters in the board string'
message = WarningMsg(header = 'Board Error',
body = error )
message
self.errors.append(error)
# count piece and pawn numbers
totalWhitePieces = 0
totalBlackPieces = 0
whiteKings = board.count('K')
totalWhitePieces += whiteKings
blackKings = board.count('k')
totalBlackPieces += blackKings
whiteQueens = board.count('Q')
totalWhitePieces += whiteQueens
blackQueens = board.count('q')
totalBlackPieces += blackQueens
whiteRooks = board.count('R')
totalWhitePieces += whiteRooks
blackRooks = board.count('r')
totalBlackPieces += blackRooks
whiteBishops = board.count('B')
totalWhitePieces += whiteBishops
blackBishops = board.count('b')
totalBlackPieces += blackBishops
whiteKnights = board.count('N')
totalWhitePieces += whiteKnights
blackKnights = board.count('n')
totalBlackPieces += blackKnights
whitePawns = board.count('P')
totalWhitePieces += whitePawns
            blackPawns = board.count('p')
            totalBlackPieces += blackPawns
            emptySquares = boardString.count('.')  # '.' only appears in the expanded string, not the fen board
totalUsedSquares = 64 - emptySquares
boardString = self.boardToString(board = board)
# exactly one king for each side
if whiteKings == 0:
error = 'There is no White King on the board'
message = WarningMsg(header = 'Illegal Position',
body = error)
message
self.errors.append(error)
if whiteKings > 1:
error = 'There are too many White Kings on the board'
message = WarningMsg(header = 'Illegal Position',
body = error)
message
self.errors.append(error)
if blackKings == 0:
error ='There is no Black King on the board'
message = WarningMsg(header = 'Illegal Position',
body = error)
message
self.errors.append(error)
if blackKings > 1:
error = 'There are too many Black Kings on the board'
message = WarningMsg(header = 'Illegal Position',
body = error)
message
self.errors.append(error)
# no more than 8 pawns of each colour
if whitePawns > 8:
error = 'There are too many white pawns on the board'
message = WarningMsg(header = 'Illegal Position',
body = error, instruction = ' board has ' + str(whitePawns) + ' white pawns')
message
self.errors.append(error)
if blackPawns > 8:
error = 'There are too many black pawns on the board'
message = WarningMsg(header = 'Illegal Position',
body = error, instruction = ' board has ' + str(blackPawns) + ' black pawns')
message
self.errors.append(error)
# total of pawns and queens not more that 9
if whiteQueens > 1:
if whitePawns + whiteQueens > 9:
error = 'White queens plus pawns should not be more than 9'
message = WarningMsg(header = 'Illegal Position',
body = error, instruction = ' White has ' + str(whitePawns) + ' pawns ' + str(whiteQueens) + ' queens')
message
self.errors.append(error)
if blackQueens > 1:
if blackPawns + blackQueens > 9:
error = 'Black queens plus pawns should not be more than 9'
message = WarningMsg(header = 'Illegal Position',
body = error, instruction = ' Black has ' + str(blackPawns) + ' pawns ' + str(blackQueens) + ' queens')
message
self.errors.append(error)
# white or black pawns on 1st or 8th ranks
rank1 = self.interrogateBoard(targetSquares = self.rank1,
boardString = boardString)
rank8 = self.interrogateBoard(targetSquares = self.rank8,
boardString = boardString)
if rank1.count('P'):
error = 'There are white pawns on the 1st rank'
message = WarningMsg(header = 'Illegal Position',
body = error)
message
self.errors.append(error)
if rank8.count('P'):
error = 'There are white pawns on the 8th rank'
message = WarningMsg(header = 'Illegal Position',
body = error)
message
self.errors.append(error)
if rank1.count('p'):
error = 'There are black pawns on the 1st rank'
message = WarningMsg(header = 'Illegal Position',
body = error)
message
self.errors.append(error)
if rank8.count('p'):
error = 'There are black pawns on the 8th rank'
message = WarningMsg(header = 'Illegal Position',
body = error)
message
self.errors.append(error)
if self.errors:
self.displayBoardString(self.boardToString(board = board, checkBoard = True))
print('\n')
board= self.inputBoard(reasons = self.errors, board= board)
self.errors = []
return board
def checkToPlay(self, toPlay):
# called by __init__
# calls inputToPlay, which is responsible for returning
# a valid toPlay element
# returns a valid ep
if toPlay == 'w' or toPlay == 'b':
return toPlay
else:
message = WarningMsg(header = 'Error in To Play element of fen',
body = str(toPlay)+' input is not valid',
instruction = 'should be either "w" or "b". Please re-input.')
message
return self.inputToPlay()
def checkCastling(self, castling, board):
# called by __init__
# calls inputCastling, which is responsible for returning a valid
# castling element
# processes to recieved value of castling by comparing it to the
# board automatically changing castling elements where these
# are clearly incorrect
# returns a valid and checked castling element
if not castling in self.validCastling:
message = WarningMsg(header = 'Castling: '+str(castling),
body = 'The Castling Element is not in a valid form',
instruction = 'format "-" or up to 4 letters in order KQkq')
message
castling = self.inputCastling()
boardString = self.boardToString(board = board)
checkedSquares = self.interrogateBoard(boardString = boardString,
targetSquares = ['e1','h1','a1','e8','h8','a8'])
e1 = checkedSquares[0]
h1 = checkedSquares[1]
a1 = checkedSquares[2]
e8 = checkedSquares[3]
h8 = checkedSquares[4]
a8 = checkedSquares[5]
if castling == '-':
newCastling = '-'
else:
newCastling = ''
for char in castling:
if char == 'K':
if e1 == 'K' and h1 == 'R':
newCastling += 'K'
if char == 'Q':
if e1 == 'K' and a1 == 'R':
newCastling += 'Q'
if char == 'k':
if e8 == 'k' and h8 == 'r':
newCastling += 'k'
if char == 'q':
if e8 == 'k' and a8 == 'r':
newCastling += 'q'
if newCastling != castling:
message = WarningMsg(header = 'provided castling: ' + str(castling),
body = 'was inconsistant with rook and king positions',
instruction = 'was changed to: '+ str(newCastling))
message
if newCastling == '':
newCastling = '-'
return newCastling
def checkEP(self, ep, toPlay, board):
#print('checkEP in: '+ ep)
providedEP = ep
if ep == '-':
newEP = ep #assumed correct
elif ep in self.validEP:
if toPlay == 'w':
if ep in self.validEPwtp:
newEP = ep
else:
message = WarningMsg(header = 'EP square: ' + str(ep),
body = 'The EP square is not valid in a "white to play" position')
message
newEP = self.inputEP(toPlay = 'w')
providedEP = newEP
#print('checkEP: invalid wtp ')
#print(newEP)
#print(type(newEP))
elif toPlay == 'b':
if ep in self.validEPbtp:
newEP = ep
else:
message = WarningMsg(header = 'EP square' + str(ep),
body = 'The EP square is not valid in a "black to play" position')
message
newEP = self.inputEP(toPlay = 'b')
providedEP = newEP
#print('checkEP: invalid btp ')
#print(newEP)
#print(type(newEP))
else:
newEP = self.inputEP(toPlay = toPlay)
providedEP = newEP
#print('checkEP: invalid ep'+ep)
#print(newEP)
#print(type(newEP))
if newEP != '-': # if not set no further checks required
boardString = self.boardToString(board = board)
# find rank and file of ep
file = newEP[0]
#print(type(file))
rank = newEP[1]
#print(type(rank))
if newEP == 'a6':
checkedSquares = self.interrogateBoard(boardString = boardString,
targetSquares = ['a7','a5','b5'])
if checkedSquares != ['.','p','P']:
newEP = '-'
elif newEP == 'a3':
checkedSquares = self.interrogateBoard(boardString = boardString,
targetSquares = ['a2','a4','b4'])
#print(checkedSquares)
if checkedSquares != ['.','P','p']:
newEP = '-'
elif newEP == 'h6':
checkedSquares = self.interrogateBoard(boardString = boardString,
targetSquares = ['h7','h5','g5'])
if checkedSquares != ['.','p','P']:
newEP = '-'
elif newEP == 'h3':
checkedSquares = self.interrogateBoard(boardString = boardString,
targetSquares = ['h2','h4','g4'])
if checkedSquares != ['.','P','p']:
newEP = '-'
else:
if toPlay == 'w':
movedFromRank = '7'
movedToRank = '5'
attackerRank = '5'
ownPawn = 'p'
enemyPawn = 'P'
else:
movedFromRank = '2'
movedToRank = '4'
attackerRank ='4'
ownPawn = 'P'
enemyPawn = 'p'
files = ['a','b','c','d','e','f','g','h']
currentFileNumber = files.index(file)
leftAttackFile = str(files[currentFileNumber-1])
rightAttackFile = str(files[currentFileNumber+1])
checkedSquares = self.interrogateBoard(boardString = boardString,
targetSquares = [file+movedFromRank,
file+movedToRank,
leftAttackFile+attackerRank,
rightAttackFile+attackerRank])
print(checkedSquares)
if checkedSquares == ['.',ownPawn,enemyPawn,enemyPawn] or checkedSquares == ['.',ownPawn,enemyPawn,'.'] or checkedSquares == ['.',ownPawn,'.',enemyPawn] :
pass
else:
newEP = '-'
if providedEP != newEP:
message = WarningMsg(header = 'provided ep: ' + str(providedEP),
body = 'was inconsistant with pawn positions',
instruction = 'was changed to "-"')
message
#print('checkEP return')
#print(type(newEP))
return newEP
# this section is for input routines which return a corrected value
# these are static methods to allow mock input testing in pytest
def inputBoard(self, board, reasons = []):
# called by checkBoard
# checkBoard is responsible for verifying position
# returns an unchecked board in fen format
print('\n the board element of the current string is: \n',
board)
if reasons:
print('\n the current board is incorrect because: \n',
reasons)
newBoard = input('\n please input a new string which represents the position\n')
return newBoard
def inputToPlay(self):
# called by checkToPlay
# returns a checked toPlay value
newToPlay = '?'
print('\n Is it white or black is to play in this position?')
while newToPlay not in 'wb':
newToPlay = input('\n please input "w" or "b" \n')
if newToPlay not in 'wb':
print('\n*** input incorrect ***\n')
return newToPlay
def inputCastling(self):
# called by checkCastling
# responsible to check return value against self.validCastling
# returns a generally valid castling value
castling = '?'
while not castling in self.validCastling:
print('valid castling strings must be one of the following:')
print(self.validCastling)
castling = input('please input one of these\n')
if not castling in self.validCastling:
print('\n*** input incorrect ***\n')
return castling
def inputEP(self, toPlay):
# called by checkEP
# responsible to check return against self.validEPbtp or
# selfValidEPwtp
# return a valid ep square
ep = '?'
if toPlay == 'w':
validSquares = self.validEPwtp
else:
validSquares = self.validEPbtp
while not ep in validSquares :
print('\nvalid inputs are:\n')
print(validSquares)
ep = input('\n please input "-" if no ep square, or a valid ep square\n')
if not ep in validSquares:
print('\n*** input incorrect ***\n')
#print(ep)
#print(type(ep))
return ep
def inputSquare(self):
# called by square ToFenPosition
# responsible for checking return square if in self.validSquares
# returns a valid square name
square = '?'
while square not in self.validSquares:
square = input('please input a valid square')
return square
# This section is for simple display options
def displayBoardString(self, boardString = ''):
if boardString == '':
boardString = self.boardToString(self.board)
printable = ''
for i in range(64):
if i % 8 == 0:
printable += '\n '+boardString[i]
else:
printable += ' '+boardString[i]
print(printable)
def displayBoard(self, boardArray = ''):
if boardArray == '':
boardArray = self.boardToArray(self.board)
print('\n')
for rank in boardArray:
print(rank)
print('\n')
def displayToPlay(self, toPlay = ''):
        if toPlay == '':
            toPlay = self.toPlay
if toPlay == 'w':
print(' white to play\n')
elif toPlay == 'b':
print(' black to play\n')
else:
print('\n')
# this section is for conversions from the board representation in the
# fen to other internal representations
def boardToString(self, board = '', checkBoard = False):
# convert board to string
        # if the board does not expand to exactly 64 squares it is padded or
        # truncated below (and errors are recorded when called from checkBoard)
if board == '':
board = self.board
boardString = ''
if board == '':
if checkBoard:
# self.board not set
board = '8/8/8/8/8/8/8/8' #empty board
else:
board = self.board
#convert board to string
for char in board:
if char == '/':
pass
elif char in self.validBoardCharacters:
if char.isdigit():
boardString += '.' * int(char)
else:
boardString += char
else:
                boardString += '?' # identifies an invalid character
# ensure boardString is exactly 64 digits long
if len(boardString) > 64:
if checkBoard: # if called from checkBoard
self.errors.append('board has too many squares')
boardString = boardString[0:64]
if len(boardString) < 64:
if checkBoard: # if called from checkBoard
self.errors.append('board has too few squares')
paddingLength = 64 - len(boardString)
boardString += '.' * paddingLength # add blank squares to end
return boardString
def boardToArray(self, board = ''):
if board == '':
board = self.board
boardArray = []
rank = ' '
for char in board:
if char == '/':
rank += ('\n')
boardArray.append(rank)
rank = ' '
elif char in self.validBoardCharacters:
if char.isdigit():
count = int(char)
for x in range(count):
rank += '. '
else:
if char.islower():
rank += Fore.RED + char + ' ' + Style.RESET_ALL
else:
rank += char + ' '
else:
rank += '? '
boardArray.append(rank + '\n')
return boardArray
    def augmentBoard(self, boardArray = []):
        if boardArray == []:
boardArray = self.boardToArray(self.board)
augmentedBoard = []
augmentedBoard.append(Fore.GREEN+'\n a b c d e f g h \n'+Style.RESET_ALL)
augmentedRank = Fore.GREEN+' 8 '+Style.RESET_ALL
rankCount = 8
for rank in boardArray:
augmentedRank = augmentedRank + rank
augmentedBoard.append(augmentedRank)
rankCount -= 1
augmentedRank = Fore.GREEN+' ' + str(rankCount) + ' '+Style.RESET_ALL
return augmentedBoard
# board searching
def squareToFenPosition(self,square):
# called by interrogateBoard
        # calls inputSquare, which has responsibility to return a valid square
# return: integer in range 0 to 63
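        # e.g. 'a8' -> 0 and 'h1' -> 63, assuming self.validSquares lists
        # the squares in fen reading order (rank 8 down to rank 1, file a to h)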
if square not in self.validSquares:
square = self.inputSquare()
return self.validSquares.index(square)
def interrogateBoard(self, targetSquares, boardString = '' ):
#called by checkCastling
# returns list of pieces on each target square
returnList = []
if boardString == '':
boardString = self.boardToString(board = self.board)
for targetSquare in targetSquares:
returnList.append(boardString[self.squareToFenPosition(targetSquare)])
return returnList
# fen reconstruction
def fenReconstruct(self):
# this will recompile the elements into a valid fen
a = self.board+' '
b = self.toPlay+' '
c = self.castling+' '
d = self.ep+' '
e = self.halfMove+' '
f = self.move
return a + b + c + d + e + f
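        # e.g. board='8/8/8/8/8/8/8/8', toPlay='w', castling='-', ep='-',
        # halfMove='0', move='1'  ->  '8/8/8/8/8/8/8/8 w - - 0 1'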
# initial test
def main():
print('starting direct tests\n')
    test = Fen('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq e6 0 1')
    print('should be the starting position with e6 as an ep square: ')
    print(test.board, test.toPlay, test.castling, test.ep, test.halfMove, test.move)
if __name__ == '__main__' :
main()
| [
"colorama.init"
] | [((26, 32), 'colorama.init', 'init', ([], {}), '()\n', (30, 32), False, 'from colorama import init\n')] |
from itertools import count
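# find the 2020th perfect square whose decimal digits are all themselves
# perfect squares (0, 1, 4 or 9)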
def judge(x):
while x:
        if x % 10 not in (0, 1, 4, 9):
return False
x //= 10
return True
cnt = 0
for i in count():
x = i*i
if judge(x):
cnt += 1
if cnt == 2020:
print(x)
break
| [
"itertools.count"
] | [((175, 182), 'itertools.count', 'count', ([], {}), '()\n', (180, 182), False, 'from itertools import count\n')] |
from optimism.JaxConfig import *
from optimism import FunctionSpace
from optimism import Interpolants
from optimism.material import LinearElastic as MatModel
from optimism import Mesh
from optimism import Mechanics
from optimism.Timer import Timer
from optimism.EquationSolver import newton_solve
from optimism import QuadratureRule
from optimism import Surface
from optimism import TractionBC
from optimism.test import MeshFixture
E = 1.0
nu = 0.3
props = {'elastic modulus': E,
'poisson ratio': nu,
'strain measure': 'linear'}
class PatchTest(MeshFixture.MeshFixture):
def setUp(self):
self.Nx = 7
self.Ny = 7
xRange = [0.,1.]
yRange = [0.,1.]
self.targetDispGrad = np.array([[0.1, -0.2],[0.4, -0.1]])
self.mesh, self.U = self.create_mesh_and_disp(self.Nx, self.Ny, xRange, yRange,
lambda x : self.targetDispGrad.dot(x))
quadRule = QuadratureRule.create_quadrature_rule_on_triangle(degree=1)
self.fs = FunctionSpace.construct_function_space(self.mesh, quadRule)
materialModel = MatModel.create_material_model_functions(props)
mcxFuncs = \
Mechanics.create_mechanics_functions(self.fs,
"plane strain",
materialModel)
self.compute_energy = jit(mcxFuncs.compute_strain_energy)
self.internals = mcxFuncs.compute_initial_state()
def test_dirichlet_patch_test(self):
EBCs = [Mesh.EssentialBC(nodeSet='all_boundary', field=0),
Mesh.EssentialBC(nodeSet='all_boundary', field=1)]
dofManager = Mesh.DofManager(self.mesh, self.U.shape, EBCs)
Ubc = dofManager.get_bc_values(self.U)
# Uu is U_unconstrained
@jit
def objective(Uu):
U = dofManager.create_field(Uu, Ubc)
return self.compute_energy(U, self.internals)
with Timer(name="NewtonSolve"):
Uu = newton_solve(objective, dofManager.get_unknown_values(self.U))
self.U = dofManager.create_field(Uu, Ubc)
dispGrads = FunctionSpace.compute_field_gradient(self.fs, self.U)
ne, nqpe = self.fs.vols.shape
for dg in dispGrads.reshape(ne*nqpe,2,2):
self.assertArrayNear(dg, self.targetDispGrad, 14)
grad_func = jit(grad(objective))
Uu = dofManager.get_unknown_values(self.U)
self.assertNear(np.linalg.norm(grad_func(Uu)), 0.0, 14)
def test_neumann_patch_test(self):
EBCs = []
EBCs.append(Mesh.EssentialBC(nodeSet='left', field=0))
EBCs.append(Mesh.EssentialBC(nodeSet='bottom', field=1))
self.U = np.zeros(self.U.shape)
dofManager = Mesh.DofManager(self.mesh, self.U.shape, EBCs)
Ubc = dofManager.get_bc_values(self.U)
sigma11 = 1.0
sigma22 = 0.0
right_traction_func = lambda X: np.array([sigma11, 0.0])
top_traction_func = lambda X: np.array([0.0, sigma22])
quadRule = QuadratureRule.create_quadrature_rule_1D(degree=2)
@jit
def objective(Uu):
U = dofManager.create_field(Uu, Ubc)
internalPotential = self.compute_energy(U, self.internals)
loadPotential = TractionBC.compute_traction_potential_energy(self.mesh, U, quadRule, self.mesh.sideSets['right'], right_traction_func)
loadPotential += TractionBC.compute_traction_potential_energy(self.mesh, U, quadRule, self.mesh.sideSets['top'], top_traction_func)
return internalPotential + loadPotential
with Timer(name="NewtonSolve"):
Uu = newton_solve(objective, dofManager.get_unknown_values(self.U))
self.U = dofManager.create_field(Uu, Ubc)
# exact solution
modulus1 = (1.0 - nu**2)/E
modulus2 = -nu*(1.0+nu)/E
UExact = np.column_stack( ((modulus1*sigma11 + modulus2*sigma22)*self.mesh.coords[:,0],
(modulus2*sigma11 + modulus1*sigma22)*self.mesh.coords[:,1]) )
self.assertArrayNear(self.U, UExact, 14)
class PatchTestQuadraticElements(MeshFixture.MeshFixture):
def setUp(self):
self.Nx = 3
self.Ny = 3
xRange = [0.,1.]
yRange = [0.,1.]
self.targetDispGrad = np.array([[0.1, -0.2],[0.4, -0.1]])
mesh, _ = self.create_mesh_and_disp(self.Nx, self.Ny, xRange, yRange,
lambda x : self.targetDispGrad.dot(x))
self.mesh = Mesh.create_higher_order_mesh_from_simplex_mesh(mesh, order=2, createNodeSetsFromSideSets=True)
self.UTarget = [email protected]
self.quadRule = QuadratureRule.create_quadrature_rule_on_triangle(degree=2)
self.fs = FunctionSpace.construct_function_space(self.mesh, self.quadRule)
materialModel = MatModel.create_material_model_functions(props)
mcxFuncs = \
Mechanics.create_mechanics_functions(self.fs,
"plane strain",
materialModel)
self.compute_energy = jit(mcxFuncs.compute_strain_energy)
self.internals = mcxFuncs.compute_initial_state()
def test_dirichlet_patch_test_with_quadratic_elements(self):
EBCs = []
EBCs.append(Mesh.EssentialBC(nodeSet='all_boundary', field=0))
EBCs.append(Mesh.EssentialBC(nodeSet='all_boundary', field=1))
dofManager = Mesh.DofManager(self.mesh, self.UTarget.shape, EBCs)
Ubc = dofManager.get_bc_values(self.UTarget)
@jit
def objective(Uu):
U = dofManager.create_field(Uu, Ubc)
return self.compute_energy(U, self.internals)
with Timer(name="NewtonSolve"):
Uu = newton_solve(objective, dofManager.get_unknown_values(self.UTarget))
U = dofManager.create_field(Uu, Ubc)
dispGrads = FunctionSpace.compute_field_gradient(self.fs, U)
ne, nqpe = self.fs.vols.shape
for dg in dispGrads.reshape(ne*nqpe,2,2):
self.assertArrayNear(dg, self.targetDispGrad, 14)
grad_func = jit(grad(objective))
Uu = dofManager.get_unknown_values(U)
self.assertNear(np.linalg.norm(grad_func(Uu)), 0.0, 14)
def test_dirichlet_patch_test_with_quadratic_elements_and_constant_jac_projection(self):
EBCs = []
EBCs.append(Mesh.EssentialBC(nodeSet='all_boundary', field=0))
EBCs.append(Mesh.EssentialBC(nodeSet='all_boundary', field=1))
dofManager = Mesh.DofManager(self.mesh, self.UTarget.shape, EBCs)
Ubc = dofManager.get_bc_values(self.UTarget)
masterForJ = Interpolants.make_master_tri_element(degree=0)
shapesForJ = Interpolants.compute_shapes_on_tri(masterForJ,
self.quadRule.xigauss)
def modify_grad(elemGrads, elemShapes, elemVols, elemNodalDisps, elemNodalCoords):
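            # replace the volumetric part of each element gradient with its
            # volume average (an F-bar-like projection) before extending the
            # 2D gradient to plane strain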
elemGrads = Mechanics.volume_average_J_gradient_transformation(elemGrads, elemVols, shapesForJ)
return Mechanics.plane_strain_gradient_transformation(elemGrads, elemShapes, elemVols, elemNodalDisps, elemNodalCoords)
@jit
def objective(Uu):
U = dofManager.create_field(Uu, Ubc)
return self.compute_energy(U, self.internals)
with Timer(name="NewtonSolve"):
Uu = newton_solve(objective, dofManager.get_unknown_values(self.UTarget))
U = dofManager.create_field(Uu, Ubc)
dispGrads = FunctionSpace.compute_field_gradient(self.fs, U, modify_grad)
ne, nqpe = self.fs.vols.shape
for dg in dispGrads.reshape(ne*nqpe,*dispGrads.shape[2:]):
self.assertArrayNear(dg[:2,:2], self.targetDispGrad, 14)
grad_func = jit(grad(objective))
Uu = dofManager.get_unknown_values(U)
self.assertNear(np.linalg.norm(grad_func(Uu)), 0.0, 14)
if __name__ == '__main__':
MeshFixture.unittest.main()
| [
"optimism.Mesh.create_higher_order_mesh_from_simplex_mesh",
"optimism.Interpolants.compute_shapes_on_tri",
"optimism.Mechanics.plane_strain_gradient_transformation",
"optimism.QuadratureRule.create_quadrature_rule_1D",
"optimism.Interpolants.make_master_tri_element",
"optimism.Mesh.DofManager",
"optimism.TractionBC.compute_traction_potential_energy",
"optimism.FunctionSpace.compute_field_gradient",
"optimism.Mechanics.volume_average_J_gradient_transformation",
"optimism.FunctionSpace.construct_function_space",
"optimism.Mechanics.create_mechanics_functions",
"optimism.test.MeshFixture.unittest.main",
"optimism.Timer.Timer",
"optimism.QuadratureRule.create_quadrature_rule_on_triangle",
"optimism.Mesh.EssentialBC",
"optimism.material.LinearElastic.create_material_model_functions"
] | [((8247, 8274), 'optimism.test.MeshFixture.unittest.main', 'MeshFixture.unittest.main', ([], {}), '()\n', (8272, 8274), False, 'from optimism.test import MeshFixture\n'), ((994, 1053), 'optimism.QuadratureRule.create_quadrature_rule_on_triangle', 'QuadratureRule.create_quadrature_rule_on_triangle', ([], {'degree': '(1)'}), '(degree=1)\n', (1043, 1053), False, 'from optimism import QuadratureRule\n'), ((1072, 1131), 'optimism.FunctionSpace.construct_function_space', 'FunctionSpace.construct_function_space', (['self.mesh', 'quadRule'], {}), '(self.mesh, quadRule)\n', (1110, 1131), False, 'from optimism import FunctionSpace\n'), ((1157, 1204), 'optimism.material.LinearElastic.create_material_model_functions', 'MatModel.create_material_model_functions', (['props'], {}), '(props)\n', (1197, 1204), True, 'from optimism.material import LinearElastic as MatModel\n'), ((1247, 1323), 'optimism.Mechanics.create_mechanics_functions', 'Mechanics.create_mechanics_functions', (['self.fs', '"""plane strain"""', 'materialModel'], {}), "(self.fs, 'plane strain', materialModel)\n", (1283, 1323), False, 'from optimism import Mechanics\n'), ((1752, 1798), 'optimism.Mesh.DofManager', 'Mesh.DofManager', (['self.mesh', 'self.U.shape', 'EBCs'], {}), '(self.mesh, self.U.shape, EBCs)\n', (1767, 1798), False, 'from optimism import Mesh\n'), ((2247, 2300), 'optimism.FunctionSpace.compute_field_gradient', 'FunctionSpace.compute_field_gradient', (['self.fs', 'self.U'], {}), '(self.fs, self.U)\n', (2283, 2300), False, 'from optimism import FunctionSpace\n'), ((2857, 2903), 'optimism.Mesh.DofManager', 'Mesh.DofManager', (['self.mesh', 'self.U.shape', 'EBCs'], {}), '(self.mesh, self.U.shape, EBCs)\n', (2872, 2903), False, 'from optimism import Mesh\n'), ((3158, 3208), 'optimism.QuadratureRule.create_quadrature_rule_1D', 'QuadratureRule.create_quadrature_rule_1D', ([], {'degree': '(2)'}), '(degree=2)\n', (3198, 3208), False, 'from optimism import QuadratureRule\n'), ((4697, 4796), 'optimism.Mesh.create_higher_order_mesh_from_simplex_mesh', 'Mesh.create_higher_order_mesh_from_simplex_mesh', (['mesh'], {'order': '(2)', 'createNodeSetsFromSideSets': '(True)'}), '(mesh, order=2,\n createNodeSetsFromSideSets=True)\n', (4744, 4796), False, 'from optimism import Mesh\n'), ((4889, 4948), 'optimism.QuadratureRule.create_quadrature_rule_on_triangle', 'QuadratureRule.create_quadrature_rule_on_triangle', ([], {'degree': '(2)'}), '(degree=2)\n', (4938, 4948), False, 'from optimism import QuadratureRule\n'), ((4967, 5031), 'optimism.FunctionSpace.construct_function_space', 'FunctionSpace.construct_function_space', (['self.mesh', 'self.quadRule'], {}), '(self.mesh, self.quadRule)\n', (5005, 5031), False, 'from optimism import FunctionSpace\n'), ((5057, 5104), 'optimism.material.LinearElastic.create_material_model_functions', 'MatModel.create_material_model_functions', (['props'], {}), '(props)\n', (5097, 5104), True, 'from optimism.material import LinearElastic as MatModel\n'), ((5139, 5215), 'optimism.Mechanics.create_mechanics_functions', 'Mechanics.create_mechanics_functions', (['self.fs', '"""plane strain"""', 'materialModel'], {}), "(self.fs, 'plane strain', materialModel)\n", (5175, 5215), False, 'from optimism import Mechanics\n'), ((5691, 5743), 'optimism.Mesh.DofManager', 'Mesh.DofManager', (['self.mesh', 'self.UTarget.shape', 'EBCs'], {}), '(self.mesh, self.UTarget.shape, EBCs)\n', (5706, 5743), False, 'from optimism import Mesh\n'), ((6167, 6215), 'optimism.FunctionSpace.compute_field_gradient', 
'FunctionSpace.compute_field_gradient', (['self.fs', 'U'], {}), '(self.fs, U)\n', (6203, 6215), False, 'from optimism import FunctionSpace\n'), ((6794, 6846), 'optimism.Mesh.DofManager', 'Mesh.DofManager', (['self.mesh', 'self.UTarget.shape', 'EBCs'], {}), '(self.mesh, self.UTarget.shape, EBCs)\n', (6809, 6846), False, 'from optimism import Mesh\n'), ((6922, 6968), 'optimism.Interpolants.make_master_tri_element', 'Interpolants.make_master_tri_element', ([], {'degree': '(0)'}), '(degree=0)\n', (6958, 6968), False, 'from optimism import Interpolants\n'), ((6990, 7059), 'optimism.Interpolants.compute_shapes_on_tri', 'Interpolants.compute_shapes_on_tri', (['masterForJ', 'self.quadRule.xigauss'], {}), '(masterForJ, self.quadRule.xigauss)\n', (7024, 7059), False, 'from optimism import Interpolants\n'), ((7818, 7879), 'optimism.FunctionSpace.compute_field_gradient', 'FunctionSpace.compute_field_gradient', (['self.fs', 'U', 'modify_grad'], {}), '(self.fs, U, modify_grad)\n', (7854, 7879), False, 'from optimism import FunctionSpace\n'), ((1613, 1662), 'optimism.Mesh.EssentialBC', 'Mesh.EssentialBC', ([], {'nodeSet': '"""all_boundary"""', 'field': '(0)'}), "(nodeSet='all_boundary', field=0)\n", (1629, 1662), False, 'from optimism import Mesh\n'), ((1680, 1729), 'optimism.Mesh.EssentialBC', 'Mesh.EssentialBC', ([], {'nodeSet': '"""all_boundary"""', 'field': '(1)'}), "(nodeSet='all_boundary', field=1)\n", (1696, 1729), False, 'from optimism import Mesh\n'), ((2056, 2081), 'optimism.Timer.Timer', 'Timer', ([], {'name': '"""NewtonSolve"""'}), "(name='NewtonSolve')\n", (2061, 2081), False, 'from optimism.Timer import Timer\n'), ((2687, 2728), 'optimism.Mesh.EssentialBC', 'Mesh.EssentialBC', ([], {'nodeSet': '"""left"""', 'field': '(0)'}), "(nodeSet='left', field=0)\n", (2703, 2728), False, 'from optimism import Mesh\n'), ((2750, 2793), 'optimism.Mesh.EssentialBC', 'Mesh.EssentialBC', ([], {'nodeSet': '"""bottom"""', 'field': '(1)'}), "(nodeSet='bottom', field=1)\n", (2766, 2793), False, 'from optimism import Mesh\n'), ((3406, 3529), 'optimism.TractionBC.compute_traction_potential_energy', 'TractionBC.compute_traction_potential_energy', (['self.mesh', 'U', 'quadRule', "self.mesh.sideSets['right']", 'right_traction_func'], {}), "(self.mesh, U, quadRule, self.\n mesh.sideSets['right'], right_traction_func)\n", (3450, 3529), False, 'from optimism import TractionBC\n'), ((3554, 3673), 'optimism.TractionBC.compute_traction_potential_energy', 'TractionBC.compute_traction_potential_energy', (['self.mesh', 'U', 'quadRule', "self.mesh.sideSets['top']", 'top_traction_func'], {}), "(self.mesh, U, quadRule, self.\n mesh.sideSets['top'], top_traction_func)\n", (3598, 3673), False, 'from optimism import TractionBC\n'), ((3744, 3769), 'optimism.Timer.Timer', 'Timer', ([], {'name': '"""NewtonSolve"""'}), "(name='NewtonSolve')\n", (3749, 3769), False, 'from optimism.Timer import Timer\n'), ((5548, 5597), 'optimism.Mesh.EssentialBC', 'Mesh.EssentialBC', ([], {'nodeSet': '"""all_boundary"""', 'field': '(0)'}), "(nodeSet='all_boundary', field=0)\n", (5564, 5597), False, 'from optimism import Mesh\n'), ((5619, 5668), 'optimism.Mesh.EssentialBC', 'Mesh.EssentialBC', ([], {'nodeSet': '"""all_boundary"""', 'field': '(1)'}), "(nodeSet='all_boundary', field=1)\n", (5635, 5668), False, 'from optimism import Mesh\n'), ((5975, 6000), 'optimism.Timer.Timer', 'Timer', ([], {'name': '"""NewtonSolve"""'}), "(name='NewtonSolve')\n", (5980, 6000), False, 'from optimism.Timer import Timer\n'), ((6651, 6700), 'optimism.Mesh.EssentialBC', 
'Mesh.EssentialBC', ([], {'nodeSet': '"""all_boundary"""', 'field': '(0)'}), "(nodeSet='all_boundary', field=0)\n", (6667, 6700), False, 'from optimism import Mesh\n'), ((6722, 6771), 'optimism.Mesh.EssentialBC', 'Mesh.EssentialBC', ([], {'nodeSet': '"""all_boundary"""', 'field': '(1)'}), "(nodeSet='all_boundary', field=1)\n", (6738, 6771), False, 'from optimism import Mesh\n'), ((7240, 7327), 'optimism.Mechanics.volume_average_J_gradient_transformation', 'Mechanics.volume_average_J_gradient_transformation', (['elemGrads', 'elemVols', 'shapesForJ'], {}), '(elemGrads, elemVols,\n shapesForJ)\n', (7290, 7327), False, 'from optimism import Mechanics\n'), ((7343, 7459), 'optimism.Mechanics.plane_strain_gradient_transformation', 'Mechanics.plane_strain_gradient_transformation', (['elemGrads', 'elemShapes', 'elemVols', 'elemNodalDisps', 'elemNodalCoords'], {}), '(elemGrads, elemShapes,\n elemVols, elemNodalDisps, elemNodalCoords)\n', (7389, 7459), False, 'from optimism import Mechanics\n'), ((7626, 7651), 'optimism.Timer.Timer', 'Timer', ([], {'name': '"""NewtonSolve"""'}), "(name='NewtonSolve')\n", (7631, 7651), False, 'from optimism.Timer import Timer\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
import sys
import numpy as np
from tqdm import tqdm
def sqr_dist(x, y, e=1e-8):
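    # pairwise squared distances via the expansion
    # ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 <x_i, y_j>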
#assert len(list(x.get_shape())) == 2 and len(list(y.get_shape()))==2, 'illegal inputs'
xx = tf.reduce_sum(tf.square(x) + 1e-10, axis=1)
yy = tf.reduce_sum(tf.square(y) + 1e-10, axis=1)
xy = tf.matmul(x, y, transpose_b=True)
dist = tf.expand_dims(xx, 1) + tf.expand_dims(yy, 0) - 2.* xy
return dist
def median_distance(H):
V = tf.reshape(H, [-1])
n = tf.size(V)
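    # median via top_k: take the (n//2 + 1) largest entries; the median is
    # the smallest of them (n odd) or the mean of the two smallest (n even)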
top_k, _ = tf.nn.top_k(V, k= (n // 2) + 1)
return tf.cond(
tf.equal(n%2, 0),
lambda: (top_k[-1] + top_k[-2]) / 2.0,
lambda: top_k[-1]
)
    # unreachable after the tf.cond return above; kept for reference
    #return tf.maximum(h, 1e-6)
    #return h / tf.log(tf.cast(tf.shape(H)[0], tf.float32) + 1.)
def poly_kernel(x, subtract_mean=True, e=1e-8):
if subtract_mean:
x = x - tf.reduce_mean(x, axis=0)
kxy = 1 + tf.matmul(x, x, transpose_b=True)
    # for k(x, y) = 1 + <x, y>: sum_j grad_{x_j} k(x_j, x_i) = n * x_i
    dxkxy = x * x.get_shape()[0]
return kxy, dxkxy
def rbf_kernel(x, h=-1, to3d=False):
H = sqr_dist(x, x)
if h == -1:
h = tf.maximum(1e-6, median_distance(H))
kxy = tf.exp(-H / h)
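    # grad_{x_j} k(x_j, x_i) = 2/h * k * (x_i - x_j); summed over j this is
    # (x_i * sum_j k - sum_j k * x_j) * 2/h, assembled in matrix form below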
dxkxy = -tf.matmul(kxy, x)
sumkxy = tf.reduce_sum(kxy, axis=1, keepdims=True)
dxkxy = (dxkxy + x * sumkxy) * 2. / h
if to3d: dxkxy = -(tf.expand_dims(x, 1) - tf.expand_dims(x, 0)) * tf.expand_dims(kxy, 2) * 2. / h
return kxy, dxkxy
def imq_kernel(x, h=-1):
H = sqr_dist(x, x)
if h == -1:
h = median_distance(H)
kxy = 1. / tf.sqrt(1. + H / h)
dxkxy = .5 * kxy / (1. + H / h)
dxkxy = -tf.matmul(dxkxy, x)
sumkxy = tf.reduce_sum(kxy, axis=1, keepdims=True)
dxkxy = (dxkxy + x * sumkxy) * 2. / h
return kxy, dxkxy
def kernelized_stein_discrepancy(X, score_q, kernel='rbf', h=-1, **model_params):
n, dim = tf.cast(tf.shape(X)[0], tf.float32), tf.cast(tf.shape(X)[1], tf.float32)
Sqx = score_q(X, **model_params)
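    # M below holds the Stein kernel u_q(x_i, x_j) = s_i^T k s_j +
    # s_i^T grad_j k + grad_i k^T s_j + trace(grad_i grad_j k) on all pairs,
    # with s the score of q; averaging M (minus its diagonal) gives the KSD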
H = sqr_dist(X, X)
if h == -1:
h = median_distance(H) # 2sigma^2
h = tf.sqrt(h/2.)
# compute the rbf kernel
Kxy = tf.exp(-H / h ** 2 / 2.)
Sqxdy = -(tf.matmul(Sqx, X, transpose_b=True) - tf.reduce_sum(Sqx * X, axis=1, keepdims=True)) / (h ** 2)
dxSqy = tf.transpose(Sqxdy)
dxdy = (-H / (h ** 4) + dim / (h ** 2))
M = (tf.matmul(Sqx, Sqx, transpose_b=True) + Sqxdy + dxSqy + dxdy) * Kxy
#M2 = M - T.diag(T.diag(M))
#ksd_u = tf.reduce_sum(M2) / (n * (n - 1))
#ksd_v = tf.reduce_sum(M) / (n ** 2)
#return ksd_v
return M
def svgd_gradient(x, grad, kernel='rbf', temperature=1., u_kernel=None, **kernel_params):
assert x.get_shape()[1:] == grad.get_shape()[1:], 'illegal inputs and grads'
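    # SVGD transport direction: phi(x_i) ~ sum_j [ k(x_j, x_i) * grad_j +
    # temperature * grad_{x_j} k(x_j, x_i) ], normalized here by sum_j k
    # rather than by the particle count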
p_shape = tf.shape(x)
if tf.keras.backend.ndim(x) > 2:
x = tf.reshape(x, (tf.shape(x)[0], -1))
grad = tf.reshape(grad, (tf.shape(grad)[0], -1))
if u_kernel is not None:
kxy, dxkxy = u_kernel['kxy'], u_kernel['dxkxy']
dxkxy = tf.reshape(dxkxy, tf.shape(x))
else:
if kernel == 'rbf':
kxy, dxkxy = rbf_kernel(x, **kernel_params)
elif kernel == 'poly':
kxy, dxkxy = poly_kernel(x)
elif kernel == 'imq':
kxy, dxkxy = imq_kernel(x)
elif kernel == 'none':
kxy = tf.eye(tf.shape(x)[0])
dxkxy = tf.zeros_like(x)
else:
raise NotImplementedError
svgd_grad = (tf.matmul(kxy, grad) + temperature * dxkxy) / tf.reduce_sum(kxy, axis=1, keepdims=True)
svgd_grad = tf.reshape(svgd_grad, p_shape)
return svgd_grad
def wgf_gradient(x, grad, kernel='rbf', temperature=1., u_kernel=None, **kernel_params):
assert x.get_shape()[1:] == grad.get_shape()[1:], 'illegal inputs and grads'
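    # gradient-flow variant: each particle keeps its own gradient and only
    # the (temperature-scaled) repulsion term is kernel-smoothed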
p_shape = tf.shape(x)
if tf.keras.backend.ndim(x) > 2:
x = tf.reshape(x, (tf.shape(x)[0], -1))
grad = tf.reshape(grad, (tf.shape(grad)[0], -1))
if u_kernel is not None:
kxy, dxkxy = u_kernel['kxy'], u_kernel['dxkxy']
dxkxy = tf.reshape(dxkxy, tf.shape(x))
else:
if kernel == 'rbf':
kxy, dxkxy = rbf_kernel(x, **kernel_params)
elif kernel == 'poly':
kxy, dxkxy = poly_kernel(x)
elif kernel == 'imq':
kxy, dxkxy = imq_kernel(x)
elif kernel == 'none':
kxy = tf.eye(tf.shape(x)[0])
dxkxy = tf.zeros_like(x)
else:
raise NotImplementedError
wgf_grad = grad + temperature * dxkxy / tf.reduce_sum(kxy, axis=1, keepdims=True)
wgf_grad = tf.reshape(wgf_grad, p_shape)
return wgf_grad
def lrelu(x, leak=0.2, name="lrelu"):
with tf.variable_scope(name):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
def selu(x):
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale * tf.where(x > 0.0, x, alpha * tf.exp(x) - alpha)
def huber_loss(labels, predictions, delta=1.0):
residual = tf.abs(predictions - labels)
condition = tf.less(residual, delta)
small_res = 0.5 * tf.square(residual)
large_res = delta * residual - 0.5 * tf.square(delta)
return tf.where(condition, small_res, large_res)
def conv2d(inputs, num_outputs, activation_fn=tf.nn.relu,
kernel_size=5, stride=2, padding='SAME', name="conv2d"):
with tf.variable_scope(name):
return tf.contrib.layers.conv2d( inputs, num_outputs, kernel_size, stride=stride, padding=padding, activation_fn=activation_fn)
def deconv2d(inputs, num_outputs, activation_fn=tf.nn.relu,
kernel_size=5, stride=2, padding='SAME', name="deconv2d"):
with tf.variable_scope(name):
return tf.contrib.layers.conv2d_transpose(inputs, num_outputs, kernel_size, stride=stride, padding=padding, activation_fn=activation_fn)
def fc(input, output_shape, activation_fn=tf.nn.relu, init=None, name="fc"):
if init is None: init = tf.glorot_uniform_initializer()
output = slim.fully_connected(input, int(output_shape), activation_fn=activation_fn, weights_initializer=init)
return output
| [
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.contrib.layers.conv2d",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.contrib.layers.conv2d_transpose",
"tensorflow.keras.backend.ndim",
"tensorflow.reduce_mean",
"tensorflow.matmul",
"tensorflow.square",
"tensorflow.less",
"tensorflow.zeros_like",
"tensorflow.size",
"tensorflow.variable_scope",
"tensorflow.where",
"tensorflow.sqrt",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.glorot_uniform_initializer",
"tensorflow.nn.top_k",
"tensorflow.exp",
"tensorflow.abs"
] | [((464, 497), 'tensorflow.matmul', 'tf.matmul', (['x', 'y'], {'transpose_b': '(True)'}), '(x, y, transpose_b=True)\n', (473, 497), True, 'import tensorflow as tf\n'), ((614, 633), 'tensorflow.reshape', 'tf.reshape', (['H', '[-1]'], {}), '(H, [-1])\n', (624, 633), True, 'import tensorflow as tf\n'), ((642, 652), 'tensorflow.size', 'tf.size', (['V'], {}), '(V)\n', (649, 652), True, 'import tensorflow as tf\n'), ((668, 696), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['V'], {'k': '(n // 2 + 1)'}), '(V, k=n // 2 + 1)\n', (679, 696), True, 'import tensorflow as tf\n'), ((1277, 1291), 'tensorflow.exp', 'tf.exp', (['(-H / h)'], {}), '(-H / h)\n', (1283, 1291), True, 'import tensorflow as tf\n'), ((1336, 1377), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kxy'], {'axis': '(1)', 'keepdims': '(True)'}), '(kxy, axis=1, keepdims=True)\n', (1349, 1377), True, 'import tensorflow as tf\n'), ((1762, 1803), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kxy'], {'axis': '(1)', 'keepdims': '(True)'}), '(kxy, axis=1, keepdims=True)\n', (1775, 1803), True, 'import tensorflow as tf\n'), ((2166, 2182), 'tensorflow.sqrt', 'tf.sqrt', (['(h / 2.0)'], {}), '(h / 2.0)\n', (2173, 2182), True, 'import tensorflow as tf\n'), ((2219, 2244), 'tensorflow.exp', 'tf.exp', (['(-H / h ** 2 / 2.0)'], {}), '(-H / h ** 2 / 2.0)\n', (2225, 2244), True, 'import tensorflow as tf\n'), ((2368, 2387), 'tensorflow.transpose', 'tf.transpose', (['Sqxdy'], {}), '(Sqxdy)\n', (2380, 2387), True, 'import tensorflow as tf\n'), ((2854, 2865), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2862, 2865), True, 'import tensorflow as tf\n'), ((3659, 3689), 'tensorflow.reshape', 'tf.reshape', (['svgd_grad', 'p_shape'], {}), '(svgd_grad, p_shape)\n', (3669, 3689), True, 'import tensorflow as tf\n'), ((3896, 3907), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (3904, 3907), True, 'import tensorflow as tf\n'), ((4681, 4710), 'tensorflow.reshape', 'tf.reshape', (['wgf_grad', 'p_shape'], {}), '(wgf_grad, p_shape)\n', (4691, 4710), True, 'import tensorflow as tf\n'), ((5140, 5168), 'tensorflow.abs', 'tf.abs', (['(predictions - labels)'], {}), '(predictions - labels)\n', (5146, 5168), True, 'import tensorflow as tf\n'), ((5185, 5209), 'tensorflow.less', 'tf.less', (['residual', 'delta'], {}), '(residual, delta)\n', (5192, 5209), True, 'import tensorflow as tf\n'), ((5321, 5362), 'tensorflow.where', 'tf.where', (['condition', 'small_res', 'large_res'], {}), '(condition, small_res, large_res)\n', (5329, 5362), True, 'import tensorflow as tf\n'), ((728, 746), 'tensorflow.equal', 'tf.equal', (['(n % 2)', '(0)'], {}), '(n % 2, 0)\n', (736, 746), True, 'import tensorflow as tf\n'), ((1049, 1082), 'tensorflow.matmul', 'tf.matmul', (['x', 'x'], {'transpose_b': '(True)'}), '(x, x, transpose_b=True)\n', (1058, 1082), True, 'import tensorflow as tf\n'), ((1305, 1322), 'tensorflow.matmul', 'tf.matmul', (['kxy', 'x'], {}), '(kxy, x)\n', (1314, 1322), True, 'import tensorflow as tf\n'), ((1658, 1678), 'tensorflow.sqrt', 'tf.sqrt', (['(1.0 + H / h)'], {}), '(1.0 + H / h)\n', (1665, 1678), True, 'import tensorflow as tf\n'), ((1729, 1748), 'tensorflow.matmul', 'tf.matmul', (['dxkxy', 'x'], {}), '(dxkxy, x)\n', (1738, 1748), True, 'import tensorflow as tf\n'), ((2873, 2897), 'tensorflow.keras.backend.ndim', 'tf.keras.backend.ndim', (['x'], {}), '(x)\n', (2894, 2897), True, 'import tensorflow as tf\n'), ((3600, 3641), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kxy'], {'axis': '(1)', 'keepdims': '(True)'}), '(kxy, axis=1, keepdims=True)\n', (3613, 3641), True, 
'import tensorflow as tf\n'), ((3915, 3939), 'tensorflow.keras.backend.ndim', 'tf.keras.backend.ndim', (['x'], {}), '(x)\n', (3936, 3939), True, 'import tensorflow as tf\n'), ((4780, 4803), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (4797, 4803), True, 'import tensorflow as tf\n'), ((5232, 5251), 'tensorflow.square', 'tf.square', (['residual'], {}), '(residual)\n', (5241, 5251), True, 'import tensorflow as tf\n'), ((5501, 5524), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (5518, 5524), True, 'import tensorflow as tf\n'), ((5541, 5664), 'tensorflow.contrib.layers.conv2d', 'tf.contrib.layers.conv2d', (['inputs', 'num_outputs', 'kernel_size'], {'stride': 'stride', 'padding': 'padding', 'activation_fn': 'activation_fn'}), '(inputs, num_outputs, kernel_size, stride=stride,\n padding=padding, activation_fn=activation_fn)\n', (5565, 5664), True, 'import tensorflow as tf\n'), ((5801, 5824), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (5818, 5824), True, 'import tensorflow as tf\n'), ((5841, 5975), 'tensorflow.contrib.layers.conv2d_transpose', 'tf.contrib.layers.conv2d_transpose', (['inputs', 'num_outputs', 'kernel_size'], {'stride': 'stride', 'padding': 'padding', 'activation_fn': 'activation_fn'}), '(inputs, num_outputs, kernel_size, stride\n =stride, padding=padding, activation_fn=activation_fn)\n', (5875, 5975), True, 'import tensorflow as tf\n'), ((6078, 6109), 'tensorflow.glorot_uniform_initializer', 'tf.glorot_uniform_initializer', ([], {}), '()\n', (6107, 6109), True, 'import tensorflow as tf\n'), ((372, 384), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (381, 384), True, 'import tensorflow as tf\n'), ((425, 437), 'tensorflow.square', 'tf.square', (['y'], {}), '(y)\n', (434, 437), True, 'import tensorflow as tf\n'), ((509, 530), 'tensorflow.expand_dims', 'tf.expand_dims', (['xx', '(1)'], {}), '(xx, 1)\n', (523, 530), True, 'import tensorflow as tf\n'), ((533, 554), 'tensorflow.expand_dims', 'tf.expand_dims', (['yy', '(0)'], {}), '(yy, 0)\n', (547, 554), True, 'import tensorflow as tf\n'), ((1009, 1034), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1023, 1034), True, 'import tensorflow as tf\n'), ((3128, 3139), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (3136, 3139), True, 'import tensorflow as tf\n'), ((3554, 3574), 'tensorflow.matmul', 'tf.matmul', (['kxy', 'grad'], {}), '(kxy, grad)\n', (3563, 3574), True, 'import tensorflow as tf\n'), ((4170, 4181), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (4178, 4181), True, 'import tensorflow as tf\n'), ((4623, 4664), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kxy'], {'axis': '(1)', 'keepdims': '(True)'}), '(kxy, axis=1, keepdims=True)\n', (4636, 4664), True, 'import tensorflow as tf\n'), ((5293, 5309), 'tensorflow.square', 'tf.square', (['delta'], {}), '(delta)\n', (5302, 5309), True, 'import tensorflow as tf\n'), ((1974, 1985), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (1982, 1985), True, 'import tensorflow as tf\n'), ((2011, 2022), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (2019, 2022), True, 'import tensorflow as tf\n'), ((2259, 2294), 'tensorflow.matmul', 'tf.matmul', (['Sqx', 'X'], {'transpose_b': '(True)'}), '(Sqx, X, transpose_b=True)\n', (2268, 2294), True, 'import tensorflow as tf\n'), ((2297, 2342), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(Sqx * X)'], {'axis': '(1)', 'keepdims': '(True)'}), '(Sqx * X, axis=1, keepdims=True)\n', 
(2310, 2342), True, 'import tensorflow as tf\n'), ((1491, 1513), 'tensorflow.expand_dims', 'tf.expand_dims', (['kxy', '(2)'], {}), '(kxy, 2)\n', (1505, 1513), True, 'import tensorflow as tf\n'), ((2442, 2479), 'tensorflow.matmul', 'tf.matmul', (['Sqx', 'Sqx'], {'transpose_b': '(True)'}), '(Sqx, Sqx, transpose_b=True)\n', (2451, 2479), True, 'import tensorflow as tf\n'), ((2930, 2941), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2938, 2941), True, 'import tensorflow as tf\n'), ((2984, 2998), 'tensorflow.shape', 'tf.shape', (['grad'], {}), '(grad)\n', (2992, 2998), True, 'import tensorflow as tf\n'), ((3972, 3983), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (3980, 3983), True, 'import tensorflow as tf\n'), ((4026, 4040), 'tensorflow.shape', 'tf.shape', (['grad'], {}), '(grad)\n', (4034, 4040), True, 'import tensorflow as tf\n'), ((5056, 5065), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (5062, 5065), True, 'import tensorflow as tf\n'), ((887, 898), 'tensorflow.shape', 'tf.shape', (['H'], {}), '(H)\n', (895, 898), True, 'import tensorflow as tf\n'), ((3467, 3483), 'tensorflow.zeros_like', 'tf.zeros_like', (['x'], {}), '(x)\n', (3480, 3483), True, 'import tensorflow as tf\n'), ((4509, 4525), 'tensorflow.zeros_like', 'tf.zeros_like', (['x'], {}), '(x)\n', (4522, 4525), True, 'import tensorflow as tf\n'), ((1444, 1464), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(1)'], {}), '(x, 1)\n', (1458, 1464), True, 'import tensorflow as tf\n'), ((1467, 1487), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', '(0)'], {}), '(x, 0)\n', (1481, 1487), True, 'import tensorflow as tf\n'), ((3431, 3442), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (3439, 3442), True, 'import tensorflow as tf\n'), ((4473, 4484), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (4481, 4484), True, 'import tensorflow as tf\n')] |
from headerfile import *
import numpy as np
from mando import *
import os  # needed for the os.system() sound calls below
import random
class Obstacles:
def __init__(self, lives):
self._killed = 0
self._lives = lives # boss should have more lives
self._x = 0 # declaring variable with some dummy value
self._y = 0 # declaring variable with some dummy value
self._xrange = 0 # declaring variable with some dummy value
self._yrange = 0 # declaring variable with some dummy value
self.__killflag = 0 # this is used to make sure that once mando comes into contact with laser once he won't lose more than one life at a time
def overlap(self, grid):
'''returns 0 if can correctly place, else returns 1
'''
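        # require a one-square margin of empty cells around the object, hence
        # the (yrange+2) x (xrange+2) window that must contain only ' '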
m = grid[self._y-1:self._y+self._yrange+1, self._x-1:self._x +
self._xrange+1] # padding of spaces set around this
if np.count_nonzero(m == ' ') != (self._yrange+2)*(self._xrange+2):
return 1
else:
return 0
def place(self, grid):
pass
def check_collision_mando(self, obj_mando,counter):
'''returns 1 if mando has collided with this object
'''
if obj_mando.get_shield() == 1 or self._killed == 1:
return
x = obj_mando.get_x()
y = obj_mando.get_y()
# sdf,df,legs,head
if x+1 >= self._x and x-1 < self._x + self._xrange and y+1 >= self._y and y-1 < self._y+self._yrange:
# then mando is being hit by laser
if self.__killflag == 0:
# so he has just come into contact with the laser
obj_mando.kill_mando()
os.system("aplay funstuff/mandodie.wav -q &")
obj_mando.change_score(-50)
else:
# he keeps moving through the same
pass
self.__killflag = 1
else:
# he is not in contact with the laser
self.__killflag = 0
return
def check_collision_bullets(self, obj_bullet,grid,counterinc,obj_mando):
if obj_bullet.killed() or self._killed == 1:
return
x = obj_bullet.get_x()
y = obj_bullet.get_y()
if x+4+counterinc >= self._x and x-1 < self._x + self._xrange and y >= self._y and y < self._y+self._yrange:
obj_bullet.kill(grid)
self._lives -= 1
obj_mando.change_score(50)
if self._lives==0:
self._killed=1
os.system("aplay funstuff/stompenemy.wav -q &")
def get_lives(self):
return self._lives
def get_x(self):
return self._x
def get_y(self):
return self._y
class HorizontalBeam(Obstacles):
def __init__(self, x, y):
super().__init__(1)
self._x = x
self._y = y
self._xrange = 24
self._yrange = 2
def place(self, grid):
if(self._killed) == 1:
grid[self._y:self._y+self._yrange, self._x:self._x +
self._xrange] = np.tile([' '], (self._yrange, self._xrange))
else:
grid[self._y:self._y+self._yrange, self._x:self._x +
self._xrange] = np.tile([STAR], (self._yrange, self._xrange))
class VerticalBeam(Obstacles):
def __init__(self, x, y):
super().__init__(1)
self._x = x
self._y = y
self._xrange = 2
self._yrange = 12
def place(self, grid):
if(self._killed) == 1:
            grid[self._y:self._y+self._yrange, self._x:self._x +
                 self._xrange] = np.tile([' ', ' '], (self._yrange, self._xrange // 2))
else:
            grid[self._y:self._y+self._yrange, self._x:self._x +
                 self._xrange] = np.tile([STAR, STAR], (self._yrange, self._xrange // 2))
class DiagonalBeam(Obstacles):
def __init__(self, x, y):
super().__init__(1)
self._x = x
self._y = y
self._yrange = 8
self._xrange = self._yrange*2
def place(self, grid):
if(self._killed) == 1:
for i in range(self._yrange):
grid[self._y+i][self._x+2*i] = ' '
grid[self._y+i][self._x+2*i+1] = ' '
else:
for i in range(self._yrange):
grid[self._y+i][self._x+2*i] = STAR
grid[self._y+i][self._x+2*i+1] = STAR
class Magnet(Obstacles):
def __init__(self,x,y):
super().__init__(1)
self._x=x
self._y=y
self._xrange=3
self._yrange=3
self._magx=40
def check_collision_mando(self, obj_mando,counter):
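        # drag mando horizontally toward the magnet while he is within
        # self._magx columns of it; the -100 arguments to set_values are
        # assumed to mean "leave that component unchanged"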
if obj_mando.get_shield() == 1 or self._killed == 1:
return
x = obj_mando.get_x()
if(x<=self._x+1 and x>=self._x-1):
return
if(x>self._x+1 and x<self._x+1 + self._magx):
obj_mando.set_values(-3,-100,-100,counter)
if(x<self._x+1 and x>self._x+1 - self._magx):
obj_mando.set_values(3,-100,-100,counter)
def place(self, grid):
if(self._killed) == 1:
grid[self._y:self._y+self._yrange, self._x:self._x +
self._xrange] = np.tile([' '], (self._yrange, self._xrange))
else:
grid[self._y:self._y+self._yrange, self._x:self._x +
self._xrange] = np.tile([Back.RED+'m'+Back.RESET], (self._yrange, self._xrange))
class CoinBeam(Obstacles):
def __init__(self,x,y):
super().__init__(1)
self._x=x
self._y=y
self._xrange=1
self._yrange=1
def place(self, grid):
grid[self._y:self._y+self._yrange, self._x:self._x +
self._xrange] = np.tile([COIN], (self._yrange, self._xrange))
def erase_coin(self,grid):
self._killed=1
self._lives=0
grid[self._y,self._x]=' '
def generate_obstacles(grid,num,nummag):
obst = []
for i in range(num):
x = random.randint(PLACEWIDTH+5, MAXWIDTH-WIDTH-25)
y = random.randint(SKY+1, HEIGHT-GROUND-1)
obj = DiagonalBeam(x, y)
while(obj.overlap(grid)):
x = random.randint(PLACEWIDTH+5, MAXWIDTH-WIDTH-25)
y = random.randint(SKY+1, HEIGHT-GROUND-1)
# print(x,y)
obj = DiagonalBeam(x, y)
obj.place(grid)
obst.append(obj)
x = random.randint(PLACEWIDTH+5, MAXWIDTH-WIDTH-25)
y = random.randint(SKY+1, HEIGHT-GROUND-1)
obj = HorizontalBeam(x, y)
while(obj.overlap(grid)):
x = random.randint(PLACEWIDTH+5, MAXWIDTH-WIDTH-25)
y = random.randint(SKY+1, HEIGHT-GROUND-1)
# print(x,y)
obj = HorizontalBeam(x, y)
obj.place(grid)
obst.append(obj)
x = random.randint(PLACEWIDTH+5, MAXWIDTH-WIDTH-25)
y = random.randint(SKY+1, HEIGHT-GROUND-1)
obj = VerticalBeam(x, y)
while(obj.overlap(grid)):
x = random.randint(PLACEWIDTH+5, MAXWIDTH-WIDTH-25)
y = random.randint(SKY+1, HEIGHT-GROUND-1)
# print(x,y)
obj = VerticalBeam(x, y)
obj.place(grid)
obst.append(obj)
for i in range(nummag):
x = random.randint(PLACEWIDTH+5, MAXWIDTH-WIDTH-25)
y = random.randint(SKY+1, HEIGHT-GROUND-1)
obj = Magnet(x, y)
while(obj.overlap(grid)):
x = random.randint(PLACEWIDTH+5, MAXWIDTH-WIDTH-25)
y = random.randint(SKY+1, HEIGHT-GROUND-1)
# print(x,y)
obj = Magnet(x, y)
obj.place(grid)
obst.append(obj)
obst.sort(key=lambda obj: obj._x, reverse=False)
return obst
def generate_coins(grid,num):
coins=[]
for i in range(num):
x = random.randrange(PLACEWIDTH+5, MAXWIDTH-WIDTH-25)
y = random.randrange(SKY+1, HEIGHT-GROUND-4)
obj = CoinBeam(x,y)
while(obj.overlap(grid)):
x = random.randint(PLACEWIDTH+5, MAXWIDTH-WIDTH-25)
y = random.randint(SKY+1, HEIGHT-GROUND-1)
obj = CoinBeam(x,y)
obj.place(grid)
coins.append(obj)
return coins | [
"numpy.count_nonzero",
"numpy.tile",
"random.randint",
"random.randrange"
] | [((5695, 5740), 'numpy.tile', 'np.tile', (['[COIN]', '(self._yrange, self._xrange)'], {}), '([COIN], (self._yrange, self._xrange))\n', (5702, 5740), True, 'import numpy as np\n'), ((5950, 6003), 'random.randint', 'random.randint', (['(PLACEWIDTH + 5)', '(MAXWIDTH - WIDTH - 25)'], {}), '(PLACEWIDTH + 5, MAXWIDTH - WIDTH - 25)\n', (5964, 6003), False, 'import random\n'), ((6010, 6054), 'random.randint', 'random.randint', (['(SKY + 1)', '(HEIGHT - GROUND - 1)'], {}), '(SKY + 1, HEIGHT - GROUND - 1)\n', (6024, 6054), False, 'import random\n'), ((6360, 6413), 'random.randint', 'random.randint', (['(PLACEWIDTH + 5)', '(MAXWIDTH - WIDTH - 25)'], {}), '(PLACEWIDTH + 5, MAXWIDTH - WIDTH - 25)\n', (6374, 6413), False, 'import random\n'), ((6420, 6464), 'random.randint', 'random.randint', (['(SKY + 1)', '(HEIGHT - GROUND - 1)'], {}), '(SKY + 1, HEIGHT - GROUND - 1)\n', (6434, 6464), False, 'import random\n'), ((6774, 6827), 'random.randint', 'random.randint', (['(PLACEWIDTH + 5)', '(MAXWIDTH - WIDTH - 25)'], {}), '(PLACEWIDTH + 5, MAXWIDTH - WIDTH - 25)\n', (6788, 6827), False, 'import random\n'), ((6834, 6878), 'random.randint', 'random.randint', (['(SKY + 1)', '(HEIGHT - GROUND - 1)'], {}), '(SKY + 1, HEIGHT - GROUND - 1)\n', (6848, 6878), False, 'import random\n'), ((7212, 7265), 'random.randint', 'random.randint', (['(PLACEWIDTH + 5)', '(MAXWIDTH - WIDTH - 25)'], {}), '(PLACEWIDTH + 5, MAXWIDTH - WIDTH - 25)\n', (7226, 7265), False, 'import random\n'), ((7272, 7316), 'random.randint', 'random.randint', (['(SKY + 1)', '(HEIGHT - GROUND - 1)'], {}), '(SKY + 1, HEIGHT - GROUND - 1)\n', (7286, 7316), False, 'import random\n'), ((7758, 7813), 'random.randrange', 'random.randrange', (['(PLACEWIDTH + 5)', '(MAXWIDTH - WIDTH - 25)'], {}), '(PLACEWIDTH + 5, MAXWIDTH - WIDTH - 25)\n', (7774, 7813), False, 'import random\n'), ((7820, 7866), 'random.randrange', 'random.randrange', (['(SKY + 1)', '(HEIGHT - GROUND - 4)'], {}), '(SKY + 1, HEIGHT - GROUND - 4)\n', (7836, 7866), False, 'import random\n'), ((884, 910), 'numpy.count_nonzero', 'np.count_nonzero', (["(m == ' ')"], {}), "(m == ' ')\n", (900, 910), True, 'import numpy as np\n'), ((3031, 3075), 'numpy.tile', 'np.tile', (["[' ']", '(self._yrange, self._xrange)'], {}), "([' '], (self._yrange, self._xrange))\n", (3038, 3075), True, 'import numpy as np\n'), ((3188, 3233), 'numpy.tile', 'np.tile', (['[STAR]', '(self._yrange, self._xrange)'], {}), '([STAR], (self._yrange, self._xrange))\n', (3195, 3233), True, 'import numpy as np\n'), ((5171, 5215), 'numpy.tile', 'np.tile', (["[' ']", '(self._yrange, self._xrange)'], {}), "([' '], (self._yrange, self._xrange))\n", (5178, 5215), True, 'import numpy as np\n'), ((5328, 5396), 'numpy.tile', 'np.tile', (["[Back.RED + 'm' + Back.RESET]", '(self._yrange, self._xrange)'], {}), "([Back.RED + 'm' + Back.RESET], (self._yrange, self._xrange))\n", (5335, 5396), True, 'import numpy as np\n'), ((6133, 6186), 'random.randint', 'random.randint', (['(PLACEWIDTH + 5)', '(MAXWIDTH - WIDTH - 25)'], {}), '(PLACEWIDTH + 5, MAXWIDTH - WIDTH - 25)\n', (6147, 6186), False, 'import random\n'), ((6197, 6241), 'random.randint', 'random.randint', (['(SKY + 1)', '(HEIGHT - GROUND - 1)'], {}), '(SKY + 1, HEIGHT - GROUND - 1)\n', (6211, 6241), False, 'import random\n'), ((6545, 6598), 'random.randint', 'random.randint', (['(PLACEWIDTH + 5)', '(MAXWIDTH - WIDTH - 25)'], {}), '(PLACEWIDTH + 5, MAXWIDTH - WIDTH - 25)\n', (6559, 6598), False, 'import random\n'), ((6609, 6653), 'random.randint', 'random.randint', (['(SKY + 1)', '(HEIGHT - 
GROUND - 1)'], {}), '(SKY + 1, HEIGHT - GROUND - 1)\n', (6623, 6653), False, 'import random\n'), ((6957, 7010), 'random.randint', 'random.randint', (['(PLACEWIDTH + 5)', '(MAXWIDTH - WIDTH - 25)'], {}), '(PLACEWIDTH + 5, MAXWIDTH - WIDTH - 25)\n', (6971, 7010), False, 'import random\n'), ((7021, 7065), 'random.randint', 'random.randint', (['(SKY + 1)', '(HEIGHT - GROUND - 1)'], {}), '(SKY + 1, HEIGHT - GROUND - 1)\n', (7035, 7065), False, 'import random\n'), ((7389, 7442), 'random.randint', 'random.randint', (['(PLACEWIDTH + 5)', '(MAXWIDTH - WIDTH - 25)'], {}), '(PLACEWIDTH + 5, MAXWIDTH - WIDTH - 25)\n', (7403, 7442), False, 'import random\n'), ((7453, 7497), 'random.randint', 'random.randint', (['(SKY + 1)', '(HEIGHT - GROUND - 1)'], {}), '(SKY + 1, HEIGHT - GROUND - 1)\n', (7467, 7497), False, 'import random\n'), ((7940, 7993), 'random.randint', 'random.randint', (['(PLACEWIDTH + 5)', '(MAXWIDTH - WIDTH - 25)'], {}), '(PLACEWIDTH + 5, MAXWIDTH - WIDTH - 25)\n', (7954, 7993), False, 'import random\n'), ((8004, 8048), 'random.randint', 'random.randint', (['(SKY + 1)', '(HEIGHT - GROUND - 1)'], {}), '(SKY + 1, HEIGHT - GROUND - 1)\n', (8018, 8048), False, 'import random\n')] |
"""New Impressive Title - Camera API"""
from direct.task.Task import Task
from kivy.logger import Logger
from panda3d.core import ClockObject, CollisionNode, CollisionSphere, Vec3
#Constants
#==============================================================================
CAM_MODE_FIRST_PERSON = 0
CAM_MODE_CHASE = 1
CAM_MODE_FREE = 2
#Classes
#==============================================================================
class CameraManager(object):
"""A high-level camera manager. Manages camera mode and movement."""
def __init__(self):
"""Setup this camera manager."""
Logger.info("Initializing camera manager...")
self.reset()
#Camera Controls
#===============
#F1 - first person view
#F2 - chase cam view
#F3 - free cam view
#W - move forward
#S - move backward
#A - move left
#D - move right
#R - rise
#F - fall
#Up - look up
#Down - look down
#Left - turn left
#Right - turn right
base.accept("f1", self.change_mode, [CAM_MODE_FIRST_PERSON])
base.accept("f2", self.change_mode, [CAM_MODE_CHASE])
base.accept("f3", self.change_mode, [CAM_MODE_FREE])
base.accept("w", self.set_move_vec_y, [self.speed])
base.accept("w-up", self.set_move_vec_y, [0])
base.accept("s", self.set_move_vec_y, [-self.speed])
base.accept("s-up", self.set_move_vec_y, [0])
base.accept("a", self.set_move_vec_x, [-self.speed])
base.accept("a-up", self.set_move_vec_x, [0])
base.accept("d", self.set_move_vec_x, [self.speed])
base.accept("d-up", self.set_move_vec_x, [0])
base.accept("r", self.set_move_vec_z, [self.speed])
base.accept("r-up", self.set_move_vec_z, [0])
base.accept("f", self.set_move_vec_z, [-self.speed])
base.accept("f-up", self.set_move_vec_z, [0])
base.accept("arrow_up", self.set_rot_vec_p, [self.speed / 10])
base.accept("arrow_up-up", self.set_rot_vec_p, [0])
base.accept("arrow_down", self.set_rot_vec_p, [-self.speed / 10])
base.accept("arrow_down-up", self.set_rot_vec_p, [0])
base.accept("arrow_left", self.set_rot_vec_h, [self.speed / 10])
base.accept("arrow_left-up", self.set_rot_vec_h, [0])
base.accept("arrow_right", self.set_rot_vec_h, [-self.speed / 10])
base.accept("arrow_right-up", self.set_rot_vec_h, [0])
#Setup collision detection
cnode = CollisionNode("camera")
cnode.add_solid(CollisionSphere(0, 0, 0, 10))
self.collider = base.camera.attach_new_node(cnode)
base.cTrav.add_collider(self.collider, base.portal_handler)
#Start camera manager task
base.task_mgr.add(self.run_logic)
Logger.info("Camera manager initialized.")
def reset(self):
"""Reset this camera manager."""
self.mode = CAM_MODE_FIRST_PERSON
self.speed = 500
self.move_vec = Vec3(0, 0, 0)
self.rot_vec = Vec3(0, 0, 0)
base.disable_mouse()
base.camera.set_pos(0, 0, 0)
base.camera.set_hpr(0, 0, 0)
def change_mode(self, mode):
"""Change the current camera mode."""
self.mode = mode
if mode == CAM_MODE_FIRST_PERSON:
Logger.info("Camera mode changed to first person cam.")
elif mode == CAM_MODE_CHASE:
Logger.info("Camera mode changed to chase cam.")
elif mode == CAM_MODE_FREE:
Logger.info("Camera mode changed to free cam.")
def set_move_vec_x(self, x):
"""Set the X movement vector of the camera. This is only used in free
mode.
"""
self.move_vec.x = x
def set_move_vec_y(self, y):
"""Set the Y movement vector of the camera. This is only used in free
mode.
"""
self.move_vec.y = y
def set_move_vec_z(self, z):
"""Set the Z movement vector of the camera. This is only used in free
mode.
"""
self.move_vec.z = z
def set_rot_vec_h(self, h):
"""Set the heading rotation vector of the camera. This is only used in
free mode.
"""
self.rot_vec.x = h
def set_rot_vec_p(self, p):
"""Set the pitch rotation vector of the camera."""
self.rot_vec.y = p
def run_logic(self, task):
"""Run the logic for this camera manager."""
#Get delta time
dt = ClockObject.get_global_clock().get_dt()
#We only need to execute logic for the free camera here
if self.mode == CAM_MODE_FREE:
#Update rotation first
base.camera.set_hpr(base.camera.get_hpr() + self.rot_vec * dt)
#Now update position
pos = base.camera.get_pos()
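            #The local-space movement vector is rotated into world space with
            #the camera's orientation, so WASD move relative to the view
            #direction; Z is handled separately to keep rise/fall vertical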
vec = Vec3(self.move_vec.x, self.move_vec.y, 0) * dt
base.camera.set_pos(pos + base.camera.get_quat(render).xform(vec))
base.camera.set_z(base.camera.get_z() + self.move_vec.z * dt)
#Continue this task infinitely
return Task.cont
| [
"panda3d.core.ClockObject.get_global_clock",
"kivy.logger.Logger.info",
"panda3d.core.Vec3",
"panda3d.core.CollisionSphere",
"panda3d.core.CollisionNode"
] | [((603, 648), 'kivy.logger.Logger.info', 'Logger.info', (['"""Initializing camera manager..."""'], {}), "('Initializing camera manager...')\n", (614, 648), False, 'from kivy.logger import Logger\n'), ((2521, 2544), 'panda3d.core.CollisionNode', 'CollisionNode', (['"""camera"""'], {}), "('camera')\n", (2534, 2544), False, 'from panda3d.core import ClockObject, CollisionNode, CollisionSphere, Vec3\n'), ((2812, 2854), 'kivy.logger.Logger.info', 'Logger.info', (['"""Camera manager initialized."""'], {}), "('Camera manager initialized.')\n", (2823, 2854), False, 'from kivy.logger import Logger\n'), ((3009, 3022), 'panda3d.core.Vec3', 'Vec3', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3013, 3022), False, 'from panda3d.core import ClockObject, CollisionNode, CollisionSphere, Vec3\n'), ((3046, 3059), 'panda3d.core.Vec3', 'Vec3', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3050, 3059), False, 'from panda3d.core import ClockObject, CollisionNode, CollisionSphere, Vec3\n'), ((2569, 2597), 'panda3d.core.CollisionSphere', 'CollisionSphere', (['(0)', '(0)', '(0)', '(10)'], {}), '(0, 0, 0, 10)\n', (2584, 2597), False, 'from panda3d.core import ClockObject, CollisionNode, CollisionSphere, Vec3\n'), ((3324, 3379), 'kivy.logger.Logger.info', 'Logger.info', (['"""Camera mode changed to first person cam."""'], {}), "('Camera mode changed to first person cam.')\n", (3335, 3379), False, 'from kivy.logger import Logger\n'), ((3430, 3478), 'kivy.logger.Logger.info', 'Logger.info', (['"""Camera mode changed to chase cam."""'], {}), "('Camera mode changed to chase cam.')\n", (3441, 3478), False, 'from kivy.logger import Logger\n'), ((4485, 4515), 'panda3d.core.ClockObject.get_global_clock', 'ClockObject.get_global_clock', ([], {}), '()\n', (4513, 4515), False, 'from panda3d.core import ClockObject, CollisionNode, CollisionSphere, Vec3\n'), ((4831, 4872), 'panda3d.core.Vec3', 'Vec3', (['self.move_vec.x', 'self.move_vec.y', '(0)'], {}), '(self.move_vec.x, self.move_vec.y, 0)\n', (4835, 4872), False, 'from panda3d.core import ClockObject, CollisionNode, CollisionSphere, Vec3\n'), ((3528, 3575), 'kivy.logger.Logger.info', 'Logger.info', (['"""Camera mode changed to free cam."""'], {}), "('Camera mode changed to free cam.')\n", (3539, 3575), False, 'from kivy.logger import Logger\n')] |
# ready for final test
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.postgres_operator import PostgresOperator
from airflow.hooks.S3_hook import S3Hook
from airflow.operators import PythonOperator
from airflow.contrib.operators.emr_add_steps_operator import EmrAddStepsOperator
from airflow.contrib.sensors.emr_step_sensor import EmrStepSensor
from airflow.hooks.postgres_hook import PostgresHook
import psycopg2
import os
import json
import logging
# config
# local
unload_user_purchase = "./scripts/sql/filter_unload_user_purchase.sql"
temp_filtered_user_purchase = "/temp/temp_filtered_user_purchase.csv"
movie_review_local = "/data/movie_review/movie_review.csv" # location of movie review within the docker container
# look at the docker-compose volume mounting for clarification
movie_clean_emr_steps = "./dags/scripts/emr/clean_movie_review.json"
movie_text_classification_script = "./dags/scripts/spark/random_text_classification.py"
get_user_behaviour = "scripts/sql/get_user_behavior_metrics.sql"
# remote config
BUCKET_NAME = "data-engineering-batch-mmc"
temp_filtered_user_purchase_key = (
"user_purchase/stage/{{ ds }}/temp_filtered_user_purchase.csv"
)
movie_review_load = "movie_review/load/movie.csv"
# code for everything
default_args = {
"owner": "airflow",
"depends_on_past": True,
"wait_for_downstream": True,
"start_date": datetime(
2010, 12, 1
), # we start at this date to be consistent with the dataset we have and airflow will catchup
"email": ["<EMAIL>"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
}
dag = DAG(
"user_behaviour",
default_args=default_args,
schedule_interval="0 0 * * *",
max_active_runs=1,
)
# helper function(s)
def _local_to_s3(filename, key, bucket_name=BUCKET_NAME):
s3 = S3Hook()
s3.load_file(filename=filename, bucket_name=bucket_name, replace=True, key=key)
def remove_local_file(filelocation):
if os.path.isfile(filelocation):
os.remove(filelocation)
else:
logging.info(f"File {filelocation} not found")
def run_redshift_external_query(qry):
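    # Redshift cannot run external-table DDL such as
    # "ALTER TABLE ... ADD PARTITION" inside a transaction block, so the
    # connection is switched to autocommit before executing the query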
rs_hook = PostgresHook(postgres_conn_id="redshift")
rs_conn = rs_hook.get_conn()
rs_conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
rs_cursor = rs_conn.cursor()
rs_cursor.execute(qry)
rs_cursor.close()
rs_conn.commit()
pg_unload = PostgresOperator(
dag=dag,
task_id="pg_unload",
sql=unload_user_purchase,
postgres_conn_id="postgres_default",
params={"temp_filtered_user_purchase": temp_filtered_user_purchase},
depends_on_past=True,
wait_for_downstream=True,
)
user_purchase_to_s3_stage = PythonOperator(
dag=dag,
task_id="user_purchase_to_s3_stage",
python_callable=_local_to_s3,
op_kwargs={
"filename": temp_filtered_user_purchase,
"key": temp_filtered_user_purchase_key,
},
)
remove_local_user_purchase_file = PythonOperator(
dag=dag,
task_id="remove_local_user_purchase_file",
python_callable=remove_local_file,
op_kwargs={"filelocation": temp_filtered_user_purchase,},
)
end_of_data_pipeline = DummyOperator(task_id="end_of_data_pipeline", dag=dag)
# Step 2 Code
EMR_ID = "j-30YXNXRWQ6C6D"
movie_review_load_folder = "movie_review/load/"
movie_review_stage = "movie_review/stage/"
text_classifier_script = "scripts/random_text_classification.py"
movie_review_to_s3_stage = PythonOperator(
dag=dag,
task_id="user_review_to_s3_stage",
python_callable=_local_to_s3,
op_kwargs={"filename": movie_review_local, "key": movie_review_load,},
)
move_emr_script_to_s3 = PythonOperator(
dag=dag,
task_id="move_emr_script_to_s3",
python_callable=_local_to_s3,
op_kwargs={
"filename": movie_text_classification_script,
"key": "scripts/random_text_classification.py",
},
)
with open(movie_clean_emr_steps) as json_file:
emr_steps = json.load(json_file)
# adding our EMR steps to an existing EMR cluster
add_emr_steps = EmrAddStepsOperator(
dag=dag,
task_id="add_emr_steps",
job_flow_id=EMR_ID,
aws_conn_id="aws_default",
steps=emr_steps,
params={
"BUCKET_NAME": BUCKET_NAME,
"movie_review_load": movie_review_load_folder,
"text_classifier_script": text_classifier_script,
"movie_review_stage": movie_review_stage,
},
depends_on_past=True,
)
last_step = len(emr_steps) - 1
# sensing if the last step is complete
clean_movie_review_data = EmrStepSensor(
dag=dag,
task_id="clean_movie_review_data",
job_flow_id=EMR_ID,
step_id='{{ task_instance.xcom_pull("add_emr_steps", key="return_value")['
+ str(last_step)
+ "] }}",
depends_on_past=True,
)
user_purchase_to_rs_stage = PythonOperator(
dag=dag,
task_id="user_purchase_to_rs_stage",
python_callable=run_redshift_external_query,
op_kwargs={
"qry": "alter table spectrum.user_purchase_staging add partition(insert_date='{{ ds }}') \
location 's3://data-engineering-batch-mmc/user_purchase/stage/{{ ds }}'",
},
)
get_user_behaviour = PostgresOperator(
dag=dag,
task_id="get_user_behaviour",
sql=get_user_behaviour,
postgres_conn_id="redshift",
)
# Part 1 2 and 3 together finally.
pg_unload >> user_purchase_to_s3_stage >> remove_local_user_purchase_file >> user_purchase_to_rs_stage
[
movie_review_to_s3_stage,
move_emr_script_to_s3,
] >> add_emr_steps >> clean_movie_review_data
[
user_purchase_to_rs_stage,
clean_movie_review_data,
] >> get_user_behaviour >> end_of_data_pipeline
| [
"datetime.datetime",
"airflow.operators.dummy_operator.DummyOperator",
"airflow.contrib.operators.emr_add_steps_operator.EmrAddStepsOperator",
"airflow.operators.PythonOperator",
"airflow.hooks.postgres_hook.PostgresHook",
"os.path.isfile",
"airflow.hooks.S3_hook.S3Hook",
"airflow.DAG",
"json.load",
"datetime.timedelta",
"airflow.operators.postgres_operator.PostgresOperator",
"os.remove"
] | [((1744, 1847), 'airflow.DAG', 'DAG', (['"""user_behaviour"""'], {'default_args': 'default_args', 'schedule_interval': '"""0 0 * * *"""', 'max_active_runs': '(1)'}), "('user_behaviour', default_args=default_args, schedule_interval=\n '0 0 * * *', max_active_runs=1)\n", (1747, 1847), False, 'from airflow import DAG\n'), ((2545, 2784), 'airflow.operators.postgres_operator.PostgresOperator', 'PostgresOperator', ([], {'dag': 'dag', 'task_id': '"""pg_unload"""', 'sql': 'unload_user_purchase', 'postgres_conn_id': '"""postgres_default"""', 'params': "{'temp_filtered_user_purchase': temp_filtered_user_purchase}", 'depends_on_past': '(True)', 'wait_for_downstream': '(True)'}), "(dag=dag, task_id='pg_unload', sql=unload_user_purchase,\n postgres_conn_id='postgres_default', params={\n 'temp_filtered_user_purchase': temp_filtered_user_purchase},\n depends_on_past=True, wait_for_downstream=True)\n", (2561, 2784), False, 'from airflow.operators.postgres_operator import PostgresOperator\n'), ((2832, 3023), 'airflow.operators.PythonOperator', 'PythonOperator', ([], {'dag': 'dag', 'task_id': '"""user_purchase_to_s3_stage"""', 'python_callable': '_local_to_s3', 'op_kwargs': "{'filename': temp_filtered_user_purchase, 'key':\n temp_filtered_user_purchase_key}"}), "(dag=dag, task_id='user_purchase_to_s3_stage',\n python_callable=_local_to_s3, op_kwargs={'filename':\n temp_filtered_user_purchase, 'key': temp_filtered_user_purchase_key})\n", (2846, 3023), False, 'from airflow.operators import PythonOperator\n'), ((3093, 3259), 'airflow.operators.PythonOperator', 'PythonOperator', ([], {'dag': 'dag', 'task_id': '"""remove_local_user_purchase_file"""', 'python_callable': 'remove_local_file', 'op_kwargs': "{'filelocation': temp_filtered_user_purchase}"}), "(dag=dag, task_id='remove_local_user_purchase_file',\n python_callable=remove_local_file, op_kwargs={'filelocation':\n temp_filtered_user_purchase})\n", (3107, 3259), False, 'from airflow.operators import PythonOperator\n'), ((3297, 3351), 'airflow.operators.dummy_operator.DummyOperator', 'DummyOperator', ([], {'task_id': '"""end_of_data_pipeline"""', 'dag': 'dag'}), "(task_id='end_of_data_pipeline', dag=dag)\n", (3310, 3351), False, 'from airflow.operators.dummy_operator import DummyOperator\n'), ((3580, 3747), 'airflow.operators.PythonOperator', 'PythonOperator', ([], {'dag': 'dag', 'task_id': '"""user_review_to_s3_stage"""', 'python_callable': '_local_to_s3', 'op_kwargs': "{'filename': movie_review_local, 'key': movie_review_load}"}), "(dag=dag, task_id='user_review_to_s3_stage', python_callable=\n _local_to_s3, op_kwargs={'filename': movie_review_local, 'key':\n movie_review_load})\n", (3594, 3747), False, 'from airflow.operators import PythonOperator\n'), ((3784, 3985), 'airflow.operators.PythonOperator', 'PythonOperator', ([], {'dag': 'dag', 'task_id': '"""move_emr_script_to_s3"""', 'python_callable': '_local_to_s3', 'op_kwargs': "{'filename': movie_text_classification_script, 'key':\n 'scripts/random_text_classification.py'}"}), "(dag=dag, task_id='move_emr_script_to_s3', python_callable=\n _local_to_s3, op_kwargs={'filename': movie_text_classification_script,\n 'key': 'scripts/random_text_classification.py'})\n", (3798, 3985), False, 'from airflow.operators import PythonOperator\n'), ((4171, 4502), 'airflow.contrib.operators.emr_add_steps_operator.EmrAddStepsOperator', 'EmrAddStepsOperator', ([], {'dag': 'dag', 'task_id': '"""add_emr_steps"""', 'job_flow_id': 'EMR_ID', 'aws_conn_id': '"""aws_default"""', 'steps': 'emr_steps', 'params': 
"{'BUCKET_NAME': BUCKET_NAME, 'movie_review_load': movie_review_load_folder,\n 'text_classifier_script': text_classifier_script, 'movie_review_stage':\n movie_review_stage}", 'depends_on_past': '(True)'}), "(dag=dag, task_id='add_emr_steps', job_flow_id=EMR_ID,\n aws_conn_id='aws_default', steps=emr_steps, params={'BUCKET_NAME':\n BUCKET_NAME, 'movie_review_load': movie_review_load_folder,\n 'text_classifier_script': text_classifier_script, 'movie_review_stage':\n movie_review_stage}, depends_on_past=True)\n", (4190, 4502), False, 'from airflow.contrib.operators.emr_add_steps_operator import EmrAddStepsOperator\n'), ((4917, 5222), 'airflow.operators.PythonOperator', 'PythonOperator', ([], {'dag': 'dag', 'task_id': '"""user_purchase_to_rs_stage"""', 'python_callable': 'run_redshift_external_query', 'op_kwargs': '{\'qry\':\n "alter table spectrum.user_purchase_staging add partition(insert_date=\'{{ ds }}\') location \'s3://data-engineering-batch-mmc/user_purchase/stage/{{ ds }}\'"\n }'}), '(dag=dag, task_id=\'user_purchase_to_rs_stage\',\n python_callable=run_redshift_external_query, op_kwargs={\'qry\':\n "alter table spectrum.user_purchase_staging add partition(insert_date=\'{{ ds }}\') location \'s3://data-engineering-batch-mmc/user_purchase/stage/{{ ds }}\'"\n })\n', (4931, 5222), False, 'from airflow.operators import PythonOperator\n'), ((5268, 5381), 'airflow.operators.postgres_operator.PostgresOperator', 'PostgresOperator', ([], {'dag': 'dag', 'task_id': '"""get_user_behaviour"""', 'sql': 'get_user_behaviour', 'postgres_conn_id': '"""redshift"""'}), "(dag=dag, task_id='get_user_behaviour', sql=\n get_user_behaviour, postgres_conn_id='redshift')\n", (5284, 5381), False, 'from airflow.operators.postgres_operator import PostgresOperator\n'), ((1460, 1481), 'datetime.datetime', 'datetime', (['(2010)', '(12)', '(1)'], {}), '(2010, 12, 1)\n', (1468, 1481), False, 'from datetime import datetime, timedelta\n'), ((1712, 1732), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (1721, 1732), False, 'from datetime import datetime, timedelta\n'), ((1953, 1961), 'airflow.hooks.S3_hook.S3Hook', 'S3Hook', ([], {}), '()\n', (1959, 1961), False, 'from airflow.hooks.S3_hook import S3Hook\n'), ((2092, 2120), 'os.path.isfile', 'os.path.isfile', (['filelocation'], {}), '(filelocation)\n', (2106, 2120), False, 'import os\n'), ((2273, 2314), 'airflow.hooks.postgres_hook.PostgresHook', 'PostgresHook', ([], {'postgres_conn_id': '"""redshift"""'}), "(postgres_conn_id='redshift')\n", (2285, 2314), False, 'from airflow.hooks.postgres_hook import PostgresHook\n'), ((4083, 4103), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (4092, 4103), False, 'import json\n'), ((2130, 2153), 'os.remove', 'os.remove', (['filelocation'], {}), '(filelocation)\n', (2139, 2153), False, 'import os\n')] |
import subprocess
import json
import yaml
import shlex
import os
import uuid
def refresh_charts(branch='master'):
cwd = os.getcwd()
try:
charts_url = os.environ['CHARTS_URL']
    except KeyError:
        charts_url = 'https://github.com/scaleoutsystems/charts/archive/{}.zip'.format(branch)
    # normalise the branch name so the removed folder matches what the zip extracts to
    status = subprocess.run('rm -rf charts-{}'.format(branch.replace('/', '-')).split(' '), cwd=cwd)
status = subprocess.run('wget -O {}.zip {}'.format(branch.replace('/', '-'), charts_url).split(' '), cwd=cwd)
status = subprocess.run('unzip {}.zip'.format(branch.replace('/', '-')).split(' '),cwd=cwd)
class Controller:
def __init__(self, cwd):
self.cwd = cwd
self.branch = os.environ['BRANCH']
self.default_args = ['helm']
pass
def deploy(self, options, action='install'):
# extras = ''
"""
try:
minio = ' --set service.minio=' + str(options['minio_port'])
extras = extras + minio
except KeyError as e:
print("could not get minioport!")
try:
controller = ' --set service.controller=' + str(options['controller_port'])
extras = extras + controller
except KeyError as e:
print("could not get controllerport")
pass
try:
user = ' --set alliance.user=' + str(options['user'])
extras = extras + user
except KeyError as e:
print("could not get user")
pass
try:
project = ' --set alliance.project=' + str(options['project'])
extras = extras + project
except KeyError as e:
print("could not get project")
pass
try:
apiUrl = ' --set alliance.apiUrl=' + str(options['api_url'])
extras = extras + apiUrl
except KeyError as e:
print("could not get apiUrl")
pass
"""
# for key in options:
# print(key)
# print(options[key])
# extras = extras + ' --set {}={}'.format(key, options[key])
volume_root = "/"
if "TELEPRESENCE_ROOT" in os.environ:
volume_root = os.environ["TELEPRESENCE_ROOT"]
kubeconfig = os.path.join(volume_root, 'root/.kube/config')
if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'true':
chart = 'charts/scaleout/'+options['chart']
else:
refresh_charts(self.branch)
fname = self.branch.replace('/', '-')
chart = 'charts-{}/scaleout/{}'.format(fname, options['chart'])
args = ['helm', action, '--kubeconfig', kubeconfig, options['release'], chart]
# tmp_file_name = uuid.uuid1().hex+'.yaml'
# tmp_file = open(tmp_file_name, 'w')
# yaml.dump(options, tmp_file, allow_unicode=True)
# tmp_file.close()
# args.append('-f')
# args.append(tmp_file_name)
for key in options:
args.append('--set')
# args.append('{}={}'.format(key, options[key]))
args.append(key+"="+options[key].replace(',', '\,'))
print(args)
status = subprocess.run(args, cwd=self.cwd)
# os.remove(tmp_file_name)
return json.dumps({'helm': {'command': args, 'cwd': str(self.cwd), 'status': str(status)}})
def delete(self, options):
volume_root = "/"
if "TELEPRESENCE_ROOT" in os.environ:
volume_root = os.environ["TELEPRESENCE_ROOT"]
kubeconfig = os.path.join(volume_root, 'root/.kube/config')
print(type(options))
print(options)
# args = 'helm --kubeconfig '+str(kubeconfig)+' delete {release}'.format(release=options['release']) #.split(' ')
args = ['helm', '--kubeconfig', str(kubeconfig), 'delete', options['release']]
status = subprocess.run(args, cwd=self.cwd)
return json.dumps({'helm': {'command': args, 'cwd': str(self.cwd), 'status': str(status)}})
def update(self, options, chart):
pass
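# Hypothetical usage sketch (chart/release names are made up; assumes BRANCH is
# set and a kubeconfig exists under the volume root):
# os.environ["BRANCH"] = "master"
# ctrl = Controller(cwd=os.getcwd())
# print(ctrl.deploy({"chart": "stackn", "release": "demo"}))
# print(ctrl.delete({"release": "demo"}))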
| [
"subprocess.run",
"os.path.join",
"os.getcwd"
] | [((127, 138), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (136, 138), False, 'import os\n'), ((2248, 2294), 'os.path.join', 'os.path.join', (['volume_root', '"""root/.kube/config"""'], {}), "(volume_root, 'root/.kube/config')\n", (2260, 2294), False, 'import os\n'), ((3161, 3195), 'subprocess.run', 'subprocess.run', (['args'], {'cwd': 'self.cwd'}), '(args, cwd=self.cwd)\n', (3175, 3195), False, 'import subprocess\n'), ((3514, 3560), 'os.path.join', 'os.path.join', (['volume_root', '"""root/.kube/config"""'], {}), "(volume_root, 'root/.kube/config')\n", (3526, 3560), False, 'import os\n'), ((3839, 3873), 'subprocess.run', 'subprocess.run', (['args'], {'cwd': 'self.cwd'}), '(args, cwd=self.cwd)\n', (3853, 3873), False, 'import subprocess\n')] |
import random
import os
import binascii
import sys
a = long(binascii.hexlify(os.urandom(2500)), 16)
rand = random.Random(a)
flag = "__RS{REDACTEDREDACTEDRED}" #only difference between this and challenge binary is the characters of the flag were replaced with the characters of redacted
def transform(x):
    # Iteratively undoes the MT19937 output tempering (the steps are applied in
    # reverse order of the tempering), recovering the raw state word.
    # NOTE: Python 2 code -- `reduce` and `long` are builtins here.
    unshiftRight = lambda res, next: x ^ (res >> next)
    makeUnshiftLeft = lambda mask: lambda res, next: x ^ (res << next & mask)
    makeList = lambda x: [x for i in range(32)]
    x = reduce(unshiftRight, makeList(18))
    x = reduce(makeUnshiftLeft(0xefc60000), makeList(15))
    x = reduce(makeUnshiftLeft(0x9d2c5680), makeList(7))
    x = reduce(unshiftRight, makeList(11))
    return x
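# For reference (not in the original file): the standard MT19937 tempering that
# transform() above inverts.
def temper(y):
    y ^= y >> 11
    y ^= (y << 7) & 0x9d2c5680
    y ^= (y << 15) & 0xefc60000
    y ^= y >> 18
    return y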
def make_plaintext(padlen, num_iters=8):
#Make sure the padlen is reasonable.
#We pad num_iters times with padlen repeated random characters
if padlen*num_iters > 325:
padlen = 325/num_iters
print("Too much padding, shortening padding")
if padlen < len(flag):
padlen = len(flag)
print("Too little padding, lengthening padding")
gen_rand = random.Random()
padding = ""
for i in range(num_iters):
padding += chr(gen_rand.getrandbits(8))*padlen
#Make our message even longer, just to show the strength
padded_msg = (flag + padding)
plaintext = padded_msg
while(len(plaintext) < 2600):
plaintext += padded_msg[:2600-len(plaintext)]
return plaintext
def encrypt(plaintext):
ct = []
pos = 0
while pos < len(plaintext):
rand_int = rand.getrandbits(32)
trand_int = transform(rand_int)
for j in range(4):
if pos >= len(plaintext):
break
rand_short = (trand_int >> (j*8)) & 0xff
ct.append(rand_short ^ ord(plaintext[pos]))
pos += 1
return ct
print("Welcome to my advanced encryption service")
print("I have a new super secure stream cipher.")
print("One thing that it really needs is padding. I'll let let you chose how much of that to add.")
print("Don't try any of those buffer overflow tricks though. You can only ask for so much padding")
print("Just to show the strength of my cipher, I'll encrypt the message a few times and give you the result.")
print("So how much padding should I use?")
sys.stdout.flush()
padlen = int(sys.stdin.readline())
plaintext = make_plaintext(padlen)
ct = encrypt(plaintext)
print("Here your encrypted message is")
print((''.join([chr(x) for x in ct])).encode("hex"))
| [
"random.Random",
"os.urandom",
"sys.stdout.flush",
"sys.stdin.readline"
] | [((108, 124), 'random.Random', 'random.Random', (['a'], {}), '(a)\n', (121, 124), False, 'import random\n'), ((2299, 2317), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2315, 2317), False, 'import sys\n'), ((1096, 1111), 'random.Random', 'random.Random', ([], {}), '()\n', (1109, 1111), False, 'import random\n'), ((2332, 2352), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (2350, 2352), False, 'import sys\n'), ((78, 94), 'os.urandom', 'os.urandom', (['(2500)'], {}), '(2500)\n', (88, 94), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'litleleprikon'
from player import FIGURES, FIG_LEN, Player
def comparator(figure_1: str, figure_2: str) -> int:
"""
    Function that compares two figures and returns 0 if the figures are equal,
    1 if first figure beats second
    2 if second figure beats first
    :param figure_1: name of first figure
    :param figure_2: name of second figure
:return:
-- Doctests --
>>> comparator('камень', 'бумага')
2
>>> comparator('камень', 'камень')
0
>>> comparator('камень', 'ножницы')
1
"""
if figure_1 not in FIGURES or figure_2 not in FIGURES:
        raise Exception('Available figures: {}'.format(', '.join(FIGURES)))
if figure_1 == figure_2:
return 0
elif figure_2 == FIGURES[(FIGURES.index(figure_1) + 1) % FIG_LEN]:
return 2
return 1
class Tournament:
"""
Tournament class is responsible for tournament.
This class creates players list and run rounds of tournament
"""
def __init__(self, rounds_count: int):
self.__counter = rounds_count
self.__players = [Player(i) for i in range(2**rounds_count)]
print('\n'.join(map(str, self.__players)))
def __make_round(self):
winners = []
players_len = len(self.__players)
i = 0
winner = None
while i < players_len:
while True:
result = comparator(self.__players[i].figure, self.__players[i+1].figure)
if result != 0:
winner = self.__players[i-1+result]
                    print('{} vs {} - {} wins!'.format(self.__players[i].name, self.__players[i+1].name, winner.name))
break
winners.append(winner)
i += 2
if len(winners) == 1:
return True
self.__players = winners
def run(self):
i = 1
while True:
if len(self.__players) == 2:
                print('\nFinal')
            else:
                print('\nRound {}'.format(i))
if self.__make_round():
break
i += 1
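# Usage sketch (2**rounds_count players, so 3 rounds means 8 players):
# if __name__ == '__main__':
#     Tournament(3).run()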
| [
"player.FIGURES.index",
"player.Player"
] | [((1157, 1166), 'player.Player', 'Player', (['i'], {}), '(i)\n', (1163, 1166), False, 'from player import FIGURES, FIG_LEN, Player\n'), ((827, 850), 'player.FIGURES.index', 'FIGURES.index', (['figure_1'], {}), '(figure_1)\n', (840, 850), False, 'from player import FIGURES, FIG_LEN, Player\n')] |
import sys
from getch import getch
commands = '><+-.,[]'
class BrainfuckInterpreter:
def __init__(self):
self.i = 0
self.p = 0
self.cells = [0]
@staticmethod
def find_matching_paren(source, c):
paren = 0
d = {'[':']', ']':'['}
for k in range(len(source)):
if source[k]==c:
paren += 1
elif source[k]==d[c]:
if paren == 0:
return k
paren -= 1
raise SyntaxError("Could not find the matching parenthesis")
def show_cells(self):
print("Cell pointer is at", self.p)
for i in range(len(self.cells)):
print(i, self.cells[i],end=" ")
if self.p == i:
print("<-")
else:
print("")
def eval(self, source):
s = ''
while self.i != len(source):
c = source[self.i]
if c == '>':
if self.p == len(self.cells)-1:
self.cells.append(0)
self.p += 1
elif c == '<' and self.p != 0:
self.p -= 1
elif c == '+':
self.cells[self.p] += 1
elif c == '-':
self.cells[self.p] -= 1
elif c == '.':
sys.stdout.write(chr(self.cells[self.p]))
sys.stdout.flush()
s += chr(self.cells[self.p])
elif c == ',':
self.cells[self.p] = ord(getch())
elif c == '[' and self.cells[self.p] == 0:
self.i += self.find_matching_paren(source[self.i+1:], c)
elif c == ']' and self.cells[self.p] != 0:
self.i -= self.find_matching_paren(source[self.i-1::-1], c) + 1
self.i += 1
return s
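# Sanity check (not part of the original file): this program prints 'A' (65).
# BrainfuckInterpreter().eval('++++++++[>++++++++<-]>+.')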
def usage():
print("", file=sys.stderr)
print("Usage: {0} <filename>".format(__file__), file=sys.stderr)
print("", file=sys.stderr)
print("For more information, please visit https://github.com/handrake/brainfuck", file=sys.stderr)
print("", file=sys.stderr)
def shell():
source = ''
    while True:
        line = input("brainfuck>> ")
        if line == '':
            break
source += line
source = ''.join([c for c in source if c in commands])
return source
def main():
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "", [])
except getopt.GetoptError as err:
usage()
sys.exit(2)
filename = args[0] if len(args) > 0 else None
if filename:
source = open(filename).read()
else:
source = shell()
interpreter = BrainfuckInterpreter()
interpreter.eval(source)
interpreter.show_cells()
if __name__ == "__main__":
main()
| [
"sys.stdout.flush",
"getopt.getopt",
"getch.getch",
"sys.exit"
] | [((2374, 2409), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '""""""', '[]'], {}), "(sys.argv[1:], '', [])\n", (2387, 2409), False, 'import getopt\n'), ((2472, 2483), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2480, 2483), False, 'import sys\n'), ((1381, 1399), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1397, 1399), False, 'import sys\n'), ((1513, 1520), 'getch.getch', 'getch', ([], {}), '()\n', (1518, 1520), False, 'from getch import getch\n')] |
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name="topgeo",
version="0.1",
long_description="Herramienta para realizar calculos topograficos",
license="MIT",
packages=["topgeo"])
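# Standard distutils workflow from the project root:
#   python setup.py sdist      # build a source distribution
#   python setup.py install    # install the package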
| [
"distutils.core.setup"
] | [((66, 214), 'distutils.core.setup', 'setup', ([], {'name': '"""topgeo"""', 'version': '"""0.1"""', 'long_description': '"""Herramienta para realizar calculos topograficos"""', 'license': '"""MIT"""', 'packages': "['topgeo']"}), "(name='topgeo', version='0.1', long_description=\n 'Herramienta para realizar calculos topograficos', license='MIT',\n packages=['topgeo'])\n", (71, 214), False, 'from distutils.core import setup\n')] |
from flask import url_for, request
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField
from wtforms.validators import DataRequired, ValidationError, Email, EqualTo, InputRequired
from app.models import Genre
class AddReviewForm(FlaskForm):
    title_parent_id = HiddenField("Reference title")
    title = StringField('Title', validators=[DataRequired()])
    content = TextAreaField('Review content', validators=[DataRequired()])
    rating = StringField("Rating", validators=[DataRequired()])
    pros = FieldList(StringField("Pros"))
    cons = FieldList(StringField("Cons"))
    recommended = SelectField('Recommended', coerce=int, choices=[(0, "No"), (1, "Yes")])
    submit = SubmitField('Write review')
def get_custom_action(self):
return getattr(self, "custom_action", "")
class EditReviewForm(AddReviewForm):
submit = SubmitField("Modifica recensione")
class AddDiscussionForm(FlaskForm):
    title_parent_id = HiddenField("Reference title")
    title = StringField('Title', validators=[DataRequired()])
    description = TextAreaField('Discussion content', validators=[DataRequired()])
    submit = SubmitField('Start discussion')
def get_custom_action(self):
return getattr(self, "custom_action", "")
class EditDiscussionForm(AddDiscussionForm):
submit = SubmitField("Modifica discussione")
class AnswerDiscussionForm(FlaskForm):
    title_parent_id = HiddenField("Reference title")
    description = TextAreaField('Your answer', validators=[DataRequired()])
    submit = SubmitField('Send')
def get_custom_action(self, discussion=None):
custom_action = getattr(self, "custom_action", "")
if custom_action == "":
if discussion is not None:
return url_for("main.add_answer", discussion_id=discussion.id)
return custom_action
class EditAnswerDiscussionForm(AnswerDiscussionForm):
submit = SubmitField("Modifica")
class SurveyGenreForm(FlaskForm):
return_url_for = "main.homepage"
survey_id = HiddenField()
genres_liked = FieldList(StringField())
submit = SubmitField("Salva")
def genres(self):
return Genre.objects().limit(50) | [
"app.models.Genre.objects",
"wtforms.SubmitField",
"wtforms.StringField",
"flask.url_for",
"wtforms.HiddenField",
"wtforms.SelectField",
"wtforms.validators.DataRequired"
] | [((389, 425), 'wtforms.HiddenField', 'HiddenField', (['"""Titolo di riferimento"""'], {}), "('Titolo di riferimento')\n", (400, 425), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((748, 818), 'wtforms.SelectField', 'SelectField', (['"""Consigliato"""'], {'coerce': 'int', 'choices': "[(0, 'No'), (1, 'Sì')]"}), "('Consigliato', coerce=int, choices=[(0, 'No'), (1, 'Sì')])\n", (759, 818), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((831, 863), 'wtforms.SubmitField', 'SubmitField', (['"""Scrivi Recensione"""'], {}), "('Scrivi Recensione')\n", (842, 863), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((1007, 1041), 'wtforms.SubmitField', 'SubmitField', (['"""Modifica recensione"""'], {}), "('Modifica recensione')\n", (1018, 1041), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((1110, 1146), 'wtforms.HiddenField', 'HiddenField', (['"""Titolo di riferimento"""'], {}), "('Titolo di riferimento')\n", (1121, 1146), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((1312, 1345), 'wtforms.SubmitField', 'SubmitField', (['"""Inizia discussione"""'], {}), "('Inizia discussione')\n", (1323, 1345), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((1495, 1530), 'wtforms.SubmitField', 'SubmitField', (['"""Modifica discussione"""'], {}), "('Modifica discussione')\n", (1506, 1530), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((1596, 1632), 'wtforms.HiddenField', 'HiddenField', (['"""Titolo di riferimento"""'], {}), "('Titolo di riferimento')\n", (1607, 1632), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((1728, 1748), 'wtforms.SubmitField', 'SubmitField', (['"""Invia"""'], {}), "('Invia')\n", (1739, 1748), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((2126, 2149), 'wtforms.SubmitField', 'SubmitField', (['"""Modifica"""'], {}), "('Modifica')\n", (2137, 2149), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((2244, 2257), 'wtforms.HiddenField', 'HiddenField', ([], {}), '()\n', (2255, 2257), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((2317, 2337), 'wtforms.SubmitField', 'SubmitField', (['"""Salva"""'], {}), "('Salva')\n", (2328, 2337), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((664, 682), 'wtforms.StringField', 'StringField', (['"""Pro"""'], {}), "('Pro')\n", (675, 682), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, 
HiddenField, FieldList, SelectField, FormField\n'), ((706, 727), 'wtforms.StringField', 'StringField', (['"""Contro"""'], {}), "('Contro')\n", (717, 727), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((2288, 2301), 'wtforms.StringField', 'StringField', ([], {}), '()\n', (2299, 2301), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, HiddenField, FieldList, SelectField, FormField\n'), ((473, 487), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (485, 487), False, 'from wtforms.validators import DataRequired, ValidationError, Email, EqualTo, InputRequired\n'), ((555, 569), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (567, 569), False, 'from wtforms.validators import DataRequired, ValidationError, Email, EqualTo, InputRequired\n'), ((625, 639), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (637, 639), False, 'from wtforms.validators import DataRequired, ValidationError, Email, EqualTo, InputRequired\n'), ((1194, 1208), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1206, 1208), False, 'from wtforms.validators import DataRequired, ValidationError, Email, EqualTo, InputRequired\n'), ((1281, 1295), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1293, 1295), False, 'from wtforms.validators import DataRequired, ValidationError, Email, EqualTo, InputRequired\n'), ((1697, 1711), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1709, 1711), False, 'from wtforms.validators import DataRequired, ValidationError, Email, EqualTo, InputRequired\n'), ((1959, 2014), 'flask.url_for', 'url_for', (['"""main.add_answer"""'], {'discussion_id': 'discussion.id'}), "('main.add_answer', discussion_id=discussion.id)\n", (1966, 2014), False, 'from flask import url_for, request\n'), ((2379, 2394), 'app.models.Genre.objects', 'Genre.objects', ([], {}), '()\n', (2392, 2394), False, 'from app.models import Genre\n')] |
import os
import unittest
from flask import current_app
from app import create_app, db
class EntryModelTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_database(self):
"""
Ensure that the database file exists
"""
tester = os.path.exists('data-test.sqlite')
self.assertTrue(tester)
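# Run with the standard unittest runner (assumes the 'testing' config points the
# database at data-test.sqlite):
#   python -m unittest discover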
| [
"os.path.exists",
"app.create_app",
"app.db.session.remove",
"app.db.create_all",
"app.db.drop_all"
] | [((175, 196), 'app.create_app', 'create_app', (['"""testing"""'], {}), "('testing')\n", (185, 196), False, 'from app import create_app, db\n'), ((287, 302), 'app.db.create_all', 'db.create_all', ([], {}), '()\n', (300, 302), False, 'from app import create_app, db\n'), ((336, 355), 'app.db.session.remove', 'db.session.remove', ([], {}), '()\n', (353, 355), False, 'from app import create_app, db\n'), ((364, 377), 'app.db.drop_all', 'db.drop_all', ([], {}), '()\n', (375, 377), False, 'from app import create_app, db\n'), ((526, 560), 'os.path.exists', 'os.path.exists', (['"""data-test.sqlite"""'], {}), "('data-test.sqlite')\n", (540, 560), False, 'import os\n')] |
from __future__ import absolute_import, division, print_function
import os
location = os.path.expanduser("~")
from django.conf import settings
try:
client_id = settings.THREAT_EQUATION_PRODUCT_KEY
secret = settings.THREAT_EQUATION_API_KEY
except AttributeError:
    # the ThreatEquation keys are missing from the project's settings
    client_id = 'n/a'
    secret = 'n/a'
server = 'https://www.threatequation.com'
plugin_name = 'ThreatequationPythonDjango v0.0.5'
django_server = server + '/api/v1/attack_log/'
verify_url = server + '/api/v1/product_verify/'
port = None
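# The two keys read above are defined in the host project's Django settings,
# e.g. (hypothetical values):
# THREAT_EQUATION_PRODUCT_KEY = "<product key>"
# THREAT_EQUATION_API_KEY = "<api key>"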
| [
"os.path.expanduser"
] | [((87, 110), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (105, 110), False, 'import os\n')] |
import asyncio, base64, discord, math, praw, random, re, requests, time
from utils.datautils import config, data, default, save_data, set_client
from utils.discordbot import BotClient, send
client = None
connection = None
words = []
wordmap = {}
reddit = praw.Reddit(client_id = config["reddit"]["client-id"], client_secret = config["reddit"]["client-secret"], user_agent = config["reddit"]["user-agent"])
anagram_lock = asyncio.Lock()
def display(actual, scrambled, hint):
if hint == 0: return scrambled
cl = list(scrambled)
start = actual[:hint if hint * 2 <= len(actual) else -hint]
end = actual[-hint:]
for c in start + end:
cl.remove(c)
return f"**{start}**{''.join(cl)}**{end}**"
async def anagram_function(message, answer = None, stop = False, start = False):
global wordmap
async with anagram_lock:
if stop:
if message.channel.id in (await default("anagrams", {})):
actual, _, _, _ = (await data())["anagrams"][message.channel.id]
if len(wordmap[tuple(sorted(actual))]) == 1:
await send(message, f"Anagram puzzle ended! The correct answer was: '{actual}'.", reaction = "check")
else:
answers = ", ".join(f"'{ans}'" for ans in wordmap[tuple(sorted(actual))])
await send(message, f"Anagram puzzle ended! The correct answers were: {answers}", reaction = "check")
del (await data())["anagrams"][message.channel.id]
await save_data()
else:
await send(message, "There is no anagram puzzle in this channel!", reaction = "x")
if answer:
correct = False
if message.channel.id in (await default("anagrams", {})):
actual, _, hint, timestamp = (await data())["anagrams"][message.channel.id]
if sorted(answer.lower()) == sorted(actual.lower()) and answer.lower() in words:
points = max(len(answer) - hint * 2, 0)
bonus = 0
if time.time() - timestamp <= 5:
bonus = int(math.floor(points * 0.5))
pp = (await default("puzzlepoints", {}))
key = (message.guild.id, message.author.id)
if key not in (await data())["puzzlepoints"]:
(await data())["puzzlepoints"][key] = {}
await save_data()
if "anagram" not in (await data())["puzzlepoints"][key]:
(await data())["puzzlepoints"][key]["anagram"] = 0
await save_data()
(await data())["puzzlepoints"][key]["anagram"] += points + bonus
await save_data()
bd = f" **+{bonus}**" if bonus else ""
alts = ", ".join(wordmap[tuple(sorted(answer))] - {answer})
ad = f" (Alternative answers: {alts})" if len(wordmap[tuple(sorted(answer))]) > 1 else ""
await send(message, f"Congratulations to {message.author.mention} for winning the anagram puzzle! (+{points}{bd}){ad}", reaction = "check")
correct = True
(await default("anagramninja", {}))[message.channel.id] = (actual, time.time())
await save_data()
del (await data())["anagrams"][message.channel.id]
await save_data()
start = True
if not correct and message.channel.id in (await default("anagramninja", {})):
actual, timestamp = (await data())["anagramninja"][message.channel.id]
if time.time() - timestamp <= 1 and sorted(answer.lower()) == sorted(actual.lower()) and answer.lower() in words:
await send(message, f"{message.author.mention} L", reaction = "x")
if start:
if message.channel.id not in (await default("anagrams", {})):
answer = random.choice(words)
cl = list(answer)
random.shuffle(cl)
scrambled = "".join(cl)
(await data())["anagrams"][message.channel.id] = (answer, scrambled, 0, time.time())
await save_data()
await send(message, f"Anagram puzzle! Solve for: '{scrambled}' ({len(scrambled)}).")
else:
actual, scrambled, hint, _ = (await data())["anagrams"][message.channel.id]
await send(message, f"An anagram puzzle is already running! Solve for: '{display(actual, scrambled, hint)}' ({len(actual)}).", reaction = "x")
await save_data()
class ToplaneClient(BotClient):
def __init__(self):
BotClient.__init__(self)
self.name = "toplane"
async def on_ready(self):
await BotClient.on_ready(self)
global words, wordmap
with open("words.txt", "r") as f:
words = list(map(str.strip, f.read().strip().splitlines()))
wordmap = {}
for word in words:
(await default(tuple(sorted(word)), set(), wordmap)).add(word)
async def process(self, message):
await anagram_function(message, answer = re.sub("[?!.,\"'()\\[\\]{}> `*_~]", "", message.content.strip()))
def save_words():
with open("words.txt", "w") as f:
f.write("\n".join(words))
client = ToplaneClient()
@client.command("Testing Commands", ["test"], "test", "Test the Toplane bot")
async def command_test(command, message):
await send(message, "Test success!", reaction = "check")
@client.command("Puzzle Commands", ["anagram"], "anagram", "alias for `anagram start`")
@client.command("Puzzle Commands", ["anagram", "start"], "anagram start", "start an anagram puzzle")
async def command_anagram_start(command, message):
await anagram_function(message, start = True)
@client.command("Puzzle Commands", ["anagram", "stop"], "anagram stop", "stop the anagram puzzle")
async def command_anagram_stop(command, message):
await anagram_function(message, stop = True)
@client.command("Puzzle Commands", ["anagram", "restart"], "anagram restart", "restart the anagram puzzle (functionally stop + start but thread-safe)")
async def command_anagram_restart(command, message):
await anagram_function(message, stop = True, start = True)
@client.command("Puzzle Commands", ["anagram", "hint"], "anagram hint", "reveal one letter from the start and end of the anagram answer (decreases point value by 2)")
async def command_anagram_hint(command, message):
if message.channel.id not in (await default("anagrams", {})):
await send(message, "There is no anagram puzzle in this channel!", reaction = "x")
else:
answer, scrambled, hint, timestamp = (await data())["anagrams"][message.channel.id]
hint += 1
if hint * 2 >= len(answer) - 1:
await anagram_function(message, stop = True)
else:
await send(message, f"Hint: the current puzzle starts with '{answer[:hint]}' and ends with '{answer[-hint:]}' ('{display(answer, scrambled, hint)}' ({len(answer)})).", reaction = "check")
(await data())["anagrams"][message.channel.id] = (answer, scrambled, hint, timestamp)
await save_data()
@client.command("Puzzle Commands", ["anagram", "reorder"], "anagram reorder", "reorder the anagram puzzle")
async def command_anagram_reorder(command, message):
if message.channel.id not in (await default("anagrams", {})):
await send(message, "There is no anagram puzzle in this channel!", reaction = "x")
else:
answer, scrambled, hint, timestamp = (await data())["anagrams"][message.channel.id]
cl = list(scrambled)
random.shuffle(cl)
scrambled = "".join(cl)
await send(message, f"Reordered: solve for '{display(answer, scrambled, hint)}'.", reaction = "check")
(await data())["anagrams"][message.channel.id] = (answer, scrambled, hint, timestamp)
await save_data()
@client.command("Puzzle Commands", ["anagram", "add", ".+"], "anagram add <word>", "add a word to the dictionary (must be all lowercase letters, at least 5 letters long)")
async def command_anagram_add(command, message):
if re.match("[a-z]{5}[a-z]*", command[2]) is None:
await send(message, "Invalid word! Must be all lowercase letters, at least 5 letters long.", reaction = "x")
elif command[2] in words:
await send(message, "That word is already in the dictionary!", reaction = "x")
else:
words.append(command[2])
words.sort()
(await default(tuple(sorted(command[2])), set(), wordmap)).add(command[2])
save_words()
await send(message, f"Added '{command[2]}' to the dictionary!", reaction = "check")
@client.command("Puzzle Commands", ["anagram", "rm", ".+"], "anagram rm <word>", "alias for `anagram remove`")
@client.command("Puzzle Commands", ["anagram", "remove", ".+"], "anagram remove <word>", "remove a word from the dictionary")
async def command_anagram_remove(command, message):
if command[2] not in words:
await send(message, "That word is not in the dictionary!", reaction = "x")
else:
words.remove(command[2])
wordmap[tuple(sorted(command[2]))].discard(command[2])
save_words()
await send(message, f"Removed '{command[2]}' from the dictionary!", reaction = "check")
@client.command("Puzzle Commands", ["anagram", "leaderboard"], "anagram leaderboard", "display the anagram puzzle score leaderboard")
async def command_anagram_leaderboard(command, message):
scores = []
for gid, mid in (await default("puzzlepoints", {})):
if gid != message.guild.id: continue
points = (await data())["puzzlepoints"][(gid, mid)].get("anagram", 0)
if points:
scores.append((points, message.guild.get_member(mid)))
scores.sort(reverse = True)
await send(message, embed = discord.Embed(
title = "Anagram Leaderboard",
description = "\n".join(f"{member} - {score}" for score, member in scores) or "The leaderboard is empty!"
), reaction = "check")
@client.command("Reddit Commands", ["ket"], "ket", "alias for `cat`")
@client.command("Reddit Commands", ["cat"], "cat", "fetch a random cat image from /r/cat")
@client.command("Reddit Commands", ["mem"], "mem", "alias for `meme`")
@client.command("Reddit Commands", ["meme"], "meme", "fetch a random meme from /r/memes")
async def command_cat_meme(command, message):
if command[0] == "ket" and random.random() < 0.01:
await send(message, f"{message.author.mention} overdosed on ketamine and died.", reaction = "x")
else:
while True:
item = reddit.subreddit("cat" if command[0] == "ket" or command[0] == "cat" else "memes").random()
if any(item.url.endswith(suffix) for suffix in [".jpg", ".png", ".gif"]) and (message.channel.is_nsfw() or not item.over_18):
break
await send(message, item.url, reaction = "check")
@client.command("Reddit Commands", ["jonk"], "jonk", "alias for `jonk`")
@client.command("Reddit Commands", ["joke"], "joke", "fetch a random joke from /r/jokes")
async def command_joke(command, message):
if random.random() < 0.01:
await send(message, f"{message.author.mention} **Discord** would like access to your camera.", reaction = "check")
else:
while True:
item = reddit.subreddit("jokes").random()
if item.selftext and (message.channel.is_nsfw() or not item.over_18):
break
await send(message, f"**{item.title}**\n{item.selftext}", reaction = "check")
@client.command("Miscellaneous Commands", ["roll", "?"], "roll [config: xdy+xdy-xdy+...+n default 1d6]", "roll a die / dice (`<x>d<y>` to roll `x` `y`-sided dice)")
async def command_roll(command, message):
if len(command) > 1:
dice_config = command[1]
else:
dice_config = "1d6"
match = re.match(r"^([+-]?\d+d\d+)(([+-]\d+d\d+)*)([+-]\d+)?$", dice_config)
if match is not None:
a, b, _, c = match.groups()
dc = a + (b or "") + (c or "")
if not dc.startswith("+"):
dc = "+" + dc
breaks = [i for i, c in enumerate(dc) if c == "+" or c == "-"]
blocks = [dc[s:e] for s, e in zip(breaks, breaks[1:])] + [dc[breaks[-1]:]]
pos = []
neg = []
mod = 0
for block in blocks:
if "d" in block:
x, y = map(int, block[1:].split("d"))
rolls = [random.randint(1, y) for _ in range(x)]
if block[0] == "+":
pos.extend(rolls)
else:
neg.extend(rolls)
else:
mod = int(block)
if 1 < len(pos) + len(neg) < 10:
breakdown = ""
if pos:
breakdown += f"[ {', '.join(map(str, pos))} ]"
if neg:
breakdown += f" - [ {', '.join(map(str, pos))} ]"
if mod > 0:
breakdown += f" + {mod}"
elif mod < 0:
breakdown += f" - {-mod}"
await send(message, f"{message.author.mention} rolled **{sum(pos) - sum(neg) + mod}**! ({breakdown})")
else:
await send(message, f"{message.author.mention} rolled **{sum(pos) - sum(neg) + mod}**!")
else:
await send(message, "Invalid dice configuration! The config must start with xdy (optionally +/- xdy), have zero or more +/- xdy, and optionally end with +/- a modifier.", reaction = "x")
@client.command("Miscellaneous Commands", ["rickroll", ".+"], "rickroll <channel>", "connect to a voice channel and play 'Never Gonna Give You Up' by <NAME>")
@client.command("Miscellaneous Commands", ["stickbug", ".+"], "stickbug <channel>", "connect to a voice channel and play the stickbug song")
@client.command("Miscellaneous Commands", ["thx", ".+"], "thx <channel>", "connect to a voice channel and play the loudest sound created by humans")
async def command_rickroll(command, message):
global connection
if connection:
await connection.disconnect()
connection = None
for channel in await message.guild.fetch_channels():
if type(channel) == discord.VoiceChannel and channel.name == command[1]:
connection = await channel.connect(timeout = 3)
connection.play(await discord.FFmpegOpusAudio.from_probe(f"{command[0]}.mp3"))
await send(message, "Enjoy :)", reaction = "check")
@client.command("Miscellaneous Commands", ["gtfo"], "gtfo", "alias for `disconnect`")
@client.command("Miscellaneous Commands", ["dc"], "dc", "alias for `disconnect`")
@client.command("Miscellaneous Commands", ["disconnect"], "disconnect", "disconnect from voice")
async def command_disconnect(command, message):
global connection
if connection:
await connection.disconnect()
connection = None
await send(message, "Disconnected!", reaction = "check")
else:
await send(message, "I am not connected to any voice channels!", reaction = "x")
set_client(client) | [
"discord.FFmpegOpusAudio.from_probe",
"utils.datautils.set_client",
"random.choice",
"random.shuffle",
"math.floor",
"utils.datautils.default",
"utils.discordbot.BotClient.on_ready",
"asyncio.Lock",
"re.match",
"utils.datautils.data",
"utils.discordbot.BotClient.__init__",
"praw.Reddit",
"utils.datautils.save_data",
"utils.discordbot.send",
"random.random",
"time.time",
"random.randint"
] | [((260, 409), 'praw.Reddit', 'praw.Reddit', ([], {'client_id': "config['reddit']['client-id']", 'client_secret': "config['reddit']['client-secret']", 'user_agent': "config['reddit']['user-agent']"}), "(client_id=config['reddit']['client-id'], client_secret=config[\n 'reddit']['client-secret'], user_agent=config['reddit']['user-agent'])\n", (271, 409), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((427, 441), 'asyncio.Lock', 'asyncio.Lock', ([], {}), '()\n', (439, 441), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((14041, 14059), 'utils.datautils.set_client', 'set_client', (['client'], {}), '(client)\n', (14051, 14059), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((11167, 11239), 're.match', 're.match', (['"""^([+-]?\\\\d+d\\\\d+)(([+-]\\\\d+d\\\\d+)*)([+-]\\\\d+)?$"""', 'dice_config'], {}), "('^([+-]?\\\\d+d\\\\d+)(([+-]\\\\d+d\\\\d+)*)([+-]\\\\d+)?$', dice_config)\n", (11175, 11239), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((4223, 4247), 'utils.discordbot.BotClient.__init__', 'BotClient.__init__', (['self'], {}), '(self)\n', (4241, 4247), False, 'from utils.discordbot import BotClient, send\n'), ((4973, 5021), 'utils.discordbot.send', 'send', (['message', '"""Test success!"""'], {'reaction': '"""check"""'}), "(message, 'Test success!', reaction='check')\n", (4977, 5021), False, 'from utils.discordbot import BotClient, send\n'), ((7101, 7119), 'random.shuffle', 'random.shuffle', (['cl'], {}), '(cl)\n', (7115, 7119), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((7594, 7632), 're.match', 're.match', (['"""[a-z]{5}[a-z]*"""', 'command[2]'], {}), "('[a-z]{5}[a-z]*', command[2])\n", (7602, 7632), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((8939, 8966), 'utils.datautils.default', 'default', (['"""puzzlepoints"""', '{}'], {}), "('puzzlepoints', {})\n", (8946, 8966), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((10478, 10493), 'random.random', 'random.random', ([], {}), '()\n', (10491, 10493), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((13433, 13476), 'utils.discordbot.send', 'send', (['message', '"""Enjoy :)"""'], {'reaction': '"""check"""'}), "(message, 'Enjoy :)', reaction='check')\n", (13437, 13476), False, 'from utils.discordbot import BotClient, send\n'), ((4152, 4163), 'utils.datautils.save_data', 'save_data', ([], {}), '()\n', (4161, 4163), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((4315, 4339), 'utils.discordbot.BotClient.on_ready', 'BotClient.on_ready', (['self'], {}), '(self)\n', (4333, 4339), False, 'from utils.discordbot import BotClient, send\n'), ((6033, 6056), 'utils.datautils.default', 'default', (['"""anagrams"""', '{}'], {}), "('anagrams', {})\n", (6040, 6056), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((6069, 6143), 'utils.discordbot.send', 'send', (['message', '"""There is no anagram puzzle in this channel!"""'], {'reaction': '"""x"""'}), "(message, 'There is no anagram puzzle in this channel!', reaction='x')\n", (6073, 6143), False, 'from utils.discordbot import BotClient, send\n'), ((6863, 6886), 'utils.datautils.default', 'default', (['"""anagrams"""', '{}'], {}), "('anagrams', {})\n", (6870, 6886), False, 'from utils.datautils import config, data, 
default, save_data, set_client\n'), ((6899, 6973), 'utils.discordbot.send', 'send', (['message', '"""There is no anagram puzzle in this channel!"""'], {'reaction': '"""x"""'}), "(message, 'There is no anagram puzzle in this channel!', reaction='x')\n", (6903, 6973), False, 'from utils.discordbot import BotClient, send\n'), ((7355, 7366), 'utils.datautils.save_data', 'save_data', ([], {}), '()\n', (7364, 7366), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((7652, 7760), 'utils.discordbot.send', 'send', (['message', '"""Invalid word! Must be all lowercase letters, at least 5 letters long."""'], {'reaction': '"""x"""'}), "(message,\n 'Invalid word! Must be all lowercase letters, at least 5 letters long.',\n reaction='x')\n", (7656, 7760), False, 'from utils.discordbot import BotClient, send\n'), ((8434, 8500), 'utils.discordbot.send', 'send', (['message', '"""That word is not in the dictionary!"""'], {'reaction': '"""x"""'}), "(message, 'That word is not in the dictionary!', reaction='x')\n", (8438, 8500), False, 'from utils.discordbot import BotClient, send\n'), ((8626, 8705), 'utils.discordbot.send', 'send', (['message', 'f"""Removed \'{command[2]}\' from the dictionary!"""'], {'reaction': '"""check"""'}), '(message, f"Removed \'{command[2]}\' from the dictionary!", reaction=\'check\')\n', (8630, 8705), False, 'from utils.discordbot import BotClient, send\n'), ((9806, 9821), 'random.random', 'random.random', ([], {}), '()\n', (9819, 9821), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((9840, 9932), 'utils.discordbot.send', 'send', (['message', 'f"""{message.author.mention} overdosed on ketamine and died."""'], {'reaction': '"""x"""'}), "(message, f'{message.author.mention} overdosed on ketamine and died.',\n reaction='x')\n", (9844, 9932), False, 'from utils.discordbot import BotClient, send\n'), ((10216, 10257), 'utils.discordbot.send', 'send', (['message', 'item.url'], {'reaction': '"""check"""'}), "(message, item.url, reaction='check')\n", (10220, 10257), False, 'from utils.discordbot import BotClient, send\n'), ((10512, 10626), 'utils.discordbot.send', 'send', (['message', 'f"""{message.author.mention} **Discord** would like access to your camera."""'], {'reaction': '"""check"""'}), "(message,\n f'{message.author.mention} **Discord** would like access to your camera.',\n reaction='check')\n", (10516, 10626), False, 'from utils.discordbot import BotClient, send\n'), ((10793, 10865), 'utils.discordbot.send', 'send', (['message', 'f"""**{item.title}**\n{item.selftext}"""'], {'reaction': '"""check"""'}), '(message, f"""**{item.title}**\n{item.selftext}""", reaction=\'check\')\n', (10797, 10865), False, 'from utils.discordbot import BotClient, send\n'), ((12384, 12571), 'utils.discordbot.send', 'send', (['message', '"""Invalid dice configuration! The config must start with xdy (optionally +/- xdy), have zero or more +/- xdy, and optionally end with +/- a modifier."""'], {'reaction': '"""x"""'}), "(message,\n 'Invalid dice configuration! 
The config must start with xdy (optionally +/- xdy), have zero or more +/- xdy, and optionally end with +/- a modifier.'\n , reaction='x')\n", (12388, 12571), False, 'from utils.discordbot import BotClient, send\n'), ((13896, 13944), 'utils.discordbot.send', 'send', (['message', '"""Disconnected!"""'], {'reaction': '"""check"""'}), "(message, 'Disconnected!', reaction='check')\n", (13900, 13944), False, 'from utils.discordbot import BotClient, send\n'), ((13965, 14037), 'utils.discordbot.send', 'send', (['message', '"""I am not connected to any voice channels!"""'], {'reaction': '"""x"""'}), "(message, 'I am not connected to any voice channels!', reaction='x')\n", (13969, 14037), False, 'from utils.discordbot import BotClient, send\n'), ((3577, 3597), 'random.choice', 'random.choice', (['words'], {}), '(words)\n', (3590, 3597), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((3632, 3650), 'random.shuffle', 'random.shuffle', (['cl'], {}), '(cl)\n', (3646, 3650), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((6651, 6662), 'utils.datautils.save_data', 'save_data', ([], {}), '()\n', (6660, 6662), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((7793, 7863), 'utils.discordbot.send', 'send', (['message', '"""That word is already in the dictionary!"""'], {'reaction': '"""x"""'}), "(message, 'That word is already in the dictionary!', reaction='x')\n", (7797, 7863), False, 'from utils.discordbot import BotClient, send\n'), ((8026, 8101), 'utils.discordbot.send', 'send', (['message', 'f"""Added \'{command[2]}\' to the dictionary!"""'], {'reaction': '"""check"""'}), '(message, f"Added \'{command[2]}\' to the dictionary!", reaction=\'check\')\n', (8030, 8101), False, 'from utils.discordbot import BotClient, send\n'), ((886, 909), 'utils.datautils.default', 'default', (['"""anagrams"""', '{}'], {}), "('anagrams', {})\n", (893, 909), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((1433, 1444), 'utils.datautils.save_data', 'save_data', ([], {}), '()\n', (1442, 1444), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((1471, 1545), 'utils.discordbot.send', 'send', (['message', '"""There is no anagram puzzle in this channel!"""'], {'reaction': '"""x"""'}), "(message, 'There is no anagram puzzle in this channel!', reaction='x')\n", (1475, 1545), False, 'from utils.discordbot import BotClient, send\n'), ((1623, 1646), 'utils.datautils.default', 'default', (['"""anagrams"""', '{}'], {}), "('anagrams', {})\n", (1630, 1646), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((3534, 3557), 'utils.datautils.default', 'default', (['"""anagrams"""', '{}'], {}), "('anagrams', {})\n", (3541, 3557), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((3763, 3774), 'time.time', 'time.time', ([], {}), '()\n', (3772, 3774), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((3790, 3801), 'utils.datautils.save_data', 'save_data', ([], {}), '()\n', (3799, 3801), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((6202, 6208), 'utils.datautils.data', 'data', ([], {}), '()\n', (6206, 6208), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((7032, 7038), 'utils.datautils.data', 'data', ([], {}), '()\n', (7036, 7038), False, 'from utils.datautils import config, 
data, default, save_data, set_client\n'), ((7266, 7272), 'utils.datautils.data', 'data', ([], {}), '()\n', (7270, 7272), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((11673, 11693), 'random.randint', 'random.randint', (['(1)', 'y'], {}), '(1, y)\n', (11687, 11693), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((13368, 13423), 'discord.FFmpegOpusAudio.from_probe', 'discord.FFmpegOpusAudio.from_probe', (['f"""{command[0]}.mp3"""'], {}), "(f'{command[0]}.mp3')\n", (13402, 13423), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((1054, 1151), 'utils.discordbot.send', 'send', (['message', 'f"""Anagram puzzle ended! The correct answer was: \'{actual}\'."""'], {'reaction': '"""check"""'}), '(message, f"Anagram puzzle ended! The correct answer was: \'{actual}\'.",\n reaction=\'check\')\n', (1058, 1151), False, 'from utils.discordbot import BotClient, send\n'), ((1264, 1361), 'utils.discordbot.send', 'send', (['message', 'f"""Anagram puzzle ended! The correct answers were: {answers}"""'], {'reaction': '"""check"""'}), "(message, f'Anagram puzzle ended! The correct answers were: {answers}',\n reaction='check')\n", (1268, 1361), False, 'from utils.discordbot import BotClient, send\n'), ((2007, 2034), 'utils.datautils.default', 'default', (['"""puzzlepoints"""', '{}'], {}), "('puzzlepoints', {})\n", (2014, 2034), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((2480, 2491), 'utils.datautils.save_data', 'save_data', ([], {}), '()\n', (2489, 2491), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((2727, 2867), 'utils.discordbot.send', 'send', (['message', 'f"""Congratulations to {message.author.mention} for winning the anagram puzzle! (+{points}{bd}){ad}"""'], {'reaction': '"""check"""'}), "(message,\n f'Congratulations to {message.author.mention} for winning the anagram puzzle! 
(+{points}{bd}){ad}'\n , reaction='check')\n", (2731, 2867), False, 'from utils.discordbot import BotClient, send\n'), ((2963, 2974), 'time.time', 'time.time', ([], {}), '()\n', (2972, 2974), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((2992, 3003), 'utils.datautils.save_data', 'save_data', ([], {}), '()\n', (3001, 3003), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((3081, 3092), 'utils.datautils.save_data', 'save_data', ([], {}), '()\n', (3090, 3092), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((3170, 3197), 'utils.datautils.default', 'default', (['"""anagramninja"""', '{}'], {}), "('anagramninja', {})\n", (3177, 3197), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((3417, 3475), 'utils.discordbot.send', 'send', (['message', 'f"""{message.author.mention} L"""'], {'reaction': '"""x"""'}), "(message, f'{message.author.mention} L', reaction='x')\n", (3421, 3475), False, 'from utils.discordbot import BotClient, send\n'), ((6560, 6566), 'utils.datautils.data', 'data', ([], {}), '()\n', (6564, 6566), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((945, 951), 'utils.datautils.data', 'data', ([], {}), '()\n', (949, 951), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((1379, 1385), 'utils.datautils.data', 'data', ([], {}), '()\n', (1383, 1385), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((1693, 1699), 'utils.datautils.data', 'data', ([], {}), '()\n', (1697, 1699), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((1905, 1916), 'time.time', 'time.time', ([], {}), '()\n', (1914, 1916), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((1959, 1983), 'math.floor', 'math.floor', (['(points * 0.5)'], {}), '(points * 0.5)\n', (1969, 1983), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((2217, 2228), 'utils.datautils.save_data', 'save_data', ([], {}), '()\n', (2226, 2228), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((2377, 2388), 'utils.datautils.save_data', 'save_data', ([], {}), '()\n', (2386, 2388), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((2903, 2930), 'utils.datautils.default', 'default', (['"""anagramninja"""', '{}'], {}), "('anagramninja', {})\n", (2910, 2930), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((3235, 3241), 'utils.datautils.data', 'data', ([], {}), '()\n', (3239, 3241), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((3290, 3301), 'time.time', 'time.time', ([], {}), '()\n', (3299, 3301), False, 'import asyncio, base64, discord, math, praw, random, re, requests, time\n'), ((3698, 3704), 'utils.datautils.data', 'data', ([], {}), '()\n', (3702, 3704), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((3951, 3957), 'utils.datautils.data', 'data', ([], {}), '()\n', (3955, 3957), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((9030, 9036), 'utils.datautils.data', 'data', ([], {}), '()\n', (9034, 9036), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((2121, 2127), 'utils.datautils.data', 'data', ([], {}), '()\n', 
(2125, 2127), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((3025, 3031), 'utils.datautils.data', 'data', ([], {}), '()\n', (3029, 3031), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((2165, 2171), 'utils.datautils.data', 'data', ([], {}), '()\n', (2169, 2171), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((2266, 2272), 'utils.datautils.data', 'data', ([], {}), '()\n', (2270, 2272), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((2406, 2412), 'utils.datautils.data', 'data', ([], {}), '()\n', (2410, 2412), False, 'from utils.datautils import config, data, default, save_data, set_client\n'), ((2315, 2321), 'utils.datautils.data', 'data', ([], {}), '()\n', (2319, 2321), False, 'from utils.datautils import config, data, default, save_data, set_client\n')] |
"""Exercício Python 21: Faça um programa em Python que abra e reproduza o áudio de um arquivo MP3. """
import pygame
pygame.init()
pygame.mixer.music.load('/home/kadu/Documentos/GitHub/Curso-de-Python/Modulo-01/Usando modulos do Python/musga.mp3')
pygame.mixer.music.play()
pygame.event.wait()
| [
"pygame.mixer.music.play",
"pygame.event.wait",
"pygame.mixer.music.load",
"pygame.init"
] | [((119, 132), 'pygame.init', 'pygame.init', ([], {}), '()\n', (130, 132), False, 'import pygame\n'), ((133, 259), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['"""/home/kadu/Documentos/GitHub/Curso-de-Python/Modulo-01/Usando modulos do Python/musga.mp3"""'], {}), "(\n '/home/kadu/Documentos/GitHub/Curso-de-Python/Modulo-01/Usando modulos do Python/musga.mp3'\n )\n", (156, 259), False, 'import pygame\n'), ((250, 275), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (273, 275), False, 'import pygame\n'), ((276, 295), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (293, 295), False, 'import pygame\n')] |
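A note on the record above: `pygame.event.wait()` blocks until any event arrives, which in a windowless script does not reliably wait for the track to end. A minimal sketch of the more robust pattern, polling the mixer (the MP3 path is a placeholder):

import pygame

pygame.init()
pygame.mixer.music.load("example.mp3")  # placeholder path to any local MP3
pygame.mixer.music.play()
clock = pygame.time.Clock()
while pygame.mixer.music.get_busy():  # True while the track is still playing
    clock.tick(10)  # poll ~10 times per second without spinning the CPU
pygame.quit()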
import os
from betamax import Betamax
import pytest
import xbox
from xbox.tests import TestBase
def track_calls(func):
def inner(*a, **kw):
inner.called = True
inner.calls.append((a, kw))
return func(*a, **kw)
    # inner is a fresh function object here, so initialise the attributes directly.
    inner.called = False
    inner.calls = []
return inner
class TestDecorators(TestBase):
def test_authenticated_decorator_success(self):
xbox.client.authenticate = track_calls(xbox.client.authenticate)
assert not xbox.client.authenticate.called
with Betamax(xbox.client.session) as vcr:
match_on = ['uri', 'method', 'headers', 'body']
os.environ['MS_LOGIN'] = '<EMAIL>'
os.environ['MS_PASSWD'] = 'password'
vcr.use_cassette(
'authenticate_decorator_success',
match_on=match_on,
record_mode='never',
)
xbox.GamerProfile.from_gamertag('JoeAlcorn')
assert xbox.client.authenticated
assert xbox.client.login == '<EMAIL>'
assert xbox.client.authenticate.called
xbox.GamerProfile.from_gamertag('JoeAlcorn')
assert len(xbox.client.authenticate.calls) == 1
def test_authenticated_decorator_failure(self):
xbox.client.authenticate = track_calls(xbox.client.authenticate)
assert not xbox.client.authenticate.called
os.environ['MS_LOGIN'] = '<EMAIL>'
with pytest.raises(xbox.exceptions.AuthenticationException):
xbox.GamerProfile.from_gamertag('JoeAlcorn')
with Betamax(xbox.client.session) as vcr:
match_on = ['uri', 'method', 'headers', 'body']
vcr.use_cassette(
'authenticate_decorator_failure',
match_on=match_on,
record_mode='never',
)
os.environ['MS_PASSWD'] = 'password'
with pytest.raises(xbox.exceptions.AuthenticationException):
xbox.GamerProfile.from_gamertag('JoeAlcorn')
assert len(xbox.client.authenticate.calls) == 2
| [
"xbox.GamerProfile.from_gamertag",
"pytest.raises",
"betamax.Betamax"
] | [((614, 642), 'betamax.Betamax', 'Betamax', (['xbox.client.session'], {}), '(xbox.client.session)\n', (621, 642), False, 'from betamax import Betamax\n'), ((986, 1030), 'xbox.GamerProfile.from_gamertag', 'xbox.GamerProfile.from_gamertag', (['"""JoeAlcorn"""'], {}), "('JoeAlcorn')\n", (1017, 1030), False, 'import xbox\n'), ((1191, 1235), 'xbox.GamerProfile.from_gamertag', 'xbox.GamerProfile.from_gamertag', (['"""JoeAlcorn"""'], {}), "('JoeAlcorn')\n", (1222, 1235), False, 'import xbox\n'), ((1531, 1585), 'pytest.raises', 'pytest.raises', (['xbox.exceptions.AuthenticationException'], {}), '(xbox.exceptions.AuthenticationException)\n', (1544, 1585), False, 'import pytest\n'), ((1599, 1643), 'xbox.GamerProfile.from_gamertag', 'xbox.GamerProfile.from_gamertag', (['"""JoeAlcorn"""'], {}), "('JoeAlcorn')\n", (1630, 1643), False, 'import xbox\n'), ((1658, 1686), 'betamax.Betamax', 'Betamax', (['xbox.client.session'], {}), '(xbox.client.session)\n', (1665, 1686), False, 'from betamax import Betamax\n'), ((1989, 2043), 'pytest.raises', 'pytest.raises', (['xbox.exceptions.AuthenticationException'], {}), '(xbox.exceptions.AuthenticationException)\n', (2002, 2043), False, 'import pytest\n'), ((2061, 2105), 'xbox.GamerProfile.from_gamertag', 'xbox.GamerProfile.from_gamertag', (['"""JoeAlcorn"""'], {}), "('JoeAlcorn')\n", (2092, 2105), False, 'import xbox\n')] |
#!/usr/bin/env python
import sys
from argparse import ArgumentParser
class UnrecognisedCommandError(Exception):
pass
class UnrecognisedSubcommandError(Exception):
pass
def subcommand_exists(object, parser, subcommand, command_type="Subcommand"):
try:
if subcommand:
getattr(object, subcommand)
except AttributeError as error:
if not object.trace:
parser.error(f"{command_type} not found: {subcommand}")
raise UnrecognisedSubcommandError from error
class ExtendedHelpArgumentParser(ArgumentParser):
def error(self, message):
sys.stderr.write("Error: %s\n\n" % message)
self.print_help()
        sys.exit(2)
| [
"sys.stderr.write"
] | [((609, 652), 'sys.stderr.write', 'sys.stderr.write', (["('Error: %s\\n\\n' % message)"], {}), "('Error: %s\\n\\n' % message)\n", (625, 652), False, 'import sys\n')] |
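To illustrate the subclassed `error()` above: on a bad argument the parser prints the message and the full help text, then exits with status 2. A small sketch, assuming `ExtendedHelpArgumentParser` from the record is in scope:

parser = ExtendedHelpArgumentParser(prog="tool")
parser.add_argument("--count", type=int)
try:
    parser.parse_args(["--count", "not-a-number"])
except SystemExit as exc:
    assert exc.code == 2  # error() printed the help text and exited with 2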
from response import ResponseObj
import tornado.web
import tornado.gen
import tornado.ioloop
import tornado.concurrent
import tornado.httpclient
import logging
import asyncio
from easyspid.handlers.easyspidhandler import easyspidHandler
import globalsObj
import easyspid.lib.easyspid
from easyspid.lib.utils import Saml2_Settings, waitFuture
import commonlib
class buildMetadatahandler(easyspidHandler):
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
#get
async def get(self, sp):
x_real_ip = self.request.headers.get(globalsObj.easyspid_originIP_header)
self.remote_ip = x_real_ip or self.request.remote_ip
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.set_default_headers()
#settings
sp_settings = await easyspid.lib.easyspid.spSettings(sp, close = True)
response_obj = await asyncio.get_event_loop().run_in_executor(self.executor, self.buildMetadata, sp_settings)
asyncio.ensure_future(self.writeLog(response_obj), loop = globalsObj.ioloop)
super().writeResponse(response_obj)
@staticmethod
@commonlib.inner_log
def makeMetadata(sp_settings):
if sp_settings['error'] == 0 and sp_settings['result'] is not None:
spSettings = Saml2_Settings(sp_settings['result'])
metadata = spSettings.get_sp_metadata()
metadata = str(metadata,'utf-8')
response_obj = ResponseObj(httpcode=200)
response_obj.setError('200')
response_obj.setResult(metadata = metadata)
elif sp_settings['error'] == 0 and sp_settings['result'] is None:
response_obj = ResponseObj(httpcode=404)
response_obj.setError('easyspid101')
elif sp_settings['error'] > 0:
response_obj = ResponseObj(httpcode=500, debugMessage=sp_settings['result'])
response_obj.setError("easyspid105")
return response_obj
@commonlib.inner_log
def buildMetadata(self, sp_settings, dbSave = True):
try:
makeMetadata = self.makeMetadata(sp_settings)
if makeMetadata.error.code == '200':
metadata = makeMetadata.result.metadata
sp = sp_settings['result']['sp']['cod_sp']
## insert into DB
if dbSave:
task = asyncio.run_coroutine_threadsafe(self.dbobjSaml.execute_statment("write_assertion('%s', '%s', %s, '%s')" %
(metadata.replace("'", "''"), sp, 'NULL', self.remote_ip)), globalsObj.ioloop)
wrtMetada = waitFuture(task)
if wrtMetada['error'] == 0:
task = asyncio.run_coroutine_threadsafe(self.dbobjJwt.execute_statment("get_token_by_cod('%s')" %
(wrtMetada['result'][0]['cod_token'])), globalsObj.ioloop)
jwt = waitFuture(task)
response_obj = ResponseObj(httpcode=200, ID = wrtMetada['result'][0]['ID_assertion'])
response_obj.setError('200')
response_obj.setResult(metadata = metadata, jwt=jwt['result'][0]['token'],
idassertion=wrtMetada['result'][0]['ID_assertion'])
# insert metadata in saml.metadata table
task = asyncio.run_coroutine_threadsafe(self.dbobjSaml.execute_query(self.dbobjSaml.query['insert_metadata']['sql'],
sp+"_metadata", metadata, sp), globalsObj.ioloop)
insert_metadata = waitFuture(task)
else:
response_obj = ResponseObj(httpcode=500, debugMessage=wrtMetada['result'])
response_obj.setError("easyspid105")
logging.getLogger(type(self).__module__+"."+type(self).__qualname__).error('Exception',exc_info=True)
else:
response_obj = ResponseObj(httpcode=200)
response_obj.setError('200')
response_obj.setResult(metadata = metadata, jwt="")
else:
response_obj = makeMetadata
except tornado.web.MissingArgumentError as error:
response_obj = ResponseObj(debugMessage=error.log_message, httpcode=error.status_code,
devMessage=error.log_message)
response_obj.setError(str(error.status_code))
logging.getLogger(type(self).__module__+"."+type(self).__qualname__).error('%s'% error,exc_info=True)
except Exception as inst:
response_obj = ResponseObj(httpcode=500)
response_obj.setError('500')
logging.getLogger(type(self).__module__+"."+type(self).__qualname__).error('Exception',exc_info=True)
return response_obj
| [
"easyspid.lib.utils.Saml2_Settings",
"response.ResponseObj",
"asyncio.get_event_loop",
"easyspid.lib.utils.waitFuture"
] | [((1304, 1341), 'easyspid.lib.utils.Saml2_Settings', 'Saml2_Settings', (["sp_settings['result']"], {}), "(sp_settings['result'])\n", (1318, 1341), False, 'from easyspid.lib.utils import Saml2_Settings, waitFuture\n'), ((1467, 1492), 'response.ResponseObj', 'ResponseObj', ([], {'httpcode': '(200)'}), '(httpcode=200)\n', (1478, 1492), False, 'from response import ResponseObj\n'), ((1692, 1717), 'response.ResponseObj', 'ResponseObj', ([], {'httpcode': '(404)'}), '(httpcode=404)\n', (1703, 1717), False, 'from response import ResponseObj\n'), ((4325, 4430), 'response.ResponseObj', 'ResponseObj', ([], {'debugMessage': 'error.log_message', 'httpcode': 'error.status_code', 'devMessage': 'error.log_message'}), '(debugMessage=error.log_message, httpcode=error.status_code,\n devMessage=error.log_message)\n', (4336, 4430), False, 'from response import ResponseObj\n'), ((4700, 4725), 'response.ResponseObj', 'ResponseObj', ([], {'httpcode': '(500)'}), '(httpcode=500)\n', (4711, 4725), False, 'from response import ResponseObj\n'), ((904, 928), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (926, 928), False, 'import asyncio\n'), ((1834, 1895), 'response.ResponseObj', 'ResponseObj', ([], {'httpcode': '(500)', 'debugMessage': "sp_settings['result']"}), "(httpcode=500, debugMessage=sp_settings['result'])\n", (1845, 1895), False, 'from response import ResponseObj\n'), ((2625, 2641), 'easyspid.lib.utils.waitFuture', 'waitFuture', (['task'], {}), '(task)\n', (2635, 2641), False, 'from easyspid.lib.utils import Saml2_Settings, waitFuture\n'), ((4028, 4053), 'response.ResponseObj', 'ResponseObj', ([], {'httpcode': '(200)'}), '(httpcode=200)\n', (4039, 4053), False, 'from response import ResponseObj\n'), ((2931, 2947), 'easyspid.lib.utils.waitFuture', 'waitFuture', (['task'], {}), '(task)\n', (2941, 2947), False, 'from easyspid.lib.utils import Saml2_Settings, waitFuture\n'), ((2988, 3056), 'response.ResponseObj', 'ResponseObj', ([], {'httpcode': '(200)', 'ID': "wrtMetada['result'][0]['ID_assertion']"}), "(httpcode=200, ID=wrtMetada['result'][0]['ID_assertion'])\n", (2999, 3056), False, 'from response import ResponseObj\n'), ((3641, 3657), 'easyspid.lib.utils.waitFuture', 'waitFuture', (['task'], {}), '(task)\n', (3651, 3657), False, 'from easyspid.lib.utils import Saml2_Settings, waitFuture\n'), ((3724, 3783), 'response.ResponseObj', 'ResponseObj', ([], {'httpcode': '(500)', 'debugMessage': "wrtMetada['result']"}), "(httpcode=500, debugMessage=wrtMetada['result'])\n", (3735, 3783), False, 'from response import ResponseObj\n')] |
# -*- encoding: utf-8 -*-
"""
@File : md5.py
@Time : 2020/6/3 11:15
@Author : chise
@Email : <EMAIL>
@Software: PyCharm
@info :
"""
import hashlib
from . import app_secret
def get_sign(data_dict: dict):
    '''
    Generate the signature for a payload.
    '''
params_list = sorted(data_dict.items(), key=lambda e: e[0], reverse=False)
params_str = "&".join(u"{}={}".format(k, v) for k, v in params_list ) + '&' + app_secret
md5 = hashlib.md5()
md5.update(params_str.encode('utf-8'))
sign = md5.hexdigest().upper()
return sign
def check_sign(data_dict: dict):
    """
    Check whether the signature of a payload is valid.
    :param data_dict:
    :return:
    """
    sign = data_dict.pop('sign', None)
    if not sign:
        return False
    if sign != get_sign(data_dict):
        return False
    return True
def add_sign(data_dict: dict):
    """
    Add a sign field to the payload.
    :param data_dict:
    :return:
    """
    sign = get_sign(data_dict)
    data_dict['sign'] = sign
    return data_dict
| [
"hashlib.md5"
] | [((424, 437), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (435, 437), False, 'import hashlib\n')] |
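The signing scheme above is: sort the keys, join them as `k=v` pairs with `&`, append the shared secret, and take the uppercase MD5 hex digest. A self-contained round-trip sketch with a stand-in secret (the real `app_secret` lives in the package `__init__` and is not shown):

import hashlib

app_secret = "demo-secret"  # placeholder, not the real value

def get_sign(data_dict: dict) -> str:
    params = sorted(data_dict.items())
    params_str = "&".join(f"{k}={v}" for k, v in params) + "&" + app_secret
    return hashlib.md5(params_str.encode("utf-8")).hexdigest().upper()

payload = {"b": 2, "a": 1}
payload["sign"] = get_sign(payload)   # add_sign equivalent
sign = payload.pop("sign")
assert sign == get_sign(payload)      # check_sign equivalent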
import logging
import json
from deltacat.storage import PartitionLocator
from deltacat.compute.compactor import RoundCompletionInfo
from deltacat import logs
logger = logs.configure_deltacat_logger(logging.getLogger(__name__))
def get_round_completion_file_s3_url(
bucket: str,
source_partition_locator: PartitionLocator,
pki_root_path: str) -> str:
base_url = source_partition_locator.path(f"s3://{bucket}")
return f"{base_url}/{pki_root_path}.json"
def read_round_completion_file(
bucket: str,
source_partition_locator: PartitionLocator,
primary_key_index_root_path: str) -> RoundCompletionInfo:
from deltacat.aws import s3u as s3_utils
round_completion_file_url = get_round_completion_file_s3_url(
bucket,
source_partition_locator,
primary_key_index_root_path,
)
logger.info(
f"reading round completion file from: {round_completion_file_url}")
round_completion_info = None
result = s3_utils.download(round_completion_file_url, False)
if result:
json_str = result["Body"].read().decode("utf-8")
round_completion_info = RoundCompletionInfo(json.loads(json_str))
logger.info(f"read round completion info: {round_completion_info}")
return round_completion_info
def write_round_completion_file(
bucket: str,
source_partition_locator: PartitionLocator,
primary_key_index_root_path: str,
round_completion_info: RoundCompletionInfo):
from deltacat.aws import s3u as s3_utils
logger.info(
f"writing round completion file contents: {round_completion_info}")
round_completion_file_s3_url = get_round_completion_file_s3_url(
bucket,
source_partition_locator,
primary_key_index_root_path,
)
logger.info(
f"writing round completion file to: {round_completion_file_s3_url}")
s3_utils.upload(
round_completion_file_s3_url,
str(json.dumps(round_completion_info))
)
logger.info(
f"round completion file written to: {round_completion_file_s3_url}")
| [
"logging.getLogger",
"deltacat.aws.s3u.download",
"json.dumps",
"json.loads"
] | [((200, 227), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (217, 227), False, 'import logging\n'), ((1005, 1056), 'deltacat.aws.s3u.download', 's3_utils.download', (['round_completion_file_url', '(False)'], {}), '(round_completion_file_url, False)\n', (1022, 1056), True, 'from deltacat.aws import s3u as s3_utils\n'), ((1181, 1201), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (1191, 1201), False, 'import json\n'), ((1981, 2014), 'json.dumps', 'json.dumps', (['round_completion_info'], {}), '(round_completion_info)\n', (1991, 2014), False, 'import json\n')] |
import json
from abc import ABCMeta, abstractmethod
class Hyperparams(metaclass=ABCMeta):
"""Abstract interface for defining hyperparameters"""
def __init__(self):
self.validate()
if 'hparam_type' in self.__dict__:
raise ValueError("'hparam_type' is not allowed to be used as "
"a member variable name since it is used "
"to save models to a file.")
def __repr__(self):
return f'{self.__class__.__name__}\n' + \
''.join(f'{attr}: {value}\n' for attr, value in self.__dict__.items())
@abstractmethod
def validate(self):
raise NotImplementedError('Must implement validate().')
def save(self, path):
"""Write HyperParams object to disk."""
        with open(path, 'w') as file:
            # Copy the attribute dict so that saving does not permanently
            # attach 'hparam_type' to the live object.
            payload = dict(self.__dict__)
            payload['hparam_type'] = self.__class__.__name__
            json.dump(payload, file)
def load(self, path):
"""Load HyperParams object from disk."""
with open(path, 'r') as file:
hparams = json.load(file)
hparams.pop('hparam_type')
self.__dict__ = hparams
self.validate()
return self
| [
"json.load",
"json.dump"
] | [((930, 960), 'json.dump', 'json.dump', (['self.__dict__', 'file'], {}), '(self.__dict__, file)\n', (939, 960), False, 'import json\n'), ((1097, 1112), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1106, 1112), False, 'import json\n')] |
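A minimal concrete subclass sketch of the `Hyperparams` base above, showing the save/load round trip (the class name, attribute names, and file name are illustrative):

class TrainingHparams(Hyperparams):
    def __init__(self, lr=0.001, batch_size=32):
        self.lr = lr
        self.batch_size = batch_size
        super().__init__()  # runs validate() and the 'hparam_type' name check

    def validate(self):
        assert self.lr > 0 and self.batch_size > 0

hp = TrainingHparams(lr=0.01)
hp.save("hparams.json")
restored = TrainingHparams().load("hparams.json")
assert restored.lr == 0.01 and restored.batch_size == 32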
import pytest
from robot_server.service.session import models
def test_command_type_validation_jog():
c = models.BasicSessionCommand(**{'command': models.CalibrationCommand.jog,
'data': {'vector': [1, 2, 3]}})
assert c.data == models.JogPosition(vector=(1, 2, 3,))
def test_command_type_validation_jog_fail():
with pytest.raises(ValueError):
models.BasicSessionCommand(**{'command': models.CalibrationCommand.jog,
'data': {}})
def test_command_type_empty():
"""Test that we create command correctly for
commands that have no added data."""
c = models.BasicSessionCommand(
**{'command': models.CalibrationCommand.load_labware,
'data': {}})
assert c.data == models.EmptyModel()
| [
"robot_server.service.session.models.BasicSessionCommand",
"robot_server.service.session.models.EmptyModel",
"robot_server.service.session.models.JogPosition",
"pytest.raises"
] | [((112, 219), 'robot_server.service.session.models.BasicSessionCommand', 'models.BasicSessionCommand', ([], {}), "(**{'command': models.CalibrationCommand.jog,\n 'data': {'vector': [1, 2, 3]}})\n", (138, 219), False, 'from robot_server.service.session import models\n'), ((659, 757), 'robot_server.service.session.models.BasicSessionCommand', 'models.BasicSessionCommand', ([], {}), "(**{'command': models.CalibrationCommand.\n load_labware, 'data': {}})\n", (685, 757), False, 'from robot_server.service.session import models\n'), ((275, 311), 'robot_server.service.session.models.JogPosition', 'models.JogPosition', ([], {'vector': '(1, 2, 3)'}), '(vector=(1, 2, 3))\n', (293, 311), False, 'from robot_server.service.session import models\n'), ((369, 394), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (382, 394), False, 'import pytest\n'), ((404, 492), 'robot_server.service.session.models.BasicSessionCommand', 'models.BasicSessionCommand', ([], {}), "(**{'command': models.CalibrationCommand.jog,\n 'data': {}})\n", (430, 492), False, 'from robot_server.service.session import models\n'), ((794, 813), 'robot_server.service.session.models.EmptyModel', 'models.EmptyModel', ([], {}), '()\n', (811, 813), False, 'from robot_server.service.session import models\n')] |
import pytest
import uproot
from collections import namedtuple
@pytest.fixture
def infile():
filename = "tests/data/CMS_HEP_tutorial_ww.root"
return uproot.open(filename)["events"]
FakeEventRange = namedtuple("FakeEventRange", "start_entry stop_entry entries_in_block")
@pytest.fixture
def event_range():
return FakeEventRange(100, 200, 100)
@pytest.fixture
def full_event_range():
return FakeEventRange(0, 4580, 0)
@pytest.fixture
def wrapped_tree(infile, event_range):
import fast_carpenter.tree_wrapper as tree_w
tree = tree_w.WrappedTree(infile, event_range)
return tree
class Namespace():
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class FakeBEEvent(object):
def __init__(self, tree, eventtype):
self.tree = tree
self.config = Namespace(dataset=Namespace(eventtype=eventtype))
| [
"fast_carpenter.tree_wrapper.WrappedTree",
"collections.namedtuple",
"uproot.open"
] | [((210, 281), 'collections.namedtuple', 'namedtuple', (['"""FakeEventRange"""', '"""start_entry stop_entry entries_in_block"""'], {}), "('FakeEventRange', 'start_entry stop_entry entries_in_block')\n", (220, 281), False, 'from collections import namedtuple\n'), ((557, 596), 'fast_carpenter.tree_wrapper.WrappedTree', 'tree_w.WrappedTree', (['infile', 'event_range'], {}), '(infile, event_range)\n', (575, 596), True, 'import fast_carpenter.tree_wrapper as tree_w\n'), ((159, 180), 'uproot.open', 'uproot.open', (['filename'], {}), '(filename)\n', (170, 180), False, 'import uproot\n')] |
from bs4 import BeautifulSoup
import requests
import pymysql.cursors
import unittest
from validate_email import validate_email
class UnitTestsDataMinerYellowPagesRussia(unittest.TestCase):
def test_extract_one_email(self):
url = "https://msk.yp.ru/detail/id/mamaison_all_suites_spa_hotel_pokrovka_1152089/"
# Request the content of a page from the url
html = requests.get(url)
# Parse the content of html_doc
soup = BeautifulSoup(html.content, "html.parser")
if soup.find("a", {"data-statclick-action": "is_web"}) is not None:
all_is_web = soup.find_all("a", {"data-statclick-action": "is_web"})
for is_web in all_is_web:
email = "info@" + is_web.get("data-statclick-url")\
.replace("www.", "")\
.replace("http://", "")\
.replace("https://", "")\
try:
if validate_email(email, verify=True) != False:
print("The email : " + email + " does exist.")
else:
print("The email : " + email + " doesn't exist.")
except:
print("An error with the email : " + email)
else:
print("no email business")
def test_extract_emails_from_all_page_of_results_for_one_activity_and_capital(self):
try:
activity = "hotel"
city = "https://msk.yp.ru/search/text/"
url_search = city + activity
html_search = requests.get(url_search)
soup_search = BeautifulSoup(html_search.content, "html.parser")
number_of_pages = 0
try:
if soup_search \
.select_one("#utm") \
.find("div", {"class": "row companies-container"}) is not None:
try:
if soup_search \
.select_one("#utm") \
.find("div", {"class": "row companies-container"}) \
.find("div", {"class": "mdl-cell mdl-cell--12-col"}) is not None:
number_of_pages_with_coma = int(soup_search
.select_one("#utm")
.find("div", {"class": "row companies-container"})
.find("div", {"class": "mdl-cell mdl-cell--12-col"})
.text
.replace("\t", "")
.replace("\n", "")
.replace(" ", "")
.replace("Нашлось", "")
.replace("компании", "")
) / 20
if int(str(number_of_pages_with_coma).split(".")[1][:1]) < 5:
number_of_pages += round(number_of_pages_with_coma) + 1
print("number_of_pages : " + str(number_of_pages))
elif int(str(number_of_pages_with_coma).split(".")[1][:1]) >= 5:
number_of_pages += round(number_of_pages_with_coma)
print("number_of_pages : " + str(number_of_pages))
else:
print("no div mdl-cell mdl-cell--12-col")
except Exception as e:
print("error mdl-cell mdl-cell--12-col : " + str(e))
else:
print("no div row companies-container")
except Exception as e:
print("error row companies-container : " + str(e))
i_1 = 0
try:
for i in range(48, number_of_pages+1):
url_one_page_of_results = city + activity + "/page/" + str(i) + "/"
print(url_one_page_of_results)
html_search = requests.get(url_one_page_of_results)
soup_one_page_of_results = BeautifulSoup(html_search.content, "html.parser")
if soup_one_page_of_results\
.select_one("#companies") is not None:
if soup_one_page_of_results\
.select_one("#companies")\
.find("div", {"class": "row"}) is not None:
all_row = soup_one_page_of_results.select_one("#companies").find_all("div", {"class": "row"})
for j in range(0, len(all_row)):
i_1 += 1
if all_row[j].find("div", {"class": "company__head"}) is not None:
if all_row[j]\
.find("div", {"class": "company__head"})\
.find("a", {"class": "testtjlw"}) is not None:
url_result = "https://msk.yp.ru" + all_row[j]\
.find("div", {"class": "company__head"})\
.find("a", {"class": "testtjlw"})\
.get("href")
print(url_result)
# Request the content of a page from the url
html = requests.get(url_result)
# Parse the content of html_doc
soup = BeautifulSoup(html.content, "html.parser")
if soup.find("a", {"data-statclick-action": "is_web"}) is not None:
all_is_web = soup.find_all("a", {"data-statclick-action": "is_web"})
for is_web in all_is_web:
email = "info@" + is_web.get("data-statclick-url") \
.replace("www.", "") \
.replace("http://", "") \
.replace("https://", "")\
.split("/")[0]
try:
if validate_email(email, verify=True) != False:
print(str(i_1) + " The email : " + email + " does exist.")
else:
print(str(i_1) + " The email : " + email + " doesn't exist.")
except:
print(str(i_1) + " An error with the email : " + email)
else:
print(str(i_1) + " no email business")
else:
print(str(i_1) + " no a testtjlw")
else:
print(str(i_1) + " no div company__head")
else:
print("no div row")
else:
print("no id companies")
except Exception as e:
print("error page : " + str(e))
except Exception as e:
print("url_search : " + str(e))
def test_extract_emails_from_all_page_of_results_for_all_activities_and_capitals(self):
activites = [
# {'id': '1', 'url': 'временное+агентство'},
# {'id': '2', 'url': 'агентство+недвижимости'},
# {'id': '3', 'url': 'кадровое+агентство'},
# {'id': '4', 'url': 'software'},
# {'id': '5', 'url': 'hotel'},
# {'id': '6', 'url': 'социальный+арендодатель'},
# {'id': '7', 'url': 'клининговая+компания'},
# {'id': '8', 'url': 'ассоциация'},
# {'id': '9', 'url': 'финансовое+учреждение'},
# {'id': '10', 'url': 'ресторан'},
# {'id': '11', 'url': 'строительная+компания'},
# {'id': '12', 'url': 'парикмахер'},
# {'id': '13', 'url': 'флорист'},
# {'id': '14', 'url': 'слесарь'},
# {'id': '15', 'url': 'пекарня'},
# {'id': '16', 'url': 'страхование'},
# {'id': '17', 'url': 'аптека'},
# {'id': '18', 'url': 'транспортная+компания'},
# {'id': '19', 'url': 'электричество'},
# {'id': '20', 'url': 'водопровод'},
# {'id': '21', 'url': 'охранная+компания'},
# {'id': '22', 'url': 'адвокат'},
# {'id': '23', 'url': 'банк'},
# {'id': '24', 'url': 'гараж'},
# {'id': '25', 'url': 'дантист'},
# {'id': '26', 'url': 'врач'},
# {'id': '27', 'url': 'учет'},
# {'id': '28', 'url': 'супермаркет'},
# {'id': '29', 'url': 'нотариус'},
# {'id': '30', 'url': 'ювелир'},
# {'id': '31', 'url': 'модельер'},
# {'id': '32', 'url': 'мясник'},
# {'id': '33', 'url': 'книжный+магазин'},
# {'id': '34', 'url': 'архитектор'}
{'id': '36', 'url': 'цемента'},
{'id': '37', 'url': 'отопление'},
{'id': '38', 'url': 'лодке'},
{'id': '39', 'url': 'холодной'},
{'id': '41', 'url': 'стали'},
{'id': '42', 'url': 'химических'},
{'id': '43', 'url': 'газа'},
{'id': '44', 'url': 'покупка+золота'}
]
capitales_du_monde = [
{'id': '453', 'nom': 'https://msk.yp.ru/search/text/'}, # Moscow
{'id': '454', 'nom': 'https://www.yp.ru/search/text/'}, # St Petersburg
{'id': '455', 'nom': 'https://abakan.yp.ru/search/text/'}, #
{'id': '456', 'nom': 'https://yarovoe.yp.ru/search/text/'},
{'id': '457', 'nom': 'https://alatyr.yp.ru/search/text/'},
{'id': '458', 'nom': 'https://agidel.yp.ru/search/text/'},
{'id': '459', 'nom': 'https://alekseevka.yp.ru/search/text/'},
{'id': '460', 'nom': 'https://abinsk.yp.ru/search/text/'},
{'id': '461', 'nom': 'https://abakan.yp.ru/search/text/'},
{'id': '462', 'nom': 'https://yasnyi.yp.ru/search/text/'},
{'id': '463', 'nom': 'https://azov.yp.ru/search/text/'},
{'id': '464', 'nom': 'https://aldan.yp.ru/search/text/'},
{'id': '465', 'nom': 'https://alagir.yp.ru/search/text/'},
{'id': '466', 'nom': 'https://yartsevo.yp.ru/search/text/'},
{'id': '467', 'nom': 'https://alapaevsk.yp.ru/search/text/'},
{'id': '468', 'nom': 'https://agryz.yp.ru/search/text/'},
{'id': '469', 'nom': 'https://yasnogorsk.yp.ru/search/text/'},
{'id': '470', 'nom': 'https://ak-dovurak.yp.ru/search/text/'},
{'id': '471', 'nom': 'https://yaroslavl.yp.ru/search/text/'},
{'id': '472', 'nom': 'https://aleksandrovsk.yp.ru/search/text/'}
]
try:
for capitale in capitales_du_monde:
for activite in activites:
try:
activity = activite.get("url")
city = capitale.get("nom")
url_search = city + activity
html_search = requests.get(url_search)
soup_search = BeautifulSoup(html_search.content, "html.parser")
number_of_pages = 0
try:
if soup_search \
.select_one("#utm") \
.find("div", {"class": "row companies-container"}) is not None:
try:
if soup_search \
.select_one("#utm") \
.find("div", {"class": "row companies-container"}) \
.find("div", {"class": "mdl-cell mdl-cell--12-col"}) is not None:
number_of_pages_with_coma = int(soup_search
.select_one("#utm")
.find("div",
{"class": "row companies-container"})
.find("div",
{"class": "mdl-cell mdl-cell--12-col"})
.text
.replace("\t", "")
.replace("\n", "")
.replace(" ", "")
.replace("Нашлось", "")
.replace("компании", "")
.replace("компаний", "")
) / 20
if int(str(number_of_pages_with_coma).split(".")[1][:1]) < 5:
number_of_pages += round(number_of_pages_with_coma) + 1
print("number_of_pages : " + str(number_of_pages))
elif int(str(number_of_pages_with_coma).split(".")[1][:1]) >= 5:
number_of_pages += round(number_of_pages_with_coma)
print("number_of_pages : " + str(number_of_pages))
else:
print("no div mdl-cell mdl-cell--12-col")
except Exception as e:
print("error mdl-cell mdl-cell--12-col : " + str(e))
else:
print("no div row companies-container")
except Exception as e:
print("error row companies-container : " + str(e))
i_1 = 0
try:
for i in range(1, number_of_pages + 1):
url_one_page_of_results = city + activity + "/page/" + str(i) + "/"
print(url_one_page_of_results)
html_search = requests.get(url_one_page_of_results)
soup_one_page_of_results = BeautifulSoup(html_search.content, "html.parser")
if soup_one_page_of_results \
.select_one("#companies") is not None:
if soup_one_page_of_results \
.select_one("#companies") \
.find("div", {"class": "row"}) is not None:
all_row = soup_one_page_of_results.select_one("#companies").find_all("div", {
"class": "row"})
for j in range(0, len(all_row)):
i_1 += 1
if all_row[j].find("div", {"class": "company__head"}) is not None:
if all_row[j] \
.find("div", {"class": "company__head"}) \
.find("a", {"class": "testtjlw"}) is not None:
url_result = "https://msk.yp.ru" + all_row[j] \
.find("div", {"class": "company__head"}) \
.find("a", {"class": "testtjlw"}) \
.get("href")
print(url_result)
# Request the content of a page from the url
html = requests.get(url_result)
# Parse the content of html_doc
soup = BeautifulSoup(html.content, "html.parser")
if soup.find("a", {"data-statclick-action": "is_web"}) is not None:
all_is_web = soup.find_all("a",
{"data-statclick-action": "is_web"})
for is_web in all_is_web:
email = "info@" + is_web.get("data-statclick-url") \
.replace("www.", "") \
.replace("http://", "") \
.replace("https://", "") \
.split("/")[0]
try:
connection = pymysql.connect(
host='localhost',
port=3306,
user='',
password='',
db='contacts_professionnels',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
with connection.cursor() as cursor:
try:
sql = "INSERT INTO `emails` (" \
"`id_activite`, " \
"`id_capitale_du_monde`, " \
"`email`) VALUE (%s, %s, %s)"
cursor.execute(sql, (
activite.get('id'),
capitale.get('id'),
email))
connection.commit()
print(str(i_1) + " The record is stored : "
+ str(email))
connection.close()
except Exception as e:
print(str(i_1)
+ " The record already exists : "
+ str(email) + " "
+ str(e))
connection.close()
except Exception as e:
print(str(i_1)
+ " An error with the email : "
+ email + " "
+ str(e)
)
else:
print(str(i_1) + " no email business")
else:
print(str(i_1) + " no a testtjlw")
else:
print(str(i_1) + " no div company__head")
else:
print("no div row")
else:
print("no id companies")
except Exception as e:
print("error " + str(e))
except Exception as e:
print("url_search : " + str(e))
finally:
print('done')
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"validate_email.validate_email",
"bs4.BeautifulSoup",
"requests.get"
] | [((22472, 22487), 'unittest.main', 'unittest.main', ([], {}), '()\n', (22485, 22487), False, 'import unittest\n'), ((390, 407), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (402, 407), False, 'import requests\n'), ((464, 506), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html.content', '"""html.parser"""'], {}), "(html.content, 'html.parser')\n", (477, 506), False, 'from bs4 import BeautifulSoup\n'), ((1560, 1584), 'requests.get', 'requests.get', (['url_search'], {}), '(url_search)\n', (1572, 1584), False, 'import requests\n'), ((1611, 1660), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_search.content', '"""html.parser"""'], {}), "(html_search.content, 'html.parser')\n", (1624, 1660), False, 'from bs4 import BeautifulSoup\n'), ((4293, 4330), 'requests.get', 'requests.get', (['url_one_page_of_results'], {}), '(url_one_page_of_results)\n', (4305, 4330), False, 'import requests\n'), ((4378, 4427), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_search.content', '"""html.parser"""'], {}), "(html_search.content, 'html.parser')\n", (4391, 4427), False, 'from bs4 import BeautifulSoup\n'), ((950, 984), 'validate_email.validate_email', 'validate_email', (['email'], {'verify': '(True)'}), '(email, verify=True)\n', (964, 984), False, 'from validate_email import validate_email\n'), ((11940, 11964), 'requests.get', 'requests.get', (['url_search'], {}), '(url_search)\n', (11952, 11964), False, 'import requests\n'), ((12003, 12052), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_search.content', '"""html.parser"""'], {}), "(html_search.content, 'html.parser')\n", (12016, 12052), False, 'from bs4 import BeautifulSoup\n'), ((15431, 15468), 'requests.get', 'requests.get', (['url_one_page_of_results'], {}), '(url_one_page_of_results)\n', (15443, 15468), False, 'import requests\n'), ((15528, 15577), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_search.content', '"""html.parser"""'], {}), "(html_search.content, 'html.parser')\n", (15541, 15577), False, 'from bs4 import BeautifulSoup\n'), ((5768, 5792), 'requests.get', 'requests.get', (['url_result'], {}), '(url_result)\n', (5780, 5792), False, 'import requests\n'), ((5913, 5955), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html.content', '"""html.parser"""'], {}), "(html.content, 'html.parser')\n", (5926, 5955), False, 'from bs4 import BeautifulSoup\n'), ((17219, 17243), 'requests.get', 'requests.get', (['url_result'], {}), '(url_result)\n', (17231, 17243), False, 'import requests\n'), ((17388, 17430), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html.content', '"""html.parser"""'], {}), "(html.content, 'html.parser')\n", (17401, 17430), False, 'from bs4 import BeautifulSoup\n'), ((6757, 6791), 'validate_email.validate_email', 'validate_email', (['email'], {'verify': '(True)'}), '(email, verify=True)\n', (6771, 6791), False, 'from validate_email import validate_email\n')] |
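The scraper above guesses a contact address by string-stripping URL prefixes. A sketch of the same idea using `urllib.parse`, which also copes with ports and paths:

from urllib.parse import urlparse

def guess_contact_email(url: str) -> str:
    netloc = urlparse(url if "://" in url else "http://" + url).netloc
    domain = netloc.split(":")[0]  # drop an explicit port, if any
    if domain.startswith("www."):
        domain = domain[4:]
    return "info@" + domain

assert guess_contact_email("https://www.example.ru/contacts") == "info@example.ru"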
import numpy as np
import cv2
import os
def findInvalids():
searchDir = "negs"
invalidDir = "invalids"
for img in os.listdir(searchDir):
for invalid in os.listdir(invalidDir):
try:
imgPath = searchDir + "/" + str(img)
invalidImg = cv2.imread(invalidDir + "/" + str(invalid))
testingImg = cv2.imread(imgPath)
if invalidImg.shape == testingImg.shape and not(np.bitwise_xor(invalidImg, testingImg).any()):
os.remove(imgPath)
print("Invalid image " + imgPath + " has been deleted")
except Exception as e:
print(str(e))
findInvalids()
| [
"numpy.bitwise_xor",
"os.listdir",
"cv2.imread",
"os.remove"
] | [((124, 145), 'os.listdir', 'os.listdir', (['searchDir'], {}), '(searchDir)\n', (134, 145), False, 'import os\n'), ((165, 187), 'os.listdir', 'os.listdir', (['invalidDir'], {}), '(invalidDir)\n', (175, 187), False, 'import os\n'), ((318, 337), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (328, 337), False, 'import cv2\n'), ((444, 462), 'os.remove', 'os.remove', (['imgPath'], {}), '(imgPath)\n', (453, 462), False, 'import os\n'), ((391, 429), 'numpy.bitwise_xor', 'np.bitwise_xor', (['invalidImg', 'testingImg'], {}), '(invalidImg, testingImg)\n', (405, 429), True, 'import numpy as np\n')] |
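The duplicate test above relies on the fact that two arrays of equal shape are identical exactly when their element-wise XOR is all zeros. A tiny self-contained sketch:

import numpy as np

a = np.zeros((4, 4, 3), dtype=np.uint8)
b = a.copy()
c = a.copy()
c[0, 0, 0] = 255

def same_image(x, y):
    # shapes must match before XOR; any() is True if any byte differs
    return x.shape == y.shape and not np.bitwise_xor(x, y).any()

assert same_image(a, b)
assert not same_image(a, c)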
import numpy as np
import neurolab as nl
# Define the input file
input_file = 'data/letter.data'
# Define the number of data points
num_data = 50
# String containing all the distinct characters
orig_labels = 'omandig'
# Store number of distinct characters
num_labels = len(orig_labels)
# Define the training and testing parameters
num_train = int(0.9 * num_data)
num_test = num_data - num_train
# Define the dataset extraction parameters
start = 6
end = -1
# Creating the dataset
data = list()
labels = list()
with open(input_file, 'r') as f:
for line in f.readlines():
# Split the current line tabwise
list_vals = line.split('\t')
# Check if the label is in our ground truth labels
# If not, we should skip it.
if list_vals[1] not in orig_labels:
continue
# Extract the current label and append it to the main list
label = np.zeros((num_labels, 1))
label[orig_labels.index(list_vals[1])] = 1
labels.append(label)
# Extract the character vector and append it to the main list
cur_char = np.array([float(x) for x in list_vals[start:end]])
data.append(cur_char)
# Exit the loop once the required dataset has been created
if len(data) >= num_data:
break
f.close()
# Convert the data and labels to numpy arrays
data = np.asfarray(data)
labels = np.array(labels).reshape(num_data, num_labels)
# Extract the number of dimensions
num_dims = len(data[0])
# Create a feed forward neural network
nn = nl.net.newff([[0, 1] for _ in range(len(data[0]))], [128, 16, num_labels])
# Set the training algorithm to gradient descent
nn.trainf = nl.train.train_gd
# Train the network
err = nn.train(data[:num_train, :], labels[:num_train, :],
epochs=10000, show=100, goal=0.01)
# Predict the output for test inputs
print('\nTesting on unknown data:')
predicted_test = nn.sim(data[num_train:, :])
for i in range(num_test):
    print('\nOriginal:', orig_labels[np.argmax(labels[num_train + i])])  # index into the held-out test labels
print('Predicted:', orig_labels[np.argmax(predicted_test[i])])
| [
"numpy.asfarray",
"numpy.array",
"numpy.zeros",
"numpy.argmax"
] | [((1368, 1385), 'numpy.asfarray', 'np.asfarray', (['data'], {}), '(data)\n', (1379, 1385), True, 'import numpy as np\n'), ((906, 931), 'numpy.zeros', 'np.zeros', (['(num_labels, 1)'], {}), '((num_labels, 1))\n', (914, 931), True, 'import numpy as np\n'), ((1395, 1411), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1403, 1411), True, 'import numpy as np\n'), ((2014, 2034), 'numpy.argmax', 'np.argmax', (['labels[i]'], {}), '(labels[i])\n', (2023, 2034), True, 'import numpy as np\n'), ((2073, 2101), 'numpy.argmax', 'np.argmax', (['predicted_test[i]'], {}), '(predicted_test[i])\n', (2082, 2101), True, 'import numpy as np\n')] |
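The label handling above builds one-hot column vectors over the alphabet 'omandig'. The encoding step in isolation:

import numpy as np

orig_labels = 'omandig'

def one_hot(char):
    label = np.zeros((len(orig_labels), 1))
    label[orig_labels.index(char)] = 1
    return label

vec = one_hot('n')
assert vec[orig_labels.index('n'), 0] == 1
assert vec.sum() == 1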
import socket
from datetime import datetime
from email.parser import BytesParser, HeaderParser
from hashlib import sha256
from urllib.parse import urlparse
from .exceptions import IntegrityVerificationError, NoDataError, RibbitError
DEFAULT_PORT = 1119
class RibbitRequest:
"""
A request to a Ribbit server.
Initialize the request with hostname, port and encoded data to send.
Perform the request with request.send()
"""
def __init__(self, hostname: str, port: int, path: str) -> None:
self.hostname = hostname
self.port = port
self.path = path
self.data = f"{path}\n".encode()
def send(self, buffer_size: int) -> "RibbitResponse":
# Connect to the ribbit server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.hostname, self.port))
buf = []
try:
# Send the path request
s.send(self.data)
# Receive and buffer the data
chunk = s.recv(buffer_size)
while chunk:
buf.append(chunk)
chunk = s.recv(buffer_size)
finally:
s.close()
data = b"".join(buf)
if not data:
raise NoDataError(f"No data at {self.path}")
# Data is expected to terminate in a CRLF, otherwise it's most likely broken
if not data.endswith(b"\r\n"):
raise RibbitError("Unterminated data... try again.")
return RibbitResponse(self, data)
class RibbitResponse:
"""
A response to a RibbitRequest.
The request that created that response is available on the .request attribute.
"""
def __init__(
self, request: RibbitRequest, data: bytes, *, verify: bool = True
) -> None:
self.request = request
self.data = data
self.date = datetime.utcnow()
self.message = BytesParser().parsebytes(data) # type: ignore # (typeshed#2502)
self.checksum = parse_checksum(self.message.epilogue)
# The bytes of everything except the checksum (the epilogue)
# The checksum is of those bytes
self.content_bytes = data[:-len(self.message.epilogue)]
if verify:
content_checksum = sha256(self.content_bytes).hexdigest()
if self.checksum != content_checksum:
raise IntegrityVerificationError("ribbit response", content_checksum, self.checksum)
self.content = self.message.get_payload(0).get_payload()
self.signature = self.message.get_payload(1).get_payload()
# TODO: verify signature as well
class RibbitClient:
"""
A Ribbit client. Corresponds to a hostname/port pair.
"""
def __init__(self, hostname: str, port: int) -> None:
self.hostname = hostname
self.port = port
def get(self, path: str, *, buffer_size: int = 4096) -> RibbitResponse:
request = RibbitRequest(self.hostname, self.port, path)
return request.send(buffer_size)
def parse_checksum(header: str) -> str:
"""
Parse the Checksum header (eg. from the email epilogue)
"""
# Epilogue example:
# "Checksum: e231f8e724890aca477ca5efdfc7bc9c31e1da124510b4f420ebcf9c2d1fbe74\r\n"
msg = HeaderParser().parsestr(header)
return msg["Checksum"] # type: ignore
def get(url: str, **kwargs) -> RibbitResponse:
"""
Query a ribbit url. Returns a RibbitResponse object.
Port defaults to 1119 if not specified.
Usage example:
>>> from keg import ribbit
>>> ribbit.get("ribbit://version.example.com/v1/products/foo/cdns")
"""
u = urlparse(url)
if u.scheme != "ribbit":
raise ValueError(f"Invalid ribbit url: {url!r} (must start with ribbit://)")
client = RibbitClient(u.hostname, u.port or DEFAULT_PORT)
return client.get(u.path.lstrip("/"), **kwargs)
| [
"hashlib.sha256",
"urllib.parse.urlparse",
"socket.socket",
"datetime.datetime.utcnow",
"email.parser.BytesParser",
"email.parser.HeaderParser"
] | [((3199, 3212), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (3207, 3212), False, 'from urllib.parse import urlparse\n'), ((694, 743), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (707, 743), False, 'import socket\n'), ((1602, 1619), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1617, 1619), False, 'from datetime import datetime\n'), ((2853, 2867), 'email.parser.HeaderParser', 'HeaderParser', ([], {}), '()\n', (2865, 2867), False, 'from email.parser import BytesParser, HeaderParser\n'), ((1638, 1651), 'email.parser.BytesParser', 'BytesParser', ([], {}), '()\n', (1649, 1651), False, 'from email.parser import BytesParser, HeaderParser\n'), ((1951, 1977), 'hashlib.sha256', 'sha256', (['self.content_bytes'], {}), '(self.content_bytes)\n', (1957, 1977), False, 'from hashlib import sha256\n')] |
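The Ribbit epilogue above carries the checksum as an RFC 822-style header line, which is why `HeaderParser` can extract it. A standalone sketch using the example string from the record's own comment:

from email.parser import HeaderParser

epilogue = "Checksum: e231f8e724890aca477ca5efdfc7bc9c31e1da124510b4f420ebcf9c2d1fbe74\r\n"
msg = HeaderParser().parsestr(epilogue)
assert msg["Checksum"].startswith("e231f8e7")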
# sudoku builder/solver gui
import tkinter
import tkinter.messagebox
import sudoku
class ProgramGUI:
def __init__(self):
self.main = tkinter.Tk()
self.main.title('Sudoku Builder')
# initialise attributes
fontDetails = 'Calibri 14'
self.difficulty = tkinter.StringVar()
self.difficulty.set('1')
# create 81 StringVar()
self.varList = []
for letter in ['a','b','c','d','e','f','g','h','i']:
for num in range(1,10):
var_string = letter + str(num)
self.varList.append(var_string)
self.varDict = {var: tkinter.StringVar() for var in self.varList}
#create main frames
self.headingFrame = tkinter.Frame(self.main)
self.top = tkinter.Frame(self.main)
self.middle = tkinter.Frame(self.main)
self.bottom = tkinter.Frame(self.main)
self.bottom2 = tkinter.Frame(self.main)
self.difficultyFrame = tkinter.Frame(self.main)
#fill the heading frame
self.headingLabel = tkinter.Label(self.headingFrame, text='Create or Solve a Sudoku:', font='Calibri 16 bold').pack(side='left')
#fill the bottom2 frame
self.createButton = tkinter.Button(self.bottom2, text='Create', font='Calibri 16 bold', command=lambda: self.build_sudoku(self.difficulty.get())).pack(side='left', padx=15)
self.solveButton = tkinter.Button(self.bottom2, text='Solve', font='Calibri 16 bold', command=lambda: self.solve_sudoku()).pack(side='left', padx=15)
self.clearButton = tkinter.Button(self.bottom2, text='Clear', font='Calibri 16 bold', command=lambda: self.clear_boxes()).pack(side='left', padx=15)
#fill the bottom3 frame
self.difficultyLabel = tkinter.Label(self.difficultyFrame, text='Difficulty:', font='Calibri 14 bold').pack(side='left')
tkinter.Radiobutton(self.difficultyFrame, text='Easy', variable=self.difficulty, value='1').pack(side='left')
tkinter.Radiobutton(self.difficultyFrame, text='Medium', variable=self.difficulty, value='2').pack(side='left')
tkinter.Radiobutton(self.difficultyFrame, text='Hard', variable=self.difficulty, value='3').pack(side='left')
tkinter.Radiobutton(self.difficultyFrame, text='Extreme', variable=self.difficulty, value='4').pack(side='left')
tkinter.Radiobutton(self.difficultyFrame, text='Insane', variable=self.difficulty, value='5').pack(side='left')
#create the nine square frames
self.topLeft = tkinter.Frame(self.top)
self.topMiddle = tkinter.Frame(self.top)
self.topRight = tkinter.Frame(self.top)
self.middleLeft = tkinter.Frame(self.middle)
self.middleMiddle = tkinter.Frame(self.middle)
self.middleRight = tkinter.Frame(self.middle)
self.bottomLeft = tkinter.Frame(self.bottom)
self.bottomMiddle = tkinter.Frame(self.bottom)
self.bottomRight = tkinter.Frame(self.bottom)
# create rows inside squares
self.rowList = ['topLeftTop', 'topLeftMid', 'topLeftBottom', 'topMiddleTop', 'topMiddleMid', 'topMiddleBottom', 'topRightTop', 'topRightMid', 'topRightBottom', 'middleLeftTop', 'middleLeftMid', 'middleLeftBottom',
'middleMiddleTop', 'middleMiddleMid', 'middleMiddleBottom', 'middleRightTop', 'middleRightMid', 'middleRightBottom', 'bottomLeftTop', 'bottomLeftMid', 'bottomLeftBottom', 'bottomMiddleTop',
'bottomMiddleMid', 'bottomMiddleBottom', 'bottomRightTop', 'bottomRightMid', 'bottomRightBottom']
self.rowList2 = ['topLeftTop', 'topMiddleTop', 'topRightTop', 'topLeftMid', 'topMiddleMid', 'topRightMid', 'topLeftBottom', 'topMiddleBottom', 'topRightBottom', 'middleLeftTop', 'middleMiddleTop', 'middleRightTop',
'middleLeftMid', 'middleMiddleMid', 'middleRightMid', 'middleLeftBottom', 'middleMiddleBottom', 'middleRightBottom', 'bottomLeftTop', 'bottomMiddleTop', 'bottomRightTop', 'bottomLeftMid',
'bottomMiddleMid','bottomRightMid', 'bottomLeftBottom', 'bottomMiddleBottom', 'bottomRightBottom']
self.squareList = [self.topLeft, self.topMiddle, self.topRight, self.middleLeft, self.middleMiddle, self.middleRight, self.bottomLeft, self.bottomMiddle, self.bottomRight]
# there are three rows in each square, so in order to match rows to squares there needs to be three of each square in the list
self.squareList_exp = []
for square in self.squareList:
self.squareList_exp.append(square)
self.squareList_exp.append(square)
self.squareList_exp.append(square)
self.rowDict = {row: tkinter.Frame(square) for row, square in zip(self.rowList, self.squareList_exp)}
#fill rows
self.rowList_exp = []
for row in self.rowList2:
self.rowList_exp.append(self.rowDict[row])
self.rowList_exp.append(self.rowDict[row])
self.rowList_exp.append(self.rowDict[row])
self.entryDict = {var: tkinter.Entry(row, width=3, textvariable=self.varDict[var], font=fontDetails, justify='center').pack(side='left', padx=1, pady=1) for var, row in zip(self.varList, self.rowList_exp)}
#pack rows
for row in self.rowList:
self.rowDict[row].pack(side='top')
#pack squares and heading
for square in self.squareList:
square.pack(side='left', padx=2)
self.headingFrame.pack(side='top', padx=1, pady=1)
self.top.pack(side='top', padx=1, pady=1)
self.middle.pack(side='top', padx=1, pady=1)
self.bottom.pack(side='top', padx=1, pady=1)
self.bottom2.pack(side='top', pady=10)
self.difficultyFrame.pack(side='top', pady=10)
tkinter.mainloop()
def solve_sudoku(self):
# create a list of values to create the SudokuPuzzle object with
self.values_list = [self.varDict[var].get() for var in self.varList]
        # create the SudokuPuzzle. Shows an error dialog if bad values are submitted.
try:
self.sudoku = sudoku.SudokuPuzzle(self.values_list)
validInput = True
except sudoku.SudokuError:
validInput = False
tkinter.messagebox.showerror('Error!', 'Error creating sudoku. There is either a number clash or invalid symbols present.')
if validInput:
# run the solve function and either update the boxes with the solution, or advise the problem is impossible
result = self.sudoku.solve()
if result == 'completed':
self.update_boxes()
elif result == 'impossible':
tkinter.messagebox.showerror('Error!', 'This sudoku is impossible.')
def build_sudoku(self, difficulty):
'''create a new sudoku puzzle'''
emptyValues = ['' for i in range(81)]
self.sudoku = sudoku.SudokuPuzzle(emptyValues)
self.sudoku.assign_seeds()
self.sudoku.solve()
self.sudoku.remove_values(difficulty)
self.update_boxes()
def update_boxes(self):
'''Update the GUI boxes with the characters from the solved SudokuPuzzle'''
for var in self.varList:
self.varDict[var].set(self.sudoku.varDict[var].get())
    def clear_boxes(self):
        '''Clear all the boxes in the GUI.'''
        # Clear the GUI variables directly, so this also works before
        # any SudokuPuzzle has been created.
        for var in self.varList:
            self.varDict[var].set('')
# run everything:
gui = ProgramGUI()
| [
"tkinter.messagebox.showerror",
"tkinter.Entry",
"tkinter.Radiobutton",
"tkinter.StringVar",
"tkinter.Tk",
"tkinter.Label",
"tkinter.mainloop",
"sudoku.SudokuPuzzle",
"tkinter.Frame"
] | [((155, 167), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (165, 167), False, 'import tkinter\n'), ((311, 330), 'tkinter.StringVar', 'tkinter.StringVar', ([], {}), '()\n', (328, 330), False, 'import tkinter\n'), ((758, 782), 'tkinter.Frame', 'tkinter.Frame', (['self.main'], {}), '(self.main)\n', (771, 782), False, 'import tkinter\n'), ((803, 827), 'tkinter.Frame', 'tkinter.Frame', (['self.main'], {}), '(self.main)\n', (816, 827), False, 'import tkinter\n'), ((851, 875), 'tkinter.Frame', 'tkinter.Frame', (['self.main'], {}), '(self.main)\n', (864, 875), False, 'import tkinter\n'), ((899, 923), 'tkinter.Frame', 'tkinter.Frame', (['self.main'], {}), '(self.main)\n', (912, 923), False, 'import tkinter\n'), ((948, 972), 'tkinter.Frame', 'tkinter.Frame', (['self.main'], {}), '(self.main)\n', (961, 972), False, 'import tkinter\n'), ((1005, 1029), 'tkinter.Frame', 'tkinter.Frame', (['self.main'], {}), '(self.main)\n', (1018, 1029), False, 'import tkinter\n'), ((2578, 2601), 'tkinter.Frame', 'tkinter.Frame', (['self.top'], {}), '(self.top)\n', (2591, 2601), False, 'import tkinter\n'), ((2628, 2651), 'tkinter.Frame', 'tkinter.Frame', (['self.top'], {}), '(self.top)\n', (2641, 2651), False, 'import tkinter\n'), ((2677, 2700), 'tkinter.Frame', 'tkinter.Frame', (['self.top'], {}), '(self.top)\n', (2690, 2700), False, 'import tkinter\n'), ((2728, 2754), 'tkinter.Frame', 'tkinter.Frame', (['self.middle'], {}), '(self.middle)\n', (2741, 2754), False, 'import tkinter\n'), ((2784, 2810), 'tkinter.Frame', 'tkinter.Frame', (['self.middle'], {}), '(self.middle)\n', (2797, 2810), False, 'import tkinter\n'), ((2839, 2865), 'tkinter.Frame', 'tkinter.Frame', (['self.middle'], {}), '(self.middle)\n', (2852, 2865), False, 'import tkinter\n'), ((2893, 2919), 'tkinter.Frame', 'tkinter.Frame', (['self.bottom'], {}), '(self.bottom)\n', (2906, 2919), False, 'import tkinter\n'), ((2949, 2975), 'tkinter.Frame', 'tkinter.Frame', (['self.bottom'], {}), '(self.bottom)\n', (2962, 2975), False, 'import tkinter\n'), ((3004, 3030), 'tkinter.Frame', 'tkinter.Frame', (['self.bottom'], {}), '(self.bottom)\n', (3017, 3030), False, 'import tkinter\n'), ((5944, 5962), 'tkinter.mainloop', 'tkinter.mainloop', ([], {}), '()\n', (5960, 5962), False, 'import tkinter\n'), ((7108, 7140), 'sudoku.SudokuPuzzle', 'sudoku.SudokuPuzzle', (['emptyValues'], {}), '(emptyValues)\n', (7127, 7140), False, 'import sudoku\n'), ((653, 672), 'tkinter.StringVar', 'tkinter.StringVar', ([], {}), '()\n', (670, 672), False, 'import tkinter\n'), ((4772, 4793), 'tkinter.Frame', 'tkinter.Frame', (['square'], {}), '(square)\n', (4785, 4793), False, 'import tkinter\n'), ((6274, 6311), 'sudoku.SudokuPuzzle', 'sudoku.SudokuPuzzle', (['self.values_list'], {}), '(self.values_list)\n', (6293, 6311), False, 'import sudoku\n'), ((1094, 1189), 'tkinter.Label', 'tkinter.Label', (['self.headingFrame'], {'text': '"""Create or Solve a Sudoku:"""', 'font': '"""Calibri 16 bold"""'}), "(self.headingFrame, text='Create or Solve a Sudoku:', font=\n 'Calibri 16 bold')\n", (1107, 1189), False, 'import tkinter\n'), ((1804, 1883), 'tkinter.Label', 'tkinter.Label', (['self.difficultyFrame'], {'text': '"""Difficulty:"""', 'font': '"""Calibri 14 bold"""'}), "(self.difficultyFrame, text='Difficulty:', font='Calibri 14 bold')\n", (1817, 1883), False, 'import tkinter\n'), ((1911, 2007), 'tkinter.Radiobutton', 'tkinter.Radiobutton', (['self.difficultyFrame'], {'text': '"""Easy"""', 'variable': 'self.difficulty', 'value': '"""1"""'}), "(self.difficultyFrame, text='Easy', variable=self.\n 
difficulty, value='1')\n", (1930, 2007), False, 'import tkinter\n'), ((2030, 2128), 'tkinter.Radiobutton', 'tkinter.Radiobutton', (['self.difficultyFrame'], {'text': '"""Medium"""', 'variable': 'self.difficulty', 'value': '"""2"""'}), "(self.difficultyFrame, text='Medium', variable=self.\n difficulty, value='2')\n", (2049, 2128), False, 'import tkinter\n'), ((2151, 2247), 'tkinter.Radiobutton', 'tkinter.Radiobutton', (['self.difficultyFrame'], {'text': '"""Hard"""', 'variable': 'self.difficulty', 'value': '"""3"""'}), "(self.difficultyFrame, text='Hard', variable=self.\n difficulty, value='3')\n", (2170, 2247), False, 'import tkinter\n'), ((2270, 2369), 'tkinter.Radiobutton', 'tkinter.Radiobutton', (['self.difficultyFrame'], {'text': '"""Extreme"""', 'variable': 'self.difficulty', 'value': '"""4"""'}), "(self.difficultyFrame, text='Extreme', variable=self.\n difficulty, value='4')\n", (2289, 2369), False, 'import tkinter\n'), ((2392, 2490), 'tkinter.Radiobutton', 'tkinter.Radiobutton', (['self.difficultyFrame'], {'text': '"""Insane"""', 'variable': 'self.difficulty', 'value': '"""5"""'}), "(self.difficultyFrame, text='Insane', variable=self.\n difficulty, value='5')\n", (2411, 2490), False, 'import tkinter\n'), ((6424, 6556), 'tkinter.messagebox.showerror', 'tkinter.messagebox.showerror', (['"""Error!"""', '"""Error creating sudoku. There is either a number clash or invalid symbols present."""'], {}), "('Error!',\n 'Error creating sudoku. There is either a number clash or invalid symbols present.'\n )\n", (6452, 6556), False, 'import tkinter\n'), ((5151, 5251), 'tkinter.Entry', 'tkinter.Entry', (['row'], {'width': '(3)', 'textvariable': 'self.varDict[var]', 'font': 'fontDetails', 'justify': '"""center"""'}), "(row, width=3, textvariable=self.varDict[var], font=\n fontDetails, justify='center')\n", (5164, 5251), False, 'import tkinter\n'), ((6884, 6952), 'tkinter.messagebox.showerror', 'tkinter.messagebox.showerror', (['"""Error!"""', '"""This sudoku is impossible."""'], {}), "('Error!', 'This sudoku is impossible.')\n", (6912, 6952), False, 'import tkinter\n')] |
import os
import click
from pypgatk.commands.utils import print_help
from pypgatk.ensembl.ensembl import EnsemblDataService
this_dir, this_filename = os.path.split(__file__)
@click.command('parse-gtf', short_help="Generate the DB file from a GTF, to be used by the vcf-to-proteindb command")
@click.option('-c', '--config_file', help='Configuration to perform conversion between ENSEMBL Files',
default=this_dir + '/../config/ensembl_config.yaml')
@click.option('-i', '--input_gtf', help='Path to the GTF file')
@click.option('-o', '--output_db', help='Path to the output DB file')
@click.pass_context
def parse_gtf(ctx, config_file, input_gtf, output_db):
ensembl_data_service = EnsemblDataService(config_file, {})
    ensembl_data_service.parse_gtf(input_gtf, output_db)
| [
"click.option",
"pypgatk.ensembl.ensembl.EnsemblDataService",
"click.command",
"os.path.split"
] | [((153, 176), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (166, 176), False, 'import os\n'), ((179, 305), 'click.command', 'click.command', (['"""parse-gtf"""'], {'short_help': '"""Generate the DB file from a GTF, to be used by the vcf-to-proteindb command"""'}), "('parse-gtf', short_help=\n 'Generate the DB file from a GTF, to be used by the vcf-to-proteindb command'\n )\n", (192, 305), False, 'import click\n'), ((297, 461), 'click.option', 'click.option', (['"""-c"""', '"""--config_file"""'], {'help': '"""Configuration to perform conversion between ENSEMBL Files"""', 'default': "(this_dir + '/../config/ensembl_config.yaml')"}), "('-c', '--config_file', help=\n 'Configuration to perform conversion between ENSEMBL Files', default=\n this_dir + '/../config/ensembl_config.yaml')\n", (309, 461), False, 'import click\n'), ((467, 529), 'click.option', 'click.option', (['"""-i"""', '"""--input_gtf"""'], {'help': '"""Path to the GTF file"""'}), "('-i', '--input_gtf', help='Path to the GTF file')\n", (479, 529), False, 'import click\n'), ((531, 599), 'click.option', 'click.option', (['"""-o"""', '"""--output_db"""'], {'help': '"""Path to the output DB file"""'}), "('-o', '--output_db', help='Path to the output DB file')\n", (543, 599), False, 'import click\n'), ((701, 736), 'pypgatk.ensembl.ensembl.EnsemblDataService', 'EnsemblDataService', (['config_file', '{}'], {}), '(config_file, {})\n', (719, 736), False, 'from pypgatk.ensembl.ensembl import EnsemblDataService\n')] |
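A hedged sketch of exercising the `parse-gtf` command above in-process with click's test runner (the GTF and DB paths are placeholders, so a real run needs existing input files and a valid config):

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(parse_gtf, ["--input_gtf", "in.gtf", "--output_db", "out.db"])
print(result.exit_code, result.output)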
"""This module contains the `Edb` class.
This module is implicitily loaded in HFSS 3D Layout when launched.
"""
import gc
import os
import sys
import time
import traceback
import warnings
from pyaedt import inside_desktop, is_ironpython
from pyaedt.application.MessageManager import EDBMessageManager
from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup
from pyaedt.edb_core.EDB_Data import EdbBuilder
from pyaedt import retry_ntimes
from pyaedt.generic.general_methods import (
aedt_exception_handler,
env_path,
env_path_student,
env_value,
generate_unique_name,
)
from pyaedt.generic.process import SiwaveSolve
if os.name == "posix":
try:
import subprocessdotnet as subprocess
except:
warnings.warn("Pythonnet is needed to run pyaedt within Linux")
else:
import subprocess
try:
import clr
from System import Convert
edb_initialized = True
except ImportError:
warnings.warn(
"The clr is missing. Install Python.NET or use an IronPython version if you want to use the EDB module."
)
edb_initialized = False
class Edb(object):
"""Provides the EDB application interface.
This module inherits all objects that belong to EDB.
Parameters
----------
edbpath : str, optional
Full path to the ``aedb`` folder. The variable can also contain
        the path to a layout to import. Allowed formats are BRD,
XML (IPC2581), GDS, and DXF. The default is ``None``.
cellname : str, optional
Name of the cell to select. The default is ``None``.
isreadonly : bool, optional
Whether to open ``edb_core`` in read-only mode when it is
owned by HFSS 3D Layout. The default is ``False``.
edbversion : str, optional
Version of ``edb_core`` to use. The default is ``"2021.1"``.
isaedtowned : bool, optional
Whether to launch ``edb_core`` from HFSS 3D Layout. The
default is ``False``.
oproject : optional
Reference to the AEDT project object.
student_version : bool, optional
        Whether to open the AEDT student version. The default is ``False``.
    use_ppe : bool, optional
        Whether to use the PPE license. The default is ``False``.
Examples
--------
Create an `Edb` object and a new EDB cell.
>>> from pyaedt import Edb
>>> app = Edb()
Create an `Edb` object and open the specified project.
>>> app = Edb("myfile.aedb")
"""
def __init__(
self,
edbpath=None,
cellname=None,
isreadonly=False,
edbversion="2021.1",
isaedtowned=False,
oproject=None,
student_version=False,
use_ppe=False,
):
self._clean_variables()
if is_ironpython and inside_desktop:
self.standalone = False
else:
self.standalone = True
if edb_initialized:
self.oproject = oproject
if isaedtowned:
self._main = sys.modules["__main__"]
self._messenger = self._main.oMessenger
else:
if not edbpath or not os.path.exists(edbpath):
self._messenger = EDBMessageManager()
                else:
self._messenger = EDBMessageManager(os.path.dirname(edbpath))
self.student_version = student_version
self._messenger.add_info_message("Messenger Initialized in EDB")
self.edbversion = edbversion
self.isaedtowned = isaedtowned
self._init_dlls()
self._db = None
# self._edb.Database.SetRunAsStandAlone(not isaedtowned)
self.isreadonly = isreadonly
self.cellname = cellname
if not edbpath:
if os.name != "posix":
edbpath = os.getenv("USERPROFILE")
if not edbpath:
edbpath = os.path.expanduser("~")
edbpath = os.path.join(edbpath, "Documents", generate_unique_name("layout") + ".aedb")
else:
edbpath = os.getenv("HOME")
if not edbpath:
edbpath = os.path.expanduser("~")
edbpath = os.path.join(edbpath, generate_unique_name("layout") + ".aedb")
self._messenger.add_info_message("No Edb Provided. Creating new EDB {}.".format(edbpath))
self.edbpath = edbpath
if isaedtowned and inside_desktop:
self.open_edb_inside_aedt()
elif edbpath[-3:] in ["brd", "gds", "xml", "dxf", "tgz"]:
self.edbpath = edbpath[:-4] + ".aedb"
working_dir = os.path.dirname(edbpath)
self.import_layout_pcb(edbpath, working_dir, use_ppe=use_ppe)
self._messenger.add_info_message(
"Edb {} Created Correctly from {} file".format(self.edbpath, edbpath[-2:])
)
elif not os.path.exists(os.path.join(self.edbpath, "edb.def")):
self.create_edb()
self._messenger.add_info_message("Edb {} Created Correctly".format(self.edbpath))
elif ".aedb" in edbpath:
self.edbpath = edbpath
self.open_edb()
if self.builder:
self._messenger.add_info_message("Edb Initialized")
else:
self._messenger.add_info_message("Failed to initialize Dlls")
else:
warnings.warn("Failed to initialize Dlls")
def __enter__(self):
return self
def __exit__(self, ex_type, ex_value, ex_traceback):
if ex_type:
self.edb_exception(ex_value, ex_traceback)
def _clean_variables(self):
"""Initialize internal variables and perform garbage collection."""
self._components = None
self._core_primitives = None
self._stackup = None
self._padstack = None
self._siwave = None
self._hfss = None
self._nets = None
self._db = None
self._edb = None
if "edbutils" in dir(self):
self.edbutils.Logger.Disable = True
self.builder = None
self.edblib = None
self.edbutils = None
self.simSetup = None
self.layout_methods = None
self.simsetupdata = None
if os.name == "posix":
clr.ClearProfilerData()
gc.collect()
gc.collect()
@aedt_exception_handler
def _init_objects(self):
time.sleep(2)
self._components = Components(self)
self._stackup = EdbStackup(self)
self._padstack = EdbPadstacks(self)
self._siwave = EdbSiwave(self)
self._hfss = Edb3DLayout(self)
self._nets = EdbNets(self)
self._core_primitives = EdbLayout(self)
self._messenger.add_info_message("Objects Initialized")
@aedt_exception_handler
def add_info_message(self, message_text):
"""Add a type 0 "Info" message to the active design level of the Message Manager tree.
Also add an info message to the logger if the handler is present.
Parameters
----------
message_text : str
Text to display as the info message.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
Examples
--------
>>> from pyaedt import Edb
>>> edb = Edb()
>>> edb.add_info_message("Design info message")
"""
self._messenger.add_info_message(message_text)
return True
@aedt_exception_handler
def add_warning_message(self, message_text):
"""Add a type 0 "Warning" message to the active design level of the Message Manager tree.
Also add an info message to the logger if the handler is present.
Parameters
----------
message_text : str
Text to display as the warning message.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
Examples
--------
>>> from pyaedt import Edb
>>> edb = Edb()
>>> edb.add_warning_message("Design warning message")
"""
self._messenger.add_warning_message(message_text)
return True
@aedt_exception_handler
def add_error_message(self, message_text):
"""Add a type 0 "Error" message to the active design level of the Message Manager tree.
Also add an error message to the logger if the handler is present.
Parameters
----------
message_text : str
Text to display as the error message.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
Examples
--------
>>> from pyaedt import Edb
>>> edb = Edb()
>>> edb.add_error_message("Design error message")
"""
self._messenger.add_error_message(message_text)
return True
@aedt_exception_handler
def _init_dlls(self):
"""Initialize DLLs."""
sys.path.append(os.path.join(os.path.dirname(__file__), "dlls", "EDBLib"))
if os.name == "posix":
if env_value(self.edbversion) in os.environ:
self.base_path = env_path(self.edbversion)
sys.path.append(self.base_path)
else:
main = sys.modules["__main__"]
if "oDesktop" in dir(main):
self.base_path = main.oDesktop.GetExeDir()
sys.path.append(main.oDesktop.GetExeDir())
os.environ[env_value(self.edbversion)] = self.base_path
clr.AddReferenceToFile("Ansys.Ansoft.Edb.dll")
clr.AddReferenceToFile("Ansys.Ansoft.EdbBuilderUtils.dll")
clr.AddReferenceToFile("EdbLib.dll")
clr.AddReferenceToFile("DataModel.dll")
clr.AddReferenceToFileAndPath(os.path.join(self.base_path, "Ansys.Ansoft.SimSetupData.dll"))
else:
if self.student_version:
self.base_path = env_path_student(self.edbversion)
else:
self.base_path = env_path(self.edbversion)
sys.path.append(self.base_path)
clr.AddReference("Ansys.Ansoft.Edb")
clr.AddReference("Ansys.Ansoft.EdbBuilderUtils")
clr.AddReference("EdbLib")
clr.AddReference("DataModel")
clr.AddReference("Ansys.Ansoft.SimSetupData")
os.environ["ECAD_TRANSLATORS_INSTALL_DIR"] = self.base_path
oaDirectory = os.path.join(self.base_path, "common", "oa")
os.environ["ANSYS_OADIR"] = oaDirectory
os.environ["PATH"] = "{};{}".format(os.environ["PATH"], self.base_path)
edb = __import__("Ansys.Ansoft.Edb")
self.edb = edb.Ansoft.Edb
edbbuilder = __import__("Ansys.Ansoft.EdbBuilderUtils")
self.edblib = __import__("EdbLib")
self.edbutils = edbbuilder.Ansoft.EdbBuilderUtils
self.simSetup = __import__("Ansys.Ansoft.SimSetupData")
self.layout_methods = self.edblib.Layout.LayoutMethods
self.simsetupdata = self.simSetup.Ansoft.SimSetupData.Data
@aedt_exception_handler
def open_edb(self, init_dlls=False):
"""Open EDB.
Parameters
----------
init_dlls : bool, optional
Whether to initialize DLLs. The default is ``False``.
Returns
-------
"""
if init_dlls:
self._init_dlls()
self._messenger.add_info_message("EDB Path {}".format(self.edbpath))
self._messenger.add_info_message("EDB Version {}".format(self.edbversion))
self.edb.Database.SetRunAsStandAlone(self.standalone)
self._messenger.add_info_message("EDB Standalone {}".format(self.standalone))
try:
db = self.edb.Database.Open(self.edbpath, self.isreadonly)
except Exception as e:
db = None
self._messenger.add_error_message("Builder is not Initialized.")
if not db:
self._messenger.add_warning_message("Error Opening db")
self._db = None
self._active_cell = None
self.builder = None
return None
self._db = db
self._messenger.add_info_message("Database Opened")
self._active_cell = None
if self.cellname:
for cell in list(self._db.TopCircuitCells):
if cell.GetName() == self.cellname:
self._active_cell = cell
# if self._active_cell is still None, set it to default cell
if self._active_cell is None:
self._active_cell = list(self._db.TopCircuitCells)[0]
self._messenger.add_info_message("Cell {} Opened".format(self._active_cell.GetName()))
if self._db and self._active_cell:
dllpath = os.path.join(os.path.abspath(os.path.dirname(__file__)), "dlls", "EDBLib", "DataModel.dll")
self._messenger.add_info_message(dllpath)
self.layout_methods.LoadDataModel(dllpath)
dllpath = os.path.join(os.path.abspath(os.path.dirname(__file__)), "dlls", "EDBLib",
"IPC_2581_DataModel.dll")
self.layout_methods.LoadDataModel(dllpath)
time.sleep(3)
self.builder = retry_ntimes(
10,
self.layout_methods.GetBuilder,
self._db,
self._active_cell,
self.edbpath,
self.edbversion,
self.standalone,
)
self._init_objects()
self._messenger.add_info_message("Builder Initialized")
else:
self.builder = None
self._messenger.add_error_message("Builder Not Initialized")
return self.builder
@aedt_exception_handler
def open_edb_inside_aedt(self, init_dlls=False):
"""Open EDB inside of AEDT.
Parameters
----------
init_dlls : bool, optional
Whether to initialize DLLs. The default is ``False``.
Returns
-------
"""
if init_dlls:
self._init_dlls()
self._messenger.add_info_message("Opening EDB from HDL")
self.edb.Database.SetRunAsStandAlone(False)
if self.oproject.GetEDBHandle():
hdl = Convert.ToUInt64(self.oproject.GetEDBHandle())
db = self.edb.Database.Attach(hdl)
if not db:
self._messenger.add_warning_message("Error Getting db")
self._db = None
self._active_cell = None
self.builder = None
return None
self._db = db
self._active_cell = self.edb.Cell.Cell.FindByName(
self.db, self.edb.Cell.CellType.CircuitCell, self.cellname
)
if self._active_cell is None:
self._active_cell = list(self._db.TopCircuitCells)[0]
dllpath = os.path.join(os.path.abspath(os.path.dirname(__file__)), "dlls", "EDBLib", "DataModel.dll")
if self._db and self._active_cell:
self.layout_methods.LoadDataModel(dllpath)
dllpath = os.path.join(os.path.abspath(os.path.dirname(__file__)), "dlls", "EDBLib",
"IPC_2581_DataModel.dll")
self.layout_methods.LoadDataModel(dllpath)
if not os.path.exists(self.edbpath):
os.makedirs(self.edbpath)
time.sleep(3)
self.builder = EdbBuilder(self.edbutils, self._db, self._active_cell)
# self.builder = retry_ntimes(
# 10,
# self.layout_methods.GetBuilder,
# self._db,
# self._active_cell,
# self.edbpath,
# self.edbversion,
# self.standalone,
# True
# )
self._init_objects()
return self.builder
else:
self.builder = None
return None
else:
self._db = None
self._active_cell = None
self.builder = None
return None
@aedt_exception_handler
def create_edb(self, init_dlls=False):
"""Create EDB.
Parameters
----------
init_dlls : bool, optional
Whether to initialize DLLs. The default is ``False``.
Returns
-------
"""
if init_dlls:
self._init_dlls()
self.edb.Database.SetRunAsStandAlone(self.standalone)
db = self.edb.Database.Create(self.edbpath)
if not db:
self._messenger.add_warning_message("Error Creating db")
self._db = None
self._active_cell = None
self.builder = None
return None
self._db = db
if not self.cellname:
self.cellname = generate_unique_name("Cell")
self._active_cell = self.edb.Cell.Cell.Create(self._db, self.edb.Cell.CellType.CircuitCell, self.cellname)
dllpath = os.path.join(os.path.dirname(__file__), "dlls", "EDBLib", "DataModel.dll")
if self._db and self._active_cell:
self.layout_methods.LoadDataModel(dllpath)
dllpath = os.path.join(os.path.abspath(os.path.dirname(__file__)), "dlls", "EDBLib",
"IPC_2581_DataModel.dll")
self.layout_methods.LoadDataModel(dllpath)
time.sleep(3)
self.builder = retry_ntimes(
10,
self.layout_methods.GetBuilder,
self._db,
self._active_cell,
self.edbpath,
self.edbversion,
self.standalone
)
self._init_objects()
return self.builder
self.builder = None
return None
@aedt_exception_handler
def import_layout_pcb(self, input_file, working_dir, init_dlls=False, anstranslator_full_path="", use_ppe=False):
"""Import a BRD file and generate an ``edb.def`` file in the working directory.
Parameters
----------
input_file : str
Full path to the BRD file.
working_dir : str
Directory in which to create the ``aedb`` folder. The AEDB file name will be the
same as the BRD file name.
init_dlls : bool
Whether to initialize DLLs. The default is ``False``.
anstranslator_full_path : str, optional
            Optional full path to the AnsTranslator executable.
        use_ppe : bool
            Whether to use the PPE license. The default is ``False``.
Returns
-------
str
Full path to the AEDB file.
"""
self._components = None
self._core_primitives = None
self._stackup = None
self._padstack = None
self._siwave = None
self._hfss = None
self._nets = None
self._db = None
if init_dlls:
self._init_dlls()
aedb_name = os.path.splitext(os.path.basename(input_file))[0] + ".aedb"
if anstranslator_full_path and os.path.exists(anstranslator_full_path):
command = anstranslator_full_path
else:
command = os.path.join(self.base_path, "anstranslator")
if os.name != "posix":
command += ".exe"
if not working_dir:
working_dir = os.path.dirname(input_file)
cmd_translator = command + " " + input_file + " " + os.path.join(working_dir, aedb_name)
cmd_translator += " -l=" + os.path.join(working_dir, "Translator.log")
if not use_ppe:
cmd_translator += " -ppe=false"
p = subprocess.Popen(cmd_translator)
p.wait()
if not os.path.exists(os.path.join(working_dir, aedb_name)):
self._messenger.add_error_message("Translator failed to translate.")
return False
self.edbpath = os.path.join(working_dir, aedb_name)
return self.open_edb()
@aedt_exception_handler
def export_to_ipc2581(self, ipc_path=None, units="millimeter"):
"""Create an XML IPC2581 File from active Edb.
.. note::
            This method is still under test and needs further debugging. Any feedback is welcome. Currently,
            backdrills and custom pads are not supported.
Parameters
----------
ipc_path : str, optional
            Path to the XML file. The default is ``None``, in which case the
            EDB path with an ``.xml`` extension is used.
        units : str, optional
            Units of the IPC2581 file. Options are ``millimeter``, ``inch``,
            and ``micron``. The default is ``millimeter``.
Returns
-------
        str or bool
            Full path to the XML file if successful, ``False`` otherwise.
"""
if units.lower() not in ["millimeter", "inch", "micron"]:
self._messenger.add_warning_message("Wrong unit entered. Setting default to millimiter")
units = "millimeter"
if not ipc_path:
ipc_path = self.edbpath[:-4]+"xml"
self._messenger.add_info_message("Export IPC 2581 is starting. This operation can take a while...")
start = time.time()
result = self.layout_methods.ExportIPC2581FromBuilder(self.builder, ipc_path, units.lower())
end = time.time() - start
if result:
self._messenger.add_info_message("Export IPC 2581 completed in {} sec.".format(end))
self._messenger.add_info_message("File saved in {}".format(ipc_path))
return ipc_path
self._messenger.add_info_message("Error Exporting IPC 2581.")
return False
def edb_exception(self, ex_value, tb_data):
"""Write the trace stack to AEDT when a Python error occurs.
Parameters
----------
ex_value :
tb_data :
Returns
-------
"""
tb_trace = traceback.format_tb(tb_data)
tblist = tb_trace[0].split("\n")
self._messenger.add_error_message(str(ex_value))
for el in tblist:
self._messenger.add_error_message(el)
@property
def db(self):
"""Db object."""
return self._db
@property
def active_cell(self):
"""Active cell."""
return self._active_cell
@property
def core_components(self):
"""Core components."""
if not self._components and self.builder:
self._components = Components(self)
return self._components
@property
def core_stackup(self):
"""Core stackup."""
if not self._stackup and self.builder:
self._stackup = EdbStackup(self)
return self._stackup
@property
def core_padstack(self):
"""Core padstack."""
if not self._padstack and self.builder:
self._padstack = EdbPadstacks(self)
return self._padstack
@property
def core_siwave(self):
"""Core SI Wave."""
if not self._siwave and self.builder:
self._siwave = EdbSiwave(self)
return self._siwave
@property
def core_hfss(self):
"""Core HFSS."""
if not self._hfss and self.builder:
self._hfss = Edb3DLayout(self)
return self._hfss
@property
def core_nets(self):
"""Core nets."""
if not self._nets and self.builder:
self._nets = EdbNets(self)
return self._nets
@property
def core_primitives(self):
"""Core primitives."""
if not self._core_primitives and self.builder:
self._core_primitives = EdbLayout(self)
return self._core_primitives
@property
def active_layout(self):
"""Active layout."""
if self._active_cell:
return self.active_cell.GetLayout()
return None
# @property
# def builder(self):
# return self.edbutils.HfssUtilities(self.edbpath)
@property
def pins(self):
"""Pins.
Returns
-------
list
List of all pins.
"""
pins = []
if self.core_components:
for el in self.core_components.components:
comp = self.edb.Cell.Hierarchy.Component.FindByName(self.active_layout, el)
temp = [
p
for p in comp.LayoutObjs
if p.GetObjType() == self.edb.Cell.LayoutObjType.PadstackInstance and p.IsLayoutPin()
]
pins += temp
return pins
class Boundaries:
"""Boundaries.
Parameters
----------
Port :
Pec :
RLC :
CurrentSource :
VoltageSource :
NexximGround :
NexximPort :
DcTerminal :
VoltageProbe :
"""
(Port, Pec, RLC, CurrentSource, VoltageSource, NexximGround, NexximPort, DcTerminal, VoltageProbe) = range(0, 9)
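    # Simple enum emulation: Port == 0, Pec == 1, ..., VoltageProbe == 8.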
@aedt_exception_handler
def edb_value(self, val):
"""EDB value.
Parameters
----------
val :
Returns
-------
"""
return self.edb.Utility.Value(val)
@aedt_exception_handler
def close_edb(self):
"""Close EDB.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
gc.collect()
self._db.Close()
self._clean_variables()
props = [a for a in dir(self) if not a.startswith("__")]
for a in props:
self.__dict__.pop(a, None)
gc.collect()
gc.collect()
return True
@aedt_exception_handler
def save_edb(self):
"""Save the EDB file.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
self._db.Save()
return True
@aedt_exception_handler
def save_edb_as(self, fname):
"""Save the EDB as another file.
Parameters
----------
fname : str
Name of the new file to save to.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
self._db.SaveAs(fname)
return True
@aedt_exception_handler
def execute(self, func):
"""Execute a function.
Parameters
----------
func : str
Function to execute.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
return self.edb.Utility.Command.Execute(func)
@aedt_exception_handler
def import_cadence_file(self, inputBrd, WorkDir=None, anstranslator_full_path="", use_ppe=False):
"""Import a BRD file and generate an ``edb.def`` file in the working directory.
Parameters
----------
inputBrd : str
Full path to the BRD file.
WorkDir : str
Directory in which to create the ``aedb`` folder. The AEDB file name will be
the same as the BRD file name. The default value is ``None``.
anstranslator_full_path : str, optional
Optional AnsTranslator full path.
use_ppe : bool
Whether to use or not PPE License. The default is ``False``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
if self.import_layout_pcb(
inputBrd, working_dir=WorkDir, anstranslator_full_path=anstranslator_full_path, use_ppe=use_ppe
):
return True
else:
return False
@aedt_exception_handler
def import_gds_file(self, inputGDS, WorkDir=None, anstranslator_full_path="", use_ppe=False):
"""Import a GDS file and generate an ``edb.def`` file in the working directory.
Parameters
----------
inputGDS : str
Full path to the GDS file.
WorkDir : str
Directory in which to create the ``aedb`` folder. The AEDB file name will be
the same as the GDS file name. The default value is ``None``.
anstranslator_full_path : str, optional
Optional AnsTranslator full path.
use_ppe : bool
Whether to use or not PPE License. The default is ``False``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
if self.import_layout_pcb(
inputGDS, working_dir=WorkDir, anstranslator_full_path=anstranslator_full_path, use_ppe=use_ppe
):
return True
else:
return False
def create_cutout(
self,
signal_list,
reference_list=["GND"],
extent_type="Conforming",
expansion_size=0.002,
use_round_corner=False,
output_aedb_path=None,
replace_design_with_cutout=True,
):
"""Create a cutout and save it to a new AEDB file.
Parameters
----------
signal_list : list
List of signal strings.
reference_list : list, optional
List of references to add. The default is ``["GND"]``.
extent_type : str, optional
Type of the extension. Options are ``"Conforming"`` and
``"Bounding"``. The default is ``"Conforming"``.
expansion_size : float, optional
Expansion size ratio in meters. The default is ``0.002``.
use_round_corner : bool, optional
Whether to use round corners. The default is ``False``.
output_aedb_path : str, optional
Full path and name for the new AEDB file.
replace_design_with_cutout : bool, optional
Whether to replace the design with the cutout. The default
is ``True``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
_signal_nets = []
# validate nets in layout
for _sig in signal_list:
_netobj = self.edb.Cell.Net.FindByName(self.active_layout, _sig)
_signal_nets.append(_netobj)
_ref_nets = []
# validate references in layout
for _ref in reference_list:
_netobj = self.edb.Cell.Net.FindByName(self.active_layout, _ref)
_ref_nets.append(_netobj)
from .edb_core.general import convert_py_list_to_net_list
        _netsClip = [
            self.edb.Cell.Net.FindByName(self.active_layout, _ref) for _ref in reference_list
        ]
_netsClip = convert_py_list_to_net_list(_netsClip)
net_signals = convert_py_list_to_net_list(_signal_nets)
if extent_type == "Conforming":
_poly = self.active_layout.GetExpandedExtentFromNets(
net_signals, self.edb.Geometry.ExtentType.Conforming, expansion_size, False, use_round_corner, 1
)
else:
_poly = self.active_layout.GetExpandedExtentFromNets(
net_signals, self.edb.Geometry.ExtentType.BoundingBox, expansion_size, False, use_round_corner, 1
)
# Create new cutout cell/design
_cutout = self.active_cell.CutOut(net_signals, _netsClip, _poly)
# The analysis setup(s) do not come over with the clipped design copy,
# so add the analysis setup(s) from the original here
for _setup in self.active_cell.SimulationSetups:
            # The name is an empty string when the setup comes from a copy, so it is set explicitly below.
_setup_name = _setup.GetName()
if "GetSimSetupInfo" in dir(_setup):
# setup is an Ansys.Ansoft.Edb.Utility.HFSSSimulationSetup object
_hfssSimSetupInfo = _setup.GetSimSetupInfo()
_hfssSimSetupInfo.Name = "HFSS Setup 1" # Set name of analysis setup
# Write the simulation setup info into the cell/design setup
_setup.SetSimSetupInfo(_hfssSimSetupInfo)
_cutout.AddSimulationSetup(_setup) # Add simulation setup to the cutout design
_dbCells = [_cutout]
        if replace_design_with_cutout:
self.active_cell.Delete()
else:
_dbCells.append(self.active_cell)
if output_aedb_path:
db2 = self.edb.Database.Create(output_aedb_path)
# Function input is the name of a .aedb folder inside which the edb.def will be created.
# Ex: 'D:/backedup/EDB/TEST PROJECTS/CUTOUT/N1.aedb'
_dbCells = convert_py_list_to_net_list(_dbCells)
db2.CopyCells(_dbCells) # Copies cutout cell/design to db2 project
_success = db2.Save()
self._db = db2
self.edbpath = output_aedb_path
self._active_cell = list(self._db.TopCircuitCells)[0]
self.builder = retry_ntimes(
10,
self.layout_methods.GetBuilder,
self._db,
self._active_cell,
self.edbpath,
self.edbversion,
self.standalone,
)
self._init_objects()
return True
@aedt_exception_handler
def write_export3d_option_config_file(self, path_to_output, config_dictionaries=None):
"""Write the options for a 3D export to a configuration file.
Parameters
----------
path_to_output : str
Full path to the configuration file where the 3D export options are to be saved.
config_dictionaries : dict, optional
"""
option_config = {
"UNITE_NETS": 1,
"ASSIGN_SOLDER_BALLS_AS_SOURCES": 0,
"Q3D_MERGE_SOURCES": 0,
"Q3D_MERGE_SINKS": 0,
"CREATE_PORTS_FOR_PWR_GND_NETS": 0,
"PORTS_FOR_PWR_GND_NETS": 0,
"GENERATE_TERMINALS": 0,
"SOLVE_CAPACITANCE": 0,
"SOLVE_DC_RESISTANCE": 0,
"SOLVE_DC_INDUCTANCE_RESISTANCE": 1,
"SOLVE_AC_INDUCTANCE_RESISTANCE": 0,
"CreateSources": 0,
"CreateSinks": 0,
"LAUNCH_Q3D": 0,
"LAUNCH_HFSS": 0,
}
if config_dictionaries:
for el, val in config_dictionaries.items():
option_config[el] = val
with open(os.path.join(path_to_output, "options.config"), "w") as f:
for el, val in option_config.items():
f.write(el + " " + str(val) + "\n")
return os.path.join(path_to_output, "options.config")
@aedt_exception_handler
def export_hfss(self, path_to_output, net_list=None, num_cores=None):
"""Export EDB to HFSS.
Parameters
----------
path_to_output : str
Full path and name for saving the AEDT file.
net_list : list, optional
List of nets to export if only certain ones are to be
included.
num_cores : int, optional
            Number of cores to use during the export.
Returns
-------
str
Full path to the AEDT file.
Examples
--------
>>> from pyaedt import Edb
>>> edb = Edb(edbpath=r"C:\temp\myproject.aedb", edbversion="2021.1")
>>> options_config = {'UNITE_NETS' : 1, 'LAUNCH_Q3D' : 0}
>>> edb.write_export3d_option_config_file(r"C:\temp", options_config)
>>> edb.export_hfss(r"C:\temp")
"C:\\temp\\hfss_siwave.aedt"
"""
siwave_s = SiwaveSolve(self.edbpath, aedt_installer_path=self.base_path)
return siwave_s.export_3d_cad("HFSS", path_to_output, net_list, num_cores)
@aedt_exception_handler
def export_q3d(self, path_to_output, net_list=None, num_cores=None):
"""Export EDB to Q3D.
Parameters
----------
path_to_output : str
Full path and name for saving the AEDT file.
net_list : list, optional
            List of nets to export if only certain ones are to be
            included.
num_cores : int, optional
            Number of cores to use during the export.
Returns
-------
str
Path to the AEDT file.
Examples
--------
>>> from pyaedt import Edb
>>> edb = Edb(edbpath=r"C:\temp\myproject.aedb", edbversion="2021.1")
>>> options_config = {'UNITE_NETS' : 1, 'LAUNCH_Q3D' : 0}
>>> edb.write_export3d_option_config_file(r"C:\temp", options_config)
>>> edb.export_q3d(r"C:\temp")
"C:\\temp\\q3d_siwave.aedt"
"""
siwave_s = SiwaveSolve(self.edbpath, aedt_installer_path=self.base_path)
return siwave_s.export_3d_cad("Q3D", path_to_output, net_list, num_cores=num_cores)
@aedt_exception_handler
def export_maxwell(self, path_to_output, net_list=None, num_cores=None):
"""Export EDB to Maxwell 3D.
Parameters
----------
path_to_output : str
Full path and name for saving the AEDT file.
net_list : list, optional
            List of nets to export if only certain ones are to be
            included.
num_cores : int, optional
            Number of cores to use during the export.
Returns
-------
str
Path to the AEDT file.
Examples
--------
>>> from pyaedt import Edb
>>> edb = Edb(edbpath=r"C:\temp\myproject.aedb", edbversion="2021.1")
>>> options_config = {'UNITE_NETS' : 1, 'LAUNCH_Q3D' : 0}
>>> edb.write_export3d_option_config_file(r"C:\temp", options_config)
>>> edb.export_maxwell(r"C:\temp")
"C:\\temp\\maxwell_siwave.aedt"
"""
siwave_s = SiwaveSolve(self.edbpath, aedt_installer_path=self.base_path)
return siwave_s.export_3d_cad("Maxwell", path_to_output, net_list, num_cores=num_cores)
@aedt_exception_handler
def solve_siwave(self):
"""Close Edb and Solves it with Siwave.
Returns
-------
bool
"""
process = SiwaveSolve(self.edbpath, aedt_version=self.edbversion)
try:
self._db.Close()
except:
pass
process.solve()
return True
@aedt_exception_handler
def add_design_variable(self, variable_name, variable_value):
"""Add a Design Variable.
Parameters
----------
variable_name : str
Name of the variable
variable_value : str, float
Value of the variable with units.
Returns
-------
tuple
            Tuple containing the ``AddVariable`` result and the variable server.
"""
var_server = self.active_cell.GetVariableServer()
variables = var_server.GetAllVariableNames()
if variable_name in list(variables):
self._messenger.add_warning_message("Parameter {} exists. Using it.".format(variable_name))
return False, var_server
else:
self._messenger.add_info_message("Creating Parameter {}.".format(variable_name))
var_server.AddVariable(variable_name, self.edb_value(variable_value), True)
return True, var_server
| [
"traceback.format_tb",
"time.sleep",
"pyaedt.edb_core.EdbNets",
"pyaedt.generic.process.SiwaveSolve",
"sys.path.append",
"pyaedt.edb_core.EdbPadstacks",
"os.path.exists",
"pyaedt.edb_core.EdbLayout",
"clr.AddReference",
"warnings.warn",
"pyaedt.generic.general_methods.env_path_student",
"pyaedt.edb_core.EDB_Data.EdbBuilder",
"os.path.expanduser",
"pyaedt.edb_core.EdbSiwave",
"clr.ClearProfilerData",
"pyaedt.edb_core.EdbStackup",
"pyaedt.generic.general_methods.env_value",
"clr.AddReferenceToFile",
"subprocessdotnet.Popen",
"os.path.dirname",
"gc.collect",
"time.time",
"pyaedt.retry_ntimes",
"pyaedt.edb_core.Components",
"os.getenv",
"os.makedirs",
"pyaedt.generic.general_methods.generate_unique_name",
"os.path.join",
"pyaedt.generic.general_methods.env_path",
"pyaedt.application.MessageManager.EDBMessageManager",
"os.path.basename",
"pyaedt.edb_core.Edb3DLayout"
] | [((986, 1115), 'warnings.warn', 'warnings.warn', (['"""The clr is missing. Install Python.NET or use an IronPython version if you want to use the EDB module."""'], {}), "(\n 'The clr is missing. Install Python.NET or use an IronPython version if you want to use the EDB module.'\n )\n", (999, 1115), False, 'import warnings\n'), ((6408, 6420), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6418, 6420), False, 'import gc\n'), ((6429, 6441), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6439, 6441), False, 'import gc\n'), ((6508, 6521), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6518, 6521), False, 'import time\n'), ((6549, 6565), 'pyaedt.edb_core.Components', 'Components', (['self'], {}), '(self)\n', (6559, 6565), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((6590, 6606), 'pyaedt.edb_core.EdbStackup', 'EdbStackup', (['self'], {}), '(self)\n', (6600, 6606), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((6632, 6650), 'pyaedt.edb_core.EdbPadstacks', 'EdbPadstacks', (['self'], {}), '(self)\n', (6644, 6650), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((6674, 6689), 'pyaedt.edb_core.EdbSiwave', 'EdbSiwave', (['self'], {}), '(self)\n', (6683, 6689), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((6711, 6728), 'pyaedt.edb_core.Edb3DLayout', 'Edb3DLayout', (['self'], {}), '(self)\n', (6722, 6728), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((6750, 6763), 'pyaedt.edb_core.EdbNets', 'EdbNets', (['self'], {}), '(self)\n', (6757, 6763), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((6796, 6811), 'pyaedt.edb_core.EdbLayout', 'EdbLayout', (['self'], {}), '(self)\n', (6805, 6811), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((10606, 10650), 'os.path.join', 'os.path.join', (['self.base_path', '"""common"""', '"""oa"""'], {}), "(self.base_path, 'common', 'oa')\n", (10618, 10650), False, 'import os\n'), ((19917, 19949), 'subprocessdotnet.Popen', 'subprocess.Popen', (['cmd_translator'], {}), '(cmd_translator)\n', (19933, 19949), True, 'import subprocessdotnet as subprocess\n'), ((20165, 20201), 'os.path.join', 'os.path.join', (['working_dir', 'aedb_name'], {}), '(working_dir, aedb_name)\n', (20177, 20201), False, 'import os\n'), ((21299, 21310), 'time.time', 'time.time', ([], {}), '()\n', (21308, 21310), False, 'import time\n'), ((22024, 22052), 'traceback.format_tb', 'traceback.format_tb', (['tb_data'], {}), '(tb_data)\n', (22043, 22052), False, 'import traceback\n'), ((25480, 25492), 'gc.collect', 'gc.collect', ([], {}), '()\n', (25490, 25492), False, 'import gc\n'), ((25688, 25700), 'gc.collect', 'gc.collect', ([], {}), '()\n', (25698, 25700), False, 'import gc\n'), ((25709, 25721), 'gc.collect', 'gc.collect', ([], {}), '()\n', (25719, 25721), False, 'import gc\n'), ((34632, 34678), 'os.path.join', 'os.path.join', (['path_to_output', '"""options.config"""'], {}), "(path_to_output, 'options.config')\n", (34644, 34678), False, 'import os\n'), ((35639, 35700), 'pyaedt.generic.process.SiwaveSolve', 'SiwaveSolve', (['self.edbpath'], {'aedt_installer_path': 
'self.base_path'}), '(self.edbpath, aedt_installer_path=self.base_path)\n', (35650, 35700), False, 'from pyaedt.generic.process import SiwaveSolve\n'), ((36726, 36787), 'pyaedt.generic.process.SiwaveSolve', 'SiwaveSolve', (['self.edbpath'], {'aedt_installer_path': 'self.base_path'}), '(self.edbpath, aedt_installer_path=self.base_path)\n', (36737, 36787), False, 'from pyaedt.generic.process import SiwaveSolve\n'), ((37840, 37901), 'pyaedt.generic.process.SiwaveSolve', 'SiwaveSolve', (['self.edbpath'], {'aedt_installer_path': 'self.base_path'}), '(self.edbpath, aedt_installer_path=self.base_path)\n', (37851, 37901), False, 'from pyaedt.generic.process import SiwaveSolve\n'), ((38179, 38234), 'pyaedt.generic.process.SiwaveSolve', 'SiwaveSolve', (['self.edbpath'], {'aedt_version': 'self.edbversion'}), '(self.edbpath, aedt_version=self.edbversion)\n', (38190, 38234), False, 'from pyaedt.generic.process import SiwaveSolve\n'), ((791, 854), 'warnings.warn', 'warnings.warn', (['"""Pythonnet is needed to run pyaedt within Linux"""'], {}), "('Pythonnet is needed to run pyaedt within Linux')\n", (804, 854), False, 'import warnings\n'), ((5480, 5522), 'warnings.warn', 'warnings.warn', (['"""Failed to initialize Dlls"""'], {}), "('Failed to initialize Dlls')\n", (5493, 5522), False, 'import warnings\n'), ((6376, 6399), 'clr.ClearProfilerData', 'clr.ClearProfilerData', ([], {}), '()\n', (6397, 6399), False, 'import clr\n'), ((9704, 9750), 'clr.AddReferenceToFile', 'clr.AddReferenceToFile', (['"""Ansys.Ansoft.Edb.dll"""'], {}), "('Ansys.Ansoft.Edb.dll')\n", (9726, 9750), False, 'import clr\n'), ((9763, 9821), 'clr.AddReferenceToFile', 'clr.AddReferenceToFile', (['"""Ansys.Ansoft.EdbBuilderUtils.dll"""'], {}), "('Ansys.Ansoft.EdbBuilderUtils.dll')\n", (9785, 9821), False, 'import clr\n'), ((9834, 9870), 'clr.AddReferenceToFile', 'clr.AddReferenceToFile', (['"""EdbLib.dll"""'], {}), "('EdbLib.dll')\n", (9856, 9870), False, 'import clr\n'), ((9883, 9922), 'clr.AddReferenceToFile', 'clr.AddReferenceToFile', (['"""DataModel.dll"""'], {}), "('DataModel.dll')\n", (9905, 9922), False, 'import clr\n'), ((10235, 10266), 'sys.path.append', 'sys.path.append', (['self.base_path'], {}), '(self.base_path)\n', (10250, 10266), False, 'import sys\n'), ((10279, 10315), 'clr.AddReference', 'clr.AddReference', (['"""Ansys.Ansoft.Edb"""'], {}), "('Ansys.Ansoft.Edb')\n", (10295, 10315), False, 'import clr\n'), ((10328, 10376), 'clr.AddReference', 'clr.AddReference', (['"""Ansys.Ansoft.EdbBuilderUtils"""'], {}), "('Ansys.Ansoft.EdbBuilderUtils')\n", (10344, 10376), False, 'import clr\n'), ((10389, 10415), 'clr.AddReference', 'clr.AddReference', (['"""EdbLib"""'], {}), "('EdbLib')\n", (10405, 10415), False, 'import clr\n'), ((10428, 10457), 'clr.AddReference', 'clr.AddReference', (['"""DataModel"""'], {}), "('DataModel')\n", (10444, 10457), False, 'import clr\n'), ((10470, 10515), 'clr.AddReference', 'clr.AddReference', (['"""Ansys.Ansoft.SimSetupData"""'], {}), "('Ansys.Ansoft.SimSetupData')\n", (10486, 10515), False, 'import clr\n'), ((13333, 13346), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (13343, 13346), False, 'import time\n'), ((13374, 13504), 'pyaedt.retry_ntimes', 'retry_ntimes', (['(10)', 'self.layout_methods.GetBuilder', 'self._db', 'self._active_cell', 'self.edbpath', 'self.edbversion', 'self.standalone'], {}), '(10, self.layout_methods.GetBuilder, self._db, self.\n _active_cell, self.edbpath, self.edbversion, self.standalone)\n', (13386, 13504), False, 'from pyaedt import retry_ntimes\n'), ((17071, 17099), 
'pyaedt.generic.general_methods.generate_unique_name', 'generate_unique_name', (['"""Cell"""'], {}), "('Cell')\n", (17091, 17099), False, 'from pyaedt.generic.general_methods import aedt_exception_handler, env_path, env_path_student, env_value, generate_unique_name\n'), ((17246, 17271), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (17261, 17271), False, 'import os\n'), ((17631, 17644), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (17641, 17644), False, 'import time\n'), ((17672, 17802), 'pyaedt.retry_ntimes', 'retry_ntimes', (['(10)', 'self.layout_methods.GetBuilder', 'self._db', 'self._active_cell', 'self.edbpath', 'self.edbversion', 'self.standalone'], {}), '(10, self.layout_methods.GetBuilder, self._db, self.\n _active_cell, self.edbpath, self.edbversion, self.standalone)\n', (17684, 17802), False, 'from pyaedt import retry_ntimes\n'), ((19341, 19380), 'os.path.exists', 'os.path.exists', (['anstranslator_full_path'], {}), '(anstranslator_full_path)\n', (19355, 19380), False, 'import os\n'), ((19464, 19509), 'os.path.join', 'os.path.join', (['self.base_path', '"""anstranslator"""'], {}), "(self.base_path, 'anstranslator')\n", (19476, 19509), False, 'import os\n'), ((19633, 19660), 'os.path.dirname', 'os.path.dirname', (['input_file'], {}), '(input_file)\n', (19648, 19660), False, 'import os\n'), ((19721, 19757), 'os.path.join', 'os.path.join', (['working_dir', 'aedb_name'], {}), '(working_dir, aedb_name)\n', (19733, 19757), False, 'import os\n'), ((19793, 19836), 'os.path.join', 'os.path.join', (['working_dir', '"""Translator.log"""'], {}), "(working_dir, 'Translator.log')\n", (19805, 19836), False, 'import os\n'), ((21426, 21437), 'time.time', 'time.time', ([], {}), '()\n', (21435, 21437), False, 'import time\n'), ((22569, 22585), 'pyaedt.edb_core.Components', 'Components', (['self'], {}), '(self)\n', (22579, 22585), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((22764, 22780), 'pyaedt.edb_core.EdbStackup', 'EdbStackup', (['self'], {}), '(self)\n', (22774, 22780), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((22960, 22978), 'pyaedt.edb_core.EdbPadstacks', 'EdbPadstacks', (['self'], {}), '(self)\n', (22972, 22978), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((23152, 23167), 'pyaedt.edb_core.EdbSiwave', 'EdbSiwave', (['self'], {}), '(self)\n', (23161, 23167), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((23330, 23347), 'pyaedt.edb_core.Edb3DLayout', 'Edb3DLayout', (['self'], {}), '(self)\n', (23341, 23347), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((23508, 23521), 'pyaedt.edb_core.EdbNets', 'EdbNets', (['self'], {}), '(self)\n', (23515, 23521), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((23716, 23731), 'pyaedt.edb_core.EdbLayout', 'EdbLayout', (['self'], {}), '(self)\n', (23725, 23731), False, 'from pyaedt.edb_core import Components, EdbNets, EdbPadstacks, EdbLayout, Edb3DLayout, EdbSiwave, EdbStackup\n'), ((32991, 33121), 'pyaedt.retry_ntimes', 'retry_ntimes', (['(10)', 'self.layout_methods.GetBuilder', 'self._db', 'self._active_cell', 'self.edbpath', 'self.edbversion', 
'self.standalone'], {}), '(10, self.layout_methods.GetBuilder, self._db, self.\n _active_cell, self.edbpath, self.edbversion, self.standalone)\n', (33003, 33121), False, 'from pyaedt import retry_ntimes\n'), ((9140, 9165), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9155, 9165), False, 'import os\n'), ((9232, 9258), 'pyaedt.generic.general_methods.env_value', 'env_value', (['self.edbversion'], {}), '(self.edbversion)\n', (9241, 9258), False, 'from pyaedt.generic.general_methods import aedt_exception_handler, env_path, env_path_student, env_value, generate_unique_name\n'), ((9307, 9332), 'pyaedt.generic.general_methods.env_path', 'env_path', (['self.edbversion'], {}), '(self.edbversion)\n', (9315, 9332), False, 'from pyaedt.generic.general_methods import aedt_exception_handler, env_path, env_path_student, env_value, generate_unique_name\n'), ((9349, 9380), 'sys.path.append', 'sys.path.append', (['self.base_path'], {}), '(self.base_path)\n', (9364, 9380), False, 'import sys\n'), ((9965, 10026), 'os.path.join', 'os.path.join', (['self.base_path', '"""Ansys.Ansoft.SimSetupData.dll"""'], {}), "(self.base_path, 'Ansys.Ansoft.SimSetupData.dll')\n", (9977, 10026), False, 'import os\n'), ((10112, 10145), 'pyaedt.generic.general_methods.env_path_student', 'env_path_student', (['self.edbversion'], {}), '(self.edbversion)\n', (10128, 10145), False, 'from pyaedt.generic.general_methods import aedt_exception_handler, env_path, env_path_student, env_value, generate_unique_name\n'), ((10197, 10222), 'pyaedt.generic.general_methods.env_path', 'env_path', (['self.edbversion'], {}), '(self.edbversion)\n', (10205, 10222), False, 'from pyaedt.generic.general_methods import aedt_exception_handler, env_path, env_path_student, env_value, generate_unique_name\n'), ((15584, 15597), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (15594, 15597), False, 'import time\n'), ((15629, 15683), 'pyaedt.edb_core.EDB_Data.EdbBuilder', 'EdbBuilder', (['self.edbutils', 'self._db', 'self._active_cell'], {}), '(self.edbutils, self._db, self._active_cell)\n', (15639, 15683), False, 'from pyaedt.edb_core.EDB_Data import EdbBuilder\n'), ((19997, 20033), 'os.path.join', 'os.path.join', (['working_dir', 'aedb_name'], {}), '(working_dir, aedb_name)\n', (20009, 20033), False, 'import os\n'), ((34456, 34502), 'os.path.join', 'os.path.join', (['path_to_output', '"""options.config"""'], {}), "(path_to_output, 'options.config')\n", (34468, 34502), False, 'import os\n'), ((3146, 3165), 'pyaedt.application.MessageManager.EDBMessageManager', 'EDBMessageManager', ([], {}), '()\n', (3163, 3165), False, 'from pyaedt.application.MessageManager import EDBMessageManager\n'), ((3187, 3210), 'os.path.exists', 'os.path.exists', (['edbpath'], {}), '(edbpath)\n', (3201, 3210), False, 'import os\n'), ((3809, 3833), 'os.getenv', 'os.getenv', (['"""USERPROFILE"""'], {}), "('USERPROFILE')\n", (3818, 3833), False, 'import os\n'), ((4087, 4104), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (4096, 4104), False, 'import os\n'), ((4679, 4703), 'os.path.dirname', 'os.path.dirname', (['edbpath'], {}), '(edbpath)\n', (4694, 4703), False, 'import os\n'), ((12935, 12960), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (12950, 12960), False, 'import os\n'), ((13158, 13183), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (13173, 13183), False, 'import os\n'), ((15075, 15100), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (15090, 15100), 
False, 'import os\n'), ((15492, 15520), 'os.path.exists', 'os.path.exists', (['self.edbpath'], {}), '(self.edbpath)\n', (15506, 15520), False, 'import os\n'), ((15542, 15567), 'os.makedirs', 'os.makedirs', (['self.edbpath'], {}), '(self.edbpath)\n', (15553, 15567), False, 'import os\n'), ((17457, 17482), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (17472, 17482), False, 'import os\n'), ((19259, 19287), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (19275, 19287), False, 'import os\n'), ((3083, 3106), 'os.path.exists', 'os.path.exists', (['edbpath'], {}), '(edbpath)\n', (3097, 3106), False, 'import os\n'), ((3904, 3927), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (3922, 3927), False, 'import os\n'), ((4175, 4198), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (4193, 4198), False, 'import os\n'), ((9647, 9673), 'pyaedt.generic.general_methods.env_value', 'env_value', (['self.edbversion'], {}), '(self.edbversion)\n', (9656, 9673), False, 'from pyaedt.generic.general_methods import aedt_exception_handler, env_path, env_path_student, env_value, generate_unique_name\n'), ((15299, 15324), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (15314, 15324), False, 'import os\n'), ((3268, 3292), 'os.path.dirname', 'os.path.dirname', (['edbpath'], {}), '(edbpath)\n', (3283, 3292), False, 'import os\n'), ((3993, 4023), 'pyaedt.generic.general_methods.generate_unique_name', 'generate_unique_name', (['"""layout"""'], {}), "('layout')\n", (4013, 4023), False, 'from pyaedt.generic.general_methods import aedt_exception_handler, env_path, env_path_student, env_value, generate_unique_name\n'), ((4251, 4281), 'pyaedt.generic.general_methods.generate_unique_name', 'generate_unique_name', (['"""layout"""'], {}), "('layout')\n", (4271, 4281), False, 'from pyaedt.generic.general_methods import aedt_exception_handler, env_path, env_path_student, env_value, generate_unique_name\n'), ((4981, 5018), 'os.path.join', 'os.path.join', (['self.edbpath', '"""edb.def"""'], {}), "(self.edbpath, 'edb.def')\n", (4993, 5018), False, 'import os\n')] |
import datetime
import time
import praw
import discord
from prawcore import NotFound
from jshbot import utilities, plugins, configurations, data, logger, parser
from jshbot.exceptions import ConfiguredBotException, BotException
from jshbot.commands import Command, SubCommand, Shortcut, ArgTypes, Arg, Opt, Response
__version__ = '0.1.5'
CBException = ConfiguredBotException('/r/Furry Discord plugin')
CBException_vc = ConfiguredBotException('Verification checker')
uses_configuration = True
REDDIT_CLIENT = None
WARN_COMMANDS = ('.warn ', '.w ')
@plugins.command_spawner
def get_commands(bot):
return [Command(
'verification', subcommands=[
SubCommand(
Opt('check'),
Arg('user', argtype=ArgTypes.MERGED_OPTIONAL,
quotes_recommended=False, convert=utilities.MemberConverter()),
doc='Checks if the given user qualifies for verification.',
function=verification_check),
SubCommand(
Opt('karma'),
Arg('user', argtype=ArgTypes.MERGED, quotes_recommended=False),
doc='Checks for karma on the r/furry subreddit.',
elevated_level=1, function=verification_karma),
SubCommand(
Opt('set'),
Arg('role', argtype=ArgTypes.MERGED, quotes_recommended=False,
convert=utilities.RoleConverter()),
doc='Sets the verification role.',
elevated_level=1, function=verification_set)],
shortcuts=[
Shortcut('vc', 'check {user}', Arg('user', argtype=ArgTypes.MERGED_OPTIONAL)),
Shortcut('vk', 'karma {user}', Arg('user', argtype=ArgTypes.MERGED))],
description='A set of tools for the r/furry Discord server.',
allow_direct=False, category='tools')]
async def _check_reddit_karma(bot, username):
"""Returns the amount of karma on the r/furry the given user has."""
verification_period = configurations.get(bot, __name__, 'verification_period')
time_threshold = time.time() - (verification_period * 86400)
async def _get_next(generator):
def _f():
try:
while True:
check = next(generator)
if check.created_utc <= time_threshold:
return check
except StopIteration:
return None
except NotFound:
raise CBException_vc("User u/{} not found.".format(username))
except Exception as e:
raise CBException_vc("Failed to retrieve data.", e=e)
return await utilities.future(_f)
# Submission karma using built-in search
submission_limit = configurations.get(bot, __name__, 'submission_karma_limit')
submissions = REDDIT_CLIENT.subreddit('furry').search('author:{}'.format(username))
submission_karma = 0
while True:
submission = await _get_next(submissions)
submission_karma += submission.score if submission else 0
if not submission or submission_karma > submission_limit:
break
# Comment karma
comment_limit = configurations.get(bot, __name__, 'comment_karma_limit')
comments = REDDIT_CLIENT.redditor(username).comments.new(limit=None)
comment_karma = 0
while True:
comment = await _get_next(comments)
if comment and comment.subreddit.display_name == 'furry':
            comment_karma += comment.score
if not comment or comment_karma > comment_limit:
break
return (submission_karma, comment_karma)
def _member_age(member):
"""Returns a string with the number of days a member has been on a server."""
    age = (datetime.datetime.utcnow() - member.joined_at).days  # joined_at is naive UTC
plural = '' if age == 1 else 's'
return "{} day{}".format(age, plural)
def _get_verified_role(bot, guild, member=None):
"""Checks for the verified role and returns it unless the member has the role."""
role_id = data.get(bot, __name__, 'verification_role', guild_id=guild.id)
verified_role = data.get_role(bot, role_id, guild=guild, safe=True)
    if not role_id or not verified_role:
raise CBException_vc("The verified role has not been set.")
if member and verified_role in member.roles:
raise CBException_vc(
"{} already has the role {} (member for {}).".format(
member.mention, verified_role.mention, _member_age(member)))
return verified_role
async def verification_check(bot, context):
"""Checks if the given user qualifies for verification by date alone."""
member = context.arguments[0] or context.author
verified_role = _get_verified_role(bot, context.guild, member=member)
verification_period = configurations.get(bot, __name__, 'verification_period')
# Check that the user has been here for at least the verification period
    age = (datetime.datetime.utcnow() - member.joined_at).days  # joined_at is naive UTC
if age >= verification_period:
indicator = ':warning:'
qualifies = 'meets the minimum time qualification'
color = discord.Color(0xffcc4d)
else:
indicator = ':x:'
qualifies = 'needs to be a member for at least {} days'.format(verification_period)
color = discord.Color(0xdd2e44)
after_datetime = member.joined_at - datetime.timedelta(days=1)
hint = (
"\n\n:warning: __**Activity qualifications must also be met**__ :warning:\n"
"A moderator will have to confirm this manually. "
"To check for message activity, use the following search "
"parameters to see messages since the member has last joined the server:\n\n"
"```\nfrom: {} after: {}\n```"
).format(member.id, after_datetime.strftime('%Y-%m-%d'))
description = '{} Member for {}\n{} {} for {}{}'.format(
indicator, _member_age(member), member.mention,
qualifies, verified_role.mention, hint)
return Response(embed=discord.Embed(description=description, color=color))
async def verification_set(bot, context):
"""Sets the verification role."""
role = context.arguments[0]
data.add(bot, __name__, 'verification_role', role.id, guild_id=context.guild.id)
return Response(embed=discord.Embed(
description='Verification role set to {}'.format(role.mention)))
async def verification_karma(bot, context):
"""Checks if the given user has karma in the r/furry subreddit."""
verified_role = _get_verified_role(bot, context.guild)
karma = await _check_reddit_karma(bot, context.arguments[0])
threshold = configurations.get(bot, __name__, 'karma_threshold')
# Replace karma amount with limits if it is past that
limits = [
configurations.get(bot, __name__, 'submission_karma_limit'),
configurations.get(bot, __name__, 'comment_karma_limit')
]
zipped = zip(karma, limits)
karma_strings = [(str(it) if it <= ts else (str(ts) + '+')) for it, ts in zipped]
if sum(karma) >= threshold:
response = ':white_check_mark: {} submission karma, {} comment karma'
qualifies = 'qualifies'
color = discord.Color(0x77b255)
else:
response = ':x: {} submission karma, {} comment karma'
qualifies = 'needs at least {} combined karma'.format(threshold)
color = discord.Color(0xdd2e44)
description = '{0}\n[u/{1}](https://www.reddit.com/user/{1}) {2} for {3}'.format(
response.format(*karma_strings), context.arguments[0], qualifies, verified_role.mention)
return Response(embed=discord.Embed(description=description, color=color))
@plugins.listen_for('bot_on_ready_boot')
async def create_reddit_client(bot):
global REDDIT_CLIENT
credential_data = configurations.get(bot, __name__)
REDDIT_CLIENT = praw.Reddit(
client_id=credential_data['reddit_client_id'],
client_secret=credential_data['reddit_client_secret'],
user_agent=credential_data['reddit_user_agent'])
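    # Redact the credentials from the in-memory configuration once the client holds them.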
configurations.redact(bot, __name__, 'reddit_client_id')
configurations.redact(bot, __name__, 'reddit_client_secret')
configurations.redact(bot, __name__, 'reddit_user_agent')
@plugins.listen_for('on_message')
async def check_warns(bot, message):
"""Checks for issued warnings."""
if (not message.guild or
message.guild.id != configurations.get(bot, __name__, 'guild_id') or
not message.content.lower().startswith(WARN_COMMANDS) or
not data.is_mod(bot, member=message.author) or
'autolog.py' not in bot.plugins):
return
split, quotes = parser.split_parameters(message.content, include_quotes=True, quote_list=True)
if len(split) < 3: # Not enough arguments
return
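    # Strip surrounding quotes from the target name if the parser preserved them.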
name = split[2][1:-1] if 2 in quotes else split[2]
member = await data.fetch_member(bot, name, guild=message.guild, safe=True, strict=True)
if not member or data.is_mod(bot, member=member): # Member not found or is another mod
return
details = '{0} (<@{0.id}>) was warned by {1} (<@{1.id}>): {2}'.format(
member, message.author, ''.join(split[4:]) or "No warn reason given")
await bot.plugins['autolog.py'].automated_dump_message(
bot, message.guild, details, query=member.id, moderator_id=message.author.id)
@plugins.listen_for('on_message')
async def death_response(bot, message):
"""Adds an emoji reaction to a Minecraft death."""
configuration = configurations.get(bot, __name__)
if (message.channel.id == configuration['minecraft_channel_id'] and
message.author.id == configuration['admin_bot_id'] and
message.content.startswith('**') and
message.content.endswith('**')):
await message.add_reaction(bot.get_emoji(configuration['death_reaction_id']))
| [
"jshbot.data.is_mod",
"jshbot.configurations.get",
"jshbot.exceptions.ConfiguredBotException",
"datetime.timedelta",
"jshbot.commands.Opt",
"jshbot.commands.Arg",
"jshbot.plugins.listen_for",
"jshbot.configurations.redact",
"jshbot.data.get_role",
"discord.Embed",
"jshbot.data.fetch_member",
"jshbot.data.add",
"discord.Color",
"jshbot.utilities.future",
"time.time",
"jshbot.parser.split_parameters",
"jshbot.data.get",
"jshbot.utilities.MemberConverter",
"datetime.datetime.now",
"praw.Reddit",
"jshbot.utilities.RoleConverter"
] | [((355, 404), 'jshbot.exceptions.ConfiguredBotException', 'ConfiguredBotException', (['"""/r/Furry Discord plugin"""'], {}), "('/r/Furry Discord plugin')\n", (377, 404), False, 'from jshbot.exceptions import ConfiguredBotException, BotException\n'), ((422, 468), 'jshbot.exceptions.ConfiguredBotException', 'ConfiguredBotException', (['"""Verification checker"""'], {}), "('Verification checker')\n", (444, 468), False, 'from jshbot.exceptions import ConfiguredBotException, BotException\n'), ((7690, 7729), 'jshbot.plugins.listen_for', 'plugins.listen_for', (['"""bot_on_ready_boot"""'], {}), "('bot_on_ready_boot')\n", (7708, 7729), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((8247, 8279), 'jshbot.plugins.listen_for', 'plugins.listen_for', (['"""on_message"""'], {}), "('on_message')\n", (8265, 8279), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((9374, 9406), 'jshbot.plugins.listen_for', 'plugins.listen_for', (['"""on_message"""'], {}), "('on_message')\n", (9392, 9406), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((2007, 2063), 'jshbot.configurations.get', 'configurations.get', (['bot', '__name__', '"""verification_period"""'], {}), "(bot, __name__, 'verification_period')\n", (2025, 2063), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((2794, 2853), 'jshbot.configurations.get', 'configurations.get', (['bot', '__name__', '"""submission_karma_limit"""'], {}), "(bot, __name__, 'submission_karma_limit')\n", (2812, 2853), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((3224, 3280), 'jshbot.configurations.get', 'configurations.get', (['bot', '__name__', '"""comment_karma_limit"""'], {}), "(bot, __name__, 'comment_karma_limit')\n", (3242, 3280), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((4083, 4146), 'jshbot.data.get', 'data.get', (['bot', '__name__', '"""verification_role"""'], {'guild_id': 'guild.id'}), "(bot, __name__, 'verification_role', guild_id=guild.id)\n", (4091, 4146), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((4167, 4218), 'jshbot.data.get_role', 'data.get_role', (['bot', 'role_id'], {'guild': 'guild', 'safe': '(True)'}), '(bot, role_id, guild=guild, safe=True)\n', (4180, 4218), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((4848, 4904), 'jshbot.configurations.get', 'configurations.get', (['bot', '__name__', '"""verification_period"""'], {}), "(bot, __name__, 'verification_period')\n", (4866, 4904), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((6218, 6303), 'jshbot.data.add', 'data.add', (['bot', '__name__', '"""verification_role"""', 'role.id'], {'guild_id': 'context.guild.id'}), "(bot, __name__, 'verification_role', role.id, guild_id=context.guild.id\n )\n", (6226, 6303), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((6670, 6722), 'jshbot.configurations.get', 'configurations.get', (['bot', '__name__', '"""karma_threshold"""'], {}), "(bot, __name__, 'karma_threshold')\n", (6688, 6722), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((7814, 7847), 'jshbot.configurations.get', 'configurations.get', (['bot', '__name__'], {}), '(bot, __name__)\n', (7832, 7847), False, 'from jshbot import utilities, plugins, 
configurations, data, logger, parser\n'), ((7868, 8040), 'praw.Reddit', 'praw.Reddit', ([], {'client_id': "credential_data['reddit_client_id']", 'client_secret': "credential_data['reddit_client_secret']", 'user_agent': "credential_data['reddit_user_agent']"}), "(client_id=credential_data['reddit_client_id'], client_secret=\n credential_data['reddit_client_secret'], user_agent=credential_data[\n 'reddit_user_agent'])\n", (7879, 8040), False, 'import praw\n'), ((8060, 8116), 'jshbot.configurations.redact', 'configurations.redact', (['bot', '__name__', '"""reddit_client_id"""'], {}), "(bot, __name__, 'reddit_client_id')\n", (8081, 8116), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((8121, 8181), 'jshbot.configurations.redact', 'configurations.redact', (['bot', '__name__', '"""reddit_client_secret"""'], {}), "(bot, __name__, 'reddit_client_secret')\n", (8142, 8181), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((8186, 8243), 'jshbot.configurations.redact', 'configurations.redact', (['bot', '__name__', '"""reddit_user_agent"""'], {}), "(bot, __name__, 'reddit_user_agent')\n", (8207, 8243), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((8675, 8753), 'jshbot.parser.split_parameters', 'parser.split_parameters', (['message.content'], {'include_quotes': '(True)', 'quote_list': '(True)'}), '(message.content, include_quotes=True, quote_list=True)\n', (8698, 8753), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((9522, 9555), 'jshbot.configurations.get', 'configurations.get', (['bot', '__name__'], {}), '(bot, __name__)\n', (9540, 9555), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((2085, 2096), 'time.time', 'time.time', ([], {}), '()\n', (2094, 2096), False, 'import time\n'), ((5185, 5208), 'discord.Color', 'discord.Color', (['(16763981)'], {}), '(16763981)\n', (5198, 5208), False, 'import discord\n'), ((5353, 5376), 'discord.Color', 'discord.Color', (['(14495300)'], {}), '(14495300)\n', (5366, 5376), False, 'import discord\n'), ((5418, 5444), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5436, 5444), False, 'import datetime\n'), ((6805, 6864), 'jshbot.configurations.get', 'configurations.get', (['bot', '__name__', '"""submission_karma_limit"""'], {}), "(bot, __name__, 'submission_karma_limit')\n", (6823, 6864), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((6874, 6930), 'jshbot.configurations.get', 'configurations.get', (['bot', '__name__', '"""comment_karma_limit"""'], {}), "(bot, __name__, 'comment_karma_limit')\n", (6892, 6930), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((7214, 7236), 'discord.Color', 'discord.Color', (['(7844437)'], {}), '(7844437)\n', (7227, 7236), False, 'import discord\n'), ((7400, 7423), 'discord.Color', 'discord.Color', (['(14495300)'], {}), '(14495300)\n', (7413, 7423), False, 'import discord\n'), ((8890, 8963), 'jshbot.data.fetch_member', 'data.fetch_member', (['bot', 'name'], {'guild': 'message.guild', 'safe': '(True)', 'strict': '(True)'}), '(bot, name, guild=message.guild, safe=True, strict=True)\n', (8907, 8963), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((8985, 9016), 'jshbot.data.is_mod', 'data.is_mod', (['bot'], {'member': 'member'}), '(bot, member=member)\n', (8996, 9016), False, 
'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((2704, 2724), 'jshbot.utilities.future', 'utilities.future', (['_f'], {}), '(_f)\n', (2720, 2724), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((3804, 3827), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3825, 3827), False, 'import datetime\n'), ((4994, 5017), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5015, 5017), False, 'import datetime\n'), ((6047, 6098), 'discord.Embed', 'discord.Embed', ([], {'description': 'description', 'color': 'color'}), '(description=description, color=color)\n', (6060, 6098), False, 'import discord\n'), ((7634, 7685), 'discord.Embed', 'discord.Embed', ([], {'description': 'description', 'color': 'color'}), '(description=description, color=color)\n', (7647, 7685), False, 'import discord\n'), ((8416, 8461), 'jshbot.configurations.get', 'configurations.get', (['bot', '__name__', '"""guild_id"""'], {}), "(bot, __name__, 'guild_id')\n", (8434, 8461), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((8550, 8589), 'jshbot.data.is_mod', 'data.is_mod', (['bot'], {'member': 'message.author'}), '(bot, member=message.author)\n', (8561, 8589), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((701, 713), 'jshbot.commands.Opt', 'Opt', (['"""check"""'], {}), "('check')\n", (704, 713), False, 'from jshbot.commands import Command, SubCommand, Shortcut, ArgTypes, Arg, Opt, Response\n'), ((1023, 1035), 'jshbot.commands.Opt', 'Opt', (['"""karma"""'], {}), "('karma')\n", (1026, 1035), False, 'from jshbot.commands import Command, SubCommand, Shortcut, ArgTypes, Arg, Opt, Response\n'), ((1053, 1115), 'jshbot.commands.Arg', 'Arg', (['"""user"""'], {'argtype': 'ArgTypes.MERGED', 'quotes_recommended': '(False)'}), "('user', argtype=ArgTypes.MERGED, quotes_recommended=False)\n", (1056, 1115), False, 'from jshbot.commands import Command, SubCommand, Shortcut, ArgTypes, Arg, Opt, Response\n'), ((1287, 1297), 'jshbot.commands.Opt', 'Opt', (['"""set"""'], {}), "('set')\n", (1290, 1297), False, 'from jshbot.commands import Command, SubCommand, Shortcut, ArgTypes, Arg, Opt, Response\n'), ((1611, 1656), 'jshbot.commands.Arg', 'Arg', (['"""user"""'], {'argtype': 'ArgTypes.MERGED_OPTIONAL'}), "('user', argtype=ArgTypes.MERGED_OPTIONAL)\n", (1614, 1656), False, 'from jshbot.commands import Command, SubCommand, Shortcut, ArgTypes, Arg, Opt, Response\n'), ((1702, 1738), 'jshbot.commands.Arg', 'Arg', (['"""user"""'], {'argtype': 'ArgTypes.MERGED'}), "('user', argtype=ArgTypes.MERGED)\n", (1705, 1738), False, 'from jshbot.commands import Command, SubCommand, Shortcut, ArgTypes, Arg, Opt, Response\n'), ((831, 858), 'jshbot.utilities.MemberConverter', 'utilities.MemberConverter', ([], {}), '()\n', (856, 858), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n'), ((1406, 1431), 'jshbot.utilities.RoleConverter', 'utilities.RoleConverter', ([], {}), '()\n', (1429, 1431), False, 'from jshbot import utilities, plugins, configurations, data, logger, parser\n')] |
import pandas as pd
import os
def machine_in_progress_list(admin_file_directory):
schedule = pd.read_csv(os.path.join(admin_file_directory, "schedule.csv"))
next_machine = []
for task in schedule.iterrows():
if task[1]["status"]=="in_progress":
next_machine = f"{task[1]['yyyy']}/{task[1]['mm']}/{task[1]['user_id']}-00{task[1]['task_id']}" \
f"-{task[1]['analysis_type']}"
return next_machine
| [
"os.path.join"
] | [((111, 161), 'os.path.join', 'os.path.join', (['admin_file_directory', '"""schedule.csv"""'], {}), "(admin_file_directory, 'schedule.csv')\n", (123, 161), False, 'import os\n')] |
import collections
import logging
import urllib.parse
from ..errors import TokenRenewalError
LOGGER = logging.getLogger(__name__)
Token = collections.namedtuple('Token',
('access_token', 'token_type',
'expires_in', 'refresh_token'))
class TokenManager():
def __init__(self, *, connection, credentials,
access_token=None, token_type=None):
self._connection = connection
self._credentials = credentials
self._token = Token(access_token, token_type, None, None)
self._path = urllib.parse.urlsplit('/dxauth/oauth/token').path
@property
def credentials(self):
return self._credentials
@property
def token(self):
return self._token
def renew_token(self):
if not self._credentials:
            LOGGER.warning('Token renewal requires credentials')
return
LOGGER.debug('Trying to get a new token...')
self._token = Token(None, None, None, None)
headers = {'Authorization':
'Basic {}'.format(self._credentials.encoded_secret),
'Content-Type': 'application/x-www-form-urlencoded'}
data = urllib.parse.urlencode(self._credentials.data)
decoded_data = self._connection.post(path=self._path,
data=data,
headers=headers,
as_json=True)
if 'access_token' in decoded_data:
self._token = Token._make((decoded_data['access_token'],
decoded_data['token_type'],
decoded_data['expires_in'],
decoded_data['refresh_token']))
LOGGER.debug('Got a new token')
else:
LOGGER.error('Unsupported response: {}'.format(decoded_data))
raise TokenRenewalError()
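# Example usage (a sketch; `conn` and `creds` are hypothetical objects that
# provide the `post`, `encoded_secret` and `data` interfaces used above):
#   manager = TokenManager(connection=conn, credentials=creds)
#   manager.renew_token()
#   print(manager.token.access_token)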
| [
"logging.getLogger",
"collections.namedtuple"
] | [((104, 131), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (121, 131), False, 'import logging\n'), ((141, 239), 'collections.namedtuple', 'collections.namedtuple', (['"""Token"""', "('access_token', 'token_type', 'expires_in', 'refresh_token')"], {}), "('Token', ('access_token', 'token_type', 'expires_in',\n 'refresh_token'))\n", (163, 239), False, 'import collections\n')] |
# MIT License
# Copyright (c) 2022 <NAME>™
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from enum import Enum
import functools
from telegram import Update, ParseMode
from telegram.ext import CallbackContext
from telegram.inline.inlinekeyboardbutton import InlineKeyboardButton
from telegram.inline.inlinekeyboardmarkup import InlineKeyboardMarkup
from elaina import DRAGONS, DEV_USERS, dispatcher
from elaina.modules.helper_funcs.decorators import elainacallback
class AdminPerms(Enum):
CAN_RESTRICT_MEMBERS = 'can_restrict_members'
CAN_PROMOTE_MEMBERS = 'can_promote_members'
CAN_INVITE_USERS = 'can_invite_users'
CAN_DELETE_MESSAGES = 'can_delete_messages'
CAN_CHANGE_INFO = 'can_change_info'
CAN_PIN_MESSAGES = 'can_pin_messages'
class ChatStatus(Enum):
CREATOR = "creator"
ADMIN = "administrator"
anon_callbacks = {}
anon_callback_messages = {}
def user_admin(permission: AdminPerms):
def wrapper(func):
@functools.wraps(func)
def awrapper(update: Update, context: CallbackContext, *args, **kwargs):
nonlocal permission
if update.effective_chat.type == 'private':
return func(update, context, *args, **kwargs)
message = update.effective_message
is_anon = bool(update.effective_message.sender_chat)
if is_anon:
callback_id = f'anoncb/{message.chat.id}/{message.message_id}/{permission.value}'
anon_callbacks[(message.chat.id, message.message_id)] = ((update, context), func)
anon_callback_messages[(message.chat.id, message.message_id)] = (
message.reply_text("Seems like you're anonymous, click the button below to prove your identity",
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(text='Prove identity',
callback_data=callback_id)]]))).message_id
# send message with callback f'anoncb{callback_id}'
else:
user_id = message.from_user.id
chat_id = message.chat.id
mem = context.bot.get_chat_member(chat_id=chat_id, user_id=user_id)
if getattr(mem, permission.value) is True or mem.status == "creator" or user_id in DEV_USERS:
return func(update, context, *args, **kwargs)
else:
return message.reply_text(f"You lack the permission: `{permission.name}`",
parse_mode=ParseMode.MARKDOWN)
return awrapper
return wrapper
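# Example usage of the decorator (a sketch; `ban_member` is a hypothetical
# handler - any callback with the (update, context) signature works):
#   @user_admin(AdminPerms.CAN_RESTRICT_MEMBERS)
#   def ban_member(update: Update, context: CallbackContext):
#       ...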
@elainacallback(pattern="anoncb")
def anon_callback_handler1(upd: Update, _: CallbackContext):
callback = upd.callback_query
perm = callback.data.split('/')[3]
chat_id = int(callback.data.split('/')[1])
message_id = int(callback.data.split('/')[2])
try:
mem = upd.effective_chat.get_member(user_id=callback.from_user.id)
except BaseException as e:
callback.answer(f"Error: {e}", show_alert=True)
return
if mem.status not in [ChatStatus.ADMIN.value, ChatStatus.CREATOR.value]:
callback.answer("You're aren't admin.")
dispatcher.bot.delete_message(chat_id, anon_callback_messages.pop((chat_id, message_id), None))
dispatcher.bot.send_message(chat_id, "You lack the permissions required for this command")
elif getattr(mem, perm) is True or mem.status == "creator" or mem.user.id in DRAGONS:
cb = anon_callbacks.pop((chat_id, message_id), None)
if cb:
message_id = anon_callback_messages.pop((chat_id, message_id), None)
if message_id is not None:
dispatcher.bot.delete_message(chat_id, message_id)
return cb[1](cb[0][0], cb[0][1])
else:
callback.answer("This isn't for ya") | [
"telegram.inline.inlinekeyboardbutton.InlineKeyboardButton",
"elaina.dispatcher.bot.send_message",
"elaina.modules.helper_funcs.decorators.elainacallback",
"functools.wraps",
"elaina.dispatcher.bot.delete_message"
] | [((3694, 3726), 'elaina.modules.helper_funcs.decorators.elainacallback', 'elainacallback', ([], {'pattern': '"""anoncb"""'}), "(pattern='anoncb')\n", (3708, 3726), False, 'from elaina.modules.helper_funcs.decorators import elainacallback\n'), ((1974, 1995), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1989, 1995), False, 'import functools\n'), ((4381, 4475), 'elaina.dispatcher.bot.send_message', 'dispatcher.bot.send_message', (['chat_id', '"""You lack the permissions required for this command"""'], {}), "(chat_id,\n 'You lack the permissions required for this command')\n", (4408, 4475), False, 'from elaina import DRAGONS, DEV_USERS, dispatcher\n'), ((4774, 4824), 'elaina.dispatcher.bot.delete_message', 'dispatcher.bot.delete_message', (['chat_id', 'message_id'], {}), '(chat_id, message_id)\n', (4803, 4824), False, 'from elaina import DRAGONS, DEV_USERS, dispatcher\n'), ((2834, 2904), 'telegram.inline.inlinekeyboardbutton.InlineKeyboardButton', 'InlineKeyboardButton', ([], {'text': '"""Prove identity"""', 'callback_data': 'callback_id'}), "(text='Prove identity', callback_data=callback_id)\n", (2854, 2904), False, 'from telegram.inline.inlinekeyboardbutton import InlineKeyboardButton\n')] |
import os
import psycopg2
DATABASE_URL = os.environ['DB_URI']
DB_CREATED = os.environ['DB_CREATED']
def pg_get_db():
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
return conn
def pg_init_db():
print('DB_CREATED = ')
print(DB_CREATED)
if DB_CREATED == '1':
return
""" create tables in the PostgreSQL database"""
commands = (
"""
CREATE TABLE googleuser (
id VARCHAR(255) NOT NULL,
name VARCHAR(255) NOT NULL,
email VARCHAR(255) NOT NULL,
profile_pic VARCHAR(255) NOT NULL
)
""",
)
conn = None
try:
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cur = conn.cursor()
# create table one by one
for command in commands:
cur.execute(command)
# close communication with the PostgreSQL database server
cur.close()
# commit the changes
conn.commit()
# DEBUG
print('DEBUG: DB created')
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
| [
"psycopg2.connect"
] | [((131, 180), 'psycopg2.connect', 'psycopg2.connect', (['DATABASE_URL'], {'sslmode': '"""require"""'}), "(DATABASE_URL, sslmode='require')\n", (147, 180), False, 'import psycopg2\n'), ((657, 706), 'psycopg2.connect', 'psycopg2.connect', (['DATABASE_URL'], {'sslmode': '"""require"""'}), "(DATABASE_URL, sslmode='require')\n", (673, 706), False, 'import psycopg2\n')] |
"""AIOGitHubAPI: AIOGitHubAPI"""
from __future__ import annotations
import os
from typing import List, Optional
import aiohttp
from ..common.const import ACCEPT_HEADERS
from ..objects.base import AIOGitHubAPIBase
from ..objects.repository import AIOGitHubAPIRepository
from .client import AIOGitHubAPIClient
class AIOGitHubAPI(AIOGitHubAPIBase):
"""
AIOGitHubAPI
    This is the main class; this is where it all starts.
DEPRECATED: Use `GitHubAPI` instead.
"""
_close_session = False
def __init__(
self,
token: str = None,
session: aiohttp.ClientSession = None,
headers: Optional[dict] = None,
base_url: Optional[str] = None,
) -> None:
"""
Initialises a GitHub API client.
param | required | description
-- | -- | --
`token` | False | [Your GitHub Personal Access Token](https://github.com/settings/tokens).
`session` | False | `aiohttp.ClientSession` to be used by this package.
`headers` | False | Custom headers to be used by this package.
`base_url` | False | Base URL of the GitHub API.
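
        Example (a minimal sketch; the token value is a placeholder):

            async with AIOGitHubAPI(token="<TOKEN>") as github:
                repo = await github.get_repo("octocat/Hello-World")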
"""
if session is None:
session = aiohttp.ClientSession()
self._close_session = True
if token is None:
token = os.getenv("GITHUB_TOKEN")
self.client = AIOGitHubAPIClient(session, token, headers, base_url)
async def __aenter__(self) -> "AIOGitHubAPI":
"""Async enter."""
return self
async def __aexit__(self, *exc_info) -> None:
"""Async exit."""
await self._close()
async def get_repo(
self,
repo: str,
etag: Optional[str] = None,
) -> AIOGitHubAPIRepository:
"""Retrun AIOGitHubAPIRepository object."""
_endpoint = f"/repos/{repo}"
_headers = {"Accept": ACCEPT_HEADERS["preview"]}
if etag:
_headers[aiohttp.hdrs.IF_NONE_MATCH] = etag
response = await self.client.get(endpoint=_endpoint, headers=_headers)
return AIOGitHubAPIRepository(self.client, response.data)
async def get_org_repos(
self, org: str, page: int = 1, etag: Optional[str] = None
) -> List[AIOGitHubAPIRepository]:
"""
        Return a list of AIOGitHubAPIRepository objects.
param | required | Default | description
-- | -- | -- | --
`org` | True | None | The name of the organization, example "octocat"
`page` | False | 1 | The page number you want to fetch.
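
        Example (a sketch, assuming an initialized client `github`):

            repos = await github.get_org_repos("octocat", page=1)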
"""
_enpoint = f"/orgs/{org}/repos?page={str(page)}"
_params = {"per_page": 100}
_headers = {"Accept": ACCEPT_HEADERS["preview"]}
if etag:
_headers[aiohttp.hdrs.IF_NONE_MATCH] = etag
        response = await self.client.get(endpoint=_endpoint, params=_params, headers=_headers)
return [AIOGitHubAPIRepository(self.client, x) for x in response.data or []]
async def graphql(self, query: str, variables: dict = {}) -> dict:
response = await self.client.post(
endpoint="/graphql",
returnjson=True,
data={"query": query, "variables": variables},
jsondata=True,
)
return response.data.get("data")
async def get_rate_limit(self) -> dict:
"""Retrun current rate limits."""
_endpoint = "/rate_limit"
await self.client.get(endpoint=_endpoint)
return self.client.ratelimits.__dict__
async def render_markdown(self, content: str, etag: Optional[str] = None) -> str:
"""
        Render a markdown document and return the result as a string.
[API Docs](https://docs.github.com/en/rest/reference/markdown#render-a-markdown-document-in-raw-mode)
param | description
-- | --
`content` | The content (as markdown) you want to render as GHF Markdown
"""
_endpoint = "/markdown/raw"
_headers = {"Content-Type": "text/plain"}
if etag:
_headers[aiohttp.hdrs.IF_NONE_MATCH] = etag
response = await self.client.post(endpoint=_endpoint, headers=_headers, data=content)
return response.data
async def _close(self) -> None:
"""Close open client session."""
if self.client.session and self._close_session:
await self.client.session.close()
| [
"aiohttp.ClientSession",
"os.getenv"
] | [((1197, 1220), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1218, 1220), False, 'import aiohttp\n'), ((1307, 1332), 'os.getenv', 'os.getenv', (['"""GITHUB_TOKEN"""'], {}), "('GITHUB_TOKEN')\n", (1316, 1332), False, 'import os\n')] |
#
# Copyright 2019 EPAM Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from odahuflow.sdk.clients.model import ModelClient
from odahuflow.airflow_plugin.api import OdahuHook
class ModelPredictRequestOperator(BaseOperator):
@apply_defaults
def __init__(self,
model_deployment_name: str,
api_connection_id: str,
model_connection_id: str,
request_body: typing.Any,
md_role_name: str = "",
*args, **kwargs):
super().__init__(*args, **kwargs)
self.model_deployment_name = model_deployment_name
self.request_body = request_body
self.model_connection_id = model_connection_id
self.api_connection_id = api_connection_id
self.md_role_name = md_role_name
def get_hook(self) -> OdahuHook:
return OdahuHook(
self.api_connection_id,
self.model_connection_id
)
def execute(self, context):
# pylint: disable=unused-argument
model_client: ModelClient = self.get_hook().get_model_client(self.model_deployment_name)
resp = model_client.invoke(**self.request_body)
print(resp)
return resp
class ModelInfoRequestOperator(BaseOperator):
@apply_defaults
def __init__(self,
model_deployment_name: str,
api_connection_id: str,
model_connection_id: str,
md_role_name: str = "",
*args, **kwargs):
super().__init__(*args, **kwargs)
self.model_deployment_name = model_deployment_name
self.model_connection_id = model_connection_id
self.api_connection_id = api_connection_id
self.md_role_name = md_role_name
def get_hook(self) -> OdahuHook:
return OdahuHook(
self.api_connection_id,
self.model_connection_id
)
def execute(self, context):
# pylint: disable=unused-argument
model_client: ModelClient = self.get_hook().get_model_client(self.model_deployment_name)
resp = model_client.info()
print(resp)
return resp
| [
"odahuflow.airflow_plugin.api.OdahuHook"
] | [((1507, 1566), 'odahuflow.airflow_plugin.api.OdahuHook', 'OdahuHook', (['self.api_connection_id', 'self.model_connection_id'], {}), '(self.api_connection_id, self.model_connection_id)\n', (1516, 1566), False, 'from odahuflow.airflow_plugin.api import OdahuHook\n'), ((2470, 2529), 'odahuflow.airflow_plugin.api.OdahuHook', 'OdahuHook', (['self.api_connection_id', 'self.model_connection_id'], {}), '(self.api_connection_id, self.model_connection_id)\n', (2479, 2529), False, 'from odahuflow.airflow_plugin.api import OdahuHook\n')] |
#!/usr/bin/python
"""A standalone dissector for files processed by usbcap.awk"""
import sys, struct
from binascii import hexlify, unhexlify
EraseBlock = 0x4000 # change according to the one reported by the device
cmd_enum = {
1: 'SYNC',
2: 'INFO',
3: 'BOOT',
4: 'REBOOT',
11: 'WRITE',
21: 'ERASE',
}
idle = True
counter, buf_size = 0, 0
for line in sys.stdin.readlines():
direction, data = line.strip().split(' ', 2)
data = unhexlify(data)
assert(direction in ('i','o'))
if direction == 'i':
if data in (b'\x00', b'\x02'):
print('In: USB RESET')
elif data[0:1] == b'\x0f':
cmd = ord(data[1:2])
print('In: ACK: %5s (%02x)' % (cmd_enum[cmd], cmd))
else:
print('In: BootInfo? (len=%d)' % ord(data[0:1]))
else:
if idle:
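            # Command header layout per '<BBLH' (little-endian):
            # stx: uint8, cmd: uint8, addr: uint32, counter: uint16.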
stx, cmd, addr, counter = struct.unpack('<BBLH', data[:8])
assert(stx == 0x0f)
cmd = cmd_enum[cmd]
print('Out: CMD %5s (addr=0x%08x counter=0x%04x)' % (cmd, addr, counter))
if cmd == 'WRITE':
buf_size = EraseBlock
idle = False
else:
read_len = min(counter, len(data))
print('Out: Data %s' % hexlify(data[:read_len]).upper().decode('ascii'))
counter -= read_len
buf_size -= read_len
assert(buf_size >= 0)
assert(counter >= 0)
if buf_size == 0 or counter == 0:
buf_size = EraseBlock
print('Expecting ACK')
if counter == 0:
idle = True
| [
"struct.unpack",
"binascii.hexlify",
"binascii.unhexlify",
"sys.stdin.readlines"
] | [((374, 395), 'sys.stdin.readlines', 'sys.stdin.readlines', ([], {}), '()\n', (393, 395), False, 'import sys, struct\n'), ((457, 472), 'binascii.unhexlify', 'unhexlify', (['data'], {}), '(data)\n', (466, 472), False, 'from binascii import hexlify, unhexlify\n'), ((879, 911), 'struct.unpack', 'struct.unpack', (['"""<BBLH"""', 'data[:8]'], {}), "('<BBLH', data[:8])\n", (892, 911), False, 'import sys, struct\n'), ((1256, 1280), 'binascii.hexlify', 'hexlify', (['data[:read_len]'], {}), '(data[:read_len])\n', (1263, 1280), False, 'from binascii import hexlify, unhexlify\n')] |
import click
from gravity import config_manager
@click.command("configs")
@click.pass_context
def cli(ctx):
"""List registered config files.
aliases: list
"""
with config_manager.config_manager(state_dir=ctx.parent.state_dir) as cm:
registered = cm.get_registered_configs()
if registered:
click.echo("%-12s %-24s %s" % ("TYPE", "INSTANCE NAME", "CONFIG PATH"))
for config in sorted(registered.keys()):
click.echo("%-12s %-24s %s" % (registered[config].get("config_type", "unknown"), registered[config].get("instance_name", "unknown"), config))
else:
click.echo("No config files registered")
| [
"gravity.config_manager.config_manager",
"click.echo",
"click.command"
] | [((52, 76), 'click.command', 'click.command', (['"""configs"""'], {}), "('configs')\n", (65, 76), False, 'import click\n'), ((184, 245), 'gravity.config_manager.config_manager', 'config_manager.config_manager', ([], {'state_dir': 'ctx.parent.state_dir'}), '(state_dir=ctx.parent.state_dir)\n', (213, 245), False, 'from gravity import config_manager\n'), ((337, 410), 'click.echo', 'click.echo', (["('%-12s %-24s %s' % ('TYPE', 'INSTANCE NAME', 'CONFIG PATH'))"], {}), "('%-12s %-24s %s' % ('TYPE', 'INSTANCE NAME', 'CONFIG PATH'))\n", (347, 410), False, 'import click\n'), ((650, 690), 'click.echo', 'click.echo', (['"""No config files registered"""'], {}), "('No config files registered')\n", (660, 690), False, 'import click\n')] |
from enum import Enum
import pandas as pd
import requests
from api.base import BaseApi
from constants.constants import CrawlerColumns
class BinanceColumns(Enum):
OPEN_TIME = "open_time"
OPEN = "Open"
HIGH = "High"
LOW = "Low"
CLOSE = "Close"
VOLUME = "Volume"
CLOSE_TIME = "Close Time"
QUOTE_ASSET_VOLUME = "Quote Asset Volume"
NUMBER_OF_TRADES = "Number of Trades"
TAKER_BUY_BASE_ASSET_VOLUME = "Taker Buy Base Asset Volume"
TAKER_BUY_QUOTE_ASSET_VOLUME = "Taker Buy Quote Asset Volume"
IGNORE = "Ignore"
class BinanceApi(BaseApi):
def fetch_candles(self, params=None):
""" Fetch historical candles data from binance.
interval should be:
1m, 3m, 5m, 15m, 30m,
1h, 2h, 4h, 6h, 8h, 12h,
1d, 3d, 1w, 1M
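
        Example (a sketch; assumes a BinanceApi instance `api`):
            df = api.fetch_candles({"symbol": "BTCUSDT",
                                    "interval": "1h",
                                    "isEmpty": True})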
"""
try:
payload = {
"symbol": params["symbol"],
"interval": params["interval"],
}
if params["isEmpty"]:
payload["limit"] = 1000
else:
payload["startTime"] = int(params["start"].timestamp())
payload["endTime"] = int(params["end"].timestamp())
response = requests.get(
"https://api.binance.com/api/v3/klines",
params=payload,
)
response.raise_for_status()
candles = response.json()
if candles is None:
return
candles = pd.DataFrame(
candles,
columns=[column.value for column in BinanceColumns],
)
candles[CrawlerColumns.DATETIME.value] = pd.to_datetime(
candles[BinanceColumns.CLOSE_TIME.value] // 1000,
unit="s",
)
except requests.exceptions.RequestException as error:
raise Exception(error.response.text)
return candles[[column.value for column in CrawlerColumns]]
| [
"pandas.DataFrame",
"pandas.to_datetime",
"requests.get"
] | [((1238, 1307), 'requests.get', 'requests.get', (['"""https://api.binance.com/api/v3/klines"""'], {'params': 'payload'}), "('https://api.binance.com/api/v3/klines', params=payload)\n", (1250, 1307), False, 'import requests\n'), ((1511, 1585), 'pandas.DataFrame', 'pd.DataFrame', (['candles'], {'columns': '[column.value for column in BinanceColumns]'}), '(candles, columns=[column.value for column in BinanceColumns])\n', (1523, 1585), True, 'import pandas as pd\n'), ((1686, 1760), 'pandas.to_datetime', 'pd.to_datetime', (['(candles[BinanceColumns.CLOSE_TIME.value] // 1000)'], {'unit': '"""s"""'}), "(candles[BinanceColumns.CLOSE_TIME.value] // 1000, unit='s')\n", (1700, 1760), True, 'import pandas as pd\n')] |
import databases
from core.database import SQLALCHEMY_DATABASE_URL
import logging
log=logging.getLogger("propertyconnect-logger")
async def check_db_connected():
try:
        if "sqlite" not in str(SQLALCHEMY_DATABASE_URL):
database = databases.Database(SQLALCHEMY_DATABASE_URL)
if not database.is_connected:
await database.connect()
await database.execute("SELECT 1")
log.info("Database is connected (^_^)")
except Exception as e:
log.info(
"Looks like db is missing or is there is some problem in connection,see below traceback"
)
raise e
async def check_db_disconnected():
try:
        if "sqlite" not in str(SQLALCHEMY_DATABASE_URL):
database = databases.Database(SQLALCHEMY_DATABASE_URL)
if database.is_connected:
await database.disconnect()
log.info("Database is Disconnected (-_-) zZZ")
except Exception as e:
        raise e
| [
"logging.getLogger",
"databases.Database"
] | [((86, 129), 'logging.getLogger', 'logging.getLogger', (['"""propertyconnect-logger"""'], {}), "('propertyconnect-logger')\n", (103, 129), False, 'import logging\n'), ((264, 307), 'databases.Database', 'databases.Database', (['SQLALCHEMY_DATABASE_URL'], {}), '(SQLALCHEMY_DATABASE_URL)\n', (282, 307), False, 'import databases\n'), ((799, 842), 'databases.Database', 'databases.Database', (['SQLALCHEMY_DATABASE_URL'], {}), '(SQLALCHEMY_DATABASE_URL)\n', (817, 842), False, 'import databases\n')] |
"""
A mostly direct translation of rdradcp.m to python.
1/3/2013: Updated with DKR changes to rdradcp.m
"""
from __future__ import division
from __future__ import print_function
# cruft that 2to3 added - but seems to make less compatible with older code
# and unclear that it's needed for py3k.
# from builtins import range
# from builtins import object
# from past.utils import old_div
import sys,os,re,math
from numpy import *
import numpy as np
import scipy.stats.stats as sp
import scipy.stats.morestats as ssm
from matplotlib.dates import date2num,num2date
import datetime
from . import nmea
cfac=180.0/(2**31)
def msg_print(s):
sys.stdout.write(s)
sys.stdout.flush()
def get_ens_dtype(sourceprog = 'WINRIVER'):
ens_dtype = [('mtime',float64),
('number',int32),
('pitch',float64), ('roll',float64), ('heading',float64),
('pitch_std',float64), ('roll_std',float64), ('heading_std',float64),
('depth',float64),
('temperature',float64),
('salinity',float64),
('pressure',float64),('pressure_std',float64),
('bt_mode',float64),
('bt_range',(float64,4)),
('bt_vel',(float64,4)),
('bt_corr',(float64,4)),
('bt_ampl',(float64,4)),
('bt_perc_good',(float64,4))
]
if sourceprog == 'WINRIVER':
# if cfg.sourceprog in ['WINRIVER']:
# if cfg.sourceprog in ('WINRIVER',):
ens_dtype += [('nav_mtime',float64),
('nav_longitude',float64),
('nav_latitude',float64)]
elif sourceprog == 'VMDAS':
ens_dtype += [('nav_smtime',float64),
('nav_emtime',float64),
('nav_slongitude',float64),
('nav_elongitude',float64),
('nav_slatitude',float64),
('nav_elatitude',float64),
('nav_mtime',float64)]
else:
pass
return ens_dtype
def get_bin_dtype():
# things of the shape [n_cells,n]
return [('east_vel',float64),
('north_vel',float64),
('vert_vel',float64),
('error_vel',float64),
('corr',(float64,4)),
('status',(float64,4)),
('intens',(float64,4)),
('perc_good',(float64,4))
]
class Adcp(object):
pass
#function [adcp,cfg,ens,hdr]=rdradcp(name,varargin);
#
def rdradcp(name,
num_av=5,
nens=-1, # or [start,stop] as 1-based, inclusive
baseyear=2000,
despike='no',
log_fp=None):
"""
The original documentation from <NAME>'s code:
RDRADCP Read (raw binary) RDI ADCP files,
ADCP=RDRADCP(NAME) reads the raw binary RDI BB/Workhorse ADCP file NAME and
puts all the relevant configuration and measured data into a data structure
ADCP (which is self-explanatory). This program is designed for handling data
recorded by moored instruments (primarily Workhorse-type but can also read
Broadband) and then downloaded post-deployment. For vessel-mount data I
usually make p-files (which integrate nav info and do coordinate transformations)
and then use RDPADCP.
This current version does have some handling of VMDAS, WINRIVER, and WINRIVER2 output
files, but it is still 'beta'. There are (inadequately documented) timestamps
of various kinds from VMDAS, for example, and caveat emptor on WINRIVER2 NMEA data.
(ADCP,CFG)=RDRADCP(...) returns configuration data in a
separate data structure.
Various options can be specified on input:
(..)=RDRADCP(NAME,NUMAV) averages NUMAV ensembles together in the result.
(..)=RDRADCP(NAME,NUMAV,NENS) reads only NENS ensembles (-1 for all).
(..)=RDRADCP(NAME,NUMAV,(NFIRST NEND)) reads only the specified range
of ensembles. This is useful if you want to get rid of bad data before/after
the deployment period.
Notes:
- sometimes the ends of files are filled with garbage. In this case you may
have to rerun things explicitly specifying how many records to read (or the
last record to read). I don't handle bad data very well. Also - in Aug/2007
I discovered that WINRIVER-2 files can have a varying number of bytes per
ensemble. Thus the estimated number of ensembles in a file (based on the
length of the first ensemble and file size) can be too high or too low.
- I don't read in absolutely every parameter stored in the binaries;
just the ones that are 'most' useful. Look through the code if
you want to get other things.
- chaining of files does not occur (i.e. read .000, .001, etc.). Sometimes
a ping is split between the end of one file and the beginning of another.
    The only way to get this data is to concatenate the files, using
cat file1.000 file1.001 > file1 (unix)
copy file1.000/B+file2.001/B file3.000/B (DOS/Windows)
(as of Dec 2005 we can probably read a .001 file)
- velocity fields are always called east/north/vertical/error for all
coordinate systems even though they should be treated as
1/2/3/4 in beam coordinates etc.
String parameter/option pairs can be added after these initial parameters:
'baseyear': Base century for BB/v8WH firmware (default to 2000).
'despike': 'no' | 'yes' | 3-element vector
Controls ensemble averaging. With 'no' a simple mean is used
(default). With 'yes' a mean is applied to all values that fall
within a window around the median (giving some outlier rejection).
This is useful for noisy data. Window sizes are [.3 .3 .3] m/s
for [ horiz_vel vert_vel error_vel ] values. If you want to
change these values, set 'despike' to the 3-element vector.
<NAME> (<EMAIL>) - 17/09/99
<NAME> - 17/Oct/99
    5/July/00 - handled byte offsets (and mysterious 'extra' bytes) slightly better, Y2K
5/Oct/00 - bug fix - size of ens stayed 2 when NUMAV==1 due to initialization,
hopefully this is now fixed.
10/Mar/02 - #bytes per record changes mysteriously,
tried a more robust workaround. Guess that we have an extra
2 bytes if the record length is even?
28/Mar/02 - added more firmware-dependent changes to format; hopefully this
works for everything now (put previous changes on firmer footing?)
30/Mar/02 - made cfg output more intuitive by decoding things.
An early version of WAVESMON and PARSE which split out this
data from a wave recorder inserted an extra two bytes per record.
I have removed the code to handle this but if you need it see line 509
29/Nov/02 - A change in the bottom-track block for version 4.05 (very old!).
29/Jan/03 - Status block in v4.25 150khzBB two bytes short?
14/Oct/03 - Added code to at least 'ignore' WinRiver GPS blocks.
11/Nov/03 - VMDAS navigation block, added hooks to output
navigation data.
26/Mar/04
- better decoding of nav blocks
- better handling of weird bytes at beginning and end of file
- (code fixes due to <NAME>).
25/Aug/04 - fixes to "junk bytes" handling.
    27/Jan/05 - even more fixes to junk byte handling (move 1 byte at a time rather than
    two for odd lengths).
    29/Sep/2005 - median windowing was done slightly incorrectly in a way which biases
    results negatively when data is *very* noisy. Now fixed.
    28/Dec/2005 - redid code for recovering from ensembles that mysteriously change length, added
'checkheader' to make a complete check of ensembles.
Feb/2006 - handling of firmware version 9 (navigator)
23/Aug/2006 - more firmware updates (16.27)
    23/Aug/2006 - output some bt QC stuff
29/Oct/2006 - winriver bottom track block had errors in it - now fixed.
30/Oct/2006 - pitch_std, roll_std now uint8 and not int8 (thanks <NAME>)
13/Aug/2007 - added Rio Grande (firmware v 10),
better handling of those cursed winriver ASCII NMEA blocks whose
lengths change unpredictably.
skipping the inadequately documented 2022 WINRIVER-2 NMEA block
13/Mar/2010 - firmware version 50 for WH.
31/Aug/2012 - <NAME> / RMA - ported to python
Python port details:
    log_fp: a file-like object - the messages are the same as in the matlab code,
but this allows them to be redirected elsewhere.
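
    Example (a minimal sketch; 'deploy.000' is a hypothetical raw file name):
        adcp = rdradcp('deploy.000', num_av=5, nens=[1, 1000])
        if adcp is not None:
            print(num2date(adcp.mtime[0]), adcp.east_vel.shape)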
"""
if log_fp is None:
log_fp = sys.stdout
def msg(s):
log_fp.write(s)
log_fp.flush()
century=baseyear # ADCP clock does not have century prior to firmware 16.05.
vels=despike # Default to simple averaging
# Check file information first
if not os.path.exists(name):
msg("ERROR******* Can't find file %s\n"%name)
return None
msg("\nOpening file %s\n\n"%name)
fd=open(name,'rb') # NB: no support at the file level for 'ieee-le'
# Read first ensemble to initialize parameters
[ens,hdr,cfg,pos]=rd_buffer(fd,-2,msg) # Initialize and read first two records
if ens is None: # ~isstruct(ens) & ens==-1,
msg("No Valid data found\n")
return None
fd.seek(pos) # Rewind
if (cfg.prog_ver<16.05 and cfg.prog_ver>5.999) or cfg.prog_ver<5.55:
msg("***** Assuming that the century begins year %d (info not in this firmware version)\n"%century)
elif cfg.prog_ver>23.18 and cfg.prog_ver<23.20:
msg("***** Maybe this is an ocean surveyor, and century is 2000\n")
century=2000
else:
century=0 # century included in clock.
def ensemble_dates(ensx):
""" helper routine to extract dates from the given ensemble, return
as an array of datenums
"""
# handle hours, minutes, seconds, 100ths manually, but date with date2num
dats = [date2num(datetime.date(int(century+ensx.rtc[i,0]),
int(ensx.rtc[i,1]),
int(ensx.rtc[i,2]))) \
+ sum( ensx.rtc[i,3:7] * [1./24, 1./(24*60), 1./86400, 1./8640000 ])
for i in range(len(ensx.rtc))]
dats = array(dats)
return dats
dats = ensemble_dates(ens)
t_int=diff(dats)[0]
msg( "Record begins at %s\n"%( num2date(dats[0]).strftime('%c') ))
msg( "Ping interval appears to be %ss\n\n"%( 86400*t_int ))
# Estimate number of records (since I don't feel like handling EOFs correctly,
# we just don't read that far!)
# Now, this is a puzzle - it appears that this is not necessary in
# a firmware v16.12 sent to me, and I can't find any example for
# which it *is* necessary so I'm not sure why its there. It could be
# a leftoever from dealing with the bad WAVESMON/PARSE problem (now
# fixed) that inserted extra bytes.
# ...So its out for now.
#if cfg.prog_ver>=16.05, extrabytes=2 else extrabytes=0 end # Extra bytes
extrabytes=0
naminfo = os.stat(name)
nensinfile=int(naminfo.st_size/(hdr.nbyte+2+extrabytes))
msg("\nEstimating %d ensembles in this file\n"%nensinfile)
# [python] nens, if a sequence, is taken to be 1-based, inclusive indices.
# This is counter to the normal python interpretation, but instead
# consistent with the original matlab.
if isinstance(nens,int) or isinstance(nens,integer):
if nens==-1:
nens=nensinfile
msg(" Reading %d ensembles, reducing by a factor of %d\n"%(nens,num_av) )
else:
msg(" Reading ensembles %d-%d, reducing by a factor of %d\n"%(nens[0],nens[1],num_av) )
fd.seek((hdr.nbyte+2+extrabytes)*(nens[0]-1),os.SEEK_CUR)
nens=nens[1] - nens[0] + 1
# Number of records after averaging.
n=int(nens/num_av)
msg("Final result %d values\n"%n)
if num_av>1:
if type(vels) == str:
msg("\n Simple mean used for ensemble averaging\n")
else:
msg("\n Averaging after outlier rejection with parameters %s\n"%vels)
# Structure to hold all ADCP data
# Note that I am not storing all the data contained in the raw binary file, merely
# things I think are useful.
# types for the data arrays - first, the fields common to all sourceprog:
adcp = Adcp()
adcp.name = 'adcp'
adcp.config=cfg
# things of the shape [1,n]
ens_dtype = get_ens_dtype(cfg.sourceprog)
# things of the shape [n_cells,n]
bin_dtype = get_bin_dtype()
# NB: we may not actually have n ensembles - don't know until
# the whole file is actually read - so at the end of this function
# these arrays may get truncated
adcp.ensemble_data = zeros(n,dtype=ens_dtype)
adcp.bin_data = zeros((n,cfg.n_cells), dtype=bin_dtype)
# Calibration factors for backscatter data
# Loop for all records
ens = None # force it to reinitialize
for k in range(n): # [python] k switched to zero-based
# Gives display so you know something is going on...
if k%50==0:
msg("%d\n"%(k*num_av))
msg(".")
# Read an ensemble
[ens,hdr,cfg1,pos]=rd_buffer(fd,num_av,msg)
if ens is None: # ~isstruct(ens), # If aborting...
msg("Only %d records found..suggest re-running RDRADCP using this parameter\n"%( (k-1)*num_av ))
msg("(If this message preceded by a POSSIBLE PROGRAM PROBLEM message, re-run using %d)\n"%( (k-1)*num_av-1))
n = k
break
dats = ensemble_dates(ens)
adcp.ensemble_data['mtime'][k] =median(dats)
adcp.ensemble_data['number'][k] =ens.number[0]
adcp.ensemble_data['heading'][k] =ssm.circmean(ens.heading*pi/180.)*180/pi
adcp.ensemble_data['pitch'][k] =mean(ens.pitch)
adcp.ensemble_data['roll'][k] =mean(ens.roll)
adcp.ensemble_data['heading_std'][k] =mean(ens.heading_std)
adcp.ensemble_data['pitch_std'][k] =mean(ens.pitch_std)
adcp.ensemble_data['roll_std'][k] =mean(ens.roll_std)
adcp.ensemble_data['depth'][k] =mean(ens.depth)
adcp.ensemble_data['temperature'][k] =mean(ens.temperature)
adcp.ensemble_data['salinity'][k] =mean(ens.salinity)
adcp.ensemble_data['pressure'][k] =mean(ens.pressure)
adcp.ensemble_data['pressure_std'][k]=mean(ens.pressure_std)
# [python] - order of indices for bin data is opposite matlab -
# adcp.east_vel[ ensemble index, bin_index ]
if type(vels) == str:
adcp.bin_data['east_vel'][k,:] =nmean(ens.east_vel ,0) # [python] axis changed to 0-based, and switched!
adcp.bin_data['north_vel'][k,:] =nmean(ens.north_vel,0) # assume ens.east_vel[sample_index,bin_index]
adcp.bin_data['vert_vel'][k,:] =nmean(ens.vert_vel ,0)
adcp.bin_data['error_vel'][k,:] =nmean(ens.error_vel,0)
else:
adcp.bin_data['east_vel'][k,:] =nmedian(ens.east_vel ,vels[0],0)
adcp.bin_data['north_vel'][k,:] =nmedian(ens.north_vel ,vels[0],0)
adcp.bin_data['vert_vel'][k,:] =nmedian(ens.vert_vel ,vels[1],0)
adcp.bin_data['error_vel'][k,:] =nmedian(ens.error_vel ,vels[2],0)
# per-beam, per bin data -
# adcp.corr[ensemble index, bin_index, beam_index ]
adcp.bin_data['corr'][k,:,:] =nmean(ens.corr,0) # added correlation RKD 9/00
adcp.bin_data['status'][k,:,:] =nmean(ens.status,0)
adcp.bin_data['intens'][k,:,:] =nmean(ens.intens,0)
adcp.bin_data['perc_good'][k,:,:] =nmean(ens.percent,0) # felipe pimenta aug. 2006
adcp.ensemble_data['bt_range'][k,:] =nmean(ens.bt_range,0)
adcp.ensemble_data['bt_mode'][k] = nmedian(ens.bt_mode)
adcp.ensemble_data['bt_vel'][k,:] =nmean(ens.bt_vel,0)
adcp.ensemble_data['bt_corr'][k,:]=nmean(ens.bt_corr,0) # felipe pimenta aug. 2006
adcp.ensemble_data['bt_ampl'][k,:]=nmean(ens.bt_ampl,0) # "
adcp.ensemble_data['bt_perc_good'][k,:]=nmean(ens.bt_perc_good,0)# "
if cfg.sourceprog == 'WINRIVER':
#if cfg.sourceprog in ('instrument','WINRIVER'):
adcp.ensemble_data['nav_mtime'][k]=nmean(ens.smtime)
# these are sometimes nan - and note that nmean
# modifies the input, so it looks like it should
adcp.ensemble_data['nav_longitude'][k]=nmean(ens.slongitude)
adcp.ensemble_data['nav_latitude'][k]=nmean(ens.slatitude)
# DBG
#print "nmean(%s) => %s"%(ens.slongitude,adcp.ensemble_data['nav_longitude'][k])
#print "nmean(%s) => %s"%(ens.slatitude,adcp.ensemble_data['nav_latitude'][k])
# out of curiosity, does this ever happen??
#if cfg.sourceprog=='instrument' and isfinite(adcp.nav_latitude[k]) and adcp.nav_latitude[k]!=0:
# print "##################### instrument has some data ###################"
elif cfg.sourceprog == 'VMDAS':
adcp.ensemble_data['nav_smtime'][k] =ens.smtime[0]
adcp.ensemble_data['nav_emtime'][k] =ens.emtime[0]
adcp.ensemble_data['nav_slatitude'][k]=ens.slatitude[0]
adcp.ensemble_data['nav_elatitude'][k]=ens.elatitude[0]
adcp.ensemble_data['nav_slongitude'][k]=ens.slongitude[0]
adcp.ensemble_data['nav_elongitude'][k]=ens.elongitude[0]
adcp.ensemble_data['nav_mtime'][k]=nmean(ens.nmtime)
##
msg("\nRead to byte %d in a file of size %d bytes\n"%( fd.tell(),naminfo.st_size ) )
if fd.tell()+hdr.nbyte<naminfo.st_size:
msg("-->There may be another %d ensembles unread\n" % int((naminfo.st_size-fd.tell())/(hdr.nbyte+2)) )
fd.close()
if n < len(adcp.ensemble_data):
msg("Truncating data to the valid set of records\n")
adcp.ensemble_data = adcp.ensemble_data[:n]
adcp.bin_data = adcp.bin_data[:n]
# RH: invalidate bad bottom track
bt_invalid = adcp.ensemble_data['bt_vel'][:,0]==-32768
adcp.ensemble_data['bt_vel'][bt_invalid]=np.nan
# and make the fields show up more like the matlab code:
for name,typ in ens_dtype:
setattr(adcp,name,adcp.ensemble_data[name])
for name,typ in bin_dtype:
setattr(adcp,name,adcp.bin_data[name])
# and normalize the latitude/longitude naming:
adcp.latitude = None
adcp.longitude = None
if cfg:
if cfg.sourceprog == 'VMDAS':
# punting on which lat/lon fields to reference
msg("VMDAS input - assuming nav_s* fields are better than nav_e*\n")
adcp.latitude = adcp.nav_slatitude
adcp.longitude = adcp.nav_slongitude
# the arrays are there, but the values aren't set yet
#print("adcp lat/lon %f %f\n"%(adcp.latitude,adcp.longitude))
        elif cfg.sourceprog == 'WINRIVER':
adcp.latitude = adcp.nav_latitude
adcp.longitude = adcp.nav_longitude
# too early to print
#print("adcp lat/lon %f %f\n"%(adcp.latitude[0],adcp.longitude[0]))
return adcp
#----------------------------------------
#function valid=checkheader(fd)
def checkheader(fd):
""" Given an open file object, read the ensemble size, skip
ahead, make sure we can read the cfg bytes of the *next*
ensemble, come back to the starting place, and report success.
"""
valid=0
starting_pos=fd.tell()
try:
# have to use the file descriptor version, since we're just getting
# the file object, not a filename
# info = os.fstat(fd.fileno())
numbytes=fromfile(fd,int16,1) # Following the header bytes is numbytes
if len(numbytes) and numbytes[0]>0: # and we move forward numbytes>0
fd.seek(numbytes[0]-2,os.SEEK_CUR)
            cfgid=fromfile(fd,uint8,2) # which returns [] if EOF is hit
if len(cfgid)==2: # will Skip the last ensemble (sloppy code)
# fprintf([dec2hex(cfgid(1)) ' ' dec2hex(cfgid(2)) '\n'])
if cfgid[0]==0x7F and cfgid[1]==0x7F: # and we have *another* 7F7F
valid=1 # ...ONLY THEN it is valid.
finally:
fd.seek(starting_pos)
return valid
#-------------------------------------
# function [hdr,pos]=rd_hdr(fd)
def rd_hdr(fd,msg=msg_print):
# Read config data
# Changed by <NAME> to skip weird stuff at BOF (apparently
# can happen when switching from one flash card to another
# in moored ADCPs).
# on failure, return hdr=None
cfgid=fromfile(fd,uint8,2)
nread=0
# departure from matlab code - check to see if cfgid itself was
# truncated at EOF
while len(cfgid)<2 or (cfgid[0] != 0x7F or cfgid[1]!=0x7F) or not checkheader(fd):
nextbyte=fromfile(fd,uint8,1)
pos=fd.tell()
nread+=1
if len(nextbyte)==0: # End of file
msg('EOF reached before finding valid cfgid\n')
hdr=None
return hdr,pos
# seems backwards, but they're the same value - 0x7F
cfgid[1],cfgid[0] = cfgid[0],nextbyte[0]
if pos % 1000==0:
msg("Still looking for valid cfgid at file position %d...\n"%pos)
#end
#end
pos=fd.tell()-2
if nread>0:
msg("Junk found at BOF...skipping %d bytes until\n"%nread )
msg("cfgid=%x %x at file pos %d\n"%(cfgid[0],cfgid[1],pos))
#end
hdr=rd_hdrseg(fd)
return hdr,pos
#-------------------------------------
#function cfg=rd_fix(fd)
def rd_fix(fd,msg=msg_print):
# Read config data
cfgid=fromfile(fd,uint16,1)
if len(cfgid) == 0:
msg("WARNING: ran into end of file reading Fixed header ID\n")
elif cfgid[0] != 0: # 0x0000
msg("WARNING: Fixed header ID %x incorrect - data corrupted or not a BB/WH raw file?\n"%cfgid[0])
#end
cfg,nbytes=rd_fixseg(fd)
return cfg
#--------------------------------------
#function [hdr,nbyte]=rd_hdrseg(fd)
class Header(object):
pass
def rd_hdrseg(fd):
# Reads a Header
hdr = Header()
hdr.nbyte =fromfile(fd,int16,1)[0]
fd.seek(1,os.SEEK_CUR)
ndat=fromfile(fd,int8,1)[0]
hdr.dat_offsets =fromfile(fd,int16,ndat)
nbyte=4+ndat*2
return hdr,nbyte
#-------------------------------------
#function opt=getopt(val,varargin)
def getopt(val,*args):
# Returns one of a list (0=first in varargin, etc.)
val = int(val) # in case it's a boolean
if val>=len(args):
return 'unknown'
else:
return args[val]
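# Example: getopt(config[0]&8==8, 'concave', 'convex') -> 'convex' when bit 3
# is set; an out-of-range index returns 'unknown'.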
#
#-------------------------------------
# function [cfg,nbyte]=rd_fixseg(fd)
class Config(object):
pass
def rd_fixseg(fd):
""" returns Config, nbyte
Reads the configuration data from the fixed leader
"""
##disp(fread(fd,10,'uint8'))
##fseek(fd,-10,os.SEEK_CUR)
cfg = Config()
cfg.name='wh-adcp'
cfg.sourceprog='instrument' # default - depending on what data blocks are
# around we can modify this later in rd_buffer.
    cfg.prog_ver       =fromfile(fd,uint8,1)[0]+fromfile(fd,uint8,1)[0]/100.0
# 8,9,16 - WH navigator
# 10 -rio grande
# 15, 17 - NB
# 19 - REMUS, or customer specific
# 11- H-ADCP
# 31 - Streampro
# 34 - NEMO
# 50 - WH, no bottom track (built on 16.31)
# 51 - WH, w/ bottom track
# 52 - WH, mariner
if int(cfg.prog_ver) in (4,5):
cfg.name='bb-adcp'
elif int(cfg.prog_ver) in (8,9,10,16,50,51,52):
cfg.name='wh-adcp'
elif int(cfg.prog_ver) in (14,23): # phase 1 and phase 2
cfg.name='os-adcp'
else:
cfg.name='unrecognized firmware version'
#end
config =fromfile(fd,uint8,2) # Coded stuff
cfg.config ="%2o-%2o"%(config[1],config[0])
cfg.beam_angle =getopt(config[1]&3,15,20,30)
# RCH: int is needed here because the equality evaluates to a boolean, but
# getopt expects an integer which can be used to index the list of arguments.
# in the expression above, config[1]&3 evaluates to an integer
cfg.numbeams =getopt( config[1]&16==16,4,5)
cfg.beam_freq =getopt(config[0]&7,75,150,300,600,1200,2400,38)
cfg.beam_pattern =getopt(config[0]&8==8,'concave','convex') # 1=convex,0=concave
cfg.orientation =getopt(config[0]&128==128,'down','up') # 1=up,0=down
    # 8/31/12: the code below has since been translated to python as well;
    # a few notes kept from the porting effort:
# fread(fd,count,'type') => fromfile(fd,type,count)
# note that fromfile always returns an array - so when count==1, you
# may want fromfile(...)[0] to get back a scalar value
# returns are sneaky - since matlab defines the return values at the beginning
# but python must explicitly specify return values at each return statement
# to get something like a matlab struct, declare an empty class (see Config above)
# then you can do things like cfg.simflag = 123
# RCH: fromfile returns a list, so index it with [0] to get an int
cfg.simflag =getopt(fromfile(fd,uint8,1)[0],'real','simulated') # Flag for simulated data
fd.seek(1,os.SEEK_CUR) # fseek(fd,1,'cof')
cfg.n_beams =fromfile(fd,uint8,1)[0]
cfg.n_cells =fromfile(fd,uint8,1)[0]
cfg.pings_per_ensemble=fromfile(fd,uint16,1)[0]
cfg.cell_size =fromfile(fd,uint16,1)[0]*.01 # meters
cfg.blank =fromfile(fd,uint16,1)[0]*.01 # meters
cfg.prof_mode =fromfile(fd,uint8,1)[0]
cfg.corr_threshold =fromfile(fd,uint8,1)[0]
cfg.n_codereps =fromfile(fd,uint8,1)[0]
cfg.min_pgood =fromfile(fd,uint8,1)[0]
cfg.evel_threshold =fromfile(fd,uint16,1)[0]
cfg.time_between_ping_groups = sum( fromfile(fd,uint8,3) * array([60, 1, .01]) ) # seconds
coord_sys =fromfile(fd,uint8,1)[0] # Lots of bit-mapped info
cfg.coord="%2o"%coord_sys
# just like C...
cfg.coord_sys =getopt( (coord_sys >> 3)&3,'beam','instrument','ship','earth')
# RCH: need into since it's an equality comparison which gives a boolean
cfg.use_pitchroll =getopt(coord_sys&4==4,'no','yes')
cfg.use_3beam =getopt(coord_sys&2==2,'no','yes')
cfg.bin_mapping =getopt(coord_sys&1==1,'no','yes')
cfg.xducer_misalign=fromfile(fd,int16,1)[0]*.01 # degrees
cfg.magnetic_var =fromfile(fd,int16,1)[0]*.01 # degrees
cfg.sensors_src ="%2o"%(fromfile(fd,uint8,1)[0])
cfg.sensors_avail ="%2o"%(fromfile(fd,uint8,1)[0])
cfg.bin1_dist =fromfile(fd,uint16,1)[0]*.01 # meters
cfg.xmit_pulse =fromfile(fd,uint16,1)[0]*.01 # meters
cfg.water_ref_cells=fromfile(fd,uint8,2)
cfg.fls_target_threshold =fromfile(fd,uint8,1)[0]
fd.seek(1,os.SEEK_CUR) # fseek(fd,1,'cof')
cfg.xmit_lag =fromfile(fd,uint16,1)[0]*.01 # meters
nbyte=40
if int(cfg.prog_ver) in (8,10,16,50,51,52):
if cfg.prog_ver>=8.14: # Added CPU serial number with v8.14
cfg.serialnum =fromfile(fd,uint8,8)
nbyte+=8
#end
if cfg.prog_ver>=8.24: # Added 2 more :w bytes with v8.24 firmware
cfg.sysbandwidth =fromfile(fd,uint8,2)
nbyte+=2
#end
if cfg.prog_ver>=16.05: # Added 1 more bytes with v16.05 firmware
cfg.syspower =fromfile(fd,uint8,1)[0]
nbyte+=1
#end
if cfg.prog_ver>=16.27: # Added bytes for REMUS, navigators, and HADCP
cfg.navigator_basefreqindex=fromfile(fd,uint8,1)[0]
nbyte+=1
cfg.remus_serialnum=fromfile(fd,uint8,4)
nbyte+=4
cfg.h_adcp_beam_angle=fromfile(fd,uint8,1)[0]
nbyte+=1
#end
elif int(cfg.prog_ver)==9:
if cfg.prog_ver>=9.10: # Added CPU serial number with v8.14
cfg.serialnum =fromfile(fd,uint8,8)
nbyte+=8
cfg.sysbandwidth =fromfile(fd,uint8,2)
nbyte+=2
            #end
elif int(cfg.prog_ver) in (14,16):
cfg.serialnum =fromfile(fd,uint8,8) # 8 bytes 'reserved'
nbyte+=8
# It is useful to have this precomputed.
cfg.ranges=cfg.bin1_dist+arange(cfg.n_cells)*cfg.cell_size
    if cfg.orientation=='up':  # orientation was decoded above to 'up'/'down' (bit set meant 'up')
cfg.ranges *= -1
return cfg,nbyte
#-----------------------------
#function [ens,hdr,cfg,pos]=rd_buffer(fd,num_av)
ens_alloc = None
ens_alloc_num_av = None
hdr = None
FIXOFFSET = None
SOURCE = None
def rd_buffer(fd,num_av,msg=msg_print):
""" RH: return ens=None, hdr=None if there's a problem
returns (ens,hdr,cfg,pos)
"""
# To save it being re-initialized every time.
# [python] cache the preallocated array in ens_alloc, and remember
# what num_av was, so we can reallocate when called with a different num_av.
# otherwise global/local is too confusing, as other parts of the code use
# ens both for a local variable and a global variable, or kind of appear to do
# so.
global ens_alloc,ens_alloc_num_av, hdr
pos = None
# A fudge to try and read files not handled quite right.
global FIXOFFSET, SOURCE
# If num_av<0 we are reading only 1 element and initializing
if num_av<0:
SOURCE=0
class Ensemble(object):
pass
cfg=None
ens=None
if num_av == ens_alloc_num_av:
ens = ens_alloc
# This reinitializes to whatever length of ens we want to average.
if num_av<0 or ens is None:
FIXOFFSET=0
n=abs(num_av)
[hdr,pos]=rd_hdr(fd,msg)
if hdr is None:
return ens,hdr,cfg,pos
cfg=rd_fix(fd,msg)
fd.seek(pos,os.SEEK_SET)
ens_dtype = [('number',float64),
('rtc',(float64,7)),
('BIT',float64),
('ssp',float64),
('depth',float64),
('pitch',float64),
('roll',float64),
('heading',float64),
('temperature',float64),
('salinity',float64),
('mpt',float64),
('heading_std',float64),
('pitch_std',float64),
('roll_std',float64),
('adc',(float64,8)),
('error_status_wd',float64),
('pressure',float64),
('pressure_std',float64),
('east_vel',(float64,cfg.n_cells)),
('north_vel',(float64,cfg.n_cells)),
('vert_vel',(float64,cfg.n_cells)),
('error_vel',(float64,cfg.n_cells)),
('intens',(float64,(cfg.n_cells,4))),
('percent',(float64,(cfg.n_cells,4))),
('corr',(float64,(cfg.n_cells,4))),
('status',(float64,(cfg.n_cells,4))),
('bt_mode',float64),
('bt_range',(float64,4)),
('bt_vel',(float64,4)),
('bt_corr',(float64,4)),
('bt_ampl',(float64,4)),
('bt_perc_good',(float64,4)),
('smtime',float64),
('emtime',float64),
('slatitude',float64),
('slongitude',float64),
('elatitude',float64),
('elongitude',float64),
('nmtime',float64),
('flags',float64) ]
ens = Ensemble()
ens.ensemble_data = zeros( n, dtype=ens_dtype)
for name,typ in ens_dtype:
setattr(ens,name,ens.ensemble_data[name])
ens_alloc = ens
ens_alloc_num_av = num_av
num_av=abs(num_av)
k=-1 # a bit tricky - it gets incremented at the beginning of an ensemble
while k+1<num_av:
# This is in case junk appears in the middle of a file.
num_search=6000
id1=fromfile(fd,uint8,2)
search_cnt=0
while search_cnt<num_search and \
              (len(id1)<2 or id1[0]!=0x7F or id1[1]!=0x7F or not checkheader(fd)):
search_cnt+=1
nextbyte=fromfile(fd,uint8,1)
if len(nextbyte)==0: # End of file
msg("EOF reached after %d bytes searched for next valid ensemble start\n"%search_cnt)
ens=None
return ens,hdr,cfg,pos
id1[1]=id1[0]
id1[0]=nextbyte[0]
# fprintf([dec2hex(id1(1)) '--' dec2hex(id1(2)) '\n'])
if search_cnt==num_search:
print("ERROR: Searched %d entries..."%search_cnt)
print("Not a workhorse/broadband file or bad data encountered: -> %x%x"%(id1[0],id1[1]))
ens = None
return ens,hdr,cfg,pos
elif search_cnt>0:
msg("Searched %d bytes to find next valid ensemble start\n"%search_cnt)
startpos=fd.tell()-2 # Starting position.
# Read the # data types.
[hdr,nbyte]=rd_hdrseg(fd)
byte_offset=nbyte+2
## fprintf('# data types = %d\n ',(length(hdr.dat_offsets)))
## fprintf('Blocklen = %d\n ',hdr.nbyte)
# Read all the data types.
for n in range(len(hdr.dat_offsets)): # n: 1 => 0 based
id_="%04X"%fromfile(fd,uint16,1)[0]
# handle all the various segments of data. Note that since I read the IDs as a two
# byte number in little-endian order the high and low bytes are exchanged compared to
# the values given in the manual.
#
winrivprob=0
#print("n,id = %d %s\n"%(n,id_))
if id_ == '0000':
# case '0000', # Fixed leader
[cfg,nbyte]=rd_fixseg(fd)
nbyte+=2
elif id_ == '0080': # Variable Leader
# So I think that we need to increment k here, as this marks the
# beginning of a record, but we want k to remain 0-based, so above
# it was initialized to -1 (just as in the matlab code it is initialized
# to 0).
k+=1
ens.number[k] =fromfile(fd,uint16,1)[0]
ens.rtc[k,:] =fromfile(fd,uint8,7)
ens.number[k] =ens.number[k]+65536*fromfile(fd,uint8,1)[0]
ens.BIT[k] =fromfile(fd,uint16,1)[0]
ens.ssp[k] =fromfile(fd,uint16,1)[0]
ens.depth[k] =fromfile(fd,uint16,1)[0]*.1 # meters
ens.heading[k] =fromfile(fd,uint16,1)[0]*.01 # degrees
ens.pitch[k] =fromfile(fd,int16,1)[0]*.01 # degrees
ens.roll[k] =fromfile(fd,int16,1)[0]*.01 # degrees
ens.salinity[k] =fromfile(fd,int16,1)[0] # PSU
ens.temperature[k] =fromfile(fd,int16,1)[0]*.01 # Deg C
ens.mpt[k] =sum( fromfile(fd,uint8,3) * array([60,1,.01])) # seconds
ens.heading_std[k] =fromfile(fd,uint8,1)[0] # degrees
ens.pitch_std[k] =fromfile(fd,uint8,1)[0]*.1 # degrees
ens.roll_std[k] =fromfile(fd,uint8,1)[0]*.1 # degrees
ens.adc[k,:] =fromfile(fd,uint8,8)
nbyte=2+40
if cfg.name =='bb-adcp':
if cfg.prog_ver>=5.55:
fd.seek(15,os.SEEK_CUR) # 14 zeros and one byte for number WM4 bytes
cent=fromfile(fd,uint8,1)[0] # possibly also for 5.55-5.58 but
ens.rtc[k,:] = fromfile(fd,uint8,7) # I have no data to test.
ens.rtc[k,0] += cent*100
nbyte+=15+8
# end
elif cfg.name == 'wh-adcp': # for WH versions.
ens.error_status_wd[k]=fromfile(fd,uint32,1)[0]
nbyte+=4
if int(cfg.prog_ver) in (8,10,16,50,51,52):
if cfg.prog_ver>=8.13: # Added pressure sensor stuff in 8.13
fd.seek(2,os.SEEK_CUR)
ens.pressure[k] =fromfile(fd,uint32,1)[0]
ens.pressure_std[k] =fromfile(fd,uint32,1)[0]
nbyte+=10
# end
if cfg.prog_ver>=8.24: # Spare byte added 8.24
fd.seek(1,os.SEEK_CUR)
nbyte+=1
# end
if ( cfg.prog_ver>=10.01 and cfg.prog_ver<=10.99 ) or \
cfg.prog_ver>=16.05: # Added more fields with century in clock 16.05
cent=fromfile(fd,uint8,1)[0]
ens.rtc[k,:]=fromfile(fd,uint8,7)
ens.rtc[k,0]+=cent*100
nbyte+=8
# end
elif int(cfg.prog_ver)==9:
fd.seek(2,os.SEEK_CUR)
ens.pressure[k] =fromfile(fd,uint32,1)[0]
ens.pressure_std[k] =fromfile(fd,uint32,1)[0]
nbyte+=10
if cfg.prog_ver>=9.10: # Spare byte added 8.24
fd.seek(1,os.SEEK_CUR)
nbyte+=1
# end
# end
elif cfg.name=='os-adcp':
fd.seek(16,os.SEEK_CUR) # 30 bytes all set to zero, 14 read above
nbyte+=16
if cfg.prog_ver>23:
fd.seek(2,os.SEEK_CUR)
nbyte+=2
#end
#end
elif id_ == '0100': # Velocities
# RCH: will need to check array ordering on these - may have rows/cols
# switched!
vels=fromfile(fd,int16,4*cfg.n_cells).reshape([cfg.n_cells,4]) * 0.001 # m/s
ens.east_vel[k,:] =vels[:,0]
ens.north_vel[k,:] =vels[:,1]
ens.vert_vel[k,:] =vels[:,2]
ens.error_vel[k,:] =vels[:,3]
nbyte=2+4*cfg.n_cells*2
elif id_ == '0200': # Correlations
# RCH check array ordering:
ens.corr[k,:,:] =fromfile(fd,uint8,4*cfg.n_cells).reshape([cfg.n_cells,4])
nbyte=2+4*cfg.n_cells
elif id_ == '0300': # Echo Intensities
# RCH check array ordering:
ens.intens[k,:,:] =fromfile(fd,uint8,4*cfg.n_cells).reshape([cfg.n_cells,4])
nbyte=2+4*cfg.n_cells
elif id_ == '0400': # Percent good
ens.percent[k,:,:] =fromfile(fd,uint8,4*cfg.n_cells).reshape([cfg.n_cells,4])
nbyte=2+4*cfg.n_cells
elif id_ == '0500': # Status
# RESUME TRANSLATION HERE
# Rusty, I was not consistent about retaining "end" statements
# I noticed after deleting several that you had been keeping
# commented out versions.
if cfg.name=='os-adcp':
# fd.seek(00,os.SEEK_CUR) # zero seek is in the original matlab...
nbyte=2 # +00
else:
# Note in one case with a 4.25 firmware SC-BB, it seems like
# this block was actually two bytes short!
ens.status[k,:,:] =fromfile(fd,uint8,4*cfg.n_cells).reshape([cfg.n_cells,4])
nbyte=2+4*cfg.n_cells
elif id_ == '0600': # Bottom track
# In WINRIVER GPS data is tucked into here in odd ways, as long
# as GPS is enabled.
if SOURCE==2:
fd.seek(2,os.SEEK_CUR)
# Rusty, I added the [0] below and in several other places
long1=fromfile(fd,uint16,1)[0]
# added bt mode extraction - ben
fd.seek(3,os.SEEK_CUR)
ens.bt_mode[k] = float64(fromfile(fd,uint8,1)[0]) # fromfile(fd,uint8,1)[0]
fd.seek(2,os.SEEK_CUR)
#fd.seek(6,os.SEEK_CUR)
ens.slatitude[k] =fromfile(fd,int32,1)[0]*cfac
if ens.slatitude[k]==0:
ens.slatitude[k]=nan
else:
#fd.seek(14,os.SEEK_CUR) # Skip over a bunch of stuff
fd.seek(7,os.SEEK_CUR) # Skip over a bunch of stuff
ens.bt_mode[k] = float64(fromfile(fd,uint8,1)[0])
fd.seek(6,os.SEEK_CUR) # Skip over a bunch of stuff
# end
ens.bt_range[k,:]=fromfile(fd,uint16,4)*.01 #
ens.bt_vel[k,:] =fromfile(fd,int16,4)
ens.bt_corr[k,:] =fromfile(fd,uint8,4) # felipe pimenta aug. 2006
ens.bt_ampl[k,:]=fromfile(fd,uint8,4) # "
ens.bt_perc_good[k,:]=fromfile(fd,uint8,4) # "
if SOURCE==2:
fd.seek(2,os.SEEK_CUR)
# The original rdradcp code:
# ens.slongitude[k]=(long1+65536*fromfile(fd,uint16,1)[0])*cfac
# Fix from DKR:
tmp=(long1+65536*fromfile(fd,uint16,1)[0])*cfac
if long1==0:
ens.slongitude[k]=nan #dkr --> else longitudes bad
else:
ens.slongitude[k]=tmp
#end
#fprintf('\n k %d %8.3f %f ',long1,ens.slongitude(k),(ens.slongitude(k)/cfac-long1)/65536)
if ens.slongitude[k]>180:
ens.slongitude[k]=ens.slongitude[k]-360
if ens.slongitude[k]==0:
ens.slongitude[k]=nan
fd.seek(16,os.SEEK_CUR)
qual=fromfile(fd,uint8,1)
if qual==0:
## fprintf('qual==%d,%f %f',qual,ens.slatitude(k),ens.slongitude(k))
ens.slatitude[k]=nan
ens.slongitude[k]=nan
fd.seek(71-45-21,os.SEEK_CUR)
else:
fd.seek(71-45,os.SEEK_CUR)
# end
nbyte=2+68
if cfg.prog_ver>=5.3: # Version 4.05 firmware seems to be missing these last 11 bytes.
fd.seek(78-71,os.SEEK_CUR)
ens.bt_range[k,:]=ens.bt_range[k,:]+fromfile(fd,uint8,4)*655.36
nbyte+=11
if cfg.name == 'wh-adcp':
if cfg.prog_ver>=16.20: # RDI documentation claims these extra bytes were added in v 8.17
fd.seek(4,os.SEEK_CUR) # but they don't appear in my 8.33 data - conversation with
nbyte+=4 # Egil suggests they were added in 16.20
#end
#end
#end
# end # id_==0600 # bottom track
elif id_ == '2000': # Something from VMDAS.
# The raw files produced by VMDAS contain a binary navigation data
# block.
cfg.sourceprog='VMDAS'
if SOURCE != 1:
msg("\n***** Apparently a VMDAS file \n")
#end
SOURCE=1
utim =fromfile(fd,uint8,4)
            mtime          =datenum(utim[2]+utim[3]*256,utim[1],utim[0]) # 0-based indices (matlab: utim(3)+utim(4)*256,utim(2),utim(1))
ens.smtime[k] =mtime+fromfile(fd,uint32,1)[0]/8640000.
fd.seek(4,os.SEEK_CUR) # PC clock offset from UTC
ens.slatitude[k] =fromfile(fd,int32,1)[0]*cfac
ens.slongitude[k] =fromfile(fd,int32,1)[0]*cfac
ens.emtime[k] =mtime+fromfile(fd,uint32,1)[0]/8640000.
ens.elatitude[k] =fromfile(fd,int32,1)[0]*cfac
ens.elongitude[k] =fromfile(fd,int32,1)[0]*cfac
fd.seek(12,os.SEEK_CUR)
ens.flags[k] =fromfile(fd,uint16,1)[0]
fd.seek(6,os.SEEK_CUR)
utim =fromfile(fd,uint8,4)
            mtime         =datenum(utim[0]+utim[1]*256,utim[3],utim[2]) # 0-based indices (matlab: utim(1)+utim(2)*256,utim(4),utim(3))
ens.nmtime[k] =mtime+fromfile(fd,uint32,1)[0]/8640000.
# in here we have 'ADCP clock' (not sure how this
# differs from RTC (in header) and UTC (earlier in this block).
fd.seek(16,os.SEEK_CUR)
nbyte=2+76
elif id_ == '2022': # New NMEA data block from WInRiverII
cfg.sourceprog='WINRIVER2'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
#end
SOURCE=2
specID=fromfile(fd,uint16,1)[0]
msgsiz=fromfile(fd,int16,1)[0]
deltaT=fromfile(fd,uint8,8)
nbyte=2+12
fd.seek(msgsiz,os.SEEK_CUR)
nbyte+=msgsiz
# print "post msgsiz, nbyte=%d"%nbyte
## do nothing code on specID
# fprintf(' %d ',specID)
# switch specID,
# case 100,
# case 101,
# case 102,
# case 103,
# end
                # The following blocks come from WINRIVER files, they apparently contain
# the raw NMEA data received from a serial port.
#
# Note that for WINRIVER files somewhat decoded data is also available
# tucked into the bottom track block.
#
# I've put these all into their own block because RDI's software apparently completely ignores the
# stated lengths of these blocks and they very often have to be changed. Rather than relying on the
# error coding at the end of the main block to do this (and to produce an error message) I will
# do it here, without an error message to emphasize that I am kludging the WINRIVER blocks only!
elif id_ in ('2100','2101','2102','2103','2104'):
winrivprob=1
if id_ == '2100': # $xxDBT (Winriver addition) 38
cfg.sourceprog='WINRIVER'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
SOURCE=2
str_=fd.read(38) # fromfile(fd,uchar,38)
nbyte=2+38
elif id_ == '2101': # $xxGGA (Winriver addition) 94 in manual but 97 seems to work
# Except for a winriver2 file which seems to use 77.
cfg.sourceprog='WINRIVER'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
SOURCE=2
str_=fd.read(97) # setstr(fromfile(fd,uchar,97))
nbyte=2+97
l = str_.find('$GPGGA')
if l >= 0:
# original indices: str(l+7:l+8) str(l+9:l+10) str(l+11:l+12)
# but we are both zero-based, and ending index is exclusive...
# but since l is already zero-based instead of 1 based, we only have to change
# the ending indexes in each case.
try:
# occasionally the GPS will have logged an incomplete reading -
# and this may fail.
hh,mm,ss = int(str_[l+7:l+9]), int(str_[l+9:l+11]), float(str_[l+11:l+13])
ens.smtime[k]=(hh+(mm+ss/60.)/60.)/24.
except ValueError:
msg('Corrupt GPS string - skipping')
# disp(['->' setstr_(str_(1:50)) '<-'])
elif id_ == '2102': # $xxVTG (Winriver addition) 45 (but sometimes 46 and 48)
cfg.sourceprog='WINRIVER'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
#end
SOURCE=2
str_=fd.read(45)
nbyte=2+45
#disp(setstr(str_))
elif id_ == '2103': # $xxGSA (Winriver addition) 60
cfg.sourceprog='WINRIVER'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
#end
SOURCE=2
str_=fd.read(60)
nbyte=2+60
elif id_ == '2104': #xxHDT or HDG (Winriver addition) 38
cfg.sourceprog='WINRIVER'
if SOURCE != 2:
msg("\n***** Apparently a WINRIVER file - Raw NMEA data handler not yet implemented\n")
#end
SOURCE=2
str_=fd.read(38)
nbyte=2+38
elif id_ == '0701': # Number of good pings
fd.seek(4*cfg.n_cells,os.SEEK_CUR)
nbyte=2+4*cfg.n_cells
elif id_ == '0702': # Sum of squared velocities
fd.seek(4*cfg.n_cells,os.SEEK_CUR)
nbyte=2+4*cfg.n_cells
elif id_ == '0703': # Sum of velocities
fd.seek(4*cfg.n_cells,os.SEEK_CUR)
nbyte=2+4*cfg.n_cells
# These blocks were implemented for 5-beam systems
elif id_ == '0A00': # Beam 5 velocity (not implemented)
fd.seek(cfg.n_cells,os.SEEK_CUR)
nbyte=2+cfg.n_cells
elif id_ == '0301': # Beam 5 Number of good pings (not implemented)
fd.seek(cfg.n_cells,os.SEEK_CUR)
nbyte=2+cfg.n_cells
elif id_ == '0302': # Beam 5 Sum of squared velocities (not implemented)
fd.seek(cfg.n_cells,os.SEEK_CUR)
nbyte=2+cfg.n_cells
elif id_ == '0303': # Beam 5 Sum of velocities (not implemented)
fd.seek(cfg.n_cells,os.SEEK_CUR)
nbyte=2+cfg.n_cells
elif id_ == '020C': # Ambient sound profile (not implemented)
fd.seek(4,os.SEEK_CUR)
nbyte=2+4
elif id_ == '3000': # Fixed attitude data format for OS-ADCPs (not implemented)
fd.seek(32,os.SEEK_CUR)
nbyte=2+32
else:
# This is pretty idiotic - for OS-ADCPs (phase 2) they suddenly decided to code
# the number of bytes into the header ID word. And then they don't really
# document what they did! So, this is cruft of a high order, and although
# it works on the one example I have - caveat emptor....
#
# Anyway, there appear to be codes 0340-03FC to deal with. I am not going to
# decode them but I am going to try to figure out how many bytes to
# skip.
#if strcmp(id(1:2),'30'),
if id_[:2] == '30':
# I want to count the number of 1s in the middle 4 bits of the
# 2nd two bytes.
nflds= bin( int(id_[2:4],16) & 0x3C ).count('1')
# I want to count the number of 1s in the highest 2 bits of byte 3
dfac = bin(int(id_[2],16)&0x0C).count('1')
fd.seek(12*nflds*dfac,os.SEEK_CUR)
nbyte=2+12*nflds*dfac
else:
msg( "Unrecognized ID code: %s"%id_ )
# DBG:
                #raise Exception("STOP")
nbyte=2
ens = None
return ens,hdr,cfg,pos
## ens=-1
##
#end
# here I adjust the number of bytes so I am sure to begin
# reading at the next valid offset. If everything is working right I shouldn't have
# to do this but every so often firware changes result in some differences.
# print '#bytes is %d, original offset is %d'%(nbyte,byte_offset)
byte_offset=byte_offset+nbyte
# both n and hdr.dat_offsets are now 0-based, but len() is unchanged - so be
# careful on comparisons to len(hdr.dat_offsets)
if n+1<len(hdr.dat_offsets):
if hdr.dat_offsets[n+1] != byte_offset:
if not winrivprob:
msg("%s: Adjust location by %d\n"%(id_,hdr.dat_offsets[n+1]-byte_offset) )
fd.seek(hdr.dat_offsets[n+1]-byte_offset,os.SEEK_CUR)
#end
byte_offset=hdr.dat_offsets[n+1]
else:
if hdr.nbyte-2 != byte_offset:
if not winrivprob:
msg("%s: Adjust location by %d\n"%(id_,hdr.nbyte-2-byte_offset))
fd.seek(hdr.nbyte-2-byte_offset,os.SEEK_CUR)
#end
byte_offset=hdr.nbyte-2
#end
#end
# Now at the end of the record we have two reserved bytes, followed
# by a two-byte checksum = 4 bytes to skip over.
readbytes=fd.tell()-startpos
offset=(hdr.nbyte+2)-byte_offset # The 2 is for the checksum
if offset !=4 and FIXOFFSET==0:
# in python, no direct test for eof (annoying), so step back one byte,
# and try to read it. not sure that this will do the right thing if the
# last thing we did was a failed read - it definitely works if the last thing
# was a bad seek
fd.seek(-1,os.SEEK_CUR)
feof = len(fd.read(1)) == 0
msg("\n*****************************************************\n")
if feof:
msg("EOF reached unexpectedly - discarding this last ensemble\n")
ens=-1
else:
msg("Adjust location by %d (readbytes=%d, hdr.nbyte=%d)\n"%(offset,readbytes,hdr.nbyte))
msg(" NOTE - If this appears at the beginning of the read, it is\n")
msg(" is a program problem, possibly fixed by a fudge\n")
msg(" PLEASE REPORT TO <EMAIL> WITH DETAILS!!\n\n")
msg(" -If this appears at the end of the file it means\n")
msg(" The file is corrupted and only a partial record has \n ")
msg(" has been read\n")
#end
msg("******************************************************\n")
FIXOFFSET=offset-4
#end
fd.seek(4+FIXOFFSET,os.SEEK_CUR)
# An early version of WAVESMON and PARSE contained a bug which stuck an additional two
# bytes in these files, but they really shouldn't be there
#if cfg.prog_ver>=16.05,
# fd.seek(2,os.SEEK_CUR)
#end
#end
# Blank out stuff bigger than error velocity
# big_err=abs(ens.error_vel)>.2
# big_err=0
# Blank out invalid data
# RCH: removed big_err references
ens.east_vel[ens.east_vel==-32.768]=nan
ens.north_vel[ens.north_vel==-32.768]=nan
ens.vert_vel[ens.vert_vel==-32.768]=nan
ens.error_vel[ens.error_vel==-32.768]=nan
return ens,hdr,cfg,pos
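# [python] Minimal usage sketch (hypothetical driver code, not part of the
# original translation): call once with a negative num_av to initialize,
# then read averaged ensembles until rd_buffer() signals trouble:
#   fd = open("deployment.000", "rb")
#   ens, hdr, cfg, pos = rd_buffer(fd, -1)    # probe the file, allocate buffers
#   fd.seek(pos, os.SEEK_SET)                 # rewind to the first ensemble
#   while True:
#       ens, hdr, cfg, pos = rd_buffer(fd, 5) # average 5 pings per record
#       if ens is None:                       # (may also return ens=-1 on a truncated tail)
#           break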
#--------------------------------------
#function y=nmedian(x,window,dim)
def nmedian(x,window=inf,dim=None):
    # python: dim is 0-based now!
    # Copied from matlab's median but with different handling of NaN.
    # RH: along the axis given by dim, and ignoring NaNs, this takes the
    # median of the finite values, drops anything further than `window`
    # from that median, and returns the mean of what survives. With the
    # default window=inf it reduces to the mean of the finite values.
x = array(x) # probably not necessary
if dim is None:
# choose dim to be the first non-unity dimension of x
long_dims = [d for d in range(x.ndim) if x.shape[d]>1]
# and if none are long, revert to summing over 0
dim = (long_dims + [0])[0]
#end
# Depart slightly from the original matlab for dealing with
# the case when dim>=x.ndim. Make x one dimension bigger,
# and set dim to be that. Then all the computations are simpler,
# and if necessary the dimensions can be remangled at the end
orig_dim = dim
if dim>=x.ndim:
dim = x.ndim
x = x[...,None]
# The original shape of x, but with _one_ extra dimension if
# dim was beyond the original size of x
shape_with_dimpad = x.shape
# siz = x.shape # no longer need to explicitly do this.
n = x.shape[dim]
# Permute and reshape so that DIM becomes the row dimension of a 2-D array
# basically rotate the dimensions so that dim becomes the first:
perm = (arange(x.ndim) - dim) % x.ndim
unperm = (arange(x.ndim) + dim) % x.ndim
# x = reshape(permute(x,perm), n,prod(siz)/n)
x = x.transpose(perm)
dims_permuted = x.shape
x = x.reshape( (n,-1) )
# Sort along first dimension
x.sort(axis=0) # in place sort, and puts nan at the end, while -inf,inf are in order.
[n1,n2]=x.shape
if n1==1: # each column has only one row - no stats to be taken, just copy.
y=x
else:
if n2==1:
# summing booleans is safe -
kk=sum(isfinite(x))
if kk > 0:
# x1,x2: if kk is even, the two middle elements
# if kk is odd, both are set to the middle element
x1=x[ int((kk-1)/2) ]
x2=x[ int(kk/2) ]
deviations = abs(x-(x1+x2)/2.)
x[deviations>window]=nan
#end
x.sort(axis=0)
# repeat once since we may have nan'd some values.
kk=sum(isfinite(x))
x[isnan(x)]=0
            y=nan
if kk>0:
y=sum(x)/kk
#end
else:
# count finite values in each column
kk=sum(isfinite(x),axis=0)
            ll = kk<n1-2 # true for columns with more than 2 NaNs
kk[ll]=0 ; x[:,ll]=nan # in those cases, the answer is nan. seems harsh.
# whoa - presumably want to pull the middle two valid values from
# each row
low_median = ((kk-1)/2).clip(0,inf).astype(int32)
high_median = (kk/2).clip(0,inf).astype(int32)
x1=x[ low_median, list(range(n2))]
x2=x[ high_median, list(range(n2))]
# x1,x2 have to get the extra dimension for the broadcast to work
deviations = abs(x - (x1+x2)[None,...]/2.)
x[deviations>window]=nan
x.sort(axis=0)
kk=sum(isfinite(x),axis=0)
x[ isnan(x) ]=0
y=nan+ones(n2)
if any(kk):
valid = kk>0
y[valid] = sum(x[:,valid],axis=0) / kk[valid]
# end
#end
#end
# Now we have y, which has shape x.shape[1:]
# make that back into the shape of x, first by undoing
# the reshape (recalling that we squished the first dimension of x)
y_dims_permuted = list(dims_permuted)
y_dims_permuted[0] = 1
y = y.reshape(y_dims_permuted)
# and then undoing the permute:
y = y.transpose(unperm)
# and finally, pad out some one-entry dimensions in case the user requested
# dim>x.ndim
    while y.ndim <= orig_dim:
        y = y[...,None] # pad y (the value we return), not x
# note that this will leave a 1 entry dimension along the axis
# of the median - unlike normal functions which lose that dimension
return y
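# Example of the window behaviour (values traced by hand through the code
# above): for x=[1, 2, 100, nan] with window=5 the median of the finite
# values is 2, the outlier 100 lies more than 5 away and is dropped, and
# the mean of the survivors is returned:
#   nmedian(array([1., 2., 100., nan]), window=5)  -> array([ 1.5])
#   nmedian(array([1., 2., nan, 4.]))              -> array([ 2.33...])
#   (window=inf keeps everything, so this is just the mean of the finite values)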
#--------------------------------------
#function y=nmean(x,dim)
def nmean(x,dim=None):
# R_NMEAN Computes the mean of matrix ignoring NaN
# values
# R_NMEAN(X,DIM) takes the mean along the dimension DIM of X.
#
xorig = x
x=x.copy() # to get matlab semantics
kk=isfinite(x)
x[~kk]=0
if dim is None:
# choose dim to be the first non-unity dimension of x
long_dims = [d for d in range(x.ndim) if x.shape[d]>1]
# and if none are long, revert to summing over 0
dim = (long_dims + [0])[0]
#end
if dim >= x.ndim:
y=x # For matlab 5.0 only!!! Later versions have a fixed 'sum'
else:
# it's possible that x is just a vector - in which case
# this sum will return a scalar
ndat=atleast_1d( sum(kk,axis=dim) )
indat=(ndat==0)
# If there are no good data then it doesn't matter what
# we average by - and this avoid div-by-zero warnings.
ndat[indat]=1
# division is always elementwise in numpy
y = atleast_1d(sum(x,axis=dim))/ndat.astype(float64)
y[indat]=nan
#end
return y
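# Example: NaNs are excluded from both the sum and the divisor, and the
# result comes back as an array because of atleast_1d:
#   nmean(array([1., nan, 3.]))  -> array([ 2.])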
# related functions
def adcp_merge_nmea(r,gps_fn,adjust_to_utc=False):
"""
parse a NMEA file from WinRiver (i.e. with RDENS sentences),
and add lat/lon to r.
adjust_to_utc: use GPS time to modify the hours place of r.mtime
"""
sents=nmea.parse_nmea(gps_fn)
fixes=[] # [(ens, lat, lon, dn), ... ]
last_rdens=None
last_rmc=None
for sent in sents:
if sent['sentence']=='$RDENS':
last_rdens=sent
elif sent['sentence']=='$GPRMC':
last_rmc=sent
elif sent['sentence']=='$GPGGA':
lat=sent['lat']
lon=sent['lon']
if last_rdens:
ens=int(last_rdens['ensemble'])
else:
ens=-1
if last_rmc:
dn=last_rmc['dn']
else:
dn=np.nan
fixes.append( [ens,lat,lon,dn] )
last_rmc=last_rdens=None
fixes=np.array(fixes)
if len(fixes):
valid=fixes[:,0]>=0
fixes=fixes[valid]
if len(fixes):
lats=np.interp(r.ensemble_data['number'],
fixes[:,0],fixes[:,1],left=np.nan,right=np.nan)
lons=np.interp(r.ensemble_data['number'],
fixes[:,0],fixes[:,2],left=np.nan,right=np.nan)
r.gps_dn=np.interp(r.ensemble_data['number'],
fixes[:,0],fixes[:,3],left=np.nan,right=np.nan)
if adjust_to_utc:
deltas=24*(r.gps_dn - r.mtime)
hour_correction= np.round( np.median(deltas[np.isfinite(deltas)]))
if np.isnan(hour_correction):
print("%s: WARNING: cannot determine time shift - no valid GPS data"%(gps_fn))
else:
if 1: # hour_correction!=0.0:
print("%s: shifting by %d hours, based on GPS time"%(gps_fn,hour_correction))
shift_min=np.round( np.nanmin(deltas) )
shift_max=np.round( np.nanmax(deltas) )
if shift_min != shift_max:
print("%s: WARNING: time zone shift is ambiguous (%.0f-%.0fh)"%(gps_fn,
shift_min,shift_max))
r.mtime += hour_correction/24.
else:
lats=np.nan * r.ensemble_data['number']
lons=lats
ll=np.array([lons,lats]).T
r.ll=ll
r.latitude=lats
r.longitude=lons
return r
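# Typical use (the file name is hypothetical; assumes r was produced by this
# module's reader and that the nmea module used above is importable):
#   r = adcp_merge_nmea(r, "transect01.nmea", adjust_to_utc=True)
#   # r.latitude / r.longitude are now aligned with r.ensemble_data['number']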
def ship_to_earth(r):
"""
rotate r.east_vel, r.north_vel, r.bt_vel
by compass heading.
ship values moved to r.ship_**var**
"""
if r.config.coord_sys!='ship':
raise Exception("applying ship_to_earth but coord sys is %s"%r.config.coord_sys)
r.ship_east_vel=r.east_vel
r.ship_north_vel=r.north_vel
theta=np.exp(-1j*r.heading*np.pi/180)
vel=theta[:,None] * (r.east_vel + 1j*r.north_vel)
r.east_vel=np.real(vel)
r.north_vel=np.imag(vel)
r.ship_bt_vel=r.bt_vel.copy()
bt_vel=theta * (r.bt_vel[:,0] + 1j*r.bt_vel[:,1])
r.bt_vel[:,0] = np.real(bt_vel)
r.bt_vel[:,1] = np.imag(bt_vel)
r.config.coord_sys='earth'
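# Worked example of the rotation above: with heading=90 degrees,
# theta = exp(-1j*pi/2) = -1j, so a ship-frame velocity
# (east_vel, north_vel) = (1, 0) maps to -1j*(1+0j) = -1j,
# i.e. earth-frame (0, -1).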
def to_earth(r):
if r.config.coord_sys=='ship':
ship_to_earth(r)
def add_depth(r):
r.depth=np.median(r.bt_range,axis=1)
def invalidate_from_bed(r):
"""
    where bottom track is good, NaN out data in the bottom 10% of the
    water column (Range < 0.9*Depth)
"""
shape=r.east_vel.shape
bt_valid=np.isfinite(r.bt_vel[:,0])
Depth=-r.depth[:,None]*np.ones(shape)
Range=(-r.config.ranges[None,:])*np.ones(shape)
Depth[~bt_valid,:]=-np.inf
r.east_vel[ Range<0.9*Depth ] = np.nan
r.north_vel[ Range<0.9*Depth ] = np.nan
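# Typical post-processing pipeline built from the helpers above (sketch):
#   to_earth(r)             # rotate to earth coordinates if still in 'ship'
#   add_depth(r)            # per-ensemble depth from median bottom-track range
#   invalidate_from_bed(r)  # NaN velocities in the bottom 10% where bt is valid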
| [
"matplotlib.dates.num2date",
"os.path.exists",
"numpy.median",
"numpy.ones",
"numpy.exp",
"numpy.array",
"numpy.real",
"numpy.isfinite",
"numpy.isnan",
"numpy.interp",
"numpy.nanmin",
"scipy.stats.morestats.circmean",
"os.stat",
"sys.stdout.flush",
"numpy.nanmax",
"numpy.imag",
"sys.stdout.write"
] | [((648, 667), 'sys.stdout.write', 'sys.stdout.write', (['s'], {}), '(s)\n', (664, 667), False, 'import sys, os, re, math\n'), ((672, 690), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (688, 690), False, 'import sys, os, re, math\n'), ((11152, 11165), 'os.stat', 'os.stat', (['name'], {}), '(name)\n', (11159, 11165), False, 'import sys, os, re, math\n'), ((63159, 63174), 'numpy.array', 'np.array', (['fixes'], {}), '(fixes)\n', (63167, 63174), True, 'import numpy as np\n'), ((65015, 65054), 'numpy.exp', 'np.exp', (['(-1.0j * r.heading * np.pi / 180)'], {}), '(-1.0j * r.heading * np.pi / 180)\n', (65021, 65054), True, 'import numpy as np\n'), ((65116, 65128), 'numpy.real', 'np.real', (['vel'], {}), '(vel)\n', (65123, 65128), True, 'import numpy as np\n'), ((65145, 65157), 'numpy.imag', 'np.imag', (['vel'], {}), '(vel)\n', (65152, 65157), True, 'import numpy as np\n'), ((65267, 65282), 'numpy.real', 'np.real', (['bt_vel'], {}), '(bt_vel)\n', (65274, 65282), True, 'import numpy as np\n'), ((65303, 65318), 'numpy.imag', 'np.imag', (['bt_vel'], {}), '(bt_vel)\n', (65310, 65318), True, 'import numpy as np\n'), ((65459, 65488), 'numpy.median', 'np.median', (['r.bt_range'], {'axis': '(1)'}), '(r.bt_range, axis=1)\n', (65468, 65488), True, 'import numpy as np\n'), ((65631, 65658), 'numpy.isfinite', 'np.isfinite', (['r.bt_vel[:, 0]'], {}), '(r.bt_vel[:, 0])\n', (65642, 65658), True, 'import numpy as np\n'), ((8888, 8908), 'os.path.exists', 'os.path.exists', (['name'], {}), '(name)\n', (8902, 8908), False, 'import sys, os, re, math\n'), ((63286, 63379), 'numpy.interp', 'np.interp', (["r.ensemble_data['number']", 'fixes[:, 0]', 'fixes[:, 1]'], {'left': 'np.nan', 'right': 'np.nan'}), "(r.ensemble_data['number'], fixes[:, 0], fixes[:, 1], left=np.nan,\n right=np.nan)\n", (63295, 63379), True, 'import numpy as np\n'), ((63407, 63500), 'numpy.interp', 'np.interp', (["r.ensemble_data['number']", 'fixes[:, 0]', 'fixes[:, 2]'], {'left': 'np.nan', 'right': 'np.nan'}), "(r.ensemble_data['number'], fixes[:, 0], fixes[:, 2], left=np.nan,\n right=np.nan)\n", (63416, 63500), True, 'import numpy as np\n'), ((63532, 63625), 'numpy.interp', 'np.interp', (["r.ensemble_data['number']", 'fixes[:, 0]', 'fixes[:, 3]'], {'left': 'np.nan', 'right': 'np.nan'}), "(r.ensemble_data['number'], fixes[:, 0], fixes[:, 3], left=np.nan,\n right=np.nan)\n", (63541, 63625), True, 'import numpy as np\n'), ((64575, 64597), 'numpy.array', 'np.array', (['[lons, lats]'], {}), '([lons, lats])\n', (64583, 64597), True, 'import numpy as np\n'), ((65687, 65701), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (65694, 65701), True, 'import numpy as np\n'), ((65739, 65753), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (65746, 65753), True, 'import numpy as np\n'), ((63807, 63832), 'numpy.isnan', 'np.isnan', (['hour_correction'], {}), '(hour_correction)\n', (63815, 63832), True, 'import numpy as np\n'), ((13880, 13918), 'scipy.stats.morestats.circmean', 'ssm.circmean', (['(ens.heading * pi / 180.0)'], {}), '(ens.heading * pi / 180.0)\n', (13892, 13918), True, 'import scipy.stats.morestats as ssm\n'), ((10466, 10483), 'matplotlib.dates.num2date', 'num2date', (['dats[0]'], {}), '(dats[0])\n', (10474, 10483), False, 'from matplotlib.dates import date2num, num2date\n'), ((64127, 64144), 'numpy.nanmin', 'np.nanmin', (['deltas'], {}), '(deltas)\n', (64136, 64144), True, 'import numpy as np\n'), ((64183, 64200), 'numpy.nanmax', 'np.nanmax', (['deltas'], {}), '(deltas)\n', (64192, 64200), True, 'import numpy as np\n'), 
((63769, 63788), 'numpy.isfinite', 'np.isfinite', (['deltas'], {}), '(deltas)\n', (63780, 63788), True, 'import numpy as np\n')] |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
from uw_canvas.utilities import fdao_canvas_override
from uw_canvas.sections import Sections
from uw_canvas.models import CanvasSection
import mock
@fdao_canvas_override
class CanvasTestSections(TestCase):
def test_sis_id(self):
section = CanvasSection()
# Missing section ID
self.assertEquals(section.sws_section_id(), None)
self.assertEquals(section.sws_instructor_regid(), None)
self.assertEquals(section.is_academic_sis_id(), False)
# Valid section IDs
section = CanvasSection(sis_section_id="2013-spring-PHYS-121-A--")
self.assertEquals(section.sws_section_id(), "2013,spring,PHYS,121/A")
self.assertEquals(section.sws_instructor_regid(), None)
self.assertEquals(section.is_academic_sis_id(), True)
section = CanvasSection(sis_section_id="2013-spring-PHYS-121-AB")
self.assertEquals(section.sws_section_id(), "2013,spring,PHYS,121/AB")
self.assertEquals(section.sws_instructor_regid(), None)
self.assertEquals(section.is_academic_sis_id(), True)
section = CanvasSection(sis_section_id="2013-autumn-GEN ST-199-A7--")
self.assertEquals(
section.sws_section_id(), "2013,autumn,GEN ST,199/A7")
section = CanvasSection(
sis_section_id=(
"2013-spring-PHYS-599-A-9136CCB8F66711D5BE060004AC494FFE--"))
self.assertEquals(section.sws_section_id(), "2013,spring,PHYS,599/A")
self.assertEquals(
section.sws_instructor_regid(), "9136CCB8F66711D5BE060004AC494FFE")
self.assertEquals(section.is_academic_sis_id(), True)
# Invalid section IDs
section = CanvasSection(sis_section_id="2013-spring-PHYS-121-A")
self.assertEquals(section.sws_section_id(), None)
section = CanvasSection(sis_section_id="2013-spring-PHYS-121-A-")
self.assertEquals(section.sws_section_id(), None)
section = CanvasSection(
sis_section_id=(
"2013-spring-PHYS-599-A-9136CCB8F66711D5BE060004AC494FFE"))
self.assertEquals(section.sws_section_id(), None)
section = CanvasSection(
sis_section_id=(
"2013-spring-PHYS-599-A-9136CCB8F66711D5BE060004AC494FFE-"))
self.assertEquals(section.sws_section_id(), None)
section = CanvasSection(sis_section_id="course_123456_groups")
self.assertEquals(section.sws_section_id(), None)
self.assertEquals(section.sws_instructor_regid(), None)
self.assertEquals(section.is_academic_sis_id(), False)
def test_sections(self):
canvas = Sections()
sections = canvas.get_sections_in_course_by_sis_id(
'2013-spring-CSE-142-A', {'include': ['students']})
self.assertEquals(len(sections), 16, "Too few sections")
n = 0
for section in sections:
n += len(section.students)
self.assertEquals(n, 32, "Too few students")
def test_sections_with_students(self):
canvas = Sections()
sections = canvas.get_sections_with_students_in_course_by_sis_id(
'2013-spring-CSE-142-A')
self.assertEquals(len(sections), 16, "Too few sections")
n = 0
for section in sections:
n += len(section.students)
self.assertEquals(n, 32, "Too few students")
@mock.patch.object(Sections, '_post_resource')
def test_create_section(self, mock_create):
mock_create.return_value = None
canvas = Sections()
canvas.create_section("123456", "Test Section", "test-section-id")
mock_create.assert_called_with(
'/api/v1/courses/123456/sections', {
'course_section': {
'name': 'Test Section',
'sis_section_id': 'test-section-id'}})
@mock.patch.object(Sections, '_put_resource')
def test_update_section(self, mock_update):
mock_update.return_value = None
canvas = Sections()
canvas.update_section("999999", "New Name", "test-section-id")
mock_update.assert_called_with(
'/api/v1/sections/999999', {
'course_section': {
'name': 'New Name',
'sis_section_id': 'test-section-id'}})
| [
"mock.patch.object",
"uw_canvas.models.CanvasSection",
"uw_canvas.sections.Sections"
] | [((3485, 3530), 'mock.patch.object', 'mock.patch.object', (['Sections', '"""_post_resource"""'], {}), "(Sections, '_post_resource')\n", (3502, 3530), False, 'import mock\n'), ((3957, 4001), 'mock.patch.object', 'mock.patch.object', (['Sections', '"""_put_resource"""'], {}), "(Sections, '_put_resource')\n", (3974, 4001), False, 'import mock\n'), ((371, 386), 'uw_canvas.models.CanvasSection', 'CanvasSection', ([], {}), '()\n', (384, 386), False, 'from uw_canvas.models import CanvasSection\n'), ((649, 705), 'uw_canvas.models.CanvasSection', 'CanvasSection', ([], {'sis_section_id': '"""2013-spring-PHYS-121-A--"""'}), "(sis_section_id='2013-spring-PHYS-121-A--')\n", (662, 705), False, 'from uw_canvas.models import CanvasSection\n'), ((929, 984), 'uw_canvas.models.CanvasSection', 'CanvasSection', ([], {'sis_section_id': '"""2013-spring-PHYS-121-AB"""'}), "(sis_section_id='2013-spring-PHYS-121-AB')\n", (942, 984), False, 'from uw_canvas.models import CanvasSection\n'), ((1209, 1268), 'uw_canvas.models.CanvasSection', 'CanvasSection', ([], {'sis_section_id': '"""2013-autumn-GEN ST-199-A7--"""'}), "(sis_section_id='2013-autumn-GEN ST-199-A7--')\n", (1222, 1268), False, 'from uw_canvas.models import CanvasSection\n'), ((1382, 1476), 'uw_canvas.models.CanvasSection', 'CanvasSection', ([], {'sis_section_id': '"""2013-spring-PHYS-599-A-9136CCB8F66711D5BE060004AC494FFE--"""'}), "(sis_section_id=\n '2013-spring-PHYS-599-A-9136CCB8F66711D5BE060004AC494FFE--')\n", (1395, 1476), False, 'from uw_canvas.models import CanvasSection\n'), ((1800, 1854), 'uw_canvas.models.CanvasSection', 'CanvasSection', ([], {'sis_section_id': '"""2013-spring-PHYS-121-A"""'}), "(sis_section_id='2013-spring-PHYS-121-A')\n", (1813, 1854), False, 'from uw_canvas.models import CanvasSection\n'), ((1932, 1987), 'uw_canvas.models.CanvasSection', 'CanvasSection', ([], {'sis_section_id': '"""2013-spring-PHYS-121-A-"""'}), "(sis_section_id='2013-spring-PHYS-121-A-')\n", (1945, 1987), False, 'from uw_canvas.models import CanvasSection\n'), ((2065, 2157), 'uw_canvas.models.CanvasSection', 'CanvasSection', ([], {'sis_section_id': '"""2013-spring-PHYS-599-A-9136CCB8F66711D5BE060004AC494FFE"""'}), "(sis_section_id=\n '2013-spring-PHYS-599-A-9136CCB8F66711D5BE060004AC494FFE')\n", (2078, 2157), False, 'from uw_canvas.models import CanvasSection\n'), ((2262, 2355), 'uw_canvas.models.CanvasSection', 'CanvasSection', ([], {'sis_section_id': '"""2013-spring-PHYS-599-A-9136CCB8F66711D5BE060004AC494FFE-"""'}), "(sis_section_id=\n '2013-spring-PHYS-599-A-9136CCB8F66711D5BE060004AC494FFE-')\n", (2275, 2355), False, 'from uw_canvas.models import CanvasSection\n'), ((2460, 2512), 'uw_canvas.models.CanvasSection', 'CanvasSection', ([], {'sis_section_id': '"""course_123456_groups"""'}), "(sis_section_id='course_123456_groups')\n", (2473, 2512), False, 'from uw_canvas.models import CanvasSection\n'), ((2745, 2755), 'uw_canvas.sections.Sections', 'Sections', ([], {}), '()\n', (2753, 2755), False, 'from uw_canvas.sections import Sections\n'), ((3149, 3159), 'uw_canvas.sections.Sections', 'Sections', ([], {}), '()\n', (3157, 3159), False, 'from uw_canvas.sections import Sections\n'), ((3636, 3646), 'uw_canvas.sections.Sections', 'Sections', ([], {}), '()\n', (3644, 3646), False, 'from uw_canvas.sections import Sections\n'), ((4107, 4117), 'uw_canvas.sections.Sections', 'Sections', ([], {}), '()\n', (4115, 4117), False, 'from uw_canvas.sections import Sections\n')] |
import os
import FrictionQPotSpringBlock.Line1d as model
import matplotlib.pyplot as plt
import numpy as np
import prrng
import tqdm
N = 1000
initstate = np.arange(N)
initseq = np.zeros(N)
generators = prrng.pcg32_array(initstate, initseq)
y = 2.0 * generators.random([20000])
y = np.cumsum(y, 1)
y -= 50.0
xdelta = 1e-3
system = model.System(N, y)
system.set_dt(0.1)
system.set_eta(2.0 * np.sqrt(3.0) / 10.0)
system.set_m(1.0)
system.set_mu(1.0)
system.set_k_neighbours(1.0)
system.set_k_frame(1.0 / N)
ninc = 1000
ret = np.empty([2, ninc])
pbar = tqdm.tqdm(total=ninc)
for inc in range(ninc):
# Apply event-driven protocol.
if inc == 0:
system.set_x_frame(0.0) # initial quench
elif inc % 2 != 0:
system.advanceEventRightElastic(xdelta) # elastically advance -> mechanical equilibrium
else:
system.advanceEventRightKick(xdelta) # apply kick
# Minimise energy.
if inc % 2 == 0:
niter = system.minimise()
pbar.n = inc
pbar.set_description(f"inc = {inc:4d}, niter = {niter:8d}")
pbar.refresh()
# Extract output data.
ret[0, inc] = system.x_frame()
ret[1, inc] = np.mean(system.f_frame())
if os.path.isfile("SimpleQuastiStatic_historic.txt"):
test = np.genfromtxt("SimpleQuastiStatic_historic.txt", delimiter=",")
assert np.allclose(ret, test)
fig, ax = plt.subplots()
ax.plot(ret[0, :], ret[1, :])
plt.show()
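# To (re)generate the reference data checked above (an assumption, but it
# matches the layout np.genfromtxt() reads back):
#   np.savetxt("SimpleQuastiStatic_historic.txt", ret, delimiter=",")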
| [
"prrng.pcg32_array",
"numpy.allclose",
"numpy.sqrt",
"FrictionQPotSpringBlock.Line1d.System",
"tqdm.tqdm",
"os.path.isfile",
"numpy.zeros",
"numpy.empty",
"numpy.cumsum",
"numpy.genfromtxt",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((157, 169), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (166, 169), True, 'import numpy as np\n'), ((180, 191), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (188, 191), True, 'import numpy as np\n'), ((205, 242), 'prrng.pcg32_array', 'prrng.pcg32_array', (['initstate', 'initseq'], {}), '(initstate, initseq)\n', (222, 242), False, 'import prrng\n'), ((286, 301), 'numpy.cumsum', 'np.cumsum', (['y', '(1)'], {}), '(y, 1)\n', (295, 301), True, 'import numpy as np\n'), ((337, 355), 'FrictionQPotSpringBlock.Line1d.System', 'model.System', (['N', 'y'], {}), '(N, y)\n', (349, 355), True, 'import FrictionQPotSpringBlock.Line1d as model\n'), ((531, 550), 'numpy.empty', 'np.empty', (['[2, ninc]'], {}), '([2, ninc])\n', (539, 550), True, 'import numpy as np\n'), ((559, 580), 'tqdm.tqdm', 'tqdm.tqdm', ([], {'total': 'ninc'}), '(total=ninc)\n', (568, 580), False, 'import tqdm\n'), ((1201, 1250), 'os.path.isfile', 'os.path.isfile', (['"""SimpleQuastiStatic_historic.txt"""'], {}), "('SimpleQuastiStatic_historic.txt')\n", (1215, 1250), False, 'import os\n'), ((1372, 1386), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1384, 1386), True, 'import matplotlib.pyplot as plt\n'), ((1417, 1427), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1425, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1263, 1326), 'numpy.genfromtxt', 'np.genfromtxt', (['"""SimpleQuastiStatic_historic.txt"""'], {'delimiter': '""","""'}), "('SimpleQuastiStatic_historic.txt', delimiter=',')\n", (1276, 1326), True, 'import numpy as np\n'), ((1338, 1360), 'numpy.allclose', 'np.allclose', (['ret', 'test'], {}), '(ret, test)\n', (1349, 1360), True, 'import numpy as np\n'), ((397, 409), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (404, 409), True, 'import numpy as np\n')] |
from django.urls import path
from .views import *
app_name = "tables"
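# app_name enables namespaced URL reversing, e.g. (hypothetical arguments):
#   from django.urls import reverse
#   reverse("tables:monthly_outcomes", args=[3, 2022, "January"])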
urlpatterns = [
path("", index, name="index"),
#path("outcomes", views.outcomes, name="outcomes"),
path("<int:categorie_id>", categorie, name="categorie"),
path("incomes/<int:categorie_id>/", incomes, name = "incomes"),
#path for monthly incomes categorie view
path("incomes/<int:categorie_id>/<int:year>/<str:month>/", MonthlyIncomes.as_view(), name="monthly_incomes"),
#path for monthly outcomes categorie view
path("<int:categorie_id>/<int:year>/<str:month>/", MonthlyOutcomes.as_view(), name="monthly_outcomes"),
] | [
"django.urls.path"
] | [((91, 120), 'django.urls.path', 'path', (['""""""', 'index'], {'name': '"""index"""'}), "('', index, name='index')\n", (95, 120), False, 'from django.urls import path\n'), ((183, 238), 'django.urls.path', 'path', (['"""<int:categorie_id>"""', 'categorie'], {'name': '"""categorie"""'}), "('<int:categorie_id>', categorie, name='categorie')\n", (187, 238), False, 'from django.urls import path\n'), ((244, 304), 'django.urls.path', 'path', (['"""incomes/<int:categorie_id>/"""', 'incomes'], {'name': '"""incomes"""'}), "('incomes/<int:categorie_id>/', incomes, name='incomes')\n", (248, 304), False, 'from django.urls import path\n')] |
# -*- coding: utf-8 -*-
"""
Exercise 1: Solving a linear system by forward substitution.
Function sus_prog: computes, by forward substitution, the solution of the
linear system L*x = b, where L is a square, non-singular lower triangular matrix.
Input arguments:
    L: square, non-singular lower triangular matrix of the linear system
    L*x = b (two-dimensional numpy array).
    b: right-hand-side vector (one-dimensional numpy array).
Output arguments:
    x: solution of the system L*x = b (one-dimensional numpy array).
Examples:
    L = np.array([[2., 0, 0], [1, 2, 0]])
    b = np.array([2., -1, 0])
    x = sus_prog(L,b)
    print('Solution =', x)
Output:
    Error: matrix L is not square
    Solution = None
    L = np.array([[2., 0, 0], [1, 2, 0], [1, 1, 0]])
    b = np.array([2., -1, 0])
    x = sus_prog(L,b)
    print('Solution =', x)
Output:
    Error: matrix L is singular
    Solution = None
    L = np.array([[2., 0, 0], [1, 2, 0], [1, 1, 2]])
    b = np.array([2., -1, 0])
    x = sus_prog(L,b)
    print('Solution =', x)
    # Check that L*x = b
    print('Check: L*x =', np.dot(L,x), 'b = ', b)
Output:
    Solution = [ 1. -1.  0.]
    Check: L*x = [ 2. -1.  0.] b =  [ 2. -1.  0.]
"""
import numpy as np
np.set_printoptions(precision = 2)   # only two decimals
np.set_printoptions(suppress = True) # no scientific notation
# Function sus_prog
#-----------------------------------------------------------------------------
def sus_prog(L,b):
    m,n = L.shape # dimensions of the matrix L
    # Check that the matrix L is square
    if m != n:
        print('Error: matrix L is not square')
        return
    # Check that the dimensions of L and b are compatible
    if n != len(b):
        print('Error: the dimensions are not compatible')
        return
    # Check that the matrix L is non-singular
    d = np.diag(L)
    for i in range(0,len(d)):
        if d[i] == 0:
            print("Error: matrix L is singular")
            return
    # Solve the system: x[i] = (b[i] - sum_{j<i} L[i,j]*x[j]) / L[i,i]
    x = np.zeros(len(b)) # allocate the solution vector x
    for i in range(0,len(b)):
        sumatorio = 0
        for j in range(0,i):
            sumatorio += (L[i,j]*x[j])
        x[i] = (1/L[i,i])*(b[i] - sumatorio)
return x
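# Optional cross-check against SciPy (assumes SciPy is installed):
#   from scipy.linalg import solve_triangular
#   assert np.allclose(sus_prog(L, b), solve_triangular(L, b, lower=True))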
#--------------------------------------
# Examples
#--------------------------------------
# Test examples
#--------------------------------------
#FIRST TEST EXAMPLE
L = np.array([[2., 0, 0], [1, 2, 0]])
b = np.array([2., -1, 0])
x = sus_prog(L,b)
print('Solution =', x)
#SECOND TEST EXAMPLE
L = np.array([[2., 0, 0], [1, 2, 0], [1, 1, 0]])
b = np.array([2., -1, 0])
x = sus_prog(L,b)
print('Solution =', x)
#THIRD TEST EXAMPLE
L = np.array([[2., 0, 0], [1, 2, 0], [1, 1, 2]])
b = np.array([2., -1, 0])
x = sus_prog(L,b)
print('Solution of system 1 =', x)
# Check that L*x = b
print('Check: L*x =', np.dot(L,x), 'b = ', b)
# Example
#--------------------------------------
n = 5
np.random.seed(1) # so the random numbers repeat across runs
L = np.random.random((n,n)) # generate a random n x n matrix
L = np.tril(L) # zero out the elements above the diagonal
b = np.random.random(n) # generate a random vector of dimension n
x = sus_prog(L,b)
print('L')
print(L)
print('b')
print(b)
print('Solution of system 2 =', x)
| [
"numpy.random.random",
"numpy.diag",
"numpy.array",
"numpy.dot",
"numpy.tril",
"numpy.random.seed",
"numpy.set_printoptions"
] | [((1543, 1575), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (1562, 1575), True, 'import numpy as np\n'), ((1601, 1635), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (1620, 1635), True, 'import numpy as np\n'), ((2840, 2874), 'numpy.array', 'np.array', (['[[2.0, 0, 0], [1, 2, 0]]'], {}), '([[2.0, 0, 0], [1, 2, 0]])\n', (2848, 2874), True, 'import numpy as np\n'), ((2878, 2900), 'numpy.array', 'np.array', (['[2.0, -1, 0]'], {}), '([2.0, -1, 0])\n', (2886, 2900), True, 'import numpy as np\n'), ((2973, 3018), 'numpy.array', 'np.array', (['[[2.0, 0, 0], [1, 2, 0], [1, 1, 0]]'], {}), '([[2.0, 0, 0], [1, 2, 0], [1, 1, 0]])\n', (2981, 3018), True, 'import numpy as np\n'), ((3022, 3044), 'numpy.array', 'np.array', (['[2.0, -1, 0]'], {}), '([2.0, -1, 0])\n', (3030, 3044), True, 'import numpy as np\n'), ((3127, 3172), 'numpy.array', 'np.array', (['[[2.0, 0, 0], [1, 2, 0], [1, 1, 2]]'], {}), '([[2.0, 0, 0], [1, 2, 0], [1, 1, 2]])\n', (3135, 3172), True, 'import numpy as np\n'), ((3176, 3198), 'numpy.array', 'np.array', (['[2.0, -1, 0]'], {}), '([2.0, -1, 0])\n', (3184, 3198), True, 'import numpy as np\n'), ((3388, 3405), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (3402, 3405), True, 'import numpy as np\n'), ((3465, 3489), 'numpy.random.random', 'np.random.random', (['(n, n)'], {}), '((n, n))\n', (3481, 3489), True, 'import numpy as np\n'), ((3527, 3537), 'numpy.tril', 'np.tril', (['L'], {}), '(L)\n', (3534, 3537), True, 'import numpy as np\n'), ((3608, 3627), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (3624, 3627), True, 'import numpy as np\n'), ((2160, 2170), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (2167, 2170), True, 'import numpy as np\n'), ((3307, 3319), 'numpy.dot', 'np.dot', (['L', 'x'], {}), '(L, x)\n', (3313, 3319), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# 2022 by <NAME>
# Version 0.1 Beta
# Reference:
# https://www.descomplicandoamusica.com/escala-menor-natural/
# https://www.descomplicandoamusica.com/escala-menor-melodica/
# https://www.descomplicandoamusica.com/escala-menor-harmonica/
import sys
def title():
print(" __ __ _ _ _____ _ ")
print(" | \/ | (_) | | / ____| | | ")
print(" | \ / |_ _ ___ _ ___ __ _| | | (___ ___ __ _| | ___ ")
print(" | |\/| | | | / __| |/ __/ _` | | \___ \ / __/ _` | |/ _ |")
print(" | | | | |_| \__ \ | (_| (_| | | ____) | (_| (_| | | __/")
print(" |_| |_|\__,_|___/_|\___\__,_|_| |_____/ \___\__,_|_|\___|\n")
print("Created by <NAME> - 2022")
print("Version 0.1 Beta\n")
def help():
print("Usage: musical_scale.py [-- Scale] [Note]")
print(" # Scales available: --major, --natural_minor")
print(" # Notes available: C, D, E, F, G, A, B\n")
print("Example 1: musical_scale.py --major G")
print("Example 2: musical_scale.py --natural_minor B\n")
sys.exit(1)
def convertNoteToNumber(note_, number_ = False):
notes_numbers = {'C' : 0, 'D' : 2, 'E' : 4, 'F' : 5, 'G' : 7, 'A' : 9, 'B' : 11}
accidentals_numbers = {'#' : 1, 'b' : -1, 'bb' : -2}
numbers_notes = {}
for n in notes_numbers:
numbers_notes[notes_numbers[n]] = n
numbers_accidentals = {}
for n in accidentals_numbers:
numbers_accidentals[accidentals_numbers[n]] = n
if number_:
if note_ in range(12):
if note_ in numbers_notes.keys():
return_note = numbers_notes[note_]
return return_note
else:
                return_note = numbers_notes[note_ - accidentals_numbers['#']] + numbers_accidentals[1] # sharp of the natural one semitone below, e.g. 1 -> 'C#'
return return_note
else:
return None
elif not number_:
if note_[0] in notes_numbers.keys():
if len(note_) == 1:
return_number = notes_numbers[note_]
elif len(note_) > 1:
return_number = notes_numbers[note_[0]] + accidentals_numbers[note_[1:]]
if return_number < 0:
return_number = return_number + 12
return return_number
else:
return None
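# Examples:
#   convertNoteToNumber('C')             -> 0
#   convertNoteToNumber('Db')            -> 1
#   convertNoteToNumber('Cb')            -> 11   (wraps around below C)
#   convertNoteToNumber(1, number_=True) -> 'C#' (black keys spelled as sharps)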
def generateScale(note, scale, return_numbers = False):
note = convertNoteToNumber(note)
notes = {
'sharps' : ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B'],
'flats' : ['C', 'Db', 'D', 'Eb', 'E', 'F', 'Gb', 'G', 'Ab', 'A', 'Bb', 'B']
};
scales = {
'--major' : [2, 2, 1, 2, 2, 2],
'--natural_minor' : [2, 1, 2, 2, 1, 2],
#'--harmonic_minor' : [2, 1, 2, 2, 1, 3],
#'--melodic_minor' : [2, 1, 2, 2, 2, 2, 1, -2, -2, -1, -2, -2, -1, -2]
}
if scales[scale] == scales['--major']:
if note in [1, 3, 5, 8, 10]:
notes_array = notes['flats']
else:
notes_array = notes['sharps']
else:
if note in [0, 1, 2, 3, 5, 7, 8, 10]:
notes_array = notes['flats']
else:
notes_array = notes['sharps']
return_scale = []
start_reference = []
total = 0
for i in range(len(scales[scale])+1):
if i < len(scales[scale]):
total += scales[scale][i]
start_reference.append(total)
if i == 0:
current_note = note
else:
current_note = note + start_reference[i - 1]
if current_note >= len(notes_array):
current_note = current_note - 12
if not return_numbers:
return_scale.append(notes_array[current_note])
elif return_numbers:
return_scale.append(convertNoteToNumber(notes_array[current_note]))
else:
return None
return return_scale
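# Examples (traced through the interval tables above):
#   generateScale('G', '--major')         -> ['G', 'A', 'B', 'C', 'D', 'E', 'F#']
#   generateScale('A', '--natural_minor') -> ['A', 'B', 'C', 'D', 'E', 'F', 'G']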
def main():
title()
try:
args = sys.argv[1:]
if not args:
print("You must inform the note to be generated in the scale.\n Use --help or -h for more information.")
sys.exit(1)
if args[0] in ["--natural_minor"] and args[1] in ["C", "D", "E", "F", "G", "A", "B"]:
print("{} minor scale".format(args[1]))
print(generateScale(args[1], args[0]))
sys.exit(1)
elif args[0] in ["--major"] and args[1] in ["C", "D", "E", "F", "G", "A", "B"]:
print("{} major scale".format(args[1]))
print(generateScale(args[1], args[0]))
sys.exit(1)
elif args[0] in ["--help", "-h"]:
help()
else:
print("Invalid information. Use --help or -h for more information.")
sys.exit(1)
except Exception as e:
print("Unexpected error identified. Please contact the administrator. Error: {}".format(e))
sys.exit(1)
if __name__ == '__main__':
main() | [
"sys.exit"
] | [((1127, 1138), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1135, 1138), False, 'import sys\n'), ((3975, 3986), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3983, 3986), False, 'import sys\n'), ((4198, 4209), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4206, 4209), False, 'import sys\n'), ((4749, 4760), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4757, 4760), False, 'import sys\n'), ((4422, 4433), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4430, 4433), False, 'import sys\n'), ((4602, 4613), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4610, 4613), False, 'import sys\n')] |
from os.path import dirname, realpath, join
from pathlib import Path
class Config:
BASE_DIR = Path(realpath(join(dirname(realpath(__file__)), '..')))
RUNS_DIR = Path(join(BASE_DIR, "runs"))
DATA_DIR = Path(join(BASE_DIR, "data"))
WIC_SAMPLES_FILE = Path(join(DATA_DIR, "wic.train.data.txt"))
WIC_LABELS_FILE = Path(join(DATA_DIR, "wic.train.gold.txt"))
EMBED_DIR = Path(join(BASE_DIR, "embed"))
GLOVE_FILE = Path(join(EMBED_DIR, "glove.6B.300d.txt"))
GLOVE_WIKICLEAN_FILE = Path(join(EMBED_DIR, "glove.wikiclean.50d.txt"))
MULTIFT_BASE = join(EMBED_DIR, "mv-wacky_e10_d300_vs2e-4_lr1e-5_mar1")
MULTIFT_WORDS_FILE = Path(join(
EMBED_DIR, "mv-wacky_e10_d300_vs2e-4_lr1e-5_mar1.words"))
MULTIFT_EMBED_FILE = Path(join(
EMBED_DIR, "mv-wacky_e10_d300_vs2e-4_lr1e-5_mar1.subword.npy"))
SEED = 42
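# Usage sketch (the module name "config" is an assumption):
#   from config import Config
#   print(Config.GLOVE_FILE)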
| [
"os.path.realpath",
"os.path.join"
] | [((575, 630), 'os.path.join', 'join', (['EMBED_DIR', '"""mv-wacky_e10_d300_vs2e-4_lr1e-5_mar1"""'], {}), "(EMBED_DIR, 'mv-wacky_e10_d300_vs2e-4_lr1e-5_mar1')\n", (579, 630), False, 'from os.path import dirname, realpath, join\n'), ((175, 197), 'os.path.join', 'join', (['BASE_DIR', '"""runs"""'], {}), "(BASE_DIR, 'runs')\n", (179, 197), False, 'from os.path import dirname, realpath, join\n'), ((219, 241), 'os.path.join', 'join', (['BASE_DIR', '"""data"""'], {}), "(BASE_DIR, 'data')\n", (223, 241), False, 'from os.path import dirname, realpath, join\n'), ((271, 307), 'os.path.join', 'join', (['DATA_DIR', '"""wic.train.data.txt"""'], {}), "(DATA_DIR, 'wic.train.data.txt')\n", (275, 307), False, 'from os.path import dirname, realpath, join\n'), ((336, 372), 'os.path.join', 'join', (['DATA_DIR', '"""wic.train.gold.txt"""'], {}), "(DATA_DIR, 'wic.train.gold.txt')\n", (340, 372), False, 'from os.path import dirname, realpath, join\n'), ((395, 418), 'os.path.join', 'join', (['BASE_DIR', '"""embed"""'], {}), "(BASE_DIR, 'embed')\n", (399, 418), False, 'from os.path import dirname, realpath, join\n'), ((442, 478), 'os.path.join', 'join', (['EMBED_DIR', '"""glove.6B.300d.txt"""'], {}), "(EMBED_DIR, 'glove.6B.300d.txt')\n", (446, 478), False, 'from os.path import dirname, realpath, join\n'), ((512, 554), 'os.path.join', 'join', (['EMBED_DIR', '"""glove.wikiclean.50d.txt"""'], {}), "(EMBED_DIR, 'glove.wikiclean.50d.txt')\n", (516, 554), False, 'from os.path import dirname, realpath, join\n'), ((661, 722), 'os.path.join', 'join', (['EMBED_DIR', '"""mv-wacky_e10_d300_vs2e-4_lr1e-5_mar1.words"""'], {}), "(EMBED_DIR, 'mv-wacky_e10_d300_vs2e-4_lr1e-5_mar1.words')\n", (665, 722), False, 'from os.path import dirname, realpath, join\n'), ((767, 834), 'os.path.join', 'join', (['EMBED_DIR', '"""mv-wacky_e10_d300_vs2e-4_lr1e-5_mar1.subword.npy"""'], {}), "(EMBED_DIR, 'mv-wacky_e10_d300_vs2e-4_lr1e-5_mar1.subword.npy')\n", (771, 834), False, 'from os.path import dirname, realpath, join\n'), ((126, 144), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (134, 144), False, 'from os.path import dirname, realpath, join\n')] |
import logging.config
import os
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
_, HOSTNAME, _, _, MACHINE = os.uname()
RUNNING_ON_PI = MACHINE.startswith('arm')
# Drivers #####################################################################
# Driver libs can only be installed on RPi
if RUNNING_ON_PI:
from pi2go import pi2go
BOT_DRIVER = pi2go
import bluetooth
BLUETOOTH_DRIVER = bluetooth._bluetooth
from ibeaconscanner import blescan
IBEACON_SCANNER = blescan
import picamera
CAMERA = picamera
import cv2
CV2 = cv2
import numpy
NUMPY = numpy
else:
BOT_DRIVER = None
BLUETOOTH_DRIVER = None
IBEACON_SCANNER = None
CAMERA = None
CV2 = None
NUMPY = None
# Bluetooth ###################################################################
BLUETOOTH_ENABLED = os.getenv('BLUETOOTH_ENABLED', 'false')
if BLUETOOTH_ENABLED == 'true':
BLUETOOTH_ENABLED = True
else:
BLUETOOTH_ENABLED = False
# In seconds
BLUETOOTH_SCAN_INTERVAL = int(os.getenv('BLUETOOTH_SCAN_INTERVAL', 1))
# Beacons #####################################################################
BEACON_ONE_ID = os.getenv(
'BEACON_ONE_ID', 'e2c56db5dffb48d2b060d0f5a71096e1'
)
BEACON_TWO_ID = os.getenv(
'BEACON_TWO_ID', 'e2c56db5dffb48d2b060d0f5a71096e2'
)
BEACON_THREE_ID = os.getenv(
'BEACON_THREE_ID', 'e2c56db5dffb48d2b060d0f5a71096e3'
)
BEACONS = [
BEACON_ONE_ID,
BEACON_TWO_ID,
BEACON_THREE_ID
]
# Camera ###################################################################
CAMERA_ENABLED = os.getenv('CAMERA_ENABLED', 'false')
if CAMERA_ENABLED == 'true':
CAMERA_ENABLED = True
else:
CAMERA_ENABLED = False
CAMERA_INTERVAL = int(os.getenv('CAMERA_INTERVAL', 1))
CAMERA_RESOLUTION_X = int(os.getenv('CAMERA_RESOLUTION_X', 640))
CAMERA_RESOLUTION_Y = int(os.getenv('CAMERA_RESOLUTION_Y', 480))
CAMERA_RESOLUTION = (CAMERA_RESOLUTION_X, CAMERA_RESOLUTION_Y)
FILE_MESSAGE_STORAGE_PATH = os.getenv('FILE_MESSAGE_STORAGE_PATH', '/tmp/')
# Target following ############################################################
BOT_SEARCH_FOR_TARGET = os.getenv('BOT_SEARCH_FOR_TARGET', 'true')
if BOT_SEARCH_FOR_TARGET == 'true':
BOT_SEARCH_FOR_TARGET = True
else:
BOT_SEARCH_FOR_TARGET = False
# Smallest target to move towards
TARGET_MINIMUM_AREA = int(os.getenv('MINIMUM_TARGET_AREA', 100))
TARGET_COLOUR_LOW_H = int(os.getenv('TARGET_COLOUR_LOW_H', 115))
TARGET_COLOUR_LOW_S = int(os.getenv('TARGET_COLOUR_LOW_S', 127))
TARGET_COLOUR_LOW_V = int(os.getenv('TARGET_COLOUR_LOW_V', 64))
TARGET_COLOUR_LOW = (
TARGET_COLOUR_LOW_H, TARGET_COLOUR_LOW_S, TARGET_COLOUR_LOW_V
)
TARGET_COLOUR_HIGH_H = int(os.getenv('TARGET_COLOUR_HIGH_H', 125))
TARGET_COLOUR_HIGH_S = int(os.getenv('TARGET_COLOUR_HIGH_S', 255))
TARGET_COLOUR_HIGH_V = int(os.getenv('TARGET_COLOUR_HIGH_V', 255))
TARGET_COLOUR_HIGH = (
TARGET_COLOUR_HIGH_H, TARGET_COLOUR_HIGH_S, TARGET_COLOUR_HIGH_V
)
# cnav-sense ##################################################################
CNAV_SENSE_ENABLED = os.getenv('CNAV_SENSE_ENABLED', 'true')
if CNAV_SENSE_ENABLED == 'true':
CNAV_SENSE_ENABLED = True
else:
CNAV_SENSE_ENABLED = False
# must be set individually for each bot via resin.io device env vars
if CNAV_SENSE_ENABLED:
CNAV_SENSE_ADDRESS = os.getenv('CNAV_SENSE_ADDRESS')
# Bot modes ###################################################################
BOT_ENABLED = os.getenv('BOT_ENABLED', 'true')
if BOT_ENABLED == 'true':
BOT_ENABLED = True
else:
BOT_ENABLED = False
# 'wander' (roam freely) or 'follow' (follow line)
BOT_MODE_WANDER = 'wander'
BOT_MODE_FOLLOW = 'follow'
BOT_MODE_FOLLOW_AVOID = 'follow-avoid'
BOT_MODE_DIRECTION = 'direction'
BOT_MODE_FOLLOW_DIRECTION_AND_LINE = 'direction-line'
BOT_MODE_FOLLOW_CAMERA_TARGET = 'follow-camera-target'
BOT_MODE = os.getenv('BOT_MODE', BOT_MODE_WANDER)
BOT_IN_WANDER_MODE = False
BOT_IN_FOLLOW_MODE = False
BOT_IN_FOLLOW_AVOID_MODE = False
BOT_IN_FOLLOW_DIRECTION_MODE = False
BOT_IN_FOLLOW_CAMERA_TARGET_MODE = False
BOT_IN_FOLLOW_DIRECTION_AND_LINE_MODE = False
if BOT_MODE == BOT_MODE_WANDER:
BOT_IN_WANDER_MODE = True
elif BOT_MODE == BOT_MODE_FOLLOW:
BOT_IN_FOLLOW_MODE = True
elif BOT_MODE == BOT_MODE_FOLLOW_AVOID:
BOT_IN_FOLLOW_AVOID_MODE = True
elif BOT_MODE == BOT_MODE_DIRECTION:
BOT_IN_FOLLOW_DIRECTION_MODE = True
CNAV_SENSE_ENABLED = True
elif BOT_MODE == BOT_MODE_FOLLOW_CAMERA_TARGET:
BOT_IN_FOLLOW_CAMERA_TARGET_MODE = True
CAMERA_ENABLED = True
elif BOT_MODE == BOT_MODE_FOLLOW_DIRECTION_AND_LINE:
BOT_IN_FOLLOW_DIRECTION_AND_LINE_MODE = True
CNAV_SENSE_ENABLED = True
BOT_WAIT_FOR_BUTTON_PRESS = os.getenv(
'BOT_WAIT_FOR_BUTTON_PRESS', 'true'
)
if BOT_WAIT_FOR_BUTTON_PRESS == 'true':
BOT_WAIT_FOR_BUTTON_PRESS = True
else:
BOT_WAIT_FOR_BUTTON_PRESS = False
# Defaults ####################################################################
# Must be between 0 and 100
BOT_DEFAULT_SPEED = int(os.getenv('BOT_DEFAULT_SPEED', 40))
BOT_DEFAULT_FORWARD_STEPS = int(os.getenv('BOT_DEFAULT_FORWARD_STEPS', 3))
BOT_AVOID_OBSTACLE_STEPS = int(os.getenv('BOT_AVOID_OBSTACLE_STEPS', 2))
BOT_FOLLOW_LINE_STEPS = int(os.getenv('BOT_FOLLOW_LINE_STEPS', 2))
BOT_DEFAULT_NAME = os.getenv('BOT_DEFAULT_NAME', HOSTNAME)
BOT_DEFAULT_MAX_DISTANCE = int(os.getenv('BOT_DEFAULT_MAX_DISTANCE', 10))
BOT_DIRECTION_TOLERANCE = int(os.getenv('BOT_DIRECTION_TOLERANCE', 10))
# Logging #####################################################################
BOT_LOG_PATH = os.environ.get('BOT_LOG_PATH', '/tmp/cnavbot.log')
SENTRY_DSN = os.environ.get('SENTRY_DSN')
logging.config.dictConfig({
'version': 1,
'formatters': {
'default': {
'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
},
'sentry': {
'format': (
'[%(asctime)s][%(levelname)s] %(name)s '
'%(filename)s:%(funcName)s:%(lineno)d | %(message)s'
),
'datefmt': '%H:%M:%S',
}
},
'handlers': {
'rotating_file': {
'class': 'zmqservices.utils.MultiprocessingRotatingFileHandler',
'filename': BOT_LOG_PATH,
'maxBytes': 10 * 1024 * 1024,
'backupCount': 5,
'formatter': 'default',
'level': 'INFO',
},
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': 'DEBUG',
},
'sentry': {
'class': 'raven.handlers.logging.SentryHandler',
'dsn': SENTRY_DSN,
'level': 'ERROR',
'formatter': 'sentry',
},
'papertrail': {
'class': 'logging.handlers.SysLogHandler',
'formatter': 'default',
'address': (
os.getenv('PAPERTRAIL_HOST'),
int(os.getenv('PAPERTRAIL_PORT', 0))
),
'level': 'INFO',
}
},
'root': {
'handlers': [
'rotating_file',
'console',
'sentry',
'papertrail',
],
'level': 'DEBUG',
'propagate': True,
},
})
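# Example (hypothetical values): every setting above is driven by environment
# variables, so a deployment can be reconfigured without code changes, e.g.:
#
#     export BOT_MODE=follow-camera-target
#     export BOT_DEFAULT_SPEED=60
#     export PAPERTRAIL_HOST=logs.example.com
#     export PAPERTRAIL_PORT=12345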
| [
"os.path.realpath",
"os.environ.get",
"os.getenv",
"os.uname"
] | [((123, 133), 'os.uname', 'os.uname', ([], {}), '()\n', (131, 133), False, 'import os\n'), ((851, 890), 'os.getenv', 'os.getenv', (['"""BLUETOOTH_ENABLED"""', '"""false"""'], {}), "('BLUETOOTH_ENABLED', 'false')\n", (860, 890), False, 'import os\n'), ((1172, 1234), 'os.getenv', 'os.getenv', (['"""BEACON_ONE_ID"""', '"""e2c56db5dffb48d2b060d0f5a71096e1"""'], {}), "('BEACON_ONE_ID', 'e2c56db5dffb48d2b060d0f5a71096e1')\n", (1181, 1234), False, 'import os\n'), ((1257, 1319), 'os.getenv', 'os.getenv', (['"""BEACON_TWO_ID"""', '"""e2c56db5dffb48d2b060d0f5a71096e2"""'], {}), "('BEACON_TWO_ID', 'e2c56db5dffb48d2b060d0f5a71096e2')\n", (1266, 1319), False, 'import os\n'), ((1344, 1408), 'os.getenv', 'os.getenv', (['"""BEACON_THREE_ID"""', '"""e2c56db5dffb48d2b060d0f5a71096e3"""'], {}), "('BEACON_THREE_ID', 'e2c56db5dffb48d2b060d0f5a71096e3')\n", (1353, 1408), False, 'import os\n'), ((1583, 1619), 'os.getenv', 'os.getenv', (['"""CAMERA_ENABLED"""', '"""false"""'], {}), "('CAMERA_ENABLED', 'false')\n", (1592, 1619), False, 'import os\n'), ((1985, 2032), 'os.getenv', 'os.getenv', (['"""FILE_MESSAGE_STORAGE_PATH"""', '"""/tmp/"""'], {}), "('FILE_MESSAGE_STORAGE_PATH', '/tmp/')\n", (1994, 2032), False, 'import os\n'), ((2139, 2181), 'os.getenv', 'os.getenv', (['"""BOT_SEARCH_FOR_TARGET"""', '"""true"""'], {}), "('BOT_SEARCH_FOR_TARGET', 'true')\n", (2148, 2181), False, 'import os\n'), ((3076, 3115), 'os.getenv', 'os.getenv', (['"""CNAV_SENSE_ENABLED"""', '"""true"""'], {}), "('CNAV_SENSE_ENABLED', 'true')\n", (3085, 3115), False, 'import os\n'), ((3462, 3494), 'os.getenv', 'os.getenv', (['"""BOT_ENABLED"""', '"""true"""'], {}), "('BOT_ENABLED', 'true')\n", (3471, 3494), False, 'import os\n'), ((3872, 3910), 'os.getenv', 'os.getenv', (['"""BOT_MODE"""', 'BOT_MODE_WANDER'], {}), "('BOT_MODE', BOT_MODE_WANDER)\n", (3881, 3910), False, 'import os\n'), ((4711, 4757), 'os.getenv', 'os.getenv', (['"""BOT_WAIT_FOR_BUTTON_PRESS"""', '"""true"""'], {}), "('BOT_WAIT_FOR_BUTTON_PRESS', 'true')\n", (4720, 4757), False, 'import os\n'), ((5290, 5329), 'os.getenv', 'os.getenv', (['"""BOT_DEFAULT_NAME"""', 'HOSTNAME'], {}), "('BOT_DEFAULT_NAME', HOSTNAME)\n", (5299, 5329), False, 'import os\n'), ((5574, 5624), 'os.environ.get', 'os.environ.get', (['"""BOT_LOG_PATH"""', '"""/tmp/cnavbot.log"""'], {}), "('BOT_LOG_PATH', '/tmp/cnavbot.log')\n", (5588, 5624), False, 'import os\n'), ((5638, 5666), 'os.environ.get', 'os.environ.get', (['"""SENTRY_DSN"""'], {}), "('SENTRY_DSN')\n", (5652, 5666), False, 'import os\n'), ((65, 91), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (81, 91), False, 'import os\n'), ((1032, 1071), 'os.getenv', 'os.getenv', (['"""BLUETOOTH_SCAN_INTERVAL"""', '(1)'], {}), "('BLUETOOTH_SCAN_INTERVAL', 1)\n", (1041, 1071), False, 'import os\n'), ((1731, 1762), 'os.getenv', 'os.getenv', (['"""CAMERA_INTERVAL"""', '(1)'], {}), "('CAMERA_INTERVAL', 1)\n", (1740, 1762), False, 'import os\n'), ((1790, 1827), 'os.getenv', 'os.getenv', (['"""CAMERA_RESOLUTION_X"""', '(640)'], {}), "('CAMERA_RESOLUTION_X', 640)\n", (1799, 1827), False, 'import os\n'), ((1855, 1892), 'os.getenv', 'os.getenv', (['"""CAMERA_RESOLUTION_Y"""', '(480)'], {}), "('CAMERA_RESOLUTION_Y', 480)\n", (1864, 1892), False, 'import os\n'), ((2352, 2389), 'os.getenv', 'os.getenv', (['"""MINIMUM_TARGET_AREA"""', '(100)'], {}), "('MINIMUM_TARGET_AREA', 100)\n", (2361, 2389), False, 'import os\n'), ((2418, 2455), 'os.getenv', 'os.getenv', (['"""TARGET_COLOUR_LOW_H"""', '(115)'], {}), "('TARGET_COLOUR_LOW_H', 115)\n", 
(2427, 2455), False, 'import os\n'), ((2483, 2520), 'os.getenv', 'os.getenv', (['"""TARGET_COLOUR_LOW_S"""', '(127)'], {}), "('TARGET_COLOUR_LOW_S', 127)\n", (2492, 2520), False, 'import os\n'), ((2548, 2584), 'os.getenv', 'os.getenv', (['"""TARGET_COLOUR_LOW_V"""', '(64)'], {}), "('TARGET_COLOUR_LOW_V', 64)\n", (2557, 2584), False, 'import os\n'), ((2705, 2743), 'os.getenv', 'os.getenv', (['"""TARGET_COLOUR_HIGH_H"""', '(125)'], {}), "('TARGET_COLOUR_HIGH_H', 125)\n", (2714, 2743), False, 'import os\n'), ((2772, 2810), 'os.getenv', 'os.getenv', (['"""TARGET_COLOUR_HIGH_S"""', '(255)'], {}), "('TARGET_COLOUR_HIGH_S', 255)\n", (2781, 2810), False, 'import os\n'), ((2839, 2877), 'os.getenv', 'os.getenv', (['"""TARGET_COLOUR_HIGH_V"""', '(255)'], {}), "('TARGET_COLOUR_HIGH_V', 255)\n", (2848, 2877), False, 'import os\n'), ((3334, 3365), 'os.getenv', 'os.getenv', (['"""CNAV_SENSE_ADDRESS"""'], {}), "('CNAV_SENSE_ADDRESS')\n", (3343, 3365), False, 'import os\n'), ((5020, 5054), 'os.getenv', 'os.getenv', (['"""BOT_DEFAULT_SPEED"""', '(40)'], {}), "('BOT_DEFAULT_SPEED', 40)\n", (5029, 5054), False, 'import os\n'), ((5088, 5129), 'os.getenv', 'os.getenv', (['"""BOT_DEFAULT_FORWARD_STEPS"""', '(3)'], {}), "('BOT_DEFAULT_FORWARD_STEPS', 3)\n", (5097, 5129), False, 'import os\n'), ((5162, 5202), 'os.getenv', 'os.getenv', (['"""BOT_AVOID_OBSTACLE_STEPS"""', '(2)'], {}), "('BOT_AVOID_OBSTACLE_STEPS', 2)\n", (5171, 5202), False, 'import os\n'), ((5232, 5269), 'os.getenv', 'os.getenv', (['"""BOT_FOLLOW_LINE_STEPS"""', '(2)'], {}), "('BOT_FOLLOW_LINE_STEPS', 2)\n", (5241, 5269), False, 'import os\n'), ((5361, 5402), 'os.getenv', 'os.getenv', (['"""BOT_DEFAULT_MAX_DISTANCE"""', '(10)'], {}), "('BOT_DEFAULT_MAX_DISTANCE', 10)\n", (5370, 5402), False, 'import os\n'), ((5434, 5474), 'os.getenv', 'os.getenv', (['"""BOT_DIRECTION_TOLERANCE"""', '(10)'], {}), "('BOT_DIRECTION_TOLERANCE', 10)\n", (5443, 5474), False, 'import os\n'), ((6876, 6904), 'os.getenv', 'os.getenv', (['"""PAPERTRAIL_HOST"""'], {}), "('PAPERTRAIL_HOST')\n", (6885, 6904), False, 'import os\n'), ((6926, 6957), 'os.getenv', 'os.getenv', (['"""PAPERTRAIL_PORT"""', '(0)'], {}), "('PAPERTRAIL_PORT', 0)\n", (6935, 6957), False, 'import os\n')] |
"""Computation of posteriors."""
import numpy as np
from typing import Tuple
from jang.gw import GW
from jang.neutrinos import Detector
from jang.parameters import Parameters
from jang.analysis import Analysis
import jang.stats.likelihoods as lkl
import jang.stats.priors as prior
def compute_flux_posterior(
detector: Detector, gw: GW, parameters: Parameters
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute the posterior as a function of all-flavour neutrino flux at Earth.
Args:
detector (Detector): holds the nominal results
gw (GW): holds the gravitational wave information
parameters (Parameters): holds the needed parameters (skymap resolution to be used, neutrino spectrum and integration range...)
Returns:
np.ndarray: array of the variable flux
np.ndarray: array of computed posterior
"""
ana = Analysis(gw=gw, detector=detector, parameters=parameters)
x_arr = np.logspace(*parameters.range_flux)
post_arr = np.zeros_like(x_arr)
for toy in ana.toys:
phi_to_nsig = ana.phi_to_nsig(toy)
post_arr += lkl.poisson_several_samples(
toy[1].nobserved, toy[1].nbackground, phi_to_nsig, x_arr,
) * prior.signal_parameter(
x_arr, toy[1].nbackground, phi_to_nsig, parameters.prior_signal,
)
return x_arr, post_arr
def compute_etot_posterior(
detector: Detector, gw: GW, parameters: Parameters
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute the posterior as a function of total energy.
Args:
detector (Detector): holds the nominal results
gw (GW): holds the gravitational wave information
parameters (Parameters): holds the needed parameters (skymap resolution to be used, neutrino spectrum and integration range...)
Returns:
np.ndarray: array of the variable Etot
np.ndarray: array of computed posterior
"""
ana = Analysis(gw=gw, detector=detector, parameters=parameters)
ana.add_gw_variables("luminosity_distance", "theta_jn")
x_arr = np.logspace(*parameters.range_etot)
post_arr = np.zeros_like(x_arr)
for toy in ana.toys:
etot_to_nsig = ana.etot_to_nsig(toy)
post_arr += lkl.poisson_several_samples(
toy[1].nobserved, toy[1].nbackground, etot_to_nsig, x_arr,
) * prior.signal_parameter(
x_arr, toy[1].nbackground, etot_to_nsig, parameters.prior_signal,
)
return x_arr, post_arr
def compute_fnu_posterior(
    detector: Detector, gw: GW, parameters: Parameters
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute the posterior as a function of fnu=E(tot)/E(radiated).
Args:
detector (Detector): holds the nominal results
gw (GW): holds the gravitational wave information
parameters (Parameters): holds the needed parameters (skymap resolution to be used, neutrino spectrum and integration range...)
Returns:
np.ndarray: array of the variable fnu
np.ndarray: array of computed posterior
"""
ana = Analysis(gw=gw, detector=detector, parameters=parameters)
ana.add_gw_variables("luminosity_distance", "theta_jn", "radiated_energy")
x_arr = np.logspace(*parameters.range_fnu)
post_arr = np.zeros_like(x_arr)
for toy in ana.toys:
fnu_to_nsig = ana.fnu_to_nsig(toy)
post_arr += lkl.poisson_several_samples(
toy[1].nobserved, toy[1].nbackground, fnu_to_nsig, x_arr,
) * prior.signal_parameter(
x_arr, toy[1].nbackground, fnu_to_nsig, parameters.prior_signal,
)
return x_arr, post_arr
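# Usage sketch (detector/gw/parameters are assumed to be already constructed):
# each function returns the sampled grid and an unnormalized posterior, which
# can be normalized and summarized downstream, e.g.:
#
#     x, post = compute_flux_posterior(detector, gw, parameters)
#     post /= np.trapz(post, x)          # normalize to unit integral
#     mean_flux = np.trapz(x * post, x)  # posterior mean of the flux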
| [
"jang.stats.priors.signal_parameter",
"jang.stats.likelihoods.poisson_several_samples",
"numpy.logspace",
"numpy.zeros_like",
"jang.analysis.Analysis"
] | [((875, 932), 'jang.analysis.Analysis', 'Analysis', ([], {'gw': 'gw', 'detector': 'detector', 'parameters': 'parameters'}), '(gw=gw, detector=detector, parameters=parameters)\n', (883, 932), False, 'from jang.analysis import Analysis\n'), ((946, 981), 'numpy.logspace', 'np.logspace', (['*parameters.range_flux'], {}), '(*parameters.range_flux)\n', (957, 981), True, 'import numpy as np\n'), ((997, 1017), 'numpy.zeros_like', 'np.zeros_like', (['x_arr'], {}), '(x_arr)\n', (1010, 1017), True, 'import numpy as np\n'), ((1925, 1982), 'jang.analysis.Analysis', 'Analysis', ([], {'gw': 'gw', 'detector': 'detector', 'parameters': 'parameters'}), '(gw=gw, detector=detector, parameters=parameters)\n', (1933, 1982), False, 'from jang.analysis import Analysis\n'), ((2056, 2091), 'numpy.logspace', 'np.logspace', (['*parameters.range_etot'], {}), '(*parameters.range_etot)\n', (2067, 2091), True, 'import numpy as np\n'), ((2107, 2127), 'numpy.zeros_like', 'np.zeros_like', (['x_arr'], {}), '(x_arr)\n', (2120, 2127), True, 'import numpy as np\n'), ((3041, 3098), 'jang.analysis.Analysis', 'Analysis', ([], {'gw': 'gw', 'detector': 'detector', 'parameters': 'parameters'}), '(gw=gw, detector=detector, parameters=parameters)\n', (3049, 3098), False, 'from jang.analysis import Analysis\n'), ((3191, 3225), 'numpy.logspace', 'np.logspace', (['*parameters.range_fnu'], {}), '(*parameters.range_fnu)\n', (3202, 3225), True, 'import numpy as np\n'), ((3241, 3261), 'numpy.zeros_like', 'np.zeros_like', (['x_arr'], {}), '(x_arr)\n', (3254, 3261), True, 'import numpy as np\n'), ((1107, 1196), 'jang.stats.likelihoods.poisson_several_samples', 'lkl.poisson_several_samples', (['toy[1].nobserved', 'toy[1].nbackground', 'phi_to_nsig', 'x_arr'], {}), '(toy[1].nobserved, toy[1].nbackground,\n phi_to_nsig, x_arr)\n', (1134, 1196), True, 'import jang.stats.likelihoods as lkl\n'), ((1218, 1310), 'jang.stats.priors.signal_parameter', 'prior.signal_parameter', (['x_arr', 'toy[1].nbackground', 'phi_to_nsig', 'parameters.prior_signal'], {}), '(x_arr, toy[1].nbackground, phi_to_nsig, parameters.\n prior_signal)\n', (1240, 1310), True, 'import jang.stats.priors as prior\n'), ((2219, 2309), 'jang.stats.likelihoods.poisson_several_samples', 'lkl.poisson_several_samples', (['toy[1].nobserved', 'toy[1].nbackground', 'etot_to_nsig', 'x_arr'], {}), '(toy[1].nobserved, toy[1].nbackground,\n etot_to_nsig, x_arr)\n', (2246, 2309), True, 'import jang.stats.likelihoods as lkl\n'), ((2331, 2424), 'jang.stats.priors.signal_parameter', 'prior.signal_parameter', (['x_arr', 'toy[1].nbackground', 'etot_to_nsig', 'parameters.prior_signal'], {}), '(x_arr, toy[1].nbackground, etot_to_nsig, parameters.\n prior_signal)\n', (2353, 2424), True, 'import jang.stats.priors as prior\n'), ((3351, 3440), 'jang.stats.likelihoods.poisson_several_samples', 'lkl.poisson_several_samples', (['toy[1].nobserved', 'toy[1].nbackground', 'fnu_to_nsig', 'x_arr'], {}), '(toy[1].nobserved, toy[1].nbackground,\n fnu_to_nsig, x_arr)\n', (3378, 3440), True, 'import jang.stats.likelihoods as lkl\n'), ((3462, 3554), 'jang.stats.priors.signal_parameter', 'prior.signal_parameter', (['x_arr', 'toy[1].nbackground', 'fnu_to_nsig', 'parameters.prior_signal'], {}), '(x_arr, toy[1].nbackground, fnu_to_nsig, parameters.\n prior_signal)\n', (3484, 3554), True, 'import jang.stats.priors as prior\n')] |
#!/usr/bin/env python
import time
class PID_CONTROLLER:
def __init__(self, p_coef, i_coef, d_coef, limit_out):
self.kp = p_coef
self.ki = i_coef
self.kd = d_coef
self._limit_out = limit_out
self._previous_error = 0
    def set_current_error(self, error):
        # Proportional term
        output0 = error * self.kp
        # Derivative term: difference of the last two errors
        error_diff = error - self._previous_error
        output1 = self.kd * error_diff
        # "Integral" term: note this uses only the sum of the last two errors,
        # not a running accumulation over time
        error_intr = error + self._previous_error
        output2 = self.ki * error_intr
        self._previous_error = error
        output = output0 + output1 + output2
        # Clamp the combined output to the configured limit
        if output > self._limit_out:
            output = self._limit_out
        elif output < (-self._limit_out):
            output = (-self._limit_out)
        return output
class PID_CONTROLLER_BELBIC:
def __init__(self, p_coef, d_coef, i_coef):
self.kp = p_coef
self.kd = d_coef
self.ki = i_coef
self._last_time = 0.0
self.error_integ = 0.0
self._previous_error = 0.0
self._i_max = 5.0 # The integral upper limit.
self._i_min = -5.0 # The integral lower limit.
self._is_error_initialized_PID = False
    def set_SI(self, error):
cur_time = time.time()
output = self.kp * error
if self._is_error_initialized_PID:
dt = cur_time - self._last_time
self._last_time = cur_time
self.error_integ += error * dt
#error_diff = (error - self._previous_error) / dt
error_diff = error - self._previous_error
self._previous_error = error
derivativa = self.kd * error_diff
integral = self.ki * self.error_integ
if integral > self._i_max:
integral = self._i_max
elif integral < self._i_min:
integral = self._i_min
output += derivativa + integral
else:
self._previous_error = error
self._last_time = cur_time
self._is_error_initialized_PID = True
return output
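# Usage sketch (hypothetical gains, setpoint and I/O helpers): feed the
# controller the current error once per control cycle and apply the clamped
# output to the actuator:
#
#     pid = PID_CONTROLLER(p_coef=1.0, i_coef=0.1, d_coef=0.05, limit_out=100)
#     while True:
#         error = setpoint - read_sensor()    # read_sensor() is assumed
#         command = pid.set_current_error(error)
#         drive_actuator(command)             # drive_actuator() is assumed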
| [
"time.time"
] | [((1190, 1201), 'time.time', 'time.time', ([], {}), '()\n', (1199, 1201), False, 'import time\n')] |
"""Tool for timing the execution of Python functions."""
import functools
import timeit
class Clocked(object):
"""Utility class for timing the execution of a function.
Initialization requires a function callable without arguments. The
function is run `executions` times, repeated `repetitions` times.
`executions` defaults to 1000, and `repetitions` to 3, resulting in 3 runs
of 1000 executions each. The shortest of these times is reported as the
function's execution time.
Arguments:
nullary (function): Function without arguments to time.
Keyword Arguments:
executions (int): Number of times to execute the provided function.
repetitions (int): Number of times to repeat executions of the
provided function.
"""
def __init__(self, nullary, executions=1000, repetitions=3):
self.nullary = nullary
self.executions = executions
self.repetitions = repetitions
self.result = None
self.timings = None
    def run(self):
"""Time the provided function, storing its return value."""
self.result = self.nullary()
self.timings = timeit.repeat(
self.nullary,
number=self.executions,
repeat=self.repetitions
)
@property
def time(self):
"""Give a lower bound for how fast the provided function can be run.
For more details on this approach, see
https://docs.python.org/3/library/timeit.html#timeit.Timer.repeat.
"""
return min(self.timings)
def clockit(function, *args, executions=1000, repetitions=3, **kwargs):
"""Time the execution of a function with the provided arguments.
Arguments:
function (function): Function to time.
args: Any number of positional arguments with which to call the
provided function.
Keyword Arguments:
executions (int): Number of times to execute the provided function.
repetitions (int): Number of times to repeat executions of the
provided function.
kwargs: Any number of keyword arguments with which to call the
provided function.
Returns:
Clocked, containing shortest time required to run the function
`executions` times.
"""
nullary = functools.partial(function, *args, **kwargs)
clocked = Clocked(
nullary,
executions=executions,
repetitions=repetitions
)
    clocked.run()
return clocked
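# Usage sketch: time sorted() on a small list with the default 1000 executions
# and 3 repetitions:
#
#     clocked = clockit(sorted, [3, 1, 2])
#     clocked.result  # [1, 2, 3]
#     clocked.time    # best-of-3 wall time for 1000 executions, in seconds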
| [
"timeit.repeat",
"functools.partial"
] | [((2333, 2377), 'functools.partial', 'functools.partial', (['function', '*args'], {}), '(function, *args, **kwargs)\n', (2350, 2377), False, 'import functools\n'), ((1182, 1258), 'timeit.repeat', 'timeit.repeat', (['self.nullary'], {'number': 'self.executions', 'repeat': 'self.repetitions'}), '(self.nullary, number=self.executions, repeat=self.repetitions)\n', (1195, 1258), False, 'import timeit\n')] |
import asyncio
import logging
import time
import math
from decimal import Decimal
from typing import (
Any,
AsyncIterable,
Dict,
List,
Optional,
)
from async_timeout import timeout
import hummingbot.connector.exchange.btcturk.btcturk_constants as CONSTANTS
from hummingbot.connector.client_order_tracker import ClientOrderTracker
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.connector.exchange.btcturk import btcturk_utils
from hummingbot.connector.exchange.btcturk.btcturk_api_order_book_data_source import BtcturkAPIOrderBookDataSource
from hummingbot.connector.exchange.btcturk.btcturk_auth import BtcturkAuth
from hummingbot.connector.exchange.btcturk.btcturk_order_book_tracker import BtcturkOrderBookTracker
from hummingbot.connector.exchange.btcturk.btcturk_user_stream_tracker import BtcturkUserStreamTracker
from hummingbot.connector.time_synchronizer import TimeSynchronizer
from hummingbot.connector.trading_rule import TradingRule
from hummingbot.connector.utils import TradeFillOrderDetails
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.data_type.cancellation_result import CancellationResult
from hummingbot.core.data_type.in_flight_order import InFlightOrder, OrderUpdate, OrderState, TradeUpdate
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.trade_fee import (
DeductedFromReturnsTradeFee,
TokenAmount,
TradeFeeBase,
)
from hummingbot.core.event.events import (
MarketEvent,
OrderFilledEvent,
OrderType,
TradeType,
)
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.utils.async_utils import (
safe_ensure_future,
safe_gather,
)
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest
from hummingbot.core.web_assistant.rest_assistant import RESTAssistant
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
from hummingbot.logger import HummingbotLogger
import json
s_logger = None
s_decimal_0 = Decimal(0)
s_decimal_NaN = Decimal("nan")
class BtcturkExchange(ExchangeBase):
SHORT_POLL_INTERVAL = 5.0
UPDATE_ORDER_STATUS_MIN_INTERVAL = 10.0
LONG_POLL_INTERVAL = 120.0
MAX_ORDER_UPDATE_RETRIEVAL_RETRIES_WITH_FAILURES = 3
def __init__(
self,
btcturk_api_key: str,
btcturk_api_secret: str,
trading_pairs: Optional[List[str]] = None,
trading_required: bool = True,
):
self._btcturk_time_synchronizer = TimeSynchronizer()
super().__init__()
self._trading_required = trading_required
self._auth = BtcturkAuth(
api_key=btcturk_api_key, secret_key=btcturk_api_secret, time_provider=self._btcturk_time_synchronizer
)
self._api_factory = WebAssistantsFactory(auth=self._auth)
self._rest_assistant = None
self._throttler = AsyncThrottler(CONSTANTS.RATE_LIMITS)
self._order_book_tracker = BtcturkOrderBookTracker(
trading_pairs=trading_pairs, api_factory=self._api_factory, throttler=self._throttler
)
self._user_stream_tracker = BtcturkUserStreamTracker(auth=self._auth, throttler=self._throttler)
self._ev_loop = asyncio.get_event_loop()
self._poll_notifier = asyncio.Event()
self._last_timestamp = 0
self._order_not_found_records = {} # Dict[client_order_id:str, count:int]
self._trading_rules = {} # Dict[trading_pair:str, TradingRule]
self._trade_fees = {} # Dict[trading_pair:str, (maker_fee_percent:Decimal, taken_fee_percent:Decimal)]
self._last_update_trade_fees_timestamp = 0
self._status_polling_task = None
self._user_stream_tracker_task = None
self._user_stream_event_listener_task = None
self._trading_rules_polling_task = None
self._last_poll_timestamp = 0
self._last_trades_poll_btcturk_timestamp = 0
self._order_tracker: ClientOrderTracker = ClientOrderTracker(connector=self)
@classmethod
def logger(cls) -> HummingbotLogger:
global s_logger
if s_logger is None:
s_logger = logging.getLogger(__name__)
return s_logger
@property
def name(self) -> str:
return "btcturk"
@property
def order_books(self) -> Dict[str, OrderBook]:
return self._order_book_tracker.order_books
@property
def trading_rules(self) -> Dict[str, TradingRule]:
return self._trading_rules
@property
def in_flight_orders(self) -> Dict[str, InFlightOrder]:
return self._order_tracker.active_orders
@property
def limit_orders(self) -> List[LimitOrder]:
return [in_flight_order.to_limit_order() for in_flight_order in self.in_flight_orders.values()]
@property
def tracking_states(self) -> Dict[str, any]:
"""
Returns a dictionary associating current active orders client id to their JSON representation
"""
return {key: value.to_json() for key, value in self.in_flight_orders.items()}
@property
def order_book_tracker(self) -> BtcturkOrderBookTracker:
return self._order_book_tracker
@property
def user_stream_tracker(self) -> BtcturkUserStreamTracker:
return self._user_stream_tracker
@property
def status_dict(self) -> Dict[str, bool]:
"""
Returns a dictionary with the values of all the conditions that determine if the connector is ready to operate.
        The key of each entry is the condition name, and the value is True if the condition is met, False otherwise.
"""
return {
"symbols_mapping_initialized": BtcturkAPIOrderBookDataSource.trading_pair_symbol_map_ready(
),
"order_books_initialized": self._order_book_tracker.ready,
"account_balance": len(self._account_balances) > 0 if self._trading_required else True,
"trading_rule_initialized": len(self._trading_rules) > 0,
}
@property
def ready(self) -> bool:
"""
Returns True if the connector is ready to operate (all connections established with the exchange). If it is
not ready it returns False.
"""
return all(self.status_dict.values())
@staticmethod
def btcturk_order_type(order_type: OrderType) -> str:
return order_type.name.upper()
@staticmethod
def to_hb_order_type(btcturk_type: str) -> OrderType:
return OrderType[btcturk_type]
def supported_order_types(self):
return [OrderType.LIMIT, OrderType.MARKET]
async def start_network(self):
"""
Start all required tasks to update the status of the connector. Those tasks include:
- The order book tracker
- The polling loop to update the trading rules
- The polling loop to update order status and balance status using REST API (backup for main update process)
- The background task to process the events received through the user stream tracker (websocket connection)
"""
self._order_book_tracker.start()
self._trading_rules_polling_task = safe_ensure_future(self._trading_rules_polling_loop())
if self._trading_required:
# TODO later status polling every 30 mins
self._status_polling_task = safe_ensure_future(self._status_polling_loop())
self._user_stream_tracker_task = safe_ensure_future(self._user_stream_tracker.start())
self._user_stream_event_listener_task = safe_ensure_future(self._user_stream_event_listener())
async def stop_network(self):
"""
This function is executed when the connector is stopped. It perform a general cleanup and stops all background
tasks that require the connection with the exchange to work.
"""
# Reset timestamps and _poll_notifier for status_polling_loop
self._last_poll_timestamp = 0
self._last_timestamp = 0
self._poll_notifier = asyncio.Event()
self._order_book_tracker.stop()
if self._status_polling_task is not None:
self._status_polling_task.cancel()
if self._user_stream_tracker_task is not None:
self._user_stream_tracker_task.cancel()
if self._user_stream_event_listener_task is not None:
self._user_stream_event_listener_task.cancel()
if self._trading_rules_polling_task is not None:
self._trading_rules_polling_task.cancel()
self._status_polling_task = self._user_stream_tracker_task = self._user_stream_event_listener_task = None
async def check_network(self) -> NetworkStatus:
"""
Checks connectivity with the exchange using the API
"""
        # Use exchange data instead of PingPathUrl
try:
await self._api_request(
method=RESTMethod.GET,
path_url=CONSTANTS.TICKER_PRICE_CHANGE_PATH_URL,
)
except asyncio.CancelledError:
raise
except Exception:
return NetworkStatus.NOT_CONNECTED
return NetworkStatus.CONNECTED
def restore_tracking_states(self, saved_states: Dict[str, any]):
"""
        Restore in-flight orders from saved tracking states, so that the connector can pick up where it left off
when it disconnects.
:param saved_states: The saved tracking_states.
"""
self._order_tracker.restore_tracking_states(tracking_states=saved_states)
def tick(self, timestamp: float):
"""
Includes the logic that has to be processed every time a new tick happens in the bot. Particularly it enables
the execution of the status update polling loop using an event.
"""
now = time.time()
poll_interval = (
self.SHORT_POLL_INTERVAL
if now - self.user_stream_tracker.last_recv_time > 60.0
else self.LONG_POLL_INTERVAL
)
last_tick = int(self._last_timestamp / poll_interval)
current_tick = int(timestamp / poll_interval)
if current_tick > last_tick:
if not self._poll_notifier.is_set():
self._poll_notifier.set()
self._last_timestamp = timestamp
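    # Illustration of the tick arithmetic (hypothetical timestamps): with a
    # poll_interval of 5.0, _last_timestamp = 11.0 and timestamp = 14.0 both map
    # to tick 2 (int(11 / 5) == int(14 / 5) == 2), so nothing fires; a timestamp
    # of 16.0 maps to tick 3 > 2 and sets the poll notifier.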
def get_order_book(self, trading_pair: str) -> OrderBook:
"""
Returns the current order book for a particular market
:param trading_pair: the pair of tokens for which the order book should be retrieved
"""
if trading_pair not in self._order_book_tracker.order_books:
raise ValueError(f"No order book exists for '{trading_pair}'.")
return self._order_book_tracker.order_books[trading_pair]
def start_tracking_order(
self,
order_id: str,
exchange_order_id: Optional[str],
trading_pair: str,
trade_type: TradeType,
price: Decimal,
amount: Decimal,
order_type: OrderType,
):
"""
Starts tracking an order by adding it to the order tracker.
:param order_id: the order identifier
:param exchange_order_id: the identifier for the order in the exchange
:param trading_pair: the token pair for the operation
:param trade_type: the type of order (buy or sell)
:param price: the price for the order
:param amount: the amount for the order
        :param order_type: type of execution for the order (MARKET, LIMIT, LIMIT_MAKER)
"""
self._order_tracker.start_tracking_order(
InFlightOrder(
client_order_id=order_id,
exchange_order_id=exchange_order_id,
trading_pair=trading_pair,
order_type=order_type,
trade_type=trade_type,
amount=amount,
price=price,
)
)
def stop_tracking_order(self, order_id: str):
"""
Stops tracking an order
:param order_id: The id of the order that will not be tracked any more
"""
self._order_tracker.stop_tracking_order(client_order_id=order_id)
def get_order_price_quantum(self, trading_pair: str, price: Decimal) -> Decimal:
"""
Used by quantize_order_price() in _create_order()
Returns a price step, a minimum price increment for a given trading pair.
:param trading_pair: the trading pair to check for market conditions
:param price: the starting point price
"""
trading_rule = self._trading_rules[trading_pair]
return trading_rule.min_price_increment
def get_order_size_quantum(self, trading_pair: str, order_size: Decimal) -> Decimal:
"""
        Used by quantize_order_amount() in _create_order()
        Returns an order amount step, a minimum amount increment for a given trading pair.
        :param trading_pair: the trading pair to check for market conditions
        :param order_size: the starting point order size
"""
trading_rule = self._trading_rules[trading_pair]
return trading_rule.min_base_amount_increment
def quantize_order_amount(self, trading_pair: str, amount: Decimal, price: Decimal = s_decimal_0) -> Decimal:
"""
Applies the trading rules to calculate the correct order amount for the market
:param trading_pair: the token pair for which the order will be created
:param amount: the intended amount for the order
:param price: the intended price for the order
:return: the quantized order amount after applying the trading rules
"""
trading_rule = self._trading_rules[trading_pair]
quantized_amount: Decimal = super().quantize_order_amount(trading_pair, amount)
# self.logger().error(quantized_amount)
# self.logger().error(f"trading_rule: {trading_rule}")
# Check against min_order_size and min_notional_size. If not passing either check, return 0.
if quantized_amount < trading_rule.min_order_size:
return s_decimal_0
if price == s_decimal_0:
current_price: Decimal = self.get_price(trading_pair, False)
notional_size = current_price * quantized_amount
else:
notional_size = price * quantized_amount
# Add 1% as a safety factor in case the prices changed while making the order.
if notional_size < trading_rule.min_notional_size * Decimal("1.01"):
return s_decimal_0
return quantized_amount
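    # Worked example (hypothetical trading-rule values): with min_order_size =
    # 0.001, min_notional_size = 10 and price = 50000, an amount of 0.0002 is
    # rejected (below min_order_size, so 0 is returned), while 0.001 yields a
    # notional of 50, which clears 10 * 1.01 = 10.1, so the quantized amount is
    # returned unchanged.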
def get_fee(
self,
base_currency: str,
quote_currency: str,
order_type: OrderType,
order_side: TradeType,
amount: Decimal,
price: Decimal = s_decimal_NaN,
is_maker: Optional[bool] = None,
) -> TradeFeeBase:
"""
Calculates the estimated fee an order would pay based on the connector configuration
:param base_currency: the order base currency
:param quote_currency: the order quote currency
:param order_type: the type of order (MARKET, LIMIT, LIMIT_MAKER)
:param order_side: if the order is for buying or selling
:param amount: the order amount
:param price: the order price
:return: the estimated fee for the order
"""
"""
To get trading fee, this function is simplified by using fee override configuration. Most parameters to this
function are ignore except order_type. Use OrderType.LIMIT_MAKER to specify you want trading fee for
maker order.
"""
is_maker = order_type is OrderType.LIMIT_MAKER
return DeductedFromReturnsTradeFee(percent=self.estimate_fee_pct(is_maker))
def buy(
self,
trading_pair: str,
amount: Decimal,
order_type: OrderType = OrderType.LIMIT,
price: Decimal = s_decimal_NaN,
**kwargs,
) -> str:
"""
Creates a promise to create a buy order using the parameters.
:param trading_pair: the token pair to operate with
:param amount: the order amount
:param order_type: the type of order to create (MARKET, LIMIT, LIMIT_MAKER)
:param price: the order price
:return: the id assigned by the connector to the order (the client id)
"""
new_order_id = btcturk_utils.get_new_client_order_id(is_buy=True, trading_pair=trading_pair)
safe_ensure_future(self._create_order(TradeType.BUY, new_order_id, trading_pair, amount, order_type, price))
return new_order_id
def sell(
self,
trading_pair: str,
amount: Decimal,
order_type: OrderType = OrderType.MARKET,
price: Decimal = s_decimal_NaN,
**kwargs,
) -> str:
"""
Creates a promise to create a sell order using the parameters.
:param trading_pair: the token pair to operate with
:param amount: the order amount
:param order_type: the type of order to create (MARKET, LIMIT, LIMIT_MAKER)
:param price: the order price
:return: the id assigned by the connector to the order (the client id)
"""
order_id = btcturk_utils.get_new_client_order_id(is_buy=False, trading_pair=trading_pair)
safe_ensure_future(self._create_order(TradeType.SELL, order_id, trading_pair, amount, order_type, price))
return order_id
def cancel(self, trading_pair: str, order_id: str):
"""
Creates a promise to cancel an order in the exchange
:param trading_pair: the trading pair the order to cancel operates with
:param order_id: the client id of the order to cancel
:return: the client id of the order to cancel
"""
safe_ensure_future(self._execute_cancel(trading_pair, order_id))
return order_id
async def cancel_all(self, timeout_seconds: float) -> List[CancellationResult]:
"""
Cancels all currently active orders. The cancellations are performed in parallel tasks.
:param timeout_seconds: the maximum time (in seconds) the cancel logic should run
:return: a list of CancellationResult instances, one for each of the orders to be cancelled
"""
incomplete_orders = [o for o in self.in_flight_orders.values() if not o.is_done]
tasks = [self._execute_cancel(o.trading_pair, o.client_order_id) for o in incomplete_orders]
order_id_set = set([o.client_order_id for o in incomplete_orders])
successful_cancellations = []
try:
async with timeout(timeout_seconds):
cancellation_results = await safe_gather(*tasks, return_exceptions=True)
for cr in cancellation_results:
if isinstance(cr, Exception):
continue
if isinstance(cr, dict) and "cancelled_order_id" in cr:
client_order_id = cr.get("cancelled_order_id")
order_id_set.remove(client_order_id)
successful_cancellations.append(CancellationResult(client_order_id, True))
except Exception:
self.logger().network(
"Unexpected error cancelling orders.",
exc_info=True,
app_warning_msg="Failed to cancel order with Btcturk. Check API key and network connection.",
)
failed_cancellations = [CancellationResult(oid, False) for oid in order_id_set]
return successful_cancellations + failed_cancellations
# TODO
# Rest awaits the results and updates the order status to OPEN
async def _create_order(
self,
trade_type: TradeType,
order_id: str,
trading_pair: str,
amount: Decimal,
order_type: OrderType,
price: Optional[Decimal] = Decimal("NaN"),
):
"""
        Creates an order in the exchange using the parameters to configure it
        :param trade_type: the side of the order (BUY or SELL)
:param order_id: the id that should be assigned to the order (the client id)
:param trading_pair: the token pair to operate with
:param amount: the order amount
:param order_type: the type of order to create (MARKET, LIMIT, LIMIT_MAKER)
:param price: the order price
"""
trading_rule: TradingRule = self._trading_rules[trading_pair]
price = self.quantize_order_price(trading_pair, price)
quantize_amount_price = Decimal("0") if price.is_nan() else price
amount = self.quantize_order_amount(trading_pair=trading_pair, amount=amount, price=quantize_amount_price)
self.start_tracking_order(
order_id=order_id,
exchange_order_id=None,
trading_pair=trading_pair,
trade_type=trade_type,
price=price,
amount=amount,
order_type=order_type,
)
if amount < trading_rule.min_order_size:
self.logger().warning(
f"{trade_type.name.title()} order amount {amount} is lower than the minimum order"
f" size {trading_rule.min_order_size}. The order will not be created."
)
order_update: OrderUpdate = OrderUpdate(
client_order_id=order_id,
trading_pair=trading_pair,
update_timestamp=int(self.current_timestamp * 1e3),
new_state=OrderState.FAILED,
)
self._order_tracker.process_order_update(order_update)
return
order_result = None
amount_str = f"{amount:f}"
price_str = f"{price:f}"
type_str = BtcturkExchange.btcturk_order_type(order_type)
side_str = CONSTANTS.SIDE_BUY if trade_type is TradeType.BUY else CONSTANTS.SIDE_SELL
symbol = await BtcturkAPIOrderBookDataSource.exchange_symbol_associated_to_pair(
trading_pair=trading_pair, api_factory=self._api_factory, throttler=self._throttler
)
api_params = {
"pairSymbol": symbol,
"orderType": side_str,
"quantity": amount_str,
"orderMethod": type_str,
"newOrderClientId": order_id,
"price": price_str,
}
# if order_type == OrderType.LIMIT:
# api_params["timeInForce"] = CONSTANTS.TIME_IN_FORCE_GTC
try:
order_result = await self._api_request(
method=RESTMethod.POST,
path_url=CONSTANTS.ORDER_PATH,
data=json.dumps(api_params),
is_auth_required=True,
limit_path_url=CONSTANTS.ORDER_PATH
)
exchange_order_id = str(order_result["data"]["id"])
order_update: OrderUpdate = OrderUpdate(
client_order_id=order_id,
exchange_order_id=exchange_order_id,
trading_pair=trading_pair,
update_timestamp=int(order_result["data"]["datetime"]),
new_state=OrderState.OPEN,
)
self._order_tracker.process_order_update(order_update)
except asyncio.CancelledError:
raise
except Exception as e:
self.logger().network(
f"Error submitting {side_str} {type_str} order to Btcturk for " f"{amount} {trading_pair} " f"{price}.",
exc_info=True,
app_warning_msg=str(e),
)
order_update: OrderUpdate = OrderUpdate(
client_order_id=order_id,
trading_pair=trading_pair,
update_timestamp=int(self.current_timestamp * 1e3),
new_state=OrderState.FAILED,
)
self._order_tracker.process_order_update(order_update)
# TODO
# Awaits the cancel request
async def _execute_cancel(self, trading_pair: str, order_id: str):
"""
Requests the exchange to cancel an active order
:param trading_pair: the trading pair the order to cancel operates with
:param order_id: the client id of the order to cancel
"""
tracked_order = self._order_tracker.fetch_tracked_order(order_id)
if tracked_order is not None:
try:
ex_order_id = tracked_order.exchange_order_id
cancel_result = await self._api_request(
method=RESTMethod.DELETE,
path_url=CONSTANTS.ORDER_CANCEL_PATH.format(ex_order_id),
is_auth_required=True,
limit_path_url=CONSTANTS.ORDER_CANCEL_PATH
)
if cancel_result.get("message") == "SUCCESS":
order_update: OrderUpdate = OrderUpdate(
client_order_id=order_id,
trading_pair=tracked_order.trading_pair,
update_timestamp=int(self.current_timestamp * 1e3),
new_state=OrderState.CANCELLED,
)
cancel_result["cancelled_order_id"] = order_id
self._order_tracker.process_order_update(order_update)
return cancel_result
except asyncio.CancelledError:
raise
except Exception:
                self.logger().exception(f"There was an error when requesting cancellation of order {order_id}")
raise
async def _status_polling_loop(self):
"""
Performs all required operation to keep the connector updated and synchronized with the exchange.
It contains the backup logic to update status using API requests in case the main update source (the user stream
data source websocket) fails.
        It also updates the time synchronizer. This is necessary because Btcturk requires the time of the client to be
        the same as the time in the exchange.
Executes when the _poll_notifier event is enabled by the `tick` function.
"""
while True:
try:
await self._poll_notifier.wait()
# await self._update_time_synchronizer()
await safe_gather(
self._update_balances(),
# self._update_order_fills_from_trades(),
)
await self._update_order_status()
self._last_poll_timestamp = self.current_timestamp
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(
"Unexpected error while fetching account updates.",
exc_info=True,
app_warning_msg="Could not fetch account updates from Btcturk. "
"Check API key and network connection.",
)
await asyncio.sleep(0.5)
finally:
self._poll_notifier = asyncio.Event()
async def _trading_rules_polling_loop(self):
"""
Updates the trading rules by requesting the latest definitions from the exchange.
Executes regularly every 30 minutes
"""
while True:
try:
await safe_gather(
self._update_trading_rules(),
)
await asyncio.sleep(30 * 60)
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(
"Unexpected error while fetching trading rules.",
exc_info=True,
app_warning_msg="Could not fetch new trading rules from Btcturk. " "Check network connection.",
)
await asyncio.sleep(0.5)
async def _update_trading_rules(self):
exchange_info = await self._api_request(method=RESTMethod.GET, path_url=CONSTANTS.EXCHANGE_INFO_PATH_URL)
trading_rules_list = await self._format_trading_rules(exchange_info)
self._trading_rules.clear()
for trading_rule in trading_rules_list:
self._trading_rules[trading_rule.trading_pair] = trading_rule
async def _format_trading_rules(self, exchange_info_dict: Dict[str, Any]) -> List[TradingRule]:
trading_pair_rules = exchange_info_dict["data"].get("symbols", [])
retval = []
for rule in filter(btcturk_utils.is_exchange_information_valid, trading_pair_rules):
try:
trading_pair = await BtcturkAPIOrderBookDataSource.trading_pair_associated_to_exchange_symbol(
symbol=rule.get("name"),
api_factory=self._api_factory,
throttler=self._throttler,
)
filters = rule.get("filters")
numeratorScale = Decimal(str(rule.get("numeratorScale")))
denominatorScale = Decimal(str(rule.get("denominatorScale")))
px = Decimal(str(rule.get("minimumLimitOrderPrice"))) * 10
min_notional = Decimal(filters[0].get("minExchangeValue"))
min_order_size = min_notional / px
tick_size = Decimal(10 ** (-denominatorScale)) if rule.get("hasFraction") else 1
# tick_size = Decimal(filters[0].get("tickSize"))
step_size_btcturk = Decimal(10 ** (-numeratorScale)) if rule.get("hasFraction") else 1
retval.append(
TradingRule(
trading_pair,
min_order_size=Decimal(min_order_size),
min_price_increment=Decimal(tick_size),
min_base_amount_increment=Decimal(step_size_btcturk),
min_notional_size=Decimal(min_notional),
)
)
# self.logger().error(f"retval: {retval}")
except Exception:
self.logger().exception(f"Error parsing the trading pair rule {rule}. Skipping.")
return retval
async def _user_stream_event_listener(self):
"""
This functions runs in background continuously processing the events received from the exchange by the user
stream data source. It keeps reading events from the queue until the task is interrupted.
The events received are balance updates, order updates and trade events.
"""
# TODO thinking about how to get user private stream to _iter_user_event_queue()
# Assuming btcturk_api_user_stream_data_source listen_for_user_stream automatically add
# user related messages to user event queue
async for event_message in self._iter_user_event_queue():
try:
event_type = event_message[0]
if event_type == 201:
safe_ensure_future(self._update_balances())
if event_type == 441:
client_order_id = event_message[1].get("clientId", None)
tracked_order = self._order_tracker.fetch_order(client_order_id=client_order_id)
if tracked_order is not None:
quote_ccy = btcturk_utils.convert_from_exchange_trading_pair_to_quote_ccy(tracked_order.trading_pair)
trade_id = btcturk_utils.get_trade_id()
trade_update = TradeUpdate(
trade_id=trade_id,
client_order_id=client_order_id,
exchange_order_id=str(event_message[1]["id"]),
trading_pair=tracked_order.trading_pair,
fee_asset=quote_ccy,
fee_paid=Decimal(str(event_message[1]["amount"])) * Decimal(str(event_message[1]["price"])) * Decimal(btcturk_utils.DEFAULT_FEES[0]),
fill_base_amount=Decimal(str(event_message[1]["amount"])),
fill_quote_amount=Decimal(str(event_message[1]["amount"])) * Decimal(str(event_message[1]["price"])),
fill_price=Decimal(str(event_message[1]["price"])),
fill_timestamp=int(self.current_timestamp * 1e3),
)
self._order_tracker.process_trade_update(trade_update)
elif event_type in [451, 452, 453]:
client_order_id = event_message[1].get("newOrderClientId")
else:
client_order_id = 0
tracked_order = self.in_flight_orders.get(client_order_id, None)
if tracked_order is not None:
old_state = tracked_order.current_state
new_state = old_state
if event_type == 451:
new_state = CONSTANTS.ORDER_STATE["NEW"]
elif event_type == 452:
new_state = CONSTANTS.ORDER_STATE["CANCELED"]
elif event_type == 441:
if math.isclose(tracked_order.amount, tracked_order.executed_amount_base):
new_state = CONSTANTS.ORDER_STATE["FILLED"]
else:
new_state = CONSTANTS.ORDER_STATE["PARTIALLY_FILLED"]
else:
new_state = old_state
order_update = OrderUpdate(
trading_pair=tracked_order.trading_pair,
update_timestamp=int(self.current_timestamp * 1e3),
new_state=new_state,
client_order_id=client_order_id,
exchange_order_id=str(event_message[1]["id"]),
)
self._order_tracker.process_order_update(order_update=order_update)
# # event_type is channel number for btcturk
# event_type = event_message[0]
# # Refer to https://github.com/binance-exchange/binance-official-api-docs/blob/master/user-data-stream.md
# # As per the order update section in Binance the ID of the order being cancelled is under the "C" key
# # https://binance-docs.github.io/apidocs/spot/en/#listen-key-isolated-margin
# # User related channels in ws:
# # 201 = BalanceUpdate, 441 = Order Executed, 451 = OrderReceived
# # 452 = OrderDelete, 453 = OrderUpdate
# filled = False
# if event_type in [451, 452, 453]:
# client_order_id = event_message[1].get("newOrderClientId", None)
# # Partial filled order issue needs to be solved
# elif event_type == 441:
# client_order_id = event_message[1].get("clientId", None)
# tracked_order = self._order_tracker.fetch_order(client_order_id=client_order_id)
# # btcturk only provides exchange order id, not trade_id
# if Decimal(str(event_message[1]["amount"])) > 0:
# filled = True
# if tracked_order is not None:
# quote_ccy = btcturk_utils.convert_from_exchange_trading_pair_to_quote_ccy(tracked_order.trading_pair)
# trade_id = btcturk_utils.get_trade_id()
# trade_update = TradeUpdate(
# trade_id=trade_id,
# client_order_id=client_order_id,
# exchange_order_id=str(event_message[1]["id"]),
# trading_pair=tracked_order.trading_pair,
# fee_asset=quote_ccy,
# fee_paid=Decimal(str(event_message[1]["amount"])) * Decimal(str(event_message[1]["price"])) * Decimal(btcturk_utils.DEFAULT_FEES[0]),
# fill_base_amount=Decimal(str(event_message[1]["amount"])),
# fill_quote_amount=Decimal(str(event_message[1]["amount"])) * Decimal(str(event_message[1]["price"])),
# fill_price=Decimal(str(event_message[1]["price"])),
# fill_timestamp=int(self.current_timestamp * 1e3),
# )
# self._order_tracker.process_trade_update(trade_update)
# else:
# client_order_id = 0
# tracked_order = self.in_flight_orders.get(client_order_id)
# if tracked_order is not None:
# # TODO
# old_state = tracked_order.current_state
# new_state = old_state
# if filled: # This already implies event type is 441
# new_state = OrderState.PARTIALLY_FILLED
# # TODO
# if math.isclose(tracked_order.amount, tracked_order.executed_amount_base):
# new_state = OrderState.FILLED
# elif event_type == 451:
# new_state = OrderState.OPEN
# elif event_type == 452:
# if old_state == OrderState.PENDING_CREATE:
# new_state = OrderState.FAILED
# else:
# new_state = OrderState.CANCELLED
# elif event_type == 453:
# new_state = old_state
# order_update = OrderUpdate(
# trading_pair=tracked_order.trading_pair,
# update_timestamp=int(self.current_timestamp * 1e3),
# new_state=new_state,
# client_order_id=client_order_id,
# exchange_order_id=str(event_message[1]["id"]),
# )
# self._order_tracker.process_order_update(order_update=order_update)
# btcturk not providing any details about 201-BalanceUpdate message
# elif event_type == "outboundAccountPosition":
# balances = event_message["B"]
# for balance_entry in balances:
# asset_name = balance_entry["a"]
# free_balance = Decimal(balance_entry["f"])
# total_balance = Decimal(balance_entry["f"]) + Decimal(balance_entry["l"])
# self._account_available_balances[asset_name] = free_balance
# self._account_balances[asset_name] = total_balance
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error in user stream listener loop.", exc_info=True)
await asyncio.sleep(5.0)
async def _update_order_fills_from_trades(self):
"""
This is intended to be a backup measure to get filled events with trade ID for orders,
        in case Btcturk's user stream events are not working.
NOTE: It is not required to copy this functionality in other connectors.
This is separated from _update_order_status which only updates the order status without producing filled
        events, since Btcturk's get order endpoint does not return trade IDs.
The minimum poll interval for order status is 10 seconds.
"""
small_interval_last_tick = self._last_poll_timestamp / self.UPDATE_ORDER_STATUS_MIN_INTERVAL
small_interval_current_tick = self.current_timestamp / self.UPDATE_ORDER_STATUS_MIN_INTERVAL
long_interval_last_tick = self._last_poll_timestamp / self.LONG_POLL_INTERVAL
long_interval_current_tick = self.current_timestamp / self.LONG_POLL_INTERVAL
if long_interval_current_tick > long_interval_last_tick or (
self.in_flight_orders and small_interval_current_tick > small_interval_last_tick
):
query_time = int(self._last_trades_poll_btcturk_timestamp * 1e3)
self._last_trades_poll_btcturk_timestamp = self._btcturk_time_synchronizer.time()
order_by_exchange_id_map = {}
for order in self._order_tracker.all_orders.values():
order_by_exchange_id_map[order.exchange_order_id] = order
# TODO
tasks = []
trading_pairs = self._order_book_tracker._trading_pairs
for trading_pair in trading_pairs:
symbol = await BtcturkAPIOrderBookDataSource.exchange_symbol_associated_to_pair(
trading_pair=trading_pair,
api_factory=self._api_factory,
throttler=self._throttler,
)
base_symbol = btcturk_utils.convert_from_exchange_trading_pair_to_base_quote(symbol)
startDate = query_time
# if self._last_poll_timestamp > 0:
# startDate = query_time
tasks.append(
self._api_request(
method=RESTMethod.GET,
path_url=CONSTANTS.MY_TRADES_PATH_URL.format(base_symbol, startDate),
is_auth_required=True,
limit_path_url=CONSTANTS.MY_TRADES_PATH_URL
)
)
self.logger().debug(f"Polling for order fills of {len(tasks)} trading pairs.")
results = await safe_gather(*tasks, return_exceptions=True)
for trades, trading_pair in zip(results, trading_pairs):
if isinstance(trades, Exception):
self.logger().network(
f"Error fetching trades update for the order {trading_pair}: {trades}.",
app_warning_msg=f"Failed to fetch trade update for {trading_pair}.",
)
continue
# TODO response object quite different from binance. numeratorSymbol etc.
for trade in trades["data"]:
exchange_order_id = str(trade["orderId"])
if exchange_order_id in order_by_exchange_id_map:
# This is a fill for a tracked order
tracked_order = order_by_exchange_id_map[exchange_order_id]
trade_update = TradeUpdate(
trade_id=tracked_order.trade_id,
client_order_id=tracked_order.client_order_id,
exchange_order_id=exchange_order_id,
trading_pair=trading_pair,
fee_asset=trade["denominatorSymbol"],
fee_paid=abs(Decimal(str(trade["fee"])) + Decimal(str(trade["tax"]))),
fill_base_amount=Decimal(abs(int(str(trade["amount"])))),
# fill_quote_amount=Decimal(trade["quoteQty"]),
fill_price=Decimal(str(trade["price"])),
fill_timestamp=int(trade["timestamp"]),
)
self._order_tracker.process_trade_update(trade_update)
elif self.is_confirmed_new_order_filled_event(tracked_order.trade_id, exchange_order_id, trading_pair):
# This is a fill of an order registered in the DB but not tracked any more
self._current_trade_fills.add(
TradeFillOrderDetails(
market=self.display_name, exchange_trade_id=tracked_order.trade_id, symbol=trading_pair
)
)
self.trigger_event(
MarketEvent.OrderFilled,
OrderFilledEvent(
timestamp=float(trade["timestamp"]) * 1e-3,
order_id=self._exchange_order_ids.get(str(trade["orderId"]), None),
trading_pair=trading_pair,
trade_type=TradeType.SELL if str(trade["data"]["orderType"]) == "sell" else TradeType.BUY,
# order_type=OrderType.LIMIT_MAKER if trade["isMaker"] else OrderType.LIMIT,
# price=Decimal(trade["price"]),
amount=Decimal(abs(int(str(trade["amount"])))),
trade_fee=DeductedFromReturnsTradeFee(
flat_fees=[TokenAmount(fee_asset=trade["denominatorSymbol"]), abs(Decimal(str(trade["fee"])) + Decimal(str(trade["tax"])))]
),
exchange_trade_id=tracked_order.trade_id,
),
)
self.logger().info(f"Recreating missing trade in TradeFill: {trade}")
async def _update_order_status(self):
        # This is intended to be a backup measure to close straggler orders, in case Btcturk's user stream events
# are not working.
# The minimum poll interval for order status is 10 seconds.
last_tick = self._last_poll_timestamp / self.UPDATE_ORDER_STATUS_MIN_INTERVAL
current_tick = self.current_timestamp / self.UPDATE_ORDER_STATUS_MIN_INTERVAL
tracked_orders: List[InFlightOrder] = list(self.in_flight_orders.values())
if current_tick > last_tick and len(tracked_orders) > 0:
# Only pairSymbol needed for orders, we cant use origClientOrderId; so each result response
# contains multiple open orders.
tasks = [
self._api_request(
method=RESTMethod.GET,
path_url=CONSTANTS.GET_SINGLE_ORDER_PATH.format(o.exchange_order_id),
limit_path_url=CONSTANTS.ORDER_PATH,
is_auth_required=True,
)
for o in tracked_orders
]
self.logger().debug(f"Polling for order status updates of {len(tasks)} orders.")
results = await safe_gather(*tasks, return_exceptions=True)
            updated_results = []
            for i in results:
                # Keep one entry per tracked order so the zip below stays aligned,
                # and avoid calling .get() on exceptions returned by safe_gather
                if isinstance(i, Exception):
                    updated_results.append(i)
                elif i.get("data", None):
                    updated_results.append(i.get("data"))
                else:
                    # A response without a "data" payload is treated as a failed update
                    updated_results.append(IOError(f"Unexpected order status response: {i}"))
for order_update, tracked_order in zip(updated_results, tracked_orders):
client_order_id = tracked_order.client_order_id
# If the order has already been cancelled or has failed do nothing
if client_order_id not in self.in_flight_orders:
continue
if isinstance(order_update, Exception):
self.logger().network(
f"Error fetching status update for the order {client_order_id}: {order_update}.",
app_warning_msg=f"Failed to fetch status update for the order {client_order_id}.",
)
self._order_not_found_records[client_order_id] = (
self._order_not_found_records.get(client_order_id, 0) + 1
)
if (
self._order_not_found_records[client_order_id]
>= self.MAX_ORDER_UPDATE_RETRIEVAL_RETRIES_WITH_FAILURES
):
# Wait until the order not found error have repeated a few times before actually treating
# it as failed. See: https://github.com/CoinAlpha/hummingbot/issues/601
order_update: OrderUpdate = OrderUpdate(
client_order_id=client_order_id,
trading_pair=tracked_order.trading_pair,
update_timestamp=int(self.current_timestamp * 1e3),
new_state=OrderState.FAILED,
)
self._order_tracker.process_order_update(order_update)
else:
# Update order execution status
# new_state = CONSTANTS.ORDER_STATE[order_update["status"]]
if str(order_update["status"]) == "Untouched":
new_state = CONSTANTS.ORDER_STATE["NEW"]
elif str(order_update["status"]) == "Closed":
new_state = CONSTANTS.ORDER_STATE["FILLED"]
elif order_update["status"] == "Canceled":
new_state = CONSTANTS.ORDER_STATE["CANCELED"]
update = OrderUpdate(
client_order_id=client_order_id,
exchange_order_id=str(order_update["orderClientId"]),
trading_pair=tracked_order.trading_pair,
update_timestamp=int(str(order_update["updateTime"])),
new_state=new_state,
)
self._order_tracker.process_order_update(update)
async def _iter_user_event_queue(self) -> AsyncIterable[Dict[str, any]]:
while True:
try:
yield await self._user_stream_tracker.user_stream.get()
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(
"Unknown error. Retrying after 1 seconds.",
exc_info=True,
app_warning_msg="Could not fetch user events from Btcturk. Check API key and network connection.",
)
await asyncio.sleep(1.0)
async def _update_balances(self):
local_asset_names = set(self._account_balances.keys())
remote_asset_names = set()
try:
account_info = await self._api_request(
method=RESTMethod.GET, path_url=CONSTANTS.ACCOUNTS_PATH_URL, is_auth_required=True
)
balances = account_info["data"]
for balance_entry in balances:
asset_name = balance_entry["asset"]
free_balance = Decimal(balance_entry["free"])
total_balance = Decimal(balance_entry["free"]) + Decimal(balance_entry["locked"])
self._account_available_balances[asset_name] = free_balance
self._account_balances[asset_name] = total_balance
remote_asset_names.add(asset_name)
asset_names_to_remove = local_asset_names.difference(remote_asset_names)
for asset_name in asset_names_to_remove:
del self._account_available_balances[asset_name]
del self._account_balances[asset_name]
except IOError:
self.logger().exception("Error getting account balances from server")
async def _update_time_synchronizer(self):
try:
await self._btcturk_time_synchronizer.update_server_time_offset_with_time_provider(
time_provider=self._get_current_server_time()
)
except asyncio.CancelledError:
raise
except Exception:
self.logger().exception("Error requesting time from Btcturk server")
raise
async def _api_request(
self,
method: RESTMethod,
path_url: str,
params: Optional[Dict[str, Any]] = None,
data: Optional[Dict[str, Any]] = None,
is_auth_required: bool = False,
limit_path_url: Optional[str] = None,
) -> Dict[str, Any]:
client = await self._get_rest_assistant()
if is_auth_required:
url = btcturk_utils.private_rest_url(path_url)
headers = self._auth.header_for_authentication()
request = RESTRequest(
method=method, url=url, data=data, params=params, headers=headers, is_auth_required=is_auth_required
)
else:
url = btcturk_utils.public_rest_url(path_url)
request = RESTRequest(
method=method, url=url, data=data, params=params, is_auth_required=is_auth_required
)
limit_id_path = path_url if limit_path_url is None else limit_path_url
async with self._throttler.execute_task(limit_id=limit_id_path):
response = await client.call(request)
if response.status != 200:
data = await response.text()
raise IOError(f"Error fetching data from {url}. HTTP status is {response.status} ({data}) request: {request}.")
try:
parsed_response = await response.json()
except Exception:
raise IOError(f"Error parsing data from {response}.")
if "code" in parsed_response and "msg" in parsed_response:
raise IOError(f"The request to Btcturk failed. Error: {parsed_response}. Request: {request}")
return parsed_response
async def _get_current_server_time(self):
response = await self._api_request(
method=RESTMethod.GET,
path_url=CONSTANTS.EXCHANGE_INFO_PATH_URL,
)
return response["data"]["serverTime"]
async def _get_rest_assistant(self) -> RESTAssistant:
if self._rest_assistant is None:
self._rest_assistant = await self._api_factory.get_rest_assistant()
return self._rest_assistant
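# Hedged illustration (hypothetical helper, not part of the connector API):
# the status-polling loop above only marks an order FAILED after the not-found
# error has repeated MAX_ORDER_UPDATE_RETRIEVAL_RETRIES_WITH_FAILURES times;
# the counting pattern looks like this in isolation.
def _should_mark_failed(not_found_records: Dict[str, int], order_id: str, max_retries: int = 3) -> bool:
    """Return True once order_id has been reported missing max_retries times."""
    not_found_records[order_id] = not_found_records.get(order_id, 0) + 1
    return not_found_records[order_id] >= max_retries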
| [
"logging.getLogger",
"hummingbot.connector.exchange.btcturk.btcturk_utils.get_new_client_order_id",
"hummingbot.connector.exchange.btcturk.btcturk_constants.MY_TRADES_PATH_URL.format",
"hummingbot.connector.exchange.btcturk.btcturk_order_book_tracker.BtcturkOrderBookTracker",
"hummingbot.connector.exchange.btcturk.btcturk_utils.get_trade_id",
"decimal.Decimal",
"hummingbot.core.data_type.cancellation_result.CancellationResult",
"hummingbot.connector.time_synchronizer.TimeSynchronizer",
"json.dumps",
"asyncio.sleep",
"hummingbot.connector.exchange.btcturk.btcturk_utils.convert_from_exchange_trading_pair_to_base_quote",
"hummingbot.connector.exchange.btcturk.btcturk_utils.public_rest_url",
"asyncio.get_event_loop",
"hummingbot.connector.exchange.btcturk.btcturk_api_order_book_data_source.BtcturkAPIOrderBookDataSource.trading_pair_symbol_map_ready",
"hummingbot.connector.exchange.btcturk.btcturk_auth.BtcturkAuth",
"hummingbot.connector.client_order_tracker.ClientOrderTracker",
"hummingbot.connector.exchange.btcturk.btcturk_utils.convert_from_exchange_trading_pair_to_quote_ccy",
"hummingbot.core.data_type.trade_fee.TokenAmount",
"hummingbot.core.web_assistant.web_assistants_factory.WebAssistantsFactory",
"hummingbot.connector.utils.TradeFillOrderDetails",
"time.time",
"hummingbot.connector.exchange.btcturk.btcturk_constants.GET_SINGLE_ORDER_PATH.format",
"hummingbot.core.utils.async_utils.safe_gather",
"hummingbot.core.data_type.in_flight_order.InFlightOrder",
"hummingbot.core.web_assistant.connections.data_types.RESTRequest",
"math.isclose",
"hummingbot.connector.exchange.btcturk.btcturk_utils.private_rest_url",
"async_timeout.timeout",
"hummingbot.connector.exchange.btcturk.btcturk_user_stream_tracker.BtcturkUserStreamTracker",
"asyncio.Event",
"hummingbot.connector.exchange.btcturk.btcturk_constants.ORDER_CANCEL_PATH.format",
"hummingbot.core.api_throttler.async_throttler.AsyncThrottler",
"hummingbot.connector.exchange.btcturk.btcturk_api_order_book_data_source.BtcturkAPIOrderBookDataSource.exchange_symbol_associated_to_pair"
] | [((2154, 2164), 'decimal.Decimal', 'Decimal', (['(0)'], {}), '(0)\n', (2161, 2164), False, 'from decimal import Decimal\n'), ((2181, 2195), 'decimal.Decimal', 'Decimal', (['"""nan"""'], {}), "('nan')\n", (2188, 2195), False, 'from decimal import Decimal\n'), ((2633, 2651), 'hummingbot.connector.time_synchronizer.TimeSynchronizer', 'TimeSynchronizer', ([], {}), '()\n', (2649, 2651), False, 'from hummingbot.connector.time_synchronizer import TimeSynchronizer\n'), ((2750, 2868), 'hummingbot.connector.exchange.btcturk.btcturk_auth.BtcturkAuth', 'BtcturkAuth', ([], {'api_key': 'btcturk_api_key', 'secret_key': 'btcturk_api_secret', 'time_provider': 'self._btcturk_time_synchronizer'}), '(api_key=btcturk_api_key, secret_key=btcturk_api_secret,\n time_provider=self._btcturk_time_synchronizer)\n', (2761, 2868), False, 'from hummingbot.connector.exchange.btcturk.btcturk_auth import BtcturkAuth\n'), ((2915, 2952), 'hummingbot.core.web_assistant.web_assistants_factory.WebAssistantsFactory', 'WebAssistantsFactory', ([], {'auth': 'self._auth'}), '(auth=self._auth)\n', (2935, 2952), False, 'from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory\n'), ((3015, 3052), 'hummingbot.core.api_throttler.async_throttler.AsyncThrottler', 'AsyncThrottler', (['CONSTANTS.RATE_LIMITS'], {}), '(CONSTANTS.RATE_LIMITS)\n', (3029, 3052), False, 'from hummingbot.core.api_throttler.async_throttler import AsyncThrottler\n'), ((3088, 3203), 'hummingbot.connector.exchange.btcturk.btcturk_order_book_tracker.BtcturkOrderBookTracker', 'BtcturkOrderBookTracker', ([], {'trading_pairs': 'trading_pairs', 'api_factory': 'self._api_factory', 'throttler': 'self._throttler'}), '(trading_pairs=trading_pairs, api_factory=self.\n _api_factory, throttler=self._throttler)\n', (3111, 3203), False, 'from hummingbot.connector.exchange.btcturk.btcturk_order_book_tracker import BtcturkOrderBookTracker\n'), ((3257, 3325), 'hummingbot.connector.exchange.btcturk.btcturk_user_stream_tracker.BtcturkUserStreamTracker', 'BtcturkUserStreamTracker', ([], {'auth': 'self._auth', 'throttler': 'self._throttler'}), '(auth=self._auth, throttler=self._throttler)\n', (3281, 3325), False, 'from hummingbot.connector.exchange.btcturk.btcturk_user_stream_tracker import BtcturkUserStreamTracker\n'), ((3350, 3374), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3372, 3374), False, 'import asyncio\n'), ((3405, 3420), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (3418, 3420), False, 'import asyncio\n'), ((4101, 4135), 'hummingbot.connector.client_order_tracker.ClientOrderTracker', 'ClientOrderTracker', ([], {'connector': 'self'}), '(connector=self)\n', (4119, 4135), False, 'from hummingbot.connector.client_order_tracker import ClientOrderTracker\n'), ((8126, 8141), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (8139, 8141), False, 'import asyncio\n'), ((9905, 9916), 'time.time', 'time.time', ([], {}), '()\n', (9914, 9916), False, 'import time\n'), ((16442, 16519), 'hummingbot.connector.exchange.btcturk.btcturk_utils.get_new_client_order_id', 'btcturk_utils.get_new_client_order_id', ([], {'is_buy': '(True)', 'trading_pair': 'trading_pair'}), '(is_buy=True, trading_pair=trading_pair)\n', (16479, 16519), False, 'from hummingbot.connector.exchange.btcturk import btcturk_utils\n'), ((17283, 17361), 'hummingbot.connector.exchange.btcturk.btcturk_utils.get_new_client_order_id', 'btcturk_utils.get_new_client_order_id', ([], {'is_buy': '(False)', 'trading_pair': 'trading_pair'}), '(is_buy=False, trading_pair=trading_pair)\n', (17320, 17361), False, 'from hummingbot.connector.exchange.btcturk import btcturk_utils\n'), ((19940, 19954), 'decimal.Decimal', 'Decimal', (['"""NaN"""'], {}), "('NaN')\n", (19947, 19954), False, 'from decimal import Decimal\n'), ((4271, 4298), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4288, 4298), False, 'import logging\n'), ((5796, 5857), 'hummingbot.connector.exchange.btcturk.btcturk_api_order_book_data_source.BtcturkAPIOrderBookDataSource.trading_pair_symbol_map_ready', 'BtcturkAPIOrderBookDataSource.trading_pair_symbol_map_ready', ([], {}), '()\n', (5855, 5857), False, 'from hummingbot.connector.exchange.btcturk.btcturk_api_order_book_data_source import BtcturkAPIOrderBookDataSource\n'), ((11670, 11855), 'hummingbot.core.data_type.in_flight_order.InFlightOrder', 'InFlightOrder', ([], {'client_order_id': 'order_id', 'exchange_order_id': 'exchange_order_id', 'trading_pair': 'trading_pair', 'order_type': 'order_type', 'trade_type': 'trade_type', 'amount': 'amount', 'price': 'price'}), '(client_order_id=order_id, exchange_order_id=exchange_order_id,\n trading_pair=trading_pair, order_type=order_type, trade_type=trade_type,\n amount=amount, price=price)\n', (11683, 11855), False, 'from hummingbot.core.data_type.in_flight_order import InFlightOrder, OrderUpdate, OrderState, TradeUpdate\n'), ((19527, 19557), 'hummingbot.core.data_type.cancellation_result.CancellationResult', 'CancellationResult', (['oid', '(False)'], {}), '(oid, False)\n', (19545, 19557), False, 'from hummingbot.core.data_type.cancellation_result import CancellationResult\n'), ((20602, 20614), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (20609, 20614), False, 'from decimal import Decimal\n'), ((21949, 22103), 'hummingbot.connector.exchange.btcturk.btcturk_api_order_book_data_source.BtcturkAPIOrderBookDataSource.exchange_symbol_associated_to_pair', 'BtcturkAPIOrderBookDataSource.exchange_symbol_associated_to_pair', ([], {'trading_pair': 'trading_pair', 'api_factory': 'self._api_factory', 'throttler': 'self._throttler'}), '(trading_pair\n =trading_pair, api_factory=self._api_factory, throttler=self._throttler)\n', (22013, 22103), False, 'from hummingbot.connector.exchange.btcturk.btcturk_api_order_book_data_source import BtcturkAPIOrderBookDataSource\n'), ((51857, 51897), 'hummingbot.connector.exchange.btcturk.btcturk_utils.private_rest_url', 'btcturk_utils.private_rest_url', (['path_url'], {}), '(path_url)\n', (51887, 51897), False, 'from hummingbot.connector.exchange.btcturk import btcturk_utils\n'), ((51981, 52099), 'hummingbot.core.web_assistant.connections.data_types.RESTRequest', 'RESTRequest', ([], {'method': 'method', 'url': 'url', 'data': 'data', 'params': 'params', 'headers': 'headers', 'is_auth_required': 'is_auth_required'}), '(method=method, url=url, data=data, params=params, headers=\n headers, is_auth_required=is_auth_required)\n', (51992, 52099), False, 'from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest\n'), ((52157, 52196), 'hummingbot.connector.exchange.btcturk.btcturk_utils.public_rest_url', 'btcturk_utils.public_rest_url', (['path_url'], {}), '(path_url)\n', (52186, 52196), False, 'from hummingbot.connector.exchange.btcturk import btcturk_utils\n'), ((52219, 52319), 'hummingbot.core.web_assistant.connections.data_types.RESTRequest', 'RESTRequest', ([], {'method': 'method', 'url': 'url', 'data': 'data', 'params': 'params', 'is_auth_required': 'is_auth_required'}), '(method=method, url=url, data=data, params=params,\n is_auth_required=is_auth_required)\n', (52230, 52319), False, 'from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest\n'), ((14558, 14573), 'decimal.Decimal', 'Decimal', (['"""1.01"""'], {}), "('1.01')\n", (14565, 14573), False, 'from decimal import Decimal\n'), ((18670, 18694), 'async_timeout.timeout', 'timeout', (['timeout_seconds'], {}), '(timeout_seconds)\n', (18677, 18694), False, 'from async_timeout import timeout\n'), ((27042, 27057), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (27055, 27057), False, 'import asyncio\n'), ((40904, 40974), 'hummingbot.connector.exchange.btcturk.btcturk_utils.convert_from_exchange_trading_pair_to_base_quote', 'btcturk_utils.convert_from_exchange_trading_pair_to_base_quote', (['symbol'], {}), '(symbol)\n', (40966, 40974), False, 'from hummingbot.connector.exchange.btcturk import btcturk_utils\n'), ((41597, 41640), 'hummingbot.core.utils.async_utils.safe_gather', 'safe_gather', (['*tasks'], {'return_exceptions': '(True)'}), '(*tasks, return_exceptions=True)\n', (41608, 41640), False, 'from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather\n'), ((46291, 46334), 'hummingbot.core.utils.async_utils.safe_gather', 'safe_gather', (['*tasks'], {'return_exceptions': '(True)'}), '(*tasks, return_exceptions=True)\n', (46302, 46334), False, 'from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather\n'), ((50354, 50384), 'decimal.Decimal', 'Decimal', (["balance_entry['free']"], {}), "(balance_entry['free'])\n", (50361, 50384), False, 'from decimal import Decimal\n'), ((18741, 18784), 'hummingbot.core.utils.async_utils.safe_gather', 'safe_gather', (['*tasks'], {'return_exceptions': '(True)'}), '(*tasks, return_exceptions=True)\n', (18752, 18784), False, 'from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather\n'), ((27428, 27450), 'asyncio.sleep', 'asyncio.sleep', (['(30 * 60)'], {}), '(30 * 60)\n', (27441, 27450), False, 'import asyncio\n'), ((29265, 29297), 'decimal.Decimal', 'Decimal', (['(10 ** -denominatorScale)'], {}), '(10 ** -denominatorScale)\n', (29272, 29297), False, 'from decimal import Decimal\n'), ((29436, 29466), 'decimal.Decimal', 'Decimal', (['(10 ** -numeratorScale)'], {}), '(10 ** -numeratorScale)\n', (29443, 29466), False, 'from decimal import Decimal\n'), ((40645, 40799), 'hummingbot.connector.exchange.btcturk.btcturk_api_order_book_data_source.BtcturkAPIOrderBookDataSource.exchange_symbol_associated_to_pair', 'BtcturkAPIOrderBookDataSource.exchange_symbol_associated_to_pair', ([], {'trading_pair': 'trading_pair', 'api_factory': 'self._api_factory', 'throttler': 'self._throttler'}), '(trading_pair\n =trading_pair, api_factory=self._api_factory, throttler=self._throttler)\n', (40709, 40799), False, 'from hummingbot.connector.exchange.btcturk.btcturk_api_order_book_data_source import BtcturkAPIOrderBookDataSource\n'), ((50417, 50447), 'decimal.Decimal', 'Decimal', (["balance_entry['free']"], {}), "(balance_entry['free'])\n", (50424, 50447), False, 'from decimal import Decimal\n'), ((50450, 50482), 'decimal.Decimal', 'Decimal', (["balance_entry['locked']"], {}), "(balance_entry['locked'])\n", (50457, 50482), False, 'from decimal import Decimal\n'), ((22658, 22680), 'json.dumps', 'json.dumps', (['api_params'], {}), '(api_params)\n', (22668, 22680), False, 'import json\n'), ((26964, 26982), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (26977, 26982), False, 'import asyncio\n'), ((27846, 27864), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (27859, 27864), False, 'import asyncio\n'), ((31267, 31361), 'hummingbot.connector.exchange.btcturk.btcturk_utils.convert_from_exchange_trading_pair_to_quote_ccy', 'btcturk_utils.convert_from_exchange_trading_pair_to_quote_ccy', (['tracked_order.trading_pair'], {}), '(tracked_order\n .trading_pair)\n', (31328, 31361), False, 'from hummingbot.connector.exchange.btcturk import btcturk_utils\n'), ((31392, 31420), 'hummingbot.connector.exchange.btcturk.btcturk_utils.get_trade_id', 'btcturk_utils.get_trade_id', ([], {}), '()\n', (31418, 31420), False, 'from hummingbot.connector.exchange.btcturk import btcturk_utils\n'), ((38964, 38982), 'asyncio.sleep', 'asyncio.sleep', (['(5.0)'], {}), '(5.0)\n', (38977, 38982), False, 'import asyncio\n'), ((45937, 45996), 'hummingbot.connector.exchange.btcturk.btcturk_constants.GET_SINGLE_ORDER_PATH.format', 'CONSTANTS.GET_SINGLE_ORDER_PATH.format', (['o.exchange_order_id'], {}), '(o.exchange_order_id)\n', (45975, 45996), True, 'import hummingbot.connector.exchange.btcturk.btcturk_constants as CONSTANTS\n'), ((49849, 49867), 'asyncio.sleep', 'asyncio.sleep', (['(1.0)'], {}), '(1.0)\n', (49862, 49867), False, 'import asyncio\n'), ((19180, 19221), 'hummingbot.core.data_type.cancellation_result.CancellationResult', 'CancellationResult', (['client_order_id', '(True)'], {}), '(client_order_id, True)\n', (19198, 19221), False, 'from hummingbot.core.data_type.cancellation_result import CancellationResult\n'), ((24562, 24609), 'hummingbot.connector.exchange.btcturk.btcturk_constants.ORDER_CANCEL_PATH.format', 'CONSTANTS.ORDER_CANCEL_PATH.format', (['ex_order_id'], {}), '(ex_order_id)\n', (24596, 24609), True, 'import hummingbot.connector.exchange.btcturk.btcturk_constants as CONSTANTS\n'), ((29645, 29668), 'decimal.Decimal', 'Decimal', (['min_order_size'], {}), '(min_order_size)\n', (29652, 29668), False, 'from decimal import Decimal\n'), ((29714, 29732), 'decimal.Decimal', 'Decimal', (['tick_size'], {}), '(tick_size)\n', (29721, 29732), False, 'from decimal import Decimal\n'), ((29784, 29810), 'decimal.Decimal', 'Decimal', (['step_size_btcturk'], {}), '(step_size_btcturk)\n', (29791, 29810), False, 'from decimal import Decimal\n'), ((29854, 29875), 'decimal.Decimal', 'Decimal', (['min_notional'], {}), '(min_notional)\n', (29861, 29875), False, 'from decimal import Decimal\n'), ((41261, 41320), 'hummingbot.connector.exchange.btcturk.btcturk_constants.MY_TRADES_PATH_URL.format', 'CONSTANTS.MY_TRADES_PATH_URL.format', (['base_symbol', 'startDate'], {}), '(base_symbol, startDate)\n', (41296, 41320), True, 'import hummingbot.connector.exchange.btcturk.btcturk_constants as CONSTANTS\n'), ((33132, 33202), 'math.isclose', 'math.isclose', (['tracked_order.amount', 'tracked_order.executed_amount_base'], {}), '(tracked_order.amount, tracked_order.executed_amount_base)\n', (33144, 33202), False, 'import math\n'), ((43641, 43756), 'hummingbot.connector.utils.TradeFillOrderDetails', 'TradeFillOrderDetails', ([], {'market': 'self.display_name', 'exchange_trade_id': 'tracked_order.trade_id', 'symbol': 'trading_pair'}), '(market=self.display_name, exchange_trade_id=\n tracked_order.trade_id, symbol=trading_pair)\n', (43662, 43756), False, 'from hummingbot.connector.utils import TradeFillOrderDetails\n'), ((31896, 31934), 'decimal.Decimal', 'Decimal', (['btcturk_utils.DEFAULT_FEES[0]'], {}), '(btcturk_utils.DEFAULT_FEES[0])\n', (31903, 31934), False, 'from decimal import Decimal\n'), ((44713, 44762), 'hummingbot.core.data_type.trade_fee.TokenAmount', 'TokenAmount', ([], {'fee_asset': "trade['denominatorSymbol']"}), "(fee_asset=trade['denominatorSymbol'])\n", (44724, 44762), False, 'from hummingbot.core.data_type.trade_fee import DeductedFromReturnsTradeFee, TokenAmount, TradeFeeBase\n')]
"""
Solutions for day 4 of the Advent of Code 2017.
"""
from itertools import combinations, permutations
def is_simple_passphrase(phrase):
"""
Checks whether a phrase contains no repeated words.
>>> is_simple_passphrase(["aa", "bb", "cc", "dd", "ee"])
True
>>> is_simple_passphrase(["aa", "bb", "cc", "dd", "aa"])
False
>>> is_simple_passphrase(["aa", "bb", "cc", "dd", "aaa"])
True
"""
return not any(phrase.count(word) > 1 for word in phrase)
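# Hedged note: for a list of strings this is equivalent to the O(n) set-based
# test len(set(phrase)) == len(phrase); the count-based form above is kept as
# in the original solution.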
def is_anagram_passphrase(phrase):
"""
Checks whether a phrase contains no words that are anagrams of other words.
>>> is_anagram_passphrase(["abcde", "fghij"])
True
>>> is_anagram_passphrase(["abcde", "xyz", "ecdab"])
False
>>> is_anagram_passphrase(["a", "ab", "abc", "abd", "abf", "abj"])
True
>>> is_anagram_passphrase(["iiii", "oiii", "ooii", "oooi", "oooo"])
True
>>> is_anagram_passphrase(["oiii", "ioii", "iioi", "iiio"])
False
"""
return not any(
any(
first_word == "".join(permutated_word)
for permutated_word in permutations(second_word)
)
for first_word, second_word in combinations(phrase, 2)
)
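# Hedged alternative (not part of the original solution): anagram detection
# can avoid the factorial permutation scan by comparing sorted letters.
def is_anagram_passphrase_fast(phrase):
    """Equivalent check: two words are anagrams iff their sorted letters match."""
    normalized = ["".join(sorted(word)) for word in phrase]
    return len(normalized) == len(set(normalized))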
| [
"itertools.combinations",
"itertools.permutations"
] | [((1174, 1197), 'itertools.combinations', 'combinations', (['phrase', '(2)'], {}), '(phrase, 2)\n', (1186, 1197), False, 'from itertools import combinations, permutations\n'), ((1099, 1124), 'itertools.permutations', 'permutations', (['second_word'], {}), '(second_word)\n', (1111, 1124), False, 'from itertools import combinations, permutations\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.Transformer.Layers import XavierLinear as Linear
# Relative Multihead Attention
# Only for self-attention
class RelMultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropatt=0,
tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False, proj_out=True):
super(RelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.qkv_net = Linear(d_model, 3 * n_head * d_head, bias=False)
self.dropatt = nn.Dropout(dropatt)
if proj_out:
self.o_net = Linear(n_head * d_head, n_head * d_head, bias=False)
else:
self.o_net = None
# self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
def _parallelogram_mask(self, h, w, left=False):
"""
:param h: Height (integer)
:param w: Width (Integer)
:param left: Boolean
:return: Mask (torch.ByteTensor)
"""
mask = torch.ones((h, w)).byte()
m = min(h, w)
mask[:m, :m] = torch.triu(mask[:m, :m])
mask[-m:, -m:] = torch.tril(mask[-m:, -m:])
if left:
return mask
else:
return mask.flip(0)
def _shift(self, x, qlen, klen, mask, left=False):
if qlen > 1:
zero_pad = torch.zeros((x.size(0), qlen - 1, x.size(2), x.size(3)),
device=x.device, dtype=x.dtype)
else:
zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
if left:
mask = mask.flip(1)
x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
else:
x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
x = x_padded.masked_select(mask[:, :, None, None]) \
.view(qlen, klen, x.size(2), x.size(3))
return x
# efficient computation of B and D term using shift
def _rel_shift(self, x, zero_triu=False):
zero_pad = torch.zeros((x.size(0), 1, *x.size()[2:]),
device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded = x_padded.view(x.size(1) + 1, x.size(0), *x.size()[2:])
x = x_padded[1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:, :, None, None]
return x
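    # Hedged worked example (comment only): with qlen = klen = 3, _rel_shift
    # rearranges x so that out[i, j] = x[i, klen - 1 - (i - j)] for j <= i,
    # i.e. column j of query row i now holds the score for relative distance
    # (i - j). Entries with j > i are leftover padding and are expected to be
    # removed by the causal attention mask downstream.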
def forward(self, w, r, attn_mask=None, mems=None):
raise NotImplementedError
# Relative Partially Learnable
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_net = Linear(self.d_model, self.n_head * self.d_head, bias=False)
self.attn_type = 1
print("* Attention type: ", self.attn_type)
def forward(self, input, pos_enc, attn_mask=None, mems=None, debug=False):
"""
:param w: input embeddings (E) T x B x H
:param r: relative encodings (R)
:param attn_mask:
:param mems:
:return: output: projected attention vector T x B x H
"""
w = input
r = pos_enc
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if self.attn_type == 2:
w_heads = self.qkv_net(w + r)
elif self.attn_type == 1:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
if debug:
print("Q nan", torch.isnan(w_head_q).sum() > 0)
print("K nan", torch.isnan(w_head_k).sum() > 0)
print("V nan", torch.isnan(w_head_v).sum() > 0)
if self.attn_type == 2:
attn_score = torch.einsum('ibnd,jbnd->ijbn', (w_head_q, w_head_k))
elif self.attn_type == 1:
            r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)  # rlen x n_head x d_head
#### compute attention score
rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + self.r_r_bias
            BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k))  # qlen x klen x bsz x n_head
# what is relative shift?
# here the B and D are actually B~ and D~ in the paper
# then shift them to efficiently get B and D
BD = self._rel_shift(BD)
attn_score = AC + BD
if debug:
print("AC nan", torch.isnan(AC).sum() > 0)
print("B~D~ nan", torch.isnan(BD).sum() > 0)
print("BD nan", torch.isnan(BD).sum() > 0)
print("AC + BD nan", torch.isnan(attn_score).sum() > 0)
# scale the attention score
attn_score = attn_score * (self.scale)
attn_score = attn_score.transpose(0, 2).transpose(1, 3).contiguous()
# if debug:
# print("mask nan", torch.isnan(attn_mask).sum() > 0)
# print("attn score before mask nan", torch.isnan(attn_score).sum() > 0)
# if debug:
# attn_score = torch.clamp(attn_score, -0.5, 0.5)
# print(attn_score)
attn_score = attn_score.float().masked_fill_(attn_mask.unsqueeze(-3), -float('inf')).type_as(attn_score)
if debug:
print("attn score after mask nan", torch.isnan(attn_score).sum() > 0)
#### compute attention probability
# print(attn_mask).size()
# if attn_mask is not None and attn_mask.any().item():
# if attn_mask.dim() == 2:
# attn_score = attn_score.float().masked_fill(
# attn_mask[None, :, :, None], -float('inf')).type_as(attn_score)
# elif attn_mask.dim() == 3:
# attn_score = attn_score.float().masked_fill(
# attn_mask[:, :, :, None], -float('inf')).type_as(attn_score)
# [qlen x klen x bsz x n_head]
# attn_score = attn_score.transpose(0, 2).transpose(1, 3)
attn_prob = F.softmax(attn_score.float(), dim=-1)
if debug:
print(attn_score)
print("attn prob nan", torch.isnan(attn_prob).sum() > 0)
attn_prob = attn_prob.transpose(0, 2).transpose(1, 3)
coverage = torch.mean(attn_prob, dim=-1).transpose(0, 2)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
# This is the context vector
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
if self.o_net:
attn_out = self.o_net(attn_vec)
else:
attn_out = attn_vec
output = attn_out
# if self.pre_lnorm:
# ##### residual connection
# output = w + attn_out
# else:
# ##### residual connection + layer normalization
# output = self.layer_norm(w + attn_out)
return output, coverage
def step(self, input, pos_enc, attn_mask, buffer=None):
"""
:param attn_mask:
:param input: 1 x B x H (step by 1)
:param pos_enc: T x B x H (relative position)
:param buffer: a dictionary (or None) of buffered computation
:return:
"""
w = input
r = pos_enc
# qlen is probably 1 here
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if self.attn_type == 2:
w_heads = self.qkv_net(w + r)
elif self.attn_type == 1:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
if buffer is not None and 'k' in buffer and 'v' in buffer and 'q' in buffer:
w_head_q = torch.cat([buffer['q'], w_head_q], dim=0) # time first
buffer['q'] = w_head_q
w_head_k = torch.cat([buffer['k'], w_head_k], dim=0) # time first
buffer['k'] = w_head_k
w_head_v = torch.cat([buffer['v'], w_head_v], dim=0) # time first
buffer['v'] = w_head_v
# len_key, b_ = proj_key.size(0), proj_key.size(1)
else:
if buffer is None:
buffer = dict()
buffer['k'] = w_head_k
buffer['v'] = w_head_v
buffer['q'] = w_head_q
klen = w_head_k.size(0)
qlen = w_head_q.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
if self.attn_type == 2:
attn_score = torch.einsum('ibnd,jbnd->ijbn', (w_head_q, w_head_k))
elif self.attn_type == 1:
            r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)  # rlen x n_head x d_head
#### compute attention score
rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + self.r_r_bias
            BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k))  # qlen x klen x bsz x n_head
# what is relative shift?
# here the B and D are actually B~ and D~ in the paper
# then shift them to efficiently get B and D
BD = self._rel_shift(BD)
attn_score = AC + BD
# scale the attention score
attn_score.mul_(self.scale)
#### compute attention probability
# attns = attns.view(b, self.h, len_query, len_key)
attn_score = attn_score.transpose(0, 2).transpose(1, 3)
attn_score = attn_score.float().masked_fill_(attn_mask.unsqueeze(-3), -float('inf')).type_as(attn_score)
# if attn_mask is not None and attn_mask.any().item():
# if attn_mask.dim() == 2:
# attn_score = attn_score.float().masked_fill(
# attn_mask[None, :, :, None], -float('inf')).type_as(attn_score)
# elif attn_mask.dim() == 3:
# attn_score = attn_score.float().masked_fill(
# attn_mask[:, :, :, None], -float('inf')).type_as(attn_score)
# [qlen x klen x bsz x n_head]
attn_score = attn_score.transpose(0, 2).transpose(1, 3)
attn_prob = F.softmax(attn_score.float(), dim=1).type_as(attn_score)
coverage = torch.mean(attn_prob, dim=-1).transpose(0, 2)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
# take the final time step and then unsqueeze
attn_vec = attn_vec[-1, :, :].unsqueeze(0)
# linear projection
if self.o_net:
attn_out = self.o_net(attn_vec)
else:
attn_out = attn_vec
output = attn_out
return output, coverage, buffer
# From Shaw et al. 2018 ("Self-Attention with Relative Position Representations")
# Not easy to use here because it requires a max_klen hyperparameter, which is
# hard to choose for arbitrary sequence lengths
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
    def __init__(self, *args, **kwargs):
        super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
        # forward() below uses self.layer_norm and self.drop, which the parent
        # class no longer defines; create them here (output dropout reuses the
        # attention dropout rate) so this class can actually run.
        self.layer_norm = nn.LayerNorm(self.d_model)
        self.drop = self.dropatt
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
# r_emb: [klen, n_head, d_head], used for term B
# r_w_bias: [n_head, d_head], used for term C
# r_bias: [klen, n_head], used for term D
qlen, bsz = w.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen - r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand(klen - r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
#### compute attention score
rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb)) # qlen x klen x bsz x n_head
D_ = r_bias[None, :, None] # 1 x klen x 1 x n_head
BD = self._rel_shift(B_ + D_)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None, :, :, None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:, :, :, None], -float('inf'))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
if __name__ == '__main__':
# unit test
    module = RelPartialLearnableMultiHeadAttn(4, 512, 64)
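    # Hedged smoke test (input shapes assumed from forward() above). The
    # r_w_bias / r_r_bias parameters are created uninitialized, so initialize
    # them before running a forward pass.
    nn.init.normal_(module.r_w_bias, 0.0, 0.02)
    nn.init.normal_(module.r_r_bias, 0.0, 0.02)
    qlen, bsz = 7, 2
    w = torch.randn(qlen, bsz, 512)  # token embeddings, T x B x H
    r = torch.randn(qlen, 1, 512)  # relative position encodings
    mask = torch.zeros(bsz, qlen, qlen, dtype=torch.bool)  # nothing masked
    out, coverage = module(w, r, attn_mask=mask)
    print(out.shape)  # expected (7, 2, 256), since n_head * d_head = 4 * 64
 | [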
"torch.triu",
"torch.nn.Dropout",
"torch.tril",
"torch.mean",
"torch.Tensor",
"onmt.modules.Transformer.Layers.XavierLinear",
"torch.chunk",
"torch.einsum",
"torch.zeros",
"torch.isnan",
"torch.nn.functional.softmax",
"torch.cat",
"torch.ones"
] | [((540, 588), 'onmt.modules.Transformer.Layers.XavierLinear', 'Linear', (['d_model', '(3 * n_head * d_head)'], {'bias': '(False)'}), '(d_model, 3 * n_head * d_head, bias=False)\n', (546, 588), True, 'from onmt.modules.Transformer.Layers import XavierLinear as Linear\n'), ((613, 632), 'torch.nn.Dropout', 'nn.Dropout', (['dropatt'], {}), '(dropatt)\n', (623, 632), True, 'import torch.nn as nn\n'), ((1209, 1233), 'torch.triu', 'torch.triu', (['mask[:m, :m]'], {}), '(mask[:m, :m])\n', (1219, 1233), False, 'import torch\n'), ((1259, 1285), 'torch.tril', 'torch.tril', (['mask[-m:, -m:]'], {}), '(mask[-m:, -m:])\n', (1269, 1285), False, 'import torch\n'), ((2285, 2316), 'torch.cat', 'torch.cat', (['[zero_pad, x]'], {'dim': '(1)'}), '([zero_pad, x], dim=1)\n', (2294, 2316), False, 'import torch\n'), ((3081, 3140), 'onmt.modules.Transformer.Layers.XavierLinear', 'Linear', (['self.d_model', '(self.n_head * self.d_head)'], {'bias': '(False)'}), '(self.d_model, self.n_head * self.d_head, bias=False)\n', (3087, 3140), True, 'from onmt.modules.Transformer.Layers import XavierLinear as Linear\n'), ((3844, 3875), 'torch.chunk', 'torch.chunk', (['w_heads', '(3)'], {'dim': '(-1)'}), '(w_heads, 3, dim=-1)\n', (3855, 3875), False, 'import torch\n'), ((7220, 7274), 'torch.einsum', 'torch.einsum', (['"""ijbn,jbnd->ibnd"""', '(attn_prob, w_head_v)'], {}), "('ijbn,jbnd->ibnd', (attn_prob, w_head_v))\n", (7232, 7274), False, 'import torch\n'), ((8569, 8600), 'torch.chunk', 'torch.chunk', (['w_heads', '(3)'], {'dim': '(-1)'}), '(w_heads, 3, dim=-1)\n', (8580, 8600), False, 'import torch\n'), ((11585, 11639), 'torch.einsum', 'torch.einsum', (['"""ijbn,jbnd->ibnd"""', '(attn_prob, w_head_v)'], {}), "('ijbn,jbnd->ibnd', (attn_prob, w_head_v))\n", (11597, 11639), False, 'import torch\n'), ((13998, 14052), 'torch.einsum', 'torch.einsum', (['"""ibnd,jbnd->ijbn"""', '(rw_head_q, w_head_k)'], {}), "('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))\n", (14010, 14052), False, 'import torch\n'), ((14096, 14145), 'torch.einsum', 'torch.einsum', (['"""ibnd,jnd->ijbn"""', '(w_head_q, r_emb)'], {}), "('ibnd,jnd->ijbn', (w_head_q, r_emb))\n", (14108, 14145), False, 'import torch\n'), ((14790, 14818), 'torch.nn.functional.softmax', 'F.softmax', (['attn_score'], {'dim': '(1)'}), '(attn_score, dim=1)\n', (14799, 14818), True, 'import torch.nn.functional as F\n'), ((14921, 14975), 'torch.einsum', 'torch.einsum', (['"""ijbn,jbnd->ibnd"""', '(attn_prob, w_head_v)'], {}), "('ijbn,jbnd->ibnd', (attn_prob, w_head_v))\n", (14933, 14975), False, 'import torch\n'), ((680, 732), 'onmt.modules.Transformer.Layers.XavierLinear', 'Linear', (['(n_head * d_head)', '(n_head * d_head)'], {'bias': '(False)'}), '(n_head * d_head, n_head * d_head, bias=False)\n', (686, 732), True, 'from onmt.modules.Transformer.Layers import XavierLinear as Linear\n'), ((1635, 1681), 'torch.zeros', 'torch.zeros', (['(0)'], {'device': 'x.device', 'dtype': 'x.dtype'}), '(0, device=x.device, dtype=x.dtype)\n', (1646, 1681), False, 'import torch\n'), ((2942, 2980), 'torch.Tensor', 'torch.Tensor', (['self.n_head', 'self.d_head'], {}), '(self.n_head, self.d_head)\n', (2954, 2980), False, 'import torch\n'), ((3019, 3057), 'torch.Tensor', 'torch.Tensor', (['self.n_head', 'self.d_head'], {}), '(self.n_head, self.d_head)\n', (3031, 3057), False, 'import torch\n'), ((4473, 4526), 'torch.einsum', 'torch.einsum', (['"""ibnd,jbnd->ijbn"""', '(w_head_q, w_head_k)'], {}), "('ibnd,jbnd->ijbn', (w_head_q, w_head_k))\n", (4485, 4526), False, 'import torch\n'), ((8710, 8751), 'torch.cat', 'torch.cat', (["[buffer['q'], w_head_q]"], {'dim': '(0)'}), "([buffer['q'], w_head_q], dim=0)\n", (8719, 8751), False, 'import torch\n'), ((8824, 8865), 'torch.cat', 'torch.cat', (["[buffer['k'], w_head_k]"], {'dim': '(0)'}), "([buffer['k'], w_head_k], dim=0)\n", (8833, 8865), False, 'import torch\n'), ((8938, 8979), 'torch.cat', 'torch.cat', (["[buffer['v'], w_head_v]"], {'dim': '(0)'}), "([buffer['v'], w_head_v], dim=0)\n", (8947, 8979), False, 'import torch\n'), ((9705, 9758), 'torch.einsum', 'torch.einsum', (['"""ibnd,jbnd->ijbn"""', '(w_head_q, w_head_k)'], {}), "('ibnd,jbnd->ijbn', (w_head_q, w_head_k))\n", (9717, 9758), False, 'import torch\n'), ((12721, 12744), 'torch.cat', 'torch.cat', (['[mems, w]', '(0)'], {}), '([mems, w], 0)\n', (12730, 12744), False, 'import torch\n'), ((12942, 12973), 'torch.chunk', 'torch.chunk', (['w_heads', '(3)'], {'dim': '(-1)'}), '(w_heads, 3, dim=-1)\n', (12953, 12973), False, 'import torch\n'), ((13222, 13253), 'torch.chunk', 'torch.chunk', (['w_heads', '(3)'], {'dim': '(-1)'}), '(w_heads, 3, dim=-1)\n', (13233, 13253), False, 'import torch\n'), ((13624, 13656), 'torch.cat', 'torch.cat', (['[r_emb_pad, r_emb]', '(0)'], {}), '([r_emb_pad, r_emb], 0)\n', (13633, 13656), False, 'import torch\n'), ((13749, 13783), 'torch.cat', 'torch.cat', (['[r_bias_pad, r_bias]', '(0)'], {}), '([r_bias_pad, r_bias], 0)\n', (13758, 13783), False, 'import torch\n'), ((1138, 1156), 'torch.ones', 'torch.ones', (['(h, w)'], {}), '((h, w))\n', (1148, 1156), False, 'import torch\n'), ((4795, 4849), 'torch.einsum', 'torch.einsum', (['"""ibnd,jbnd->ijbn"""', '(rw_head_q, w_head_k)'], {}), "('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))\n", (4807, 4849), False, 'import torch\n'), ((4947, 5000), 'torch.einsum', 'torch.einsum', (['"""ibnd,jnd->ijbn"""', '(rr_head_q, r_head_k)'], {}), "('ibnd,jnd->ijbn', (rr_head_q, r_head_k))\n", (4959, 5000), False, 'import torch\n'), ((7072, 7101), 'torch.mean', 'torch.mean', (['attn_prob'], {'dim': '(-1)'}), '(attn_prob, dim=-1)\n', (7082, 7101), False, 'import torch\n'), ((10027, 10081), 'torch.einsum', 'torch.einsum', (['"""ibnd,jbnd->ijbn"""', '(rw_head_q, w_head_k)'], {}), "('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))\n", (10039, 10081), False, 'import torch\n'), ((10178, 10231), 'torch.einsum', 'torch.einsum', (['"""ibnd,jnd->ijbn"""', '(rr_head_q, r_head_k)'], {}), "('ibnd,jnd->ijbn', (rr_head_q, r_head_k))\n", (10190, 10231), False, 'import torch\n'), ((11481, 11510), 'torch.mean', 'torch.mean', (['attn_prob'], {'dim': '(-1)'}), '(attn_prob, dim=-1)\n', (11491, 11510), False, 'import torch\n'), ((1755, 1786), 'torch.cat', 'torch.cat', (['[zero_pad, x]'], {'dim': '(1)'}), '([zero_pad, x], dim=1)\n', (1764, 1786), False, 'import torch\n'), ((1849, 1880), 'torch.cat', 'torch.cat', (['[x, zero_pad]'], {'dim': '(1)'}), '([x, zero_pad], dim=1)\n', (1858, 1880), False, 'import torch\n'), ((4262, 4283), 'torch.isnan', 'torch.isnan', (['w_head_q'], {}), '(w_head_q)\n', (4273, 4283), False, 'import torch\n'), ((4322, 4343), 'torch.isnan', 'torch.isnan', (['w_head_k'], {}), '(w_head_k)\n', (4333, 4343), False, 'import torch\n'), ((4382, 4403), 'torch.isnan', 'torch.isnan', (['w_head_v'], {}), '(w_head_v)\n', (4393, 4403), False, 'import torch\n'), ((6158, 6181), 'torch.isnan', 'torch.isnan', (['attn_score'], {}), '(attn_score)\n', (6169, 6181), False, 'import torch\n'), ((6957, 6979), 'torch.isnan', 'torch.isnan', (['attn_prob'], {}), '(attn_prob)\n', (6968, 6979), False, 'import torch\n'), ((5313, 5328), 'torch.isnan', 'torch.isnan', (['AC'], {}), '(AC)\n', (5324, 5328), False, 'import torch\n'), ((5374, 5389), 'torch.isnan', 'torch.isnan', (['BD'], {}), '(BD)\n', (5385, 5389), False, 'import torch\n'), ((5433, 5448), 'torch.isnan', 'torch.isnan', (['BD'], {}), '(BD)\n', (5444, 5448), False, 'import torch\n'), ((5497, 5520), 'torch.isnan', 'torch.isnan', (['attn_score'], {}), '(attn_score)\n', (5508, 5520), False, 'import torch\n')]
from __future__ import print_function, division
from neuralnilm import Net, RealApplianceSource
from lasagne.nonlinearities import sigmoid
source = RealApplianceSource(
'/data/dk3810/ukdale.h5',
['fridge freezer', 'hair straighteners', 'television'],
max_input_power=1000, max_output_power=300,
window=("2013-06-01", "2014-06-01")
)
net = Net(
source=source,
n_cells_per_hidden_layer=[50,50,50],
output_nonlinearity=sigmoid,
learning_rate=1e-1,
n_dense_cells_per_layer=50
)
net.fit(n_iterations=1600)
net.plot_costs()
net.plot_estimates()
| [
"neuralnilm.RealApplianceSource",
"neuralnilm.Net"
] | [((149, 339), 'neuralnilm.RealApplianceSource', 'RealApplianceSource', (['"""/data/dk3810/ukdale.h5"""', "['fridge freezer', 'hair straighteners', 'television']"], {'max_input_power': '(1000)', 'max_output_power': '(300)', 'window': "('2013-06-01', '2014-06-01')"}), "('/data/dk3810/ukdale.h5', ['fridge freezer',\n 'hair straighteners', 'television'], max_input_power=1000,\n max_output_power=300, window=('2013-06-01', '2014-06-01'))\n", (168, 339), False, 'from neuralnilm import Net, RealApplianceSource\n'), ((358, 495), 'neuralnilm.Net', 'Net', ([], {'source': 'source', 'n_cells_per_hidden_layer': '[50, 50, 50]', 'output_nonlinearity': 'sigmoid', 'learning_rate': '(0.1)', 'n_dense_cells_per_layer': '(50)'}), '(source=source, n_cells_per_hidden_layer=[50, 50, 50],\n output_nonlinearity=sigmoid, learning_rate=0.1, n_dense_cells_per_layer=50)\n', (361, 495), False, 'from neuralnilm import Net, RealApplianceSource\n')] |
from django.contrib.sitemaps import (
GenericSitemap, Sitemap)
from .models import Startup, Tag
tag_sitemap_dict = {
'queryset': Tag.objects.all(),
}
TagSitemap = GenericSitemap(tag_sitemap_dict)
class StartupSitemap(Sitemap):
def items(self):
return Startup.objects.all()
def lastmod(self, startup):
if startup.newslink_set.exists():
return (
startup.newslink_set.latest()
.pub_date)
else:
return startup.founded_date
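# Hedged usage sketch (URLconf wiring assumed, not part of this module):
# these sitemaps are typically exposed via django.contrib.sitemaps.views:
#
#     from django.contrib.sitemaps.views import sitemap
#     from django.urls import path
#
#     sitemaps = {'tags': TagSitemap, 'startups': StartupSitemap}
#     urlpatterns = [
#         path('sitemap.xml', sitemap, {'sitemaps': sitemaps},
#              name='django.contrib.sitemaps.views.sitemap'),
#     ]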
| [
"django.contrib.sitemaps.GenericSitemap"
] | [((175, 207), 'django.contrib.sitemaps.GenericSitemap', 'GenericSitemap', (['tag_sitemap_dict'], {}), '(tag_sitemap_dict)\n', (189, 207), False, 'from django.contrib.sitemaps import GenericSitemap, Sitemap\n')] |
import torch
import torch.nn as nn
class SplitMerge(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=(5, 5, 5), stride=1, padding=(0, 0, 0), conv_frac=0.5):
self.conv_frac = conv_frac
super(SplitMerge, self).__init__()
self.in_channels = in_channels
        # conv_frac is not configurable from anywhere else; edit it here
        # manually when reproducing older experiments.
if self.in_channels != 1:
self.block = nn.Sequential(nn.Conv3d(in_channels=int(self.in_channels * self.conv_frac),
out_channels=int(self.in_channels * self.conv_frac),
kernel_size=kernel_size, stride=stride, padding=padding),
nn.GroupNorm(4, num_channels=int(self.in_channels * self.conv_frac)),
nn.PReLU())
self.conv_1x1x1 = nn.Sequential(
nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1, 1)),
nn.GroupNorm(4, num_channels=out_channels),
nn.PReLU())
else:
self.block = nn.Sequential(nn.Conv3d(in_channels=self.in_channels,
out_channels=out_channels,
kernel_size=kernel_size, stride=stride, padding=padding),
nn.GroupNorm(4, num_channels=out_channels),
nn.PReLU())
def forward(self, x):
if self.in_channels == 1:
x = self.block(x)
return x
ff_x = x[:, 0:int(self.in_channels * self.conv_frac), :, :, :]
nff_x = x[:, int(self.in_channels * self.conv_frac):self.in_channels, :, :, :]
# print(self.block, ff_x.size(), nff_x.size())
ff_x = self.block(ff_x)
x = torch.cat((ff_x, nff_x), dim=1)
x = self.conv_1x1x1(x)
# print(x.size())
return x
class Block(nn.Module):
"""
Simple VNet/3DUNet styled blocks.
"""
def __init__(self, params):
super(Block, self).__init__()
padding_x = int((params['kernel_size'][0] - 1) / 2)
padding_y = int((params['kernel_size'][1] - 1) / 2)
padding_z = int((params['kernel_size'][2] - 1) / 2)
conv_frac = 0.75
self.conv0 = SplitMerge(in_channels=params['in_channels'], out_channels=params['out_channels'],
kernel_size=params['kernel_size'], padding=(padding_x, padding_y, padding_z),
conv_frac=conv_frac)
if params['create_layer_1']:
self.conv1 = SplitMerge(in_channels=params['out_channels'], out_channels=params['out_channels'],
kernel_size=params['kernel_size'], padding=(padding_x, padding_y, padding_z),
conv_frac=conv_frac)
if params['create_layer_2']:
self.conv2 = SplitMerge(in_channels=params['out_channels'], out_channels=params['out_channels'],
kernel_size=params['kernel_size'], padding=(padding_x, padding_y, padding_z),
conv_frac=conv_frac)
def forward(self, x):
x = self.conv0(x)
        # conv1/conv2 exist only when create_layer_1/create_layer_2 were set;
        # checking explicitly avoids a bare try/except hiding real errors.
        if hasattr(self, 'conv1'):
            x = self.conv1(x)
        if hasattr(self, 'conv2'):
            x = self.conv2(x)
return x
class EncoderBlock(nn.Module):
def __init__(self, params):
super(EncoderBlock, self).__init__()
if params['input']:
self.conv_input = nn.Sequential(
nn.Conv3d(in_channels=params['in_channels'], out_channels=params['out_channels'],
kernel_size=(1, 1, 1), padding=0, stride=1),
nn.GroupNorm(num_groups=4, num_channels=params['out_channels']),
nn.PReLU()
)
params['in_channels'] = params['out_channels']
self.encoder_block = Block(params)
self.down_block = nn.Sequential(
nn.Conv3d(in_channels=params['out_channels'], out_channels=2 * params['out_channels'],
kernel_size=(2, 2, 2), padding=0, stride=2),
nn.GroupNorm(num_groups=4, num_channels=2 * params['out_channels']),
nn.PReLU()
)
def forward(self, x):
        # conv_input exists only when params['input'] was True.
        if hasattr(self, 'conv_input'):
            x = self.conv_input(x)
x_res = self.encoder_block(x)
x_res = x_res + x
x_down = self.down_block(x_res)
# print(x_down.shape, x_res.shape)
return x_res, x_down
class DecoderBlock(nn.Module):
def __init__(self, params):
super(DecoderBlock, self).__init__()
# print(params)
self.decoder_block = Block(params)
if not params['out']:
self.up_block = nn.Sequential(
nn.ConvTranspose3d(in_channels=params['out_channels'], out_channels=int(params['out_channels'] / 2),
kernel_size=(2, 2, 2), padding=0, stride=2),
nn.GroupNorm(num_groups=4, num_channels=int(params['out_channels'] / 2)),
nn.PReLU()
)
def forward(self, x_res, x_up):
x = torch.cat((x_res, x_up), dim=1)
x = self.decoder_block(x)
x1 = x + x_up
# print(x1.shape)
        # up_block exists only when params['out'] was False.
        if hasattr(self, 'up_block'):
            x1 = self.up_block(x1)
return x1
class BottleNeck(nn.Module):
def __init__(self, params):
super(BottleNeck, self).__init__()
if params['input']:
self.conv_input = nn.Sequential(
nn.Conv3d(in_channels=params['out_channels'], out_channels=params['out_channels'],
kernel_size=(1, 1, 1), padding=0, stride=1),
nn.GroupNorm(num_groups=4, num_channels=params['out_channels']),
nn.PReLU()
)
# params['in_channels'] = params['out_channels']
self.encoder_block = Block(params)
self.up_block = nn.Sequential(
nn.ConvTranspose3d(in_channels=params['out_channels'], out_channels=params['out_channels'],
kernel_size=(2, 2, 2), padding=0, stride=2),
nn.GroupNorm(num_groups=4, num_channels=params['out_channels']),
nn.PReLU()
)
def forward(self, x):
        # conv_input exists only when params['input'] was True.
        if hasattr(self, 'conv_input'):
            x = self.conv_input(x)
x_res = self.encoder_block(x)
x_res = x_res + x
x_up = self.up_block(x_res)
return x_up
if __name__ == "__main__":
params = {'in_channels': 1,
'out_channels': 16,
'create_layer_1': True,
'create_layer_2': True,
'kernel_size': (5, 5, 5),
'num_classes': 55,
'input': True,
'out': True
}
m = EncoderBlock(params=params).cuda()
# m = CompetitiveEncoderBlockInput(params=params).cuda()
from torchsummary import summary
print(m)
summary(m, input_size=(1, 32, 32, 32))
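    # Hedged smoke test: EncoderBlock returns (residual, downsampled). With a
    # 32^3 input, channels double (16 -> 32) on the downsampled path while each
    # spatial dimension is halved by the stride-2 convolution.
    x = torch.rand(1, 1, 32, 32, 32).cuda()  # assumes a CUDA device, as above
    x_res, x_down = m(x)
    print(x_res.shape, x_down.shape)  # expected: (1, 16, 32, 32, 32) (1, 32, 16, 16, 16)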
| [
"torch.nn.GroupNorm",
"torch.nn.ConvTranspose3d",
"torch.nn.PReLU",
"torchsummary.summary",
"torch.cat",
"torch.nn.Conv3d"
] | [((7209, 7247), 'torchsummary.summary', 'summary', (['m'], {'input_size': '(1, 32, 32, 32)'}), '(m, input_size=(1, 32, 32, 32))\n', (7216, 7247), False, 'from torchsummary import summary\n'), ((1976, 2007), 'torch.cat', 'torch.cat', (['(ff_x, nff_x)'], {'dim': '(1)'}), '((ff_x, nff_x), dim=1)\n', (1985, 2007), False, 'import torch\n'), ((5388, 5419), 'torch.cat', 'torch.cat', (['(x_res, x_up)'], {'dim': '(1)'}), '((x_res, x_up), dim=1)\n', (5397, 5419), False, 'import torch\n'), ((4203, 4338), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': "params['out_channels']", 'out_channels': "(2 * params['out_channels'])", 'kernel_size': '(2, 2, 2)', 'padding': '(0)', 'stride': '(2)'}), "(in_channels=params['out_channels'], out_channels=2 * params[\n 'out_channels'], kernel_size=(2, 2, 2), padding=0, stride=2)\n", (4212, 4338), True, 'import torch.nn as nn\n'), ((4369, 4436), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(4)', 'num_channels': "(2 * params['out_channels'])"}), "(num_groups=4, num_channels=2 * params['out_channels'])\n", (4381, 4436), True, 'import torch.nn as nn\n'), ((4450, 4460), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (4458, 4460), True, 'import torch.nn as nn\n'), ((6226, 6366), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', ([], {'in_channels': "params['out_channels']", 'out_channels': "params['out_channels']", 'kernel_size': '(2, 2, 2)', 'padding': '(0)', 'stride': '(2)'}), "(in_channels=params['out_channels'], out_channels=params[\n 'out_channels'], kernel_size=(2, 2, 2), padding=0, stride=2)\n", (6244, 6366), True, 'import torch.nn as nn\n'), ((6406, 6469), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(4)', 'num_channels': "params['out_channels']"}), "(num_groups=4, num_channels=params['out_channels'])\n", (6418, 6469), True, 'import torch.nn as nn\n'), ((6483, 6493), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (6491, 6493), True, 'import torch.nn as nn\n'), ((947, 957), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (955, 957), True, 'import torch.nn as nn\n'), ((1021, 1110), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': '(1, 1, 1)'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=(\n 1, 1, 1))\n', (1030, 1110), True, 'import torch.nn as nn\n'), ((1123, 1165), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(4)'], {'num_channels': 'out_channels'}), '(4, num_channels=out_channels)\n', (1135, 1165), True, 'import torch.nn as nn\n'), ((1183, 1193), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (1191, 1193), True, 'import torch.nn as nn\n'), ((1248, 1375), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': 'self.in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=self.in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding)\n', (1257, 1375), True, 'import torch.nn as nn\n'), ((1510, 1552), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(4)'], {'num_channels': 'out_channels'}), '(4, num_channels=out_channels)\n', (1522, 1552), True, 'import torch.nn as nn\n'), ((1593, 1603), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (1601, 1603), True, 'import torch.nn as nn\n'), ((3773, 3903), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': "params['in_channels']", 'out_channels': "params['out_channels']", 'kernel_size': '(1, 1, 1)', 'padding': '(0)', 'stride': '(1)'}), "(in_channels=params['in_channels'], out_channels=params[\n 'out_channels'], kernel_size=(1, 1, 1), padding=0, stride=1)\n", (3782, 3903), True, 'import torch.nn as nn\n'), ((3942, 4005), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(4)', 'num_channels': "params['out_channels']"}), "(num_groups=4, num_channels=params['out_channels'])\n", (3954, 4005), True, 'import torch.nn as nn\n'), ((4023, 4033), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (4031, 4033), True, 'import torch.nn as nn\n'), ((5313, 5323), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (5321, 5323), True, 'import torch.nn as nn\n'), ((5799, 5930), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': "params['out_channels']", 'out_channels': "params['out_channels']", 'kernel_size': '(1, 1, 1)', 'padding': '(0)', 'stride': '(1)'}), "(in_channels=params['out_channels'], out_channels=params[\n 'out_channels'], kernel_size=(1, 1, 1), padding=0, stride=1)\n", (5808, 5930), True, 'import torch.nn as nn\n'), ((5969, 6032), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(4)', 'num_channels': "params['out_channels']"}), "(num_groups=4, num_channels=params['out_channels'])\n", (5981, 6032), True, 'import torch.nn as nn\n'), ((6050, 6060), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (6058, 6060), True, 'import torch.nn as nn\n')]
import re
def gen_static_version(static_version, dev=False):
return '{}{}'.format(
static_version,
'-dev' if dev else '',
)
def _split_version(version):
delims = '.', '-'
pattern = '|'.join(map(re.escape, delims))
return re.split(pattern, version)
def major_bump(version, dev=False):
version_parts = _split_version(version)
return '{}.0.0{}'.format(
int(version_parts[0]) + 1,
'-dev' if dev else ''
)
def minor_bump(version, dev=False):
version_parts = _split_version(version)
return '{}.{}.0{}'.format(
version_parts[0],
int(version_parts[1]) + 1,
'-dev' if dev else ''
)
def bugfix_bump(version, dev=False):
version_parts = _split_version(version)
return '{}.{}.{}{}'.format(
version_parts[0],
version_parts[1],
int(version_parts[2]) + 1,
        '-dev' if dev else ''
)
def dev_remove(version):
return version.replace('-dev', '')
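# Hedged usage sketch (semantics inferred from the functions above; the
# bugfix_bump case relies on the corrected '-dev' suffix):
if __name__ == '__main__':
    assert gen_static_version('4', dev=True) == '4-dev'
    assert major_bump('1.2.3') == '2.0.0'
    assert minor_bump('1.2.3-dev') == '1.3.0'
    assert bugfix_bump('1.2.3', dev=True) == '1.2.4-dev'
    assert dev_remove('2.0.0-dev') == '2.0.0'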
| [
"re.split"
] | [((261, 287), 're.split', 're.split', (['pattern', 'version'], {}), '(pattern, version)\n', (269, 287), False, 'import re\n')] |
# Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for python_operations.py."""
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow as tf
from tf_coder.value_search import python_operations
from tf_coder.value_search import value
from tf_coder.value_search import value_search_settings as settings_module
class IndexingOperationTest(parameterized.TestCase):
def setUp(self):
super(IndexingOperationTest, self).setUp()
self.operation = python_operations.IndexingOperation()
@parameterized.named_parameters(
('index_0', 0, 1.2),
('index_2', 2, 5.6),
('index_negative_2', -2, 3.4))
def test_apply_for_list(self, index, expected):
arg_values = [value.ConstantValue([1.2, 3.4, 5.6]),
value.ConstantValue(index)]
result_value = self.operation.apply(arg_values,
settings_module.default_settings())
self.assertEqual(result_value, value.ConstantValue(expected))
@parameterized.named_parameters(
('index_0', 0, [1.2, 3.4]),
('index_negative_1', -1, [5.6, 7.8]))
def test_apply_for_tensor(self, index, expected):
tensor = tf.constant([[1.2, 3.4], [5.6, 7.8]])
arg_values = [value.ConstantValue(tensor),
value.ConstantValue(index)]
result_value = self.operation.apply(arg_values,
settings_module.default_settings())
self.assertEqual(result_value, value.ConstantValue(tf.constant(expected)))
def test_reconstruct_expression(self):
to_index = value.InputValue([1.2, 3.4], 'my_list')
index = value.ConstantValue(0)
self.assertEqual(self.operation.reconstruct_expression([to_index, index]),
'my_list[0]')
class IndexingAxis1OperationTest(absltest.TestCase):
def setUp(self):
super(IndexingAxis1OperationTest, self).setUp()
self.operation = python_operations.IndexingAxis1Operation()
self.arg_values = [value.InputValue([[12, 34], [56, 78]], 'my_input'),
value.ConstantValue(1)]
def test_apply(self):
self.assertEqual(self.operation.apply(self.arg_values,
settings_module.default_settings()),
value.OutputValue([34, 78]))
def test_reconstruct_expression(self):
self.assertEqual(self.operation.reconstruct_expression(self.arg_values),
'my_input[:, 1]')
class SingletonTupleCreationOperationTest(absltest.TestCase):
def setUp(self):
super(SingletonTupleCreationOperationTest, self).setUp()
self.operation = python_operations.SingletonTupleCreationOperation()
self.arg_values = [value.ConstantValue(12)]
def test_apply(self):
self.assertEqual(self.operation.apply(self.arg_values,
settings_module.default_settings()),
value.ConstantValue((12,)))
def test_reconstruct_expression(self):
self.assertEqual(self.operation.reconstruct_expression(self.arg_values),
'(12,)')
class PairCreationOperationTest(absltest.TestCase):
def setUp(self):
super(PairCreationOperationTest, self).setUp()
self.operation = python_operations.PairCreationOperation()
self.arg_values = [value.ConstantValue(12), value.ConstantValue(34)]
def test_apply(self):
self.assertEqual(self.operation.apply(self.arg_values,
settings_module.default_settings()),
value.ConstantValue((12, 34)))
def test_reconstruct_expression(self):
self.assertEqual(self.operation.reconstruct_expression(self.arg_values),
'(12, 34)')
class TripleCreationOperationTest(absltest.TestCase):
def setUp(self):
super(TripleCreationOperationTest, self).setUp()
self.operation = python_operations.TripleCreationOperation()
self.arg_values = [value.ConstantValue(12),
value.ConstantValue(34),
value.ConstantValue(56)]
def test_apply(self):
self.assertEqual(self.operation.apply(self.arg_values,
settings_module.default_settings()),
value.ConstantValue((12, 34, 56)))
def test_reconstruct_expression(self):
self.assertEqual(self.operation.reconstruct_expression(self.arg_values),
'(12, 34, 56)')
class SlicingAxis0LeftOperationTest(absltest.TestCase):
def setUp(self):
super(SlicingAxis0LeftOperationTest, self).setUp()
self.operation = python_operations.SlicingAxis0LeftOperation()
self.arg_values = [value.InputValue([12, 34, 56], 'my_input'),
value.ConstantValue(1)]
def test_get_docstring(self):
self.assertIn('Selects a suffix of indices along axis 0 of the tensor',
self.operation.metadata.docstring)
def test_apply(self):
self.assertEqual(self.operation.apply(self.arg_values,
settings_module.default_settings()),
value.OutputValue([34, 56]))
def test_reconstruct_expression(self):
self.assertEqual(self.operation.reconstruct_expression(self.arg_values),
'my_input[1:]')
class SlicingAxis0RightOperationTest(absltest.TestCase):
def setUp(self):
super(SlicingAxis0RightOperationTest, self).setUp()
self.operation = python_operations.SlicingAxis0RightOperation()
self.arg_values = [value.InputValue([12, 34, 56], 'my_input'),
value.ConstantValue(-1)]
def test_get_docstring(self):
self.assertIn('Selects a prefix of indices along axis 0 of the tensor',
self.operation.metadata.docstring)
def test_apply(self):
self.assertEqual(self.operation.apply(self.arg_values,
settings_module.default_settings()),
value.OutputValue([12, 34]))
def test_reconstruct_expression(self):
self.assertEqual(self.operation.reconstruct_expression(self.arg_values),
'my_input[:-1]')
class SlicingAxis0BothOperationTest(absltest.TestCase):
def setUp(self):
super(SlicingAxis0BothOperationTest, self).setUp()
self.operation = python_operations.SlicingAxis0BothOperation()
self.arg_values = [value.InputValue([12, 34, 56, 78], 'my_input'),
value.ConstantValue(1),
value.ConstantValue(-1)]
def test_get_docstring(self):
self.assertIn('Selects a range of indices along axis 0 of the tensor',
self.operation.metadata.docstring)
def test_apply(self):
self.assertEqual(self.operation.apply(self.arg_values,
settings_module.default_settings()),
value.OutputValue([34, 56]))
def test_reconstruct_expression(self):
self.assertEqual(self.operation.reconstruct_expression(self.arg_values),
'my_input[1:-1]')
class SlicingAxis1LeftOperationTest(absltest.TestCase):
def setUp(self):
super(SlicingAxis1LeftOperationTest, self).setUp()
self.operation = python_operations.SlicingAxis1LeftOperation()
self.arg_values = [value.InputValue([[12, 34, 56]], 'my_input'),
value.ConstantValue(1)]
def test_get_docstring(self):
self.assertIn('Selects a suffix of indices along axis 1 of the tensor',
self.operation.metadata.docstring)
def test_apply(self):
self.assertEqual(self.operation.apply(self.arg_values,
settings_module.default_settings()),
value.OutputValue([[34, 56]]))
def test_reconstruct_expression(self):
self.assertEqual(self.operation.reconstruct_expression(self.arg_values),
'my_input[:, 1:]')
class SlicingAxis1RightOperationTest(absltest.TestCase):
def setUp(self):
super(SlicingAxis1RightOperationTest, self).setUp()
self.operation = python_operations.SlicingAxis1RightOperation()
self.arg_values = [value.InputValue([[12, 34, 56]], 'my_input'),
value.ConstantValue(-1)]
def test_get_docstring(self):
self.assertIn('Selects a prefix of indices along axis 1 of the tensor',
self.operation.metadata.docstring)
def test_apply(self):
self.assertEqual(self.operation.apply(self.arg_values,
settings_module.default_settings()),
value.OutputValue([[12, 34]]))
def test_reconstruct_expression(self):
self.assertEqual(self.operation.reconstruct_expression(self.arg_values),
'my_input[:, :-1]')
class SlicingAxis1BothOperationTest(absltest.TestCase):
def setUp(self):
super(SlicingAxis1BothOperationTest, self).setUp()
self.operation = python_operations.SlicingAxis1BothOperation()
self.arg_values = [value.InputValue([[12, 34, 56, 78],
[-1, -2, -3, -4]], 'my_input'),
value.ConstantValue(1),
value.ConstantValue(-1)]
def test_get_docstring(self):
self.assertIn('Selects a range of indices along axis 1 of the tensor',
self.operation.metadata.docstring)
def test_apply(self):
self.assertEqual(self.operation.apply(self.arg_values,
settings_module.default_settings()),
value.OutputValue([[34, 56], [-2, -3]]))
def test_reconstruct_expression(self):
self.assertEqual(self.operation.reconstruct_expression(self.arg_values),
'my_input[:, 1:-1]')
if __name__ == '__main__':
logging.set_verbosity(logging.ERROR)
absltest.main()
| [
"tf_coder.value_search.python_operations.SingletonTupleCreationOperation",
"tf_coder.value_search.python_operations.SlicingAxis1RightOperation",
"tf_coder.value_search.value_search_settings.default_settings",
"tf_coder.value_search.python_operations.SlicingAxis0BothOperation",
"tf_coder.value_search.python_operations.PairCreationOperation",
"tf_coder.value_search.value.InputValue",
"tf_coder.value_search.value.ConstantValue",
"tf_coder.value_search.python_operations.IndexingAxis1Operation",
"absl.testing.absltest.main",
"tf_coder.value_search.python_operations.SlicingAxis0RightOperation",
"tf_coder.value_search.python_operations.TripleCreationOperation",
"absl.logging.set_verbosity",
"tf_coder.value_search.python_operations.IndexingOperation",
"tf_coder.value_search.value.OutputValue",
"tf_coder.value_search.python_operations.SlicingAxis1LeftOperation",
"absl.testing.parameterized.named_parameters",
"tf_coder.value_search.python_operations.SlicingAxis1BothOperation",
"tensorflow.constant",
"tf_coder.value_search.python_operations.SlicingAxis0LeftOperation"
] | [((1118, 1226), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('index_0', 0, 1.2)", "('index_2', 2, 5.6)", "('index_negative_2', -2, 3.4)"], {}), "(('index_0', 0, 1.2), ('index_2', 2, 5.6), (\n 'index_negative_2', -2, 3.4))\n", (1148, 1226), False, 'from absl.testing import parameterized\n'), ((1591, 1692), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('index_0', 0, [1.2, 3.4])", "('index_negative_1', -1, [5.6, 7.8])"], {}), "(('index_0', 0, [1.2, 3.4]), (\n 'index_negative_1', -1, [5.6, 7.8]))\n", (1621, 1692), False, 'from absl.testing import parameterized\n'), ((10309, 10345), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.ERROR'], {}), '(logging.ERROR)\n', (10330, 10345), False, 'from absl import logging\n'), ((10349, 10364), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (10362, 10364), False, 'from absl.testing import absltest\n'), ((1076, 1113), 'tf_coder.value_search.python_operations.IndexingOperation', 'python_operations.IndexingOperation', ([], {}), '()\n', (1111, 1113), False, 'from tf_coder.value_search import python_operations\n'), ((1766, 1803), 'tensorflow.constant', 'tf.constant', (['[[1.2, 3.4], [5.6, 7.8]]'], {}), '([[1.2, 3.4], [5.6, 7.8]])\n', (1777, 1803), True, 'import tensorflow as tf\n'), ((2161, 2200), 'tf_coder.value_search.value.InputValue', 'value.InputValue', (['[1.2, 3.4]', '"""my_list"""'], {}), "([1.2, 3.4], 'my_list')\n", (2177, 2200), False, 'from tf_coder.value_search import value\n'), ((2213, 2235), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(0)'], {}), '(0)\n', (2232, 2235), False, 'from tf_coder.value_search import value\n'), ((2498, 2540), 'tf_coder.value_search.python_operations.IndexingAxis1Operation', 'python_operations.IndexingAxis1Operation', ([], {}), '()\n', (2538, 2540), False, 'from tf_coder.value_search import python_operations\n'), ((3200, 3251), 'tf_coder.value_search.python_operations.SingletonTupleCreationOperation', 'python_operations.SingletonTupleCreationOperation', ([], {}), '()\n', (3249, 3251), False, 'from tf_coder.value_search import python_operations\n'), ((3807, 3848), 'tf_coder.value_search.python_operations.PairCreationOperation', 'python_operations.PairCreationOperation', ([], {}), '()\n', (3846, 3848), False, 'from tf_coder.value_search import python_operations\n'), ((4439, 4482), 'tf_coder.value_search.python_operations.TripleCreationOperation', 'python_operations.TripleCreationOperation', ([], {}), '()\n', (4480, 4482), False, 'from tf_coder.value_search import python_operations\n'), ((5156, 5201), 'tf_coder.value_search.python_operations.SlicingAxis0LeftOperation', 'python_operations.SlicingAxis0LeftOperation', ([], {}), '()\n', (5199, 5201), False, 'from tf_coder.value_search import python_operations\n'), ((6003, 6049), 'tf_coder.value_search.python_operations.SlicingAxis0RightOperation', 'python_operations.SlicingAxis0RightOperation', ([], {}), '()\n', (6047, 6049), False, 'from tf_coder.value_search import python_operations\n'), ((6851, 6896), 'tf_coder.value_search.python_operations.SlicingAxis0BothOperation', 'python_operations.SlicingAxis0BothOperation', ([], {}), '()\n', (6894, 6896), False, 'from tf_coder.value_search import python_operations\n'), ((7749, 7794), 'tf_coder.value_search.python_operations.SlicingAxis1LeftOperation', 'python_operations.SlicingAxis1LeftOperation', ([], {}), '()\n', (7792, 7794), False, 'from tf_coder.value_search import python_operations\n'), ((8603, 8649), 'tf_coder.value_search.python_operations.SlicingAxis1RightOperation', 'python_operations.SlicingAxis1RightOperation', ([], {}), '()\n', (8647, 8649), False, 'from tf_coder.value_search import python_operations\n'), ((9458, 9503), 'tf_coder.value_search.python_operations.SlicingAxis1BothOperation', 'python_operations.SlicingAxis1BothOperation', ([], {}), '()\n', (9501, 9503), False, 'from tf_coder.value_search import python_operations\n'), ((1309, 1345), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['[1.2, 3.4, 5.6]'], {}), '([1.2, 3.4, 5.6])\n', (1328, 1345), False, 'from tf_coder.value_search import value\n'), ((1365, 1391), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['index'], {}), '(index)\n', (1384, 1391), False, 'from tf_coder.value_search import value\n'), ((1485, 1519), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (1517, 1519), True, 'from tf_coder.value_search import value_search_settings as settings_module\n'), ((1556, 1585), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['expected'], {}), '(expected)\n', (1575, 1585), False, 'from tf_coder.value_search import value\n'), ((1822, 1849), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['tensor'], {}), '(tensor)\n', (1841, 1849), False, 'from tf_coder.value_search import value\n'), ((1869, 1895), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['index'], {}), '(index)\n', (1888, 1895), False, 'from tf_coder.value_search import value\n'), ((1989, 2023), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (2021, 2023), True, 'from tf_coder.value_search import value_search_settings as settings_module\n'), ((2564, 2614), 'tf_coder.value_search.value.InputValue', 'value.InputValue', (['[[12, 34], [56, 78]]', '"""my_input"""'], {}), "([[12, 34], [56, 78]], 'my_input')\n", (2580, 2614), False, 'from tf_coder.value_search import value\n'), ((2639, 2661), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(1)'], {}), '(1)\n', (2658, 2661), False, 'from tf_coder.value_search import value\n'), ((2847, 2874), 'tf_coder.value_search.value.OutputValue', 'value.OutputValue', (['[34, 78]'], {}), '([34, 78])\n', (2864, 2874), False, 'from tf_coder.value_search import value\n'), ((3275, 3298), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(12)'], {}), '(12)\n', (3294, 3298), False, 'from tf_coder.value_search import value\n'), ((3484, 3510), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(12,)'], {}), '((12,))\n', (3503, 3510), False, 'from tf_coder.value_search import value\n'), ((3872, 3895), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(12)'], {}), '(12)\n', (3891, 3895), False, 'from tf_coder.value_search import value\n'), ((3897, 3920), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(34)'], {}), '(34)\n', (3916, 3920), False, 'from tf_coder.value_search import value\n'), ((4106, 4135), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(12, 34)'], {}), '((12, 34))\n', (4125, 4135), False, 'from tf_coder.value_search import value\n'), ((4506, 4529), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(12)'], {}), '(12)\n', (4525, 4529), False, 'from tf_coder.value_search import value\n'), ((4554, 4577), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(34)'], {}), '(34)\n', (4573, 4577), False, 'from tf_coder.value_search import value\n'), ((4602, 4625), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(56)'], {}), '(56)\n', (4621, 4625), False, 'from tf_coder.value_search import value\n'), ((4811, 4844), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(12, 34, 56)'], {}), '((12, 34, 56))\n', (4830, 4844), False, 'from tf_coder.value_search import value\n'), ((5225, 5267), 'tf_coder.value_search.value.InputValue', 'value.InputValue', (['[12, 34, 56]', '"""my_input"""'], {}), "([12, 34, 56], 'my_input')\n", (5241, 5267), False, 'from tf_coder.value_search import value\n'), ((5292, 5314), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(1)'], {}), '(1)\n', (5311, 5314), False, 'from tf_coder.value_search import value\n'), ((5662, 5689), 'tf_coder.value_search.value.OutputValue', 'value.OutputValue', (['[34, 56]'], {}), '([34, 56])\n', (5679, 5689), False, 'from tf_coder.value_search import value\n'), ((6073, 6115), 'tf_coder.value_search.value.InputValue', 'value.InputValue', (['[12, 34, 56]', '"""my_input"""'], {}), "([12, 34, 56], 'my_input')\n", (6089, 6115), False, 'from tf_coder.value_search import value\n'), ((6140, 6163), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(-1)'], {}), '(-1)\n', (6159, 6163), False, 'from tf_coder.value_search import value\n'), ((6511, 6538), 'tf_coder.value_search.value.OutputValue', 'value.OutputValue', (['[12, 34]'], {}), '([12, 34])\n', (6528, 6538), False, 'from tf_coder.value_search import value\n'), ((6920, 6966), 'tf_coder.value_search.value.InputValue', 'value.InputValue', (['[12, 34, 56, 78]', '"""my_input"""'], {}), "([12, 34, 56, 78], 'my_input')\n", (6936, 6966), False, 'from tf_coder.value_search import value\n'), ((6991, 7013), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(1)'], {}), '(1)\n', (7010, 7013), False, 'from tf_coder.value_search import value\n'), ((7038, 7061), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(-1)'], {}), '(-1)\n', (7057, 7061), False, 'from tf_coder.value_search import value\n'), ((7408, 7435), 'tf_coder.value_search.value.OutputValue', 'value.OutputValue', (['[34, 56]'], {}), '([34, 56])\n', (7425, 7435), False, 'from tf_coder.value_search import value\n'), ((7818, 7862), 'tf_coder.value_search.value.InputValue', 'value.InputValue', (['[[12, 34, 56]]', '"""my_input"""'], {}), "([[12, 34, 56]], 'my_input')\n", (7834, 7862), False, 'from tf_coder.value_search import value\n'), ((7887, 7909), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(1)'], {}), '(1)\n', (7906, 7909), False, 'from tf_coder.value_search import value\n'), ((8257, 8286), 'tf_coder.value_search.value.OutputValue', 'value.OutputValue', (['[[34, 56]]'], {}), '([[34, 56]])\n', (8274, 8286), False, 'from tf_coder.value_search import value\n'), ((8673, 8717), 'tf_coder.value_search.value.InputValue', 'value.InputValue', (['[[12, 34, 56]]', '"""my_input"""'], {}), "([[12, 34, 56]], 'my_input')\n", (8689, 8717), False, 'from tf_coder.value_search import value\n'), ((8742, 8765), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(-1)'], {}), '(-1)\n', (8761, 8765), False, 'from tf_coder.value_search import value\n'), ((9113, 9142), 'tf_coder.value_search.value.OutputValue', 'value.OutputValue', (['[[12, 34]]'], {}), '([[12, 34]])\n', (9130, 9142), False, 'from tf_coder.value_search import value\n'), ((9527, 9593), 'tf_coder.value_search.value.InputValue', 'value.InputValue', (['[[12, 34, 56, 78], [-1, -2, -3, -4]]', '"""my_input"""'], {}), "([[12, 34, 56, 78], [-1, -2, -3, -4]], 'my_input')\n", (9543, 9593), False, 'from tf_coder.value_search import value\n'), ((9659, 9681), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(1)'], {}), '(1)\n', (9678, 9681), False, 'from tf_coder.value_search import value\n'), ((9706, 9729), 'tf_coder.value_search.value.ConstantValue', 'value.ConstantValue', (['(-1)'], {}), '(-1)\n', (9725, 9729), False, 'from tf_coder.value_search import value\n'), ((10076, 10115), 'tf_coder.value_search.value.OutputValue', 'value.OutputValue', (['[[34, 56], [-2, -3]]'], {}), '([[34, 56], [-2, -3]])\n', (10093, 10115), False, 'from tf_coder.value_search import value\n'), ((2080, 2101), 'tensorflow.constant', 'tf.constant', (['expected'], {}), '(expected)\n', (2091, 2101), True, 'import tensorflow as tf\n'), ((2789, 2823), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (2821, 2823), True, 'from tf_coder.value_search import value_search_settings as settings_module\n'), ((3426, 3460), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (3458, 3460), True, 'from tf_coder.value_search import value_search_settings as settings_module\n'), ((4048, 4082), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (4080, 4082), True, 'from tf_coder.value_search import value_search_settings as settings_module\n'), ((4753, 4787), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (4785, 4787), True, 'from tf_coder.value_search import value_search_settings as settings_module\n'), ((5604, 5638), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (5636, 5638), True, 'from tf_coder.value_search import value_search_settings as settings_module\n'), ((6453, 6487), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (6485, 6487), True, 'from tf_coder.value_search import value_search_settings as settings_module\n'), ((7350, 7384), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (7382, 7384), True, 'from tf_coder.value_search import value_search_settings as settings_module\n'), ((8199, 8233), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (8231, 8233), True, 'from tf_coder.value_search import value_search_settings as settings_module\n'), ((9055, 9089), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (9087, 9089), True, 'from tf_coder.value_search import value_search_settings as settings_module\n'), ((10018, 10052), 'tf_coder.value_search.value_search_settings.default_settings', 'settings_module.default_settings', ([], {}), '()\n', (10050, 10052), True, 'from tf_coder.value_search import value_search_settings as settings_module\n')] |
from idact.detail.jupyter_app.app_allocation_parameters import \
AppAllocationParameters
from idact.detail.jupyter_app.override_parameters_if_possible import \
override_parameters_if_possible
def get_parameters_for_test() -> AppAllocationParameters:
return AppAllocationParameters(
nodes=10,
cores=20,
memory_per_node='30GiB',
walltime='0:40:00',
native_args=[['--arg1'],
['value1']])
def test_override_nodes():
parameters = get_parameters_for_test()
override_parameters_if_possible(parameters=parameters,
nodes=15,
cores=None,
memory_per_node=None,
walltime=None,
native_args=[])
assert parameters == AppAllocationParameters(
nodes=15,
cores=20,
memory_per_node='30GiB',
walltime='0:40:00',
native_args=[['--arg1'],
['value1']])
def test_override_native_args():
parameters = get_parameters_for_test()
override_parameters_if_possible(parameters=parameters,
nodes=None,
cores=None,
memory_per_node=None,
walltime=None,
native_args=[('--arg1', 'value2'),
('--arg2', 'value3'),
('--arg3', 'value4')])
assert parameters == AppAllocationParameters(
nodes=10,
cores=20,
memory_per_node='30GiB',
walltime='0:40:00',
native_args=[['--arg1', '--arg2', '--arg3'],
['value2', 'value3', 'value4']])
def test_override_all():
parameters = get_parameters_for_test()
override_parameters_if_possible(parameters=parameters,
nodes=15,
cores=25,
memory_per_node='35GiB',
walltime='0:45:00',
native_args=[('--arg2', 'value2')])
assert parameters == AppAllocationParameters(
nodes=15,
cores=25,
memory_per_node='35GiB',
walltime='0:45:00',
native_args=[['--arg2'],
['value2']])
| [
"idact.detail.jupyter_app.override_parameters_if_possible.override_parameters_if_possible",
"idact.detail.jupyter_app.app_allocation_parameters.AppAllocationParameters"
] | [((271, 401), 'idact.detail.jupyter_app.app_allocation_parameters.AppAllocationParameters', 'AppAllocationParameters', ([], {'nodes': '(10)', 'cores': '(20)', 'memory_per_node': '"""30GiB"""', 'walltime': '"""0:40:00"""', 'native_args': "[['--arg1'], ['value1']]"}), "(nodes=10, cores=20, memory_per_node='30GiB',\n walltime='0:40:00', native_args=[['--arg1'], ['value1']])\n", (294, 401), False, 'from idact.detail.jupyter_app.app_allocation_parameters import AppAllocationParameters\n'), ((537, 670), 'idact.detail.jupyter_app.override_parameters_if_possible.override_parameters_if_possible', 'override_parameters_if_possible', ([], {'parameters': 'parameters', 'nodes': '(15)', 'cores': 'None', 'memory_per_node': 'None', 'walltime': 'None', 'native_args': '[]'}), '(parameters=parameters, nodes=15, cores=None,\n memory_per_node=None, walltime=None, native_args=[])\n', (568, 670), False, 'from idact.detail.jupyter_app.override_parameters_if_possible import override_parameters_if_possible\n'), ((1145, 1349), 'idact.detail.jupyter_app.override_parameters_if_possible.override_parameters_if_possible', 'override_parameters_if_possible', ([], {'parameters': 'parameters', 'nodes': 'None', 'cores': 'None', 'memory_per_node': 'None', 'walltime': 'None', 'native_args': "[('--arg1', 'value2'), ('--arg2', 'value3'), ('--arg3', 'value4')]"}), "(parameters=parameters, nodes=None, cores=\n None, memory_per_node=None, walltime=None, native_args=[('--arg1',\n 'value2'), ('--arg2', 'value3'), ('--arg3', 'value4')])\n", (1176, 1349), False, 'from idact.detail.jupyter_app.override_parameters_if_possible import override_parameters_if_possible\n'), ((1949, 2112), 'idact.detail.jupyter_app.override_parameters_if_possible.override_parameters_if_possible', 'override_parameters_if_possible', ([], {'parameters': 'parameters', 'nodes': '(15)', 'cores': '(25)', 'memory_per_node': '"""35GiB"""', 'walltime': '"""0:45:00"""', 'native_args': "[('--arg2', 'value2')]"}), "(parameters=parameters, nodes=15, cores=25,\n memory_per_node='35GiB', walltime='0:45:00', native_args=[('--arg2',\n 'value2')])\n", (1980, 2112), False, 'from idact.detail.jupyter_app.override_parameters_if_possible import override_parameters_if_possible\n'), ((873, 1003), 'idact.detail.jupyter_app.app_allocation_parameters.AppAllocationParameters', 'AppAllocationParameters', ([], {'nodes': '(15)', 'cores': '(20)', 'memory_per_node': '"""30GiB"""', 'walltime': '"""0:40:00"""', 'native_args': "[['--arg1'], ['value1']]"}), "(nodes=15, cores=20, memory_per_node='30GiB',\n walltime='0:40:00', native_args=[['--arg1'], ['value1']])\n", (896, 1003), False, 'from idact.detail.jupyter_app.app_allocation_parameters import AppAllocationParameters\n'), ((1645, 1820), 'idact.detail.jupyter_app.app_allocation_parameters.AppAllocationParameters', 'AppAllocationParameters', ([], {'nodes': '(10)', 'cores': '(20)', 'memory_per_node': '"""30GiB"""', 'walltime': '"""0:40:00"""', 'native_args': "[['--arg1', '--arg2', '--arg3'], ['value2', 'value3', 'value4']]"}), "(nodes=10, cores=20, memory_per_node='30GiB',\n walltime='0:40:00', native_args=[['--arg1', '--arg2', '--arg3'], [\n 'value2', 'value3', 'value4']])\n", (1668, 1820), False, 'from idact.detail.jupyter_app.app_allocation_parameters import AppAllocationParameters\n'), ((2311, 2441), 'idact.detail.jupyter_app.app_allocation_parameters.AppAllocationParameters', 'AppAllocationParameters', ([], {'nodes': '(15)', 'cores': '(25)', 'memory_per_node': '"""35GiB"""', 'walltime': '"""0:45:00"""', 'native_args': "[['--arg2'], ['value2']]"}), "(nodes=15, cores=25, memory_per_node='35GiB',\n walltime='0:45:00', native_args=[['--arg2'], ['value2']])\n", (2334, 2441), False, 'from idact.detail.jupyter_app.app_allocation_parameters import AppAllocationParameters\n')] |
"""
-*- coding: utf-8 -*-
========================
AWS Lambda
========================
Contributor: <NAME> (<NAME>)
========================
"""
import uuid
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class Parse:
def __init__(self, page, get_table, get_kv, get_text):
self.response = page
self.word_map = {}
self.table_page_map = {}
self.key_map = []
self.value_map = {}
self.final_map_list = []
self.line_text = {}
self.get_table = get_table
self.get_kv = get_kv
self.get_text = get_text
def extract_text(self, extract_by="LINE"):
for block in self.response:
if block["BlockType"] == extract_by:
page_key = f'page_{block["Page"]}'
if page_key in self.line_text.keys():
self.line_text[page_key].append(block["Text"])
else:
self.line_text[page_key] = [block["Text"]]
return self.line_text
def map_word_id(self):
for block in self.response:
if block["BlockType"] == "WORD":
self.word_map[block["Id"]] = block["Text"]
if block["BlockType"] == "SELECTION_ELEMENT":
self.word_map[block["Id"]] = block["SelectionStatus"]
def extract_table_info(self):
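        """Rebuilds tables from TABLE and CELL blocks: cells are grouped into a new
        row whenever RowIndex changes, child word Ids are mapped back to text
        through self.word_map, and finished tables are stored per page in
        self.table_page_map."""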
row = []
table = {}
ri = 0
flag = False
page = self.response[0]["Page"]
response_block_len = len(self.response) - 1
for n, block in enumerate(self.response):
if block["BlockType"] == "TABLE":
key = f"table_{uuid.uuid4().hex}_page_{block['Page']}"
temp_table = []
if block["BlockType"] == "CELL":
if block["RowIndex"] != ri:
flag = True
row = []
ri = block["RowIndex"]
if "Relationships" in block:
for relation in block["Relationships"]:
if relation["Type"] == "CHILD":
row.append(
" ".join([self.word_map[i] for i in relation["Ids"]])
)
else:
row.append(" ")
if flag:
temp_table.append(row)
table[key] = temp_table
flag = False
if table:
if block["Page"] != page:
self.table_page_map[page] = table
page = block["Page"]
table = {}
if response_block_len == n:
self.table_page_map[page] = table
return self.table_page_map
def get_key_map(self):
for block in self.response:
if block["BlockType"] == "KEY_VALUE_SET" and "KEY" in block["EntityTypes"]:
for relation in block["Relationships"]:
if relation["Type"] == "VALUE":
value_id = relation["Ids"]
if relation["Type"] == "CHILD":
v = " ".join([self.word_map[i] for i in relation["Ids"]])
self.key_map.append([v, value_id])
def get_value_map(self):
for block in self.response:
if (
block["BlockType"] == "KEY_VALUE_SET"
and "VALUE" in block["EntityTypes"]
):
if "Relationships" in block:
for relation in block["Relationships"]:
if relation["Type"] == "CHILD":
v = " ".join([self.word_map[i] for i in relation["Ids"]])
self.value_map[block["Id"]] = v
else:
self.value_map[block["Id"]] = "VALUE_NOT_FOUND"
def get_kv_map(self):
for i in self.key_map:
self.final_map_list.append(
[i[0], "".join(["".join(self.value_map[k]) for k in i[1]])]
)
return self.final_map_list
def process_response(self):
final_map, table_info, text = None, None, None
logging.info("Mapping Id with word")
self.map_word_id()
if self.get_text:
logging.info("Extracting text")
text = self.extract_text()
if self.get_kv:
logging.info("Extracting Key-Value pairs")
self.get_key_map()
self.get_value_map()
final_map = self.get_kv_map()
if self.get_table:
logging.info("Extracting table information")
table_info = self.extract_table_info()
return table_info, final_map, text
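# Illustrative helper, not part of the original lambda: it shows how Parse is
# meant to be used with the "Blocks" list of an Amazon Textract response, e.g.
# response["Blocks"] returned by boto3's textract.analyze_document(
#     ..., FeatureTypes=["TABLES", "FORMS"]).
def example_parse_textract_blocks(textract_blocks):
    parser = Parse(textract_blocks, get_table=True, get_kv=True, get_text=True)
    table_info, key_value_pairs, text = parser.process_response()
    return table_info, key_value_pairs, text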
| [
"logging.getLogger",
"logging.info",
"uuid.uuid4"
] | [((184, 203), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (201, 203), False, 'import logging\n'), ((4193, 4229), 'logging.info', 'logging.info', (['"""Mapping Id with word"""'], {}), "('Mapping Id with word')\n", (4205, 4229), False, 'import logging\n'), ((4296, 4327), 'logging.info', 'logging.info', (['"""Extracting text"""'], {}), "('Extracting text')\n", (4308, 4327), False, 'import logging\n'), ((4404, 4446), 'logging.info', 'logging.info', (['"""Extracting Key-Value pairs"""'], {}), "('Extracting Key-Value pairs')\n", (4416, 4446), False, 'import logging\n'), ((4593, 4637), 'logging.info', 'logging.info', (['"""Extracting table information"""'], {}), "('Extracting table information')\n", (4605, 4637), False, 'import logging\n'), ((1653, 1665), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1663, 1665), False, 'import uuid\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-27 18:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='product',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='products_img'),
),
migrations.AlterField(
model_name='product',
name='name',
field=models.CharField(default=None, max_length=100),
),
]
| [
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((389, 455), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '"""products_img"""'}), "(blank=True, null=True, upload_to='products_img')\n", (406, 455), False, 'from django.db import migrations, models\n'), ((576, 622), 'django.db.models.CharField', 'models.CharField', ([], {'default': 'None', 'max_length': '(100)'}), '(default=None, max_length=100)\n', (592, 622), False, 'from django.db import migrations, models\n')] |
import os
broker_url = os.getenv('RabbitMqUrl')
timezone = 'Europe/Warsaw'
beat_scheduler = "django_celery_beat.schedulers:DatabaseScheduler"
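# `RabbitMqUrl` must be set in the environment before Celery starts; an
# illustrative (assumed) value: export RabbitMqUrl="amqp://guest:guest@localhost:5672//"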
| [
"os.getenv"
] | [((24, 48), 'os.getenv', 'os.getenv', (['"""RabbitMqUrl"""'], {}), "('RabbitMqUrl')\n", (33, 48), False, 'import os\n')] |
# Generated by Django 2.0.5 on 2018-06-09 13:54
from django.db import migrations, models
def create_name(apps, schema_editor):
Athlete = apps.get_model("rankings", "Athlete")
for athlete in Athlete.objects.all():
athlete.name = athlete.first_name + " " + athlete.last_name
athlete.save()
class Migration(migrations.Migration):
dependencies = [
('rankings', '0015_athlete_name'),
]
operations = [
migrations.AlterField(
model_name='athlete',
name='first_name',
field=models.CharField(max_length=20, null=True),
),
migrations.AlterField(
model_name='athlete',
name='last_name',
field=models.CharField(max_length=30, null=True),
),
migrations.RunPython(create_name)
]
| [
"django.db.migrations.RunPython",
"django.db.models.CharField"
] | [((821, 854), 'django.db.migrations.RunPython', 'migrations.RunPython', (['create_name'], {}), '(create_name)\n', (841, 854), False, 'from django.db import migrations, models\n'), ((583, 625), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)'}), '(max_length=20, null=True)\n', (599, 625), False, 'from django.db import migrations, models\n'), ((756, 798), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)'}), '(max_length=30, null=True)\n', (772, 798), False, 'from django.db import migrations, models\n')] |
# ********************************************************************************** #
# #
# Project: FastClassAI workbench #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of the Skin AanaliticAI development kit, created #
# for the evaluation of public datasets used for skin cancer detection with #
# a large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os                           # allow changing, and navigating files and folders,
import sys
import re                           # module to use regular expressions,
import glob                         # lists names in folders that match Unix shell patterns
import random                       # functions that use and generate random numbers
import pickle
import numpy as np                  # support for multi-dimensional arrays and matrices
import pandas as pd                 # library for data manipulation and analysis
import seaborn as sns               # advanced plots for statistics,
import matplotlib.pyplot as plt     # for making plots,
import matplotlib as mpl            # basic functions helping with plot making
import scipy.stats as stats         # library for statistics and technical programming,
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow.keras as keras
from tensorflow.keras import backend as K          # used for housekeeping of tf models,
from tensorflow.keras import (Sequential, activations, initializers,
                              optimizers, losses, metrics)
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier     # accepts only numerical data
from sklearn.tree import export_graphviz
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ParameterGrid, train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
# Function, ................................................................................
def plot_NN_loss_acc(*, model_history_df, title="", n_mean=3, figsize=(8,4), top=0.75):
    ''' small function to plot loss and accuracy over epochs, using data created from the history.history keras attribute;
        the columns should be called acc, loss, val_acc and val_loss,
        # ...
        . model_history_df : dataframe, created from the history.history keras attribute (see above)
        . n_mean : int, how many of the last epochs to average for the values displayed in subplot titles
        . title : str, plot title,
    '''
#.. figure, axes,
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=figsize)
fig.suptitle(title)
#.. Plot accuracy values
ax1.plot(model_history_df.loc[:,'loss'], label='train loss')
ax1.plot(model_history_df.loc[:,'val_loss'], label='val loss')
ax1.set_title('Mean validation loss {:.3f}'.format(np.mean(model_history_df.loc[:, 'val_loss'][-n_mean:])))
ax1.set_xlabel('epoch')
ax1.set_ylabel('loss value')
ax1.grid(ls="--", color="grey")
ax1.legend()
#.. Plot accuracy values
ax2.plot(model_history_df.loc[:, 'acc'], label='train acc')
ax2.plot(model_history_df.loc[:, 'val_acc'], label='val acc')
ax2.set_title('Mean validation acc {:.3f}'.format(
np.mean(model_history_df.loc[:, 'val_acc'][-n_mean:])))
ax2.set_xlabel('epoch')
ax2.set_ylabel('accuracy')
ax2.set_ylim(0,1)
ax2.grid(ls="--", color="grey")
ax2.legend()
# ...
plt.tight_layout()
plt.subplots_adjust(top=top)
plt.show()
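# Example usage (a sketch; `history` is the object returned by keras' model.fit,
# and the title string is an illustrative assumption):
# plot_NN_loss_acc(model_history_df=pd.DataFrame(history.history), title="cnn run 1")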
# Function, ........................................................................
def deNovoCNN_gridsearch(*,
# ... model/run description
method,
run_name,
dataset_name,
dataset_variant,
module_name,
# input data,
cnn_model_function,
                         grid, # list or list-like, obj created with ParameterGrid()
path, # path to data
input_data_info_df=None, # df, with 3. columns,
unit_test = False,
# options,
                         default_validation_split=0.2, # float, used only if a validation split is not provided in input_data_info_df or via the parameter grid,
                         datagen_params = dict(rescale =1/255), # dict, for keras ImageDataGenerator, used only if not provided with the parameter grid,
# info,
verbose=False, # detailed info,
model_fit__verbose=0,
):
"""
    This function trains the provided CNN model once for every parameter combination in the grid.
    important:
    cnn_model_function : this function needs to return two objects:
        * keras model : already compiled
        * list with callback function/s - always a list!
        the return statement looks like this:
        return model, [callback_function]
    Limitations:
    because the validation dataset is created as a subset of the train data with image data generators,
    both must be created with the same image generator (or else a ValueError is raised after the 1st epoch);
    for that reason, the validation data get the same transformations as the train data and typically
    return lower acc values than tests done with other techniques and pipelines,
    code example:
_ ,_ , _ , _ = deNovoCNN_gridsearch(
# model/run description
method = ai_method,
run_name = "test_run",
dataset_name = dataset_name,
dataset_variant = dataset_variant,
module_name = None,
# input data,
cnn_model_function = create_cnn_model,
grid = grid,
path = path, # path to dir with data subsets in separate folders for train, test ect...
input_data_info_df = CNN_DF_INFO, # df, with 3. columns, subset_role/results_name/dirname
# results and info,
verbose=True
)
"""
# Set up, .............................................................................
#.. variables,
    model_ID = -1 # to start unique counts of the models from 0
colname_with_classname_in_batch_labels_table = "classname"
#.. objects to store the results,
model_acc_and_parameters_list = list()
model_predictions_dict = dict()
model_parameters_dict = dict() # for iterations from grid
model_history_dict = dict()
# create input_data_info_df is None available,
if input_data_info_df is None:
input_data_info_df = pd.DataFrame([
{
"subset_role": "train",
"subset_results_name": "train",
"subset_dirname": "train"
},
{
"subset_role": "valid",
"subset_results_name": "valid",
"subset_dirname": None
},
{
"subset_role": "test",
"subset_results_name": "test",
"subset_dirname": "test"
}
])
else:
pass
#..
if unit_test==True:
subset_dirname_to_use_for_unit_test = input_data_info_df.subset_dirname.iloc[0]
input_data_info_df.subset_dirname = subset_dirname_to_use_for_unit_test
else:
pass
    # for loop for grid search with each parameter combination, ...............................
for params_i, params in enumerate(grid):
model_ID +=1
# check for required parameters, except for validation split,
try:
params['method_group']
except:
params['method_group']="unknown"
try:
params['method_variant']
except:
params['method_variant']="unknown"
try:
params['random_state_nr']
except:
params['random_state_nr']=0
try:
params['batch_size']
except:
params['batch_size']=10
try:
params['img_size']
except:
params['img_size']=[128, 128]
try:
params['epoch']
except:
params['epoch']=100
try:
params['early_strop']
except:
params['early_strop']=None
        # add or reset status column in info df,
        input_data_info_df["status"]="awaiting"
        #.. print run info,
'''
printed here, so you know what data are being loaded with image generators,
'''
if verbose==True:
print(f"\n\n..................................................")
print(f"model_ID: {model_ID}")
print(f"method_group: {params['method_group']}")
print(f"method: {method}")
print(f"method_variant: {params['method_variant']}")
print(f"............................... input data ...")
print(f"run_name: {run_name}")
print(f"dataset_name: {dataset_name}")
print(f"dataset_variant: {dataset_variant}")
print(f"unit_test: {unit_test}")
print(f"............................... basic params ...")
print(f"random_state_nr: {params['random_state_nr']}")
print(f"batch_size: {params['batch_size']}")
print(f"img_size: {params['img_size']}")
print(f"epoch: {params['epoch']}")
print(f"..................................................\n")
else:
pass
        # set parameters for the image data generators,
try:
train_datagen_params = params["train_datagen_params"]
valid_datagen_params = params["valid_datagen_params"]
test_datagen_params = params["test_datagen_params"]
datagen_params_info = "imgadatagen prams provided by the user"
except:
train_datagen_params = datagen_params
valid_datagen_params = datagen_params
test_datagen_params = datagen_params
datagen_params_info = "using default imgadatagen params"
# load train & valid data
if unit_test==True:
valid_datagen_params = train_datagen_params
test_datagen_params = train_datagen_params
else:
pass
        # dirnames (only one dirname is allowed with generators,)
train_subset_dirname = input_data_info_df.subset_dirname.loc[input_data_info_df.subset_role=="train"].iloc[0]
valid_subset_dirname = input_data_info_df.subset_dirname.loc[input_data_info_df.subset_role=="valid"].iloc[0] # not used at this moment,
# OPTION 1, subset valid data from train data
if valid_subset_dirname is None or isinstance(valid_subset_dirname, float):
# set-up directory names and datagen parameters,
if isinstance(valid_subset_dirname, float):
train_datagen_params["validation_split"]=valid_subset_dirname
            # else it's None, so we have to get the validation split value from somewhere,
else:
try:
train_datagen_params["validation_split"] #KeyError if it is missing,
except:
train_datagen_params["validation_split"]=default_validation_split
train_datagen = ImageDataGenerator(**train_datagen_params)
TRAINING_DIR = os.path.join(path, train_subset_dirname)
VALIDATION_DIR = TRAINING_DIR
# update, status:
input_data_info_df.loc[input_data_info_df.subset_role=="train", "status"] = "Loading"
input_data_info_df.loc[input_data_info_df.subset_role=="valid", "status"] = f"{train_datagen_params['validation_split']} of train"
#.. for train dataset
trainset = train_datagen.flow_from_directory(
TRAINING_DIR,
batch_size =params["batch_size"],
target_size =params["img_size"],
shuffle =True,
subset ="training"
)
#.. for validation data, no shuffle, made from the same dataset as train data,
validset = train_datagen.flow_from_directory(
VALIDATION_DIR,
batch_size =params["batch_size"],
target_size =params["img_size"],
shuffle =False,
subset ="validation"
)
# OPTION 2, valid data are loaded from separate directory,
else:
#.. create image generator, validation_split=params["validation_split"]
train_datagen = ImageDataGenerator(**train_datagen_params)
valid_datagen = ImageDataGenerator(**valid_datagen_params)
TRAINING_DIR = os.path.join(path, train_subset_dirname)
VALIDATION_DIR = os.path.join(path, valid_subset_dirname)
# update, status:
input_data_info_df.loc[input_data_info_df.subset_role=="train", "status"] = "Loading"
input_data_info_df.loc[input_data_info_df.subset_role=="valid", "status"] = "Loading"
#.. for train dataset
trainset = train_datagen.flow_from_directory(
TRAINING_DIR,
batch_size =params["batch_size"],
target_size =params["img_size"],
shuffle =True
)
#.. for validation data, no shuffle, made from the same dataset as train data,
validset = valid_datagen.flow_from_directory(
VALIDATION_DIR,
batch_size =params["batch_size"],
target_size =params["img_size"],
shuffle =False
)
        # Collect class encoding/decoding - it's not the standard one, .....................
class_encoding = trainset.class_indices
class_decoding = dict(zip(list(class_encoding.values()),list(class_encoding.keys())))
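        # illustrative example (the class names are assumptions): class_encoding could be
        # {"benign": 0, "malignant": 1}, giving class_decoding {0: "benign", 1: "malignant"}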
# add some info:
if verbose==True:
print("Datagenerator parameters: ",datagen_params_info)
print(input_data_info_df)
print(f"\n")
else:
pass
# train the model, collect the results, and plot history, .........................
#.. create the model,
model, callback_function = cnn_model_function(
input_size=(params["img_size"][0], params["img_size"][1], 3),
output_size=trainset.num_classes,
params=params,
verbose=verbose
)
#.. train the model
if callback_function is not None:
history = model.fit_generator(
generator =trainset, # you provide iterators, instead of data,
validation_data =validset,
epochs =params["epoch"],
callbacks =callback_function, # LIST, functions that can be applied at diffeerent stages to fight overfitting,
verbose =model_fit__verbose
)
else:
history = model.fit_generator(
generator =trainset, # you provide iterators, instead of data,
validation_data =validset,
epochs =params["epoch"],
verbose =model_fit__verbose
)
#.. store the results,
model_acc = dict()
model_loss = dict()
n = 3 # use last 3 results in history,
acc_results = pd.DataFrame(history.history).iloc[-n::,:].mean(axis=0)
model_acc["model_acc_train"] = acc_results.loc["acc"]
model_acc["model_acc_valid"] = acc_results.loc["val_acc"]
model_loss["loss_acc_train"] = acc_results.loc["loss"]
model_loss["loss_acc_valid"] = acc_results.loc["val_loss"]
        # LOAD TEST DATA and store all sorts of data in the last loop!, .........................
# prepare objects to store results,
baseline_acc = dict()
one_model_predictions = dict() # with predictions collected for each subset separately,
        # it will be used temporarily, until the last chunk of code reloads model predictions to their final location
        # first, check if test data were provided,
if (input_data_info_df.subset_role=="test").sum()==0:
pass
else:
# get df subset with test data
test__input_data_info_df = pd.DataFrame(input_data_info_df.loc[input_data_info_df.subset_role=="test",:])
test__input_data_info_df.reset_index(inplace=True, drop=True)
            # now check if there is anything to load, and pass if None,
if test__input_data_info_df.shape[0]==1 and test__input_data_info_df.subset_dirname.iloc[0] is None:
pass
else:
# loop over each row, to load and evaluate each test data
if verbose==True:
print(f"generating predicitons for test data: {test__input_data_info_df.shape[0]} subsets")
else:
pass
for test_subset_nr in range(test__input_data_info_df.shape[0]):
# load the dataset,
one_test_xy_name = test__input_data_info_df.subset_results_name.iloc[test_subset_nr]
one_test_subset_name_to_load = test__input_data_info_df.subset_dirname.iloc[test_subset_nr]
#.. generator for test data
test_datagen = ImageDataGenerator(**test_datagen_params)
#.. find how many images there are
temp_testset = test_datagen.flow_from_directory(
os.path.join(path, one_test_subset_name_to_load),
batch_size =params["batch_size"],
target_size =params["img_size"],
shuffle =False
)
#.. get all images in one batch,
test_img_number = len(temp_testset.filenames)
testset = test_datagen.flow_from_directory(
os.path.join(path, one_test_subset_name_to_load),
batch_size =test_img_number,
target_size =params["img_size"],
shuffle =False
)
# calculate test set accuracy,
#.. get predictions (in dummy array)
test_preds = model.predict_generator(testset)
y_true = testset.classes # array with true labels
y_pred = test_preds.argmax(axis=1) # array with predicted labels
model_acc[f"model_acc_{one_test_xy_name}"] = accuracy_score(y_true, y_pred)
model_loss[f"model_loss_{one_test_xy_name}"]= np.nan
                    #.. calculate test set baseline (accuracy of always predicting the most frequent class)
                    baseline_acc[f"baseline_acc_{one_test_xy_name}"] = pd.Series(y_true).value_counts(normalize=True).sort_values(ascending=False).iloc[0]
#.. store model predictions,
predictions = y_pred
decoded_predictions = pd.Series(y_pred).map(class_decoding).values
model_predictions_proba = test_preds
decoded_y_labels = pd.Series(y_true).map(class_decoding).values
batch_labels_df = pd.DataFrame(testset.filenames, columns=["imgname"])
batch_labels_df["clasname"] = decoded_y_labels # miniversiton with 2 most importnat columns
#..
one_model_predictions[one_test_xy_name] = {
"idx_in_batch": np.arange(batch_labels_df.shape[0]),
"original_labels": decoded_y_labels,
"model_predictions": decoded_predictions,
"model_predictions_proba": model_predictions_proba,
"acc_restuls_and_params": None, # will be added later on
"class_encoding": class_encoding,
"class_decoding": class_decoding,
"batch_labels_df": batch_labels_df["clasname"],
"knn_kneighbors": None
                    }  # added in case I would have doubts about the results' origin,
# Collect more results, including lists that were created over each test subset..................
# - 1 - collect acc_restuls_and_params
acc_restuls_and_params = {
"model_ID": model_ID,
"run_name": run_name,
"method": method,
"dataset_name": dataset_name,
"dataset_variant": dataset_variant,
"module": module_name,
"unit_test":unit_test,
# ....
**baseline_acc,
**model_acc,
**model_loss, # nn only
**params,
"pca_components_used":0
}
model_acc_and_parameters_list.append(acc_restuls_and_params) # in list, so it can be used as pd.df immediately,
# - 2 - save model history,
model_history_dict[model_ID] = {
"model_history": pd.DataFrame(history.history),
"acc_restuls_and_params": acc_restuls_and_params
}
# - 3 - store params,
"to allow future runs"
model_parameters_dict[model_ID] = {
"acc_restuls_and_params": acc_restuls_and_params, # when added, I can easily control what I am selecting in other
"params":params
}
# - 4- add acc_restuls_and_params to each model prediction, and store them in the dict with model ID
for one_test_xy_name in list(one_model_predictions.keys()):
one_model_predictions[one_test_xy_name]['acc_restuls_and_params'] = acc_restuls_and_params
        # and finally, add this to the big dict with all the results,
model_predictions_dict[model_ID] = one_model_predictions
# Report on results, .....................................................................
#.. plot history,
if verbose==True:
plot_NN_loss_acc(
model_history_df=pd.DataFrame(history.history),
title=f"model_ID: {model_ID}\n{model_acc}",
n_mean=3,
)
else:
pass
# ..................................................
return model_acc_and_parameters_list, model_predictions_dict, model_parameters_dict, model_history_dict
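# A minimal sketch of a `cnn_model_function` accepted by deNovoCNN_gridsearch(),
# added for illustration only - the architecture, optimizer and callback
# settings below are assumptions, not part of the original module,
def example_cnn_model_function(*, input_size, output_size, params, verbose=False):
    model = keras.Sequential([
        keras.layers.Conv2D(16, (3, 3), activation="relu", input_shape=input_size),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Flatten(),
        keras.layers.Dense(output_size, activation="softmax"),
    ])
    model.compile(optimizer="adam", loss="categorical_crossentropy",
                  metrics=["acc"])  # this module expects 'acc'/'val_acc' keys in history.history
    early_stopping = keras.callbacks.EarlyStopping(monitor="val_loss", patience=3)
    return model, [early_stopping]  # the second element must always be a list (or None)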
# Function, .................................................
def train_denovoCNN_models_iteratively(*,
# names
                                       run_ID, # str, Unique ID added to the name of each file to allow running and saving similar modules created with different parameters or different data
dataset_name, # str, global, provided by the wrapper function,
dataset_variant, # str, global, provided by the wrapper function,
module_name, # str, global, provided by the wrapper function,
# define input data,
path, # str, or list of the same lengh and order as subset_collection_names, path to input data
subset_collection_names, # list, anything that will allow you to identify which subset and run you are using
input_data_info_df_dict, # dict, with input_data_info_df, each is df, with 3. columns, subset_role/results_name/dirname
# model parameters,
                                       method_name, # str, keyword in the function {knn, svm, logreg, dt, rf}
                                       cnn_model_function, # function returning a compiled keras model and a list with callback functions (see deNovoCNN_gridsearch),
datagen_params = dict(rescale =1/255), # default datagenerator parameters applied to all data
grid, # ParameterGrid object, wiht parameters for a given function,
# model selection cycles,
                                       models_selected_at_each_cycle = 0.3, # int or float, how many models with the best performance will be selected and trained with another round of training and with the next subset collection
                                       include_method_variant_with_selection = True, # bool, if True, top models_selected_at_each_cycle with different model variants will be selected for the next cycle
                                       include_random_nr_with_selection = False, # bool, if True, top models_selected_at_each_cycle with different random nrs will be selected for the next cycle
sort_models_by = "model_acc_valid",# str {"model_acc_valid"}
find_min_score = False, # if false, the scores will be used in descending order,
# saving and other,
save_path, # str, eg PATH_results from configs
save_partial_results=True, # bool, if True, it will save model results at each iteration
valid_proportion=0.2, # used only if validation datasets is not specified,
unit_test=False,
model_fit__verbose=0,
verbose=False
):
# create path for results wiht all options,
path_results = os.path.join(save_path, f"{method_name}__{dataset_name}__{dataset_variant}")
try:
os.mkdir(path_results)
if verbose==True:
print(f"\n Crated: {path_results}\n")
else:
pass
except:
pass
# ** / grid search
run_names = list()
for cycle_nr, subset_collection_name in enumerate(subset_collection_names):
print(f"\n - - - CYCLE: {cycle_nr} - - -\n")
# ...............................................
# cycle set up
input_data_info_df = input_data_info_df_dict[subset_collection_name]
# set unique run name,
run_name = f"{subset_collection_name}__{run_ID}"
run_names.append(run_name)
# set name added to each saved file wiht results from that cycle
file_name_fingerprint = f'{method_name}__{dataset_name}__{dataset_variant}__{module_name}__{run_name}'
# path to data
        'if str, one path was given, if list, datasets for different cycles were located on different paths'
if isinstance(path, str):
path_to_data = path
else:
path_to_data = path[cycle_nr]
# ...............................................
# find grid with parameters
if cycle_nr==0:
"the grid is provided externally in 0 cycle"
cycle_grid = grid
else:
"here you must collect parameters from the best performing models, and extract params for top nr of them"
"options to include model variant in selection"
sort_by = "model_acc_valid"
# collect features you want to use to sort model results and get top of each of them
features_used_to_group_models = ["method", "dataset_name", "dataset_variant", "module"]
if include_random_nr_with_selection==True:
features_used_to_group_models.append("random_state_nr")
else:
pass
if include_method_variant_with_selection:
features_used_to_group_models.append("method_variant")
else:
pass
# add these features to df, with the model results as one column
for fi, feature in enumerate(features_used_to_group_models):
if fi==0:
composite_feature = results_from_last_cycle.loc[:, feature].values.tolist()
else:
composite_feature = [f"{x}__{y}" for (x,y) in zip(composite_feature,
results_from_last_cycle.loc[:, feature].values.tolist())]
results_from_last_cycle["method_full_name"] = composite_feature
# find best performing models in each group and sort them
method_groups = results_from_last_cycle.method_full_name.unique().tolist()
best_methods_IDs = list()
for ii, mg in enumerate(method_groups):
# subset summary_df for each method group
df_subset = results_from_last_cycle.loc[ results_from_last_cycle.method_full_name==mg, :]
df_subset = df_subset.sort_values(sort_by, ascending=find_min_score)
df_subset.reset_index(inplace=True, drop=True)
# find how many models will be selected for the next cycle,
if models_selected_at_each_cycle<1 and models_selected_at_each_cycle>0:
mnr = int(np.ceil(df_subset.shape[0]*models_selected_at_each_cycle))
elif models_selected_at_each_cycle==0:
mnr = 1
else:
mnr = models_selected_at_each_cycle
# because I had some rare situations with problems,
if mnr==0:
mnr=1
else:
pass
# find top n models in each
best_methods_IDs.extend(df_subset.model_ID.values[0:mnr].tolist()) #this will extend the list by each nr of id numbers
# create new grid
cycle_grid=list()
for gidx in best_methods_IDs:
            cycle_grid.append(model_parameter_list[gidx]['params']) # parameters stored under the 'params' key for each model_ID by deNovoCNN_gridsearch,
# train models
results_list, model_predictions_dict, model_parameter_list, model_history_dict = deNovoCNN_gridsearch(
# model/run description
method = method_name,
run_name = run_name,
dataset_name = dataset_name,
dataset_variant = dataset_variant,
module_name = module_name,
# input data,
path = path_to_data, # path to dir with data subsets in separate folders for train, test ect...
input_data_info_df = input_data_info_df, # df, with 3. columns, subset_role/results_name/dirname
datagen_params = datagen_params, # default datagenerator parameters applied to all data
# model and params,
cnn_model_function = cnn_model_function,
grid = cycle_grid,
# options,
default_validation_split = valid_proportion,
unit_test = unit_test,
# results and info,
model_fit__verbose = model_fit__verbose,
verbose = True
)
        # this is for the next cycle
results_from_last_cycle = pd.DataFrame(results_list)
# ** / save the results,
if save_partial_results==True:
save_te_results=True
else:
if cycle_nr==(len(subset_collection_names)-1):
save_te_results=True
else:
save_te_results=False
# create path for results
if save_te_results==True:
os.chdir(path_results)
if verbose==True:
print(f"The results will be saved with as:\n{file_name_fingerprint}")
else:
pass
# save results and metadata on each model,
pd.DataFrame(results_list).to_csv(f"{file_name_fingerprint}__summary_table.csv", header=True)
# save model predictions,
with open(f"{file_name_fingerprint}__model_predictions_dict.p", 'wb') as file: # wb - write binary,
pickle.dump(model_predictions_dict, file)
# save model parameters to re-run the models
with open(f"{file_name_fingerprint}__model_parameters_list.p", 'wb') as file: # wb - write binary,
pickle.dump(model_parameter_list, file)
        # save history_dict to re-run the models - specific to tf models
with open(f"{file_name_fingerprint}__model_history_dict.p", 'wb') as file: # wb - write binary,
pickle.dump(model_history_dict, file)
    else:
        if verbose:
            print("The results for this cycle were not saved; only final results will be saved")
pass | [
"sklearn.metrics.accuracy_score",
"numpy.mean",
"numpy.ceil",
"pandas.Series",
"pickle.dump",
"numpy.arange",
"os.path.join",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"os.chdir",
"os.mkdir",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((4236, 4283), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': 'figsize'}), '(nrows=1, ncols=2, figsize=figsize)\n', (4248, 4283), True, 'import matplotlib.pyplot as plt\n'), ((5125, 5143), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5141, 5143), True, 'import matplotlib.pyplot as plt\n'), ((5148, 5176), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': 'top'}), '(top=top)\n', (5167, 5176), True, 'import matplotlib.pyplot as plt\n'), ((5181, 5191), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5189, 5191), True, 'import matplotlib.pyplot as plt\n'), ((28357, 28433), 'os.path.join', 'os.path.join', (['save_path', 'f"""{method_name}__{dataset_name}__{dataset_variant}"""'], {}), "(save_path, f'{method_name}__{dataset_name}__{dataset_variant}')\n", (28369, 28433), False, 'import os\n'), ((8514, 8789), 'pandas.DataFrame', 'pd.DataFrame', (["[{'subset_role': 'train', 'subset_results_name': 'train', 'subset_dirname':\n 'train'}, {'subset_role': 'valid', 'subset_results_name': 'valid',\n 'subset_dirname': None}, {'subset_role': 'test', 'subset_results_name':\n 'test', 'subset_dirname': 'test'}]"], {}), "([{'subset_role': 'train', 'subset_results_name': 'train',\n 'subset_dirname': 'train'}, {'subset_role': 'valid',\n 'subset_results_name': 'valid', 'subset_dirname': None}, {'subset_role':\n 'test', 'subset_results_name': 'test', 'subset_dirname': 'test'}])\n", (8526, 8789), True, 'import pandas as pd\n'), ((28452, 28474), 'os.mkdir', 'os.mkdir', (['path_results'], {}), '(path_results)\n', (28460, 28474), False, 'import os\n'), ((34225, 34251), 'pandas.DataFrame', 'pd.DataFrame', (['results_list'], {}), '(results_list)\n', (34237, 34251), True, 'import pandas as pd\n'), ((4525, 4579), 'numpy.mean', 'np.mean', (["model_history_df.loc[:, 'val_loss'][-n_mean:]"], {}), "(model_history_df.loc[:, 'val_loss'][-n_mean:])\n", (4532, 4579), True, 'import numpy as np\n'), ((4915, 4968), 'numpy.mean', 'np.mean', (["model_history_df.loc[:, 'val_acc'][-n_mean:]"], {}), "(model_history_df.loc[:, 'val_acc'][-n_mean:])\n", (4922, 4968), True, 'import numpy as np\n'), ((13401, 13443), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**train_datagen_params)\n', (13419, 13443), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((13473, 13513), 'os.path.join', 'os.path.join', (['path', 'train_subset_dirname'], {}), '(path, train_subset_dirname)\n', (13485, 13513), False, 'import os\n'), ((14776, 14818), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**train_datagen_params)\n', (14794, 14818), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((14848, 14890), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**valid_datagen_params)\n', (14866, 14890), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((14938, 14978), 'os.path.join', 'os.path.join', (['path', 'train_subset_dirname'], {}), '(path, train_subset_dirname)\n', (14950, 14978), False, 'import os\n'), ((15008, 15048), 'os.path.join', 'os.path.join', (['path', 'valid_subset_dirname'], {}), '(path, valid_subset_dirname)\n', (15020, 15048), False, 'import os\n'), ((18823, 18908), 'pandas.DataFrame', 'pd.DataFrame', (["input_data_info_df.loc[input_data_info_df.subset_role == 'test', :]"], {}), 
"(input_data_info_df.loc[input_data_info_df.subset_role ==\n 'test', :])\n", (18835, 18908), True, 'import pandas as pd\n'), ((24063, 24092), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (24075, 24092), True, 'import pandas as pd\n'), ((34632, 34654), 'os.chdir', 'os.chdir', (['path_results'], {}), '(path_results)\n', (34640, 34654), False, 'import os\n'), ((35142, 35183), 'pickle.dump', 'pickle.dump', (['model_predictions_dict', 'file'], {}), '(model_predictions_dict, file)\n', (35153, 35183), False, 'import pickle\n'), ((35370, 35409), 'pickle.dump', 'pickle.dump', (['model_parameter_list', 'file'], {}), '(model_parameter_list, file)\n', (35381, 35409), False, 'import pickle\n'), ((35633, 35670), 'pickle.dump', 'pickle.dump', (['model_history_dict', 'file'], {}), '(model_history_dict, file)\n', (35644, 35670), False, 'import pickle\n'), ((19942, 19983), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**test_datagen_params)\n', (19960, 19983), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((21242, 21272), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (21256, 21272), False, 'from sklearn.metrics import accuracy_score\n'), ((21960, 22012), 'pandas.DataFrame', 'pd.DataFrame', (['testset.filenames'], {'columns': "['imgname']"}), "(testset.filenames, columns=['imgname'])\n", (21972, 22012), True, 'import pandas as pd\n'), ((25134, 25163), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (25146, 25163), True, 'import pandas as pd\n'), ((34880, 34906), 'pandas.DataFrame', 'pd.DataFrame', (['results_list'], {}), '(results_list)\n', (34892, 34906), True, 'import pandas as pd\n'), ((17814, 17843), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (17826, 17843), True, 'import pandas as pd\n'), ((20134, 20182), 'os.path.join', 'os.path.join', (['path', 'one_test_subset_name_to_load'], {}), '(path, one_test_subset_name_to_load)\n', (20146, 20182), False, 'import os\n'), ((20595, 20643), 'os.path.join', 'os.path.join', (['path', 'one_test_subset_name_to_load'], {}), '(path, one_test_subset_name_to_load)\n', (20607, 20643), False, 'import os\n'), ((22268, 22303), 'numpy.arange', 'np.arange', (['batch_labels_df.shape[0]'], {}), '(batch_labels_df.shape[0])\n', (22277, 22303), True, 'import numpy as np\n'), ((31951, 32010), 'numpy.ceil', 'np.ceil', (['(df_subset.shape[0] * models_selected_at_each_cycle)'], {}), '(df_subset.shape[0] * models_selected_at_each_cycle)\n', (31958, 32010), True, 'import numpy as np\n'), ((21709, 21726), 'pandas.Series', 'pd.Series', (['y_pred'], {}), '(y_pred)\n', (21718, 21726), True, 'import pandas as pd\n'), ((21865, 21882), 'pandas.Series', 'pd.Series', (['y_true'], {}), '(y_true)\n', (21874, 21882), True, 'import pandas as pd\n'), ((21468, 21485), 'pandas.Series', 'pd.Series', (['y_true'], {}), '(y_true)\n', (21477, 21485), True, 'import pandas as pd\n')] |
import copy
import pickle
import hjson
from c3.signal.gates import Instruction
from c3.c3objs import Quantity, hjson_decode, hjson_encode
from c3.experiment import Experiment
from c3.generator.generator import Generator
from c3.libraries.envelopes import envelopes
from c3.parametermap import ParameterMap
from c3.signal import gates, pulse
from c3.model import Model
import numpy as np
import pytest
from c3.libraries.constants import GATES
model = Model()
model.read_config("test/test_model.cfg")
generator = Generator()
generator.read_config("test/generator2.cfg")
t_final = 7e-9 # Time for single qubit gates
sideband = 50e6
gauss_params_single = {
"amp": Quantity(value=0.5, min_val=0.4, max_val=0.6, unit="V"),
"t_final": Quantity(
value=t_final, min_val=0.5 * t_final, max_val=1.5 * t_final, unit="s"
),
"sigma": Quantity(
value=t_final / 4, min_val=t_final / 8, max_val=t_final / 2, unit="s"
),
"xy_angle": Quantity(
value=0.0, min_val=-0.5 * np.pi, max_val=2.5 * np.pi, unit="rad"
),
"freq_offset": Quantity(
value=-sideband - 3e6, min_val=-56 * 1e6, max_val=-52 * 1e6, unit="Hz 2pi"
),
"delta": Quantity(value=-1, min_val=-5, max_val=3, unit=""),
}
gauss_env_single = pulse.EnvelopeDrag(
name="gauss",
desc="Gaussian comp for single-qubit gates",
params=gauss_params_single,
shape=envelopes["gaussian_nonorm"],
)
lo_freq_q1 = 5e9 + sideband
carrier_parameters = {
"freq": Quantity(value=lo_freq_q1, min_val=4.5e9, max_val=6e9, unit="Hz 2pi"),
"framechange": Quantity(value=0.0, min_val=-np.pi, max_val=3 * np.pi, unit="rad"),
}
carr = pulse.Carrier(
name="carrier", desc="Frequency of the local oscillator", params=carrier_parameters
)
lo_freq_q2 = 5.6e9 + sideband
carr_2 = copy.deepcopy(carr)
carr_2.params["freq"].set_value(lo_freq_q2)
instr = gates.Instruction(
name="multi_instruction",
t_start=0.0,
t_end=t_final * 3 + 6e-9,
channels=["d1", "d2"],
)
instr.add_component(copy.deepcopy(gauss_env_single), "d1", name="gaussd1_1")
instr.add_component(
copy.deepcopy(gauss_env_single),
"d1",
name="gaussd1_2",
options={"delay": Quantity(1e-9), "trigger_comp": ("d1", "gaussd1_1")},
)
instr.add_component(
copy.deepcopy(gauss_env_single),
"d1",
name="gaussd1_3",
options={"delay": Quantity(1e-9), "trigger_comp": ("d1", "gaussd1_2")},
)
instr.add_component(copy.deepcopy(gauss_env_single), "d2", name="gaussd2_1")
instr.add_component(
copy.deepcopy(gauss_env_single),
"d2",
name="gaussd2_2",
options={
"delay": Quantity(1e-9),
"trigger_comp": ("d1", "gaussd1_2"),
"t_final_cut": Quantity(0.9 * t_final),
},
)
instr.add_component(carr, "d1")
instr.add_component(carr_2, "d2")
instr_dict_str = hjson.dumpsJSON(instr.asdict(), default=hjson_encode)
pmap = ParameterMap(model=model, generator=generator, instructions=[instr])
exp = Experiment(pmap)
with open("test/instruction.pickle", "rb") as filename:
test_data = pickle.load(filename)
@pytest.mark.integration
def test_extended_pulse():
instr_it = instr
gen_signal = generator.generate_signals(instr_it)
ts = gen_signal["d1"]["ts"]
np.testing.assert_allclose(
ts,
test_data["signal"]["d1"]["ts"],
atol=1e-9 * np.max(test_data["signal"]["d1"]["ts"]),
)
np.testing.assert_allclose(
actual=gen_signal["d1"]["values"].numpy(),
desired=test_data["signal"]["d1"]["values"].numpy(),
atol=1e-9 * np.max(test_data["signal"]["d1"]["values"].numpy()),
)
np.testing.assert_allclose(
actual=gen_signal["d2"]["values"].numpy(),
desired=test_data["signal"]["d2"]["values"].numpy(),
atol=1e-9 * np.max(test_data["signal"]["d2"]["values"].numpy()),
)
np.testing.assert_allclose(
instr_it.get_full_gate_length(), test_data["full_gate_length1"]
)
instr_it.auto_adjust_t_end(buffer=0.2)
np.testing.assert_allclose(
instr_it.get_full_gate_length(), test_data["full_gate_length2"]
)
np.testing.assert_allclose(instr_it.t_end, test_data["t_end2"])
pmap.set_parameters(
[2 * t_final],
[[("multi_instruction", "d1", "gaussd1_2", "t_final")]],
extend_bounds=True,
)
gen_signal = generator.generate_signals(instr_it)
np.testing.assert_allclose(
actual=gen_signal["d1"]["values"].numpy(),
desired=test_data["signal2"]["d1"]["values"].numpy(),
atol=1e-9 * np.max(test_data["signal"]["d1"]["values"].numpy()),
)
np.testing.assert_allclose(
actual=gen_signal["d2"]["values"].numpy(),
desired=test_data["signal2"]["d2"]["values"].numpy(),
atol=1e-9 * np.max(test_data["signal"]["d2"]["values"].numpy()),
)
instr_it.auto_adjust_t_end(0.1)
gen_signal = generator.generate_signals(instr_it)
np.testing.assert_allclose(
actual=gen_signal["d1"]["values"].numpy(),
desired=test_data["signal3"]["d1"]["values"].numpy(),
atol=1e-9 * np.max(test_data["signal"]["d1"]["values"].numpy()),
)
np.testing.assert_allclose(
actual=gen_signal["d2"]["values"].numpy(),
desired=test_data["signal3"]["d2"]["values"].numpy(),
atol=1e-9 * np.max(test_data["signal"]["d2"]["values"].numpy()),
)
@pytest.mark.unit
def test_save_and_load():
global instr, pmap
instr = Instruction()
instr.from_dict(hjson.loads(instr_dict_str, object_pairs_hook=hjson_decode))
pmap = ParameterMap(model=model, generator=generator, instructions=[instr])
test_extended_pulse()
@pytest.mark.unit
def test_str_conversion():
assert repr(instr) == "Instruction[multi_instruction]"
@pytest.mark.unit
def test_set_name_ideal():
"""Check that asigning a name of a specific gate from the constants updates the ideal unitary."""
instr.set_name("ry90p")
assert (instr.ideal == GATES["ry90p"]).all()
instr.set_name("crzp")
assert (instr.ideal == GATES["crzp"]).all()
| [
"hjson.loads",
"numpy.testing.assert_allclose",
"pickle.load",
"c3.parametermap.ParameterMap",
"c3.signal.pulse.EnvelopeDrag",
"c3.signal.pulse.Carrier",
"numpy.max",
"c3.signal.gates.Instruction",
"c3.experiment.Experiment",
"c3.model.Model",
"copy.deepcopy",
"c3.generator.generator.Generator",
"c3.c3objs.Quantity"
] | [((455, 462), 'c3.model.Model', 'Model', ([], {}), '()\n', (460, 462), False, 'from c3.model import Model\n'), ((516, 527), 'c3.generator.generator.Generator', 'Generator', ([], {}), '()\n', (525, 527), False, 'from c3.generator.generator import Generator\n'), ((1258, 1408), 'c3.signal.pulse.EnvelopeDrag', 'pulse.EnvelopeDrag', ([], {'name': '"""gauss"""', 'desc': '"""Gaussian comp for single-qubit gates"""', 'params': 'gauss_params_single', 'shape': "envelopes['gaussian_nonorm']"}), "(name='gauss', desc=\n 'Gaussian comp for single-qubit gates', params=gauss_params_single,\n shape=envelopes['gaussian_nonorm'])\n", (1276, 1408), False, 'from c3.signal import gates, pulse\n'), ((1651, 1753), 'c3.signal.pulse.Carrier', 'pulse.Carrier', ([], {'name': '"""carrier"""', 'desc': '"""Frequency of the local oscillator"""', 'params': 'carrier_parameters'}), "(name='carrier', desc='Frequency of the local oscillator',\n params=carrier_parameters)\n", (1664, 1753), False, 'from c3.signal import gates, pulse\n'), ((1796, 1815), 'copy.deepcopy', 'copy.deepcopy', (['carr'], {}), '(carr)\n', (1809, 1815), False, 'import copy\n'), ((1869, 1979), 'c3.signal.gates.Instruction', 'gates.Instruction', ([], {'name': '"""multi_instruction"""', 't_start': '(0.0)', 't_end': '(t_final * 3 + 6e-09)', 'channels': "['d1', 'd2']"}), "(name='multi_instruction', t_start=0.0, t_end=t_final * 3 +\n 6e-09, channels=['d1', 'd2'])\n", (1886, 1979), False, 'from c3.signal import gates, pulse\n'), ((2869, 2937), 'c3.parametermap.ParameterMap', 'ParameterMap', ([], {'model': 'model', 'generator': 'generator', 'instructions': '[instr]'}), '(model=model, generator=generator, instructions=[instr])\n', (2881, 2937), False, 'from c3.parametermap import ParameterMap\n'), ((2945, 2961), 'c3.experiment.Experiment', 'Experiment', (['pmap'], {}), '(pmap)\n', (2955, 2961), False, 'from c3.experiment import Experiment\n'), ((671, 726), 'c3.c3objs.Quantity', 'Quantity', ([], {'value': '(0.5)', 'min_val': '(0.4)', 'max_val': '(0.6)', 'unit': '"""V"""'}), "(value=0.5, min_val=0.4, max_val=0.6, unit='V')\n", (679, 726), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((743, 822), 'c3.c3objs.Quantity', 'Quantity', ([], {'value': 't_final', 'min_val': '(0.5 * t_final)', 'max_val': '(1.5 * t_final)', 'unit': '"""s"""'}), "(value=t_final, min_val=0.5 * t_final, max_val=1.5 * t_final, unit='s')\n", (751, 822), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((851, 930), 'c3.c3objs.Quantity', 'Quantity', ([], {'value': '(t_final / 4)', 'min_val': '(t_final / 8)', 'max_val': '(t_final / 2)', 'unit': '"""s"""'}), "(value=t_final / 4, min_val=t_final / 8, max_val=t_final / 2, unit='s')\n", (859, 930), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((962, 1036), 'c3.c3objs.Quantity', 'Quantity', ([], {'value': '(0.0)', 'min_val': '(-0.5 * np.pi)', 'max_val': '(2.5 * np.pi)', 'unit': '"""rad"""'}), "(value=0.0, min_val=-0.5 * np.pi, max_val=2.5 * np.pi, unit='rad')\n", (970, 1036), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((1071, 1177), 'c3.c3objs.Quantity', 'Quantity', ([], {'value': '(-sideband - 3000000.0)', 'min_val': '(-56 * 1000000.0)', 'max_val': '(-52 * 1000000.0)', 'unit': '"""Hz 2pi"""'}), "(value=-sideband - 3000000.0, min_val=-56 * 1000000.0, max_val=-52 *\n 1000000.0, unit='Hz 2pi')\n", (1079, 1177), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((1184, 1234), 'c3.c3objs.Quantity', 'Quantity', ([], {'value': 
'(-1)', 'min_val': '(-5)', 'max_val': '(3)', 'unit': '""""""'}), "(value=-1, min_val=-5, max_val=3, unit='')\n", (1192, 1234), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((1483, 1573), 'c3.c3objs.Quantity', 'Quantity', ([], {'value': 'lo_freq_q1', 'min_val': '(4500000000.0)', 'max_val': '(6000000000.0)', 'unit': '"""Hz 2pi"""'}), "(value=lo_freq_q1, min_val=4500000000.0, max_val=6000000000.0, unit\n ='Hz 2pi')\n", (1491, 1573), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((1573, 1639), 'c3.c3objs.Quantity', 'Quantity', ([], {'value': '(0.0)', 'min_val': '(-np.pi)', 'max_val': '(3 * np.pi)', 'unit': '"""rad"""'}), "(value=0.0, min_val=-np.pi, max_val=3 * np.pi, unit='rad')\n", (1581, 1639), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((2015, 2046), 'copy.deepcopy', 'copy.deepcopy', (['gauss_env_single'], {}), '(gauss_env_single)\n', (2028, 2046), False, 'import copy\n'), ((2097, 2128), 'copy.deepcopy', 'copy.deepcopy', (['gauss_env_single'], {}), '(gauss_env_single)\n', (2110, 2128), False, 'import copy\n'), ((2265, 2296), 'copy.deepcopy', 'copy.deepcopy', (['gauss_env_single'], {}), '(gauss_env_single)\n', (2278, 2296), False, 'import copy\n'), ((2428, 2459), 'copy.deepcopy', 'copy.deepcopy', (['gauss_env_single'], {}), '(gauss_env_single)\n', (2441, 2459), False, 'import copy\n'), ((2510, 2541), 'copy.deepcopy', 'copy.deepcopy', (['gauss_env_single'], {}), '(gauss_env_single)\n', (2523, 2541), False, 'import copy\n'), ((3035, 3056), 'pickle.load', 'pickle.load', (['filename'], {}), '(filename)\n', (3046, 3056), False, 'import pickle\n'), ((4084, 4147), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['instr_it.t_end', "test_data['t_end2']"], {}), "(instr_it.t_end, test_data['t_end2'])\n", (4110, 4147), True, 'import numpy as np\n'), ((5418, 5431), 'c3.signal.gates.Instruction', 'Instruction', ([], {}), '()\n', (5429, 5431), False, 'from c3.signal.gates import Instruction\n'), ((5524, 5592), 'c3.parametermap.ParameterMap', 'ParameterMap', ([], {'model': 'model', 'generator': 'generator', 'instructions': '[instr]'}), '(model=model, generator=generator, instructions=[instr])\n', (5536, 5592), False, 'from c3.parametermap import ParameterMap\n'), ((5452, 5511), 'hjson.loads', 'hjson.loads', (['instr_dict_str'], {'object_pairs_hook': 'hjson_decode'}), '(instr_dict_str, object_pairs_hook=hjson_decode)\n', (5463, 5511), False, 'import hjson\n'), ((2184, 2199), 'c3.c3objs.Quantity', 'Quantity', (['(1e-09)'], {}), '(1e-09)\n', (2192, 2199), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((2352, 2367), 'c3.c3objs.Quantity', 'Quantity', (['(1e-09)'], {}), '(1e-09)\n', (2360, 2367), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((2606, 2621), 'c3.c3objs.Quantity', 'Quantity', (['(1e-09)'], {}), '(1e-09)\n', (2614, 2621), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((2690, 2713), 'c3.c3objs.Quantity', 'Quantity', (['(0.9 * t_final)'], {}), '(0.9 * t_final)\n', (2698, 2713), False, 'from c3.c3objs import Quantity, hjson_decode, hjson_encode\n'), ((3324, 3363), 'numpy.max', 'np.max', (["test_data['signal']['d1']['ts']"], {}), "(test_data['signal']['d1']['ts'])\n", (3330, 3363), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import TensorDataset
from utils import init_embedding
class Corpus(object):
PAD = '<PAD>'
UNK = '<UNK>'
SOS = '<SOS>'
EOS = '<EOS>'
def __init__(self, fdata, fembed=None):
        # read the sentences from the data file
        self.sents = self.preprocess(fdata)
        # collect all distinct words, tags and characters in the data
        self.words, self.tags, self.chars = self.parse(self.sents)
        # add padding, unknown, start-of-sentence and end-of-sentence tokens
        self.words = [self.PAD, self.UNK, self.SOS, self.EOS] + self.words
        # add padding and unknown characters
        self.chars = [self.PAD, self.UNK] + self.chars
        # word-to-index dictionary
        self.wdict = {w: i for i, w in enumerate(self.words)}
        # tag-to-index dictionary
        self.tdict = {t: i for i, t in enumerate(self.tags)}
        # character-to-index dictionary
        self.cdict = {c: i for i, c in enumerate(self.chars)}
        # padding word index
        self.pad_wi = self.wdict[self.PAD]
        # unknown word index
        self.unk_wi = self.wdict[self.UNK]
        # start-of-sentence word index
        self.sos_wi = self.wdict[self.SOS]
        # end-of-sentence word index
        self.eos_wi = self.wdict[self.EOS]
        # padding character index
        self.pad_ci = self.cdict[self.PAD]
        # unknown character index
        self.unk_ci = self.cdict[self.UNK]
        # number of sentences
        self.n_sents = len(self.sents)
        # number of words
        self.n_words = len(self.words)
        # number of tags
        self.n_tags = len(self.tags)
        # number of characters
        self.n_chars = len(self.chars)
        # pre-trained word embeddings
        self.embed = self.get_embed(fembed) if fembed is not None else None
def extend(self, words):
unk_words = [w for w in words if w not in self.wdict]
unk_chars = [c for c in ''.join(unk_words) if c not in self.cdict]
        # extend the word and character vocabularies
        self.words = sorted(set(self.words + unk_words) - {self.PAD})
        self.chars = sorted(set(self.chars + unk_chars) - {self.PAD})
        self.words = [self.PAD] + self.words
        self.chars = [self.PAD] + self.chars
        # rebuild the dictionaries
        self.wdict = {w: i for i, w in enumerate(self.words)}
        self.cdict = {c: i for i, c in enumerate(self.chars)}
        # refresh the special-token indices
        self.pad_wi = self.wdict[self.PAD]
        self.unk_wi = self.wdict[self.UNK]
        self.sos_wi = self.wdict[self.SOS]
        self.eos_wi = self.wdict[self.EOS]
        self.pad_ci = self.cdict[self.PAD]
        self.unk_ci = self.cdict[self.UNK]
        # update the word and character counts
self.n_words = len(self.words)
self.n_chars = len(self.chars)
def load(self, fdata, use_char=False, n_context=1, max_len=10):
sentences = self.preprocess(fdata)
x, y, char_x, lens = [], [], [], []
for wordseq, tagseq in sentences:
wiseq = [self.wdict.get(w, self.unk_wi) for w in wordseq]
tiseq = [self.tdict[t] for t in tagseq]
            # gather the context window for each word
if n_context > 1:
x.append(self.get_context(wiseq, n_context))
else:
x.append(torch.tensor(wiseq, dtype=torch.long))
y.append(torch.tensor(tiseq, dtype=torch.long))
            # characters beyond the word length are padded with 0 up to max_len
char_x.append(torch.tensor([
[self.cdict.get(c, self.unk_ci)
for c in w[:max_len]] + [0] * (max_len - len(w))
for w in wordseq
]))
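            # illustrative sketch (hypothetical word, max_len=10): "cat" maps to its
            # three character indices followed by seven zeros, e.g. [c, a, t, 0, 0, 0, 0, 0, 0, 0]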
lens.append(len(tiseq))
x = pad_sequence(x, True)
y = pad_sequence(y, True)
char_x = pad_sequence(char_x, True)
lens = torch.tensor(lens)
if use_char:
dataset = TensorDataset(x, y, char_x, lens)
else:
dataset = TensorDataset(x, y, lens)
return dataset
def get_context(self, wiseq, n_context):
half = n_context // 2
length = len(wiseq)
wiseq = [self.sos_wi] * half + wiseq + [self.sos_wi] * half
context = [wiseq[i:i + n_context] for i in range(length)]
context = torch.tensor(context, dtype=torch.long)
return context
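        # illustrative sketch (hypothetical indices): with n_context=3 and
        # wiseq=[7, 8, 9], half=1, the padded sequence is [sos, 7, 8, 9, sos] and
        # the windows are [[sos, 7, 8], [7, 8, 9], [8, 9, sos]]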
def get_embed(self, fembed):
with open(fembed, 'r') as f:
lines = [line for line in f]
splits = [line.split() for line in lines]
        # read the words and the embedding matrix from the pre-trained data
words, embed = zip(*[
(split[0], list(map(float, split[1:]))) for split in splits
])
        # extend the vocabulary with the pre-trained words
self.extend(words)
        # initialise the word embeddings
embed = torch.tensor(embed, dtype=torch.float)
embed_indices = [self.wdict[w] for w in words]
extended_embed = torch.Tensor(self.n_words, embed.size(1))
init_embedding(extended_embed)
extended_embed[embed_indices] = embed
return extended_embed
def __repr__(self):
info = f"{self.__class__.__name__}(\n"
info += f"{'':2}num of sentences: {self.n_sents}\n"
info += f"{'':2}num of words: {self.n_words}\n"
info += f"{'':2}num of tags: {self.n_tags}\n"
info += f"{'':2}num of chars: {self.n_chars}\n"
info += f")\n"
return info
@staticmethod
def preprocess(fdata):
start = 0
sentences = []
with open(fdata, 'r') as f:
lines = [line for line in f]
for i, line in enumerate(lines):
if len(lines[i]) <= 1:
splits = [l.split()[1:4:2] for l in lines[start:i]]
wordseq, tagseq = zip(*splits)
start = i + 1
while start < len(lines) and len(lines[start]) <= 1:
start += 1
sentences.append((wordseq, tagseq))
return sentences
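        # illustrative input sketch (hypothetical CoNLL-style rows): each non-empty
        # line holds whitespace-separated columns, and the [1:4:2] slice keeps
        # columns 1 and 3 as the word and its tag; blank lines separate sentences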
@staticmethod
def parse(sentences):
wordseqs, tagseqs = zip(*sentences)
words = sorted(set(w for wordseq in wordseqs for w in wordseq))
tags = sorted(set(t for tagseq in tagseqs for t in tagseq))
chars = sorted(set(''.join(words)))
return words, tags, chars
| [
"torch.tensor",
"utils.init_embedding",
"torch.nn.utils.rnn.pad_sequence",
"torch.utils.data.TensorDataset"
] | [((3427, 3448), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['x', '(True)'], {}), '(x, True)\n', (3439, 3448), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((3462, 3483), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['y', '(True)'], {}), '(y, True)\n', (3474, 3483), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((3502, 3528), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['char_x', '(True)'], {}), '(char_x, True)\n', (3514, 3528), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((3545, 3563), 'torch.tensor', 'torch.tensor', (['lens'], {}), '(lens)\n', (3557, 3563), False, 'import torch\n'), ((3998, 4037), 'torch.tensor', 'torch.tensor', (['context'], {'dtype': 'torch.long'}), '(context, dtype=torch.long)\n', (4010, 4037), False, 'import torch\n'), ((4454, 4492), 'torch.tensor', 'torch.tensor', (['embed'], {'dtype': 'torch.float'}), '(embed, dtype=torch.float)\n', (4466, 4492), False, 'import torch\n'), ((4626, 4656), 'utils.init_embedding', 'init_embedding', (['extended_embed'], {}), '(extended_embed)\n', (4640, 4656), False, 'from utils import init_embedding\n'), ((3611, 3644), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x', 'y', 'char_x', 'lens'], {}), '(x, y, char_x, lens)\n', (3624, 3644), False, 'from torch.utils.data import TensorDataset\n'), ((3683, 3708), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x', 'y', 'lens'], {}), '(x, y, lens)\n', (3696, 3708), False, 'from torch.utils.data import TensorDataset\n'), ((3098, 3135), 'torch.tensor', 'torch.tensor', (['tiseq'], {'dtype': 'torch.long'}), '(tiseq, dtype=torch.long)\n', (3110, 3135), False, 'import torch\n'), ((3037, 3074), 'torch.tensor', 'torch.tensor', (['wiseq'], {'dtype': 'torch.long'}), '(wiseq, dtype=torch.long)\n', (3049, 3074), False, 'import torch\n')] |
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import Permission, PermissionsMixin, UserManager
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.mail import send_mail
from django.db import models
from django.db.models import QuerySet
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from dhh_auth.utils import get_perm_obj
from utils.models import TimestampedUUIDModel
class ClientPermission(TimestampedUUIDModel):
permission = models.ForeignKey(Permission, on_delete=models.CASCADE)
client = models.ForeignKey('Client', related_name='clientpermission', on_delete=models.CASCADE)
permitted_by = models.ForeignKey('User', on_delete=models.CASCADE)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_pk = models.CharField(_('object ID'), max_length=255)
content_object = GenericForeignKey(fk_field='object_pk')
class Meta:
unique_together = ['client', 'permission', 'content_type', 'object_pk']
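# Illustrative reading of the model above: each ClientPermission row grants one
# Permission on one concrete object, identified generically by (content_type,
# object_pk), and records which User granted it.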
class Client(TimestampedUUIDModel):
def _get_perms(self, permission, objects, object_lookup_field):
# TODO: Add caching
if isinstance(objects, QuerySet):
objects = list(objects)
if not isinstance(objects, list):
objects = [objects]
return (
ClientPermission.objects
.filter(permission=permission,
client=self,
content_type=permission.content_type,
object_pk__in=[getattr(obj, object_lookup_field) for obj in objects])
)
def has_obj_perm(self, perm, obj, object_lookup_field='sts_id'):
permission = get_perm_obj(perm, obj)
obj_permission = self._get_perms(permission, obj, object_lookup_field)
if not obj_permission:
return False
return True
def has_any_obj_perm(self, perm, objects, object_lookup_field='sts_id'):
permission = get_perm_obj(perm, objects[0])
obj_permissions = self._get_perms(permission, objects, object_lookup_field)
if not obj_permissions:
return False
return True
def has_all_obj_perm(self, perm, objects, object_lookup_field='sts_id'):
permission = get_perm_obj(perm, objects[0])
obj_permissions = self._get_perms(permission, objects, object_lookup_field)
if not obj_permissions or len(obj_permissions) != len(objects):
return False
return True
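    # Illustrative usage (hypothetical names), assuming objects carry an `sts_id` field:
    #   client.has_obj_perm('view_item', item)       -> permission on a single object
    #   client.has_any_obj_perm('view_item', items)  -> at least one object permitted
    #   client.has_all_obj_perm('view_item', items)  -> every object permitted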
def create_perm(self, perm, obj, permitted_by):
permission = get_perm_obj(perm, obj)
return ClientPermission.objects.create(
client=self,
permission=permission,
content_type=permission.content_type,
object_pk=obj.id,
permitted_by=permitted_by,
)
def __str__(self):
return str(self.id)
class AbstractClientUser(AbstractBaseUser, TimestampedUUIDModel):
"""
    User model that stores additional client information.

    The purpose of the model is to have an additional client instance
    linked to each user.
    """
client = models.OneToOneField(Client, on_delete=models.CASCADE)
def save(self, *args, **kwargs):
if not getattr(self, 'client', None):
self.client = Client.objects.create()
super().save(*args, **kwargs)
def get_full_name(self):
raise NotImplementedError('subclasses of AbstractClientUser must provide a get_full_name() method')
def get_short_name(self):
raise NotImplementedError('subclasses of AbstractClientUser must provide a get_short_name() method.')
class Meta:
abstract = True
class User(AbstractClientUser, PermissionsMixin):
username_validator = UnicodeUsernameValidator()
username = models.CharField(
_('username'),
max_length=150,
unique=True,
help_text=_('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'),
validators=[username_validator],
error_messages={
'unique': _("A user with that username already exists."),
},
)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin site.'),
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
),
)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
EMAIL_FIELD = 'email'
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def clean(self):
super(AbstractClientUser, self).clean()
self.email = self.__class__.objects.normalize_email(self.email)
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
| [
"django.db.models.OneToOneField",
"django.utils.translation.ugettext_lazy",
"django.db.models.ForeignKey",
"dhh_auth.utils.get_perm_obj",
"django.core.mail.send_mail",
"django.contrib.auth.validators.UnicodeUsernameValidator",
"django.contrib.auth.models.UserManager",
"django.contrib.contenttypes.fields.GenericForeignKey"
] | [((680, 735), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Permission'], {'on_delete': 'models.CASCADE'}), '(Permission, on_delete=models.CASCADE)\n', (697, 735), False, 'from django.db import models\n'), ((749, 840), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Client"""'], {'related_name': '"""clientpermission"""', 'on_delete': 'models.CASCADE'}), "('Client', related_name='clientpermission', on_delete=\n models.CASCADE)\n", (766, 840), False, 'from django.db import models\n'), ((855, 906), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""User"""'], {'on_delete': 'models.CASCADE'}), "('User', on_delete=models.CASCADE)\n", (872, 906), False, 'from django.db import models\n'), ((927, 983), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ContentType'], {'on_delete': 'models.CASCADE'}), '(ContentType, on_delete=models.CASCADE)\n', (944, 983), False, 'from django.db import models\n'), ((1070, 1109), 'django.contrib.contenttypes.fields.GenericForeignKey', 'GenericForeignKey', ([], {'fk_field': '"""object_pk"""'}), "(fk_field='object_pk')\n", (1087, 1109), False, 'from django.contrib.contenttypes.fields import GenericForeignKey\n'), ((3322, 3376), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Client'], {'on_delete': 'models.CASCADE'}), '(Client, on_delete=models.CASCADE)\n', (3342, 3376), False, 'from django.db import models\n'), ((3946, 3972), 'django.contrib.auth.validators.UnicodeUsernameValidator', 'UnicodeUsernameValidator', ([], {}), '()\n', (3970, 3972), False, 'from django.contrib.auth.validators import UnicodeUsernameValidator\n'), ((5056, 5069), 'django.contrib.auth.models.UserManager', 'UserManager', ([], {}), '()\n', (5067, 5069), False, 'from django.contrib.auth.models import Permission, PermissionsMixin, UserManager\n'), ((1017, 1031), 'django.utils.translation.ugettext_lazy', '_', (['"""object ID"""'], {}), "('object ID')\n", (1018, 1031), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1890, 1913), 'dhh_auth.utils.get_perm_obj', 'get_perm_obj', (['perm', 'obj'], {}), '(perm, obj)\n', (1902, 1913), False, 'from dhh_auth.utils import get_perm_obj\n'), ((2169, 2199), 'dhh_auth.utils.get_perm_obj', 'get_perm_obj', (['perm', 'objects[0]'], {}), '(perm, objects[0])\n', (2181, 2199), False, 'from dhh_auth.utils import get_perm_obj\n'), ((2461, 2491), 'dhh_auth.utils.get_perm_obj', 'get_perm_obj', (['perm', 'objects[0]'], {}), '(perm, objects[0])\n', (2473, 2491), False, 'from dhh_auth.utils import get_perm_obj\n'), ((2768, 2791), 'dhh_auth.utils.get_perm_obj', 'get_perm_obj', (['perm', 'obj'], {}), '(perm, obj)\n', (2780, 2791), False, 'from dhh_auth.utils import get_perm_obj\n'), ((4015, 4028), 'django.utils.translation.ugettext_lazy', '_', (['"""username"""'], {}), "('username')\n", (4016, 4028), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4357, 4372), 'django.utils.translation.ugettext_lazy', '_', (['"""first name"""'], {}), "('first name')\n", (4358, 4372), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4434, 4448), 'django.utils.translation.ugettext_lazy', '_', (['"""last name"""'], {}), "('last name')\n", (4435, 4448), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4507, 4525), 'django.utils.translation.ugettext_lazy', '_', (['"""email address"""'], {}), "('email address')\n", (4508, 4525), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4583, 4600), 'django.utils.translation.ugettext_lazy', '_', (['"""staff 
status"""'], {}), "('staff status')\n", (4584, 4600), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4758, 4769), 'django.utils.translation.ugettext_lazy', '_', (['"""active"""'], {}), "('active')\n", (4759, 4769), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5001, 5017), 'django.utils.translation.ugettext_lazy', '_', (['"""date joined"""'], {}), "('date joined')\n", (5002, 5017), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5201, 5210), 'django.utils.translation.ugettext_lazy', '_', (['"""user"""'], {}), "('user')\n", (5202, 5210), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5241, 5251), 'django.utils.translation.ugettext_lazy', '_', (['"""users"""'], {}), "('users')\n", (5242, 5251), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5871, 5934), 'django.core.mail.send_mail', 'send_mail', (['subject', 'message', 'from_email', '[self.email]'], {}), '(subject, message, from_email, [self.email], **kwargs)\n', (5880, 5934), False, 'from django.core.mail import send_mail\n'), ((4093, 4168), 'django.utils.translation.ugettext_lazy', '_', (['"""Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only."""'], {}), "('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.')\n", (4094, 4168), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4643, 4705), 'django.utils.translation.ugettext_lazy', '_', (['"""Designates whether the user can log into this admin site."""'], {}), "('Designates whether the user can log into this admin site.')\n", (4644, 4705), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4811, 4922), 'django.utils.translation.ugettext_lazy', '_', (['"""Designates whether this user should be treated as active. Unselect this instead of deleting accounts."""'], {}), "('Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'\n )\n", (4812, 4922), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4258, 4304), 'django.utils.translation.ugettext_lazy', '_', (['"""A user with that username already exists."""'], {}), "('A user with that username already exists.')\n", (4259, 4304), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
from setuptools import setup
setup(name='CustomAwdRnn',
version='0.0.0',
description='Custom awd rnn',
author='<NAME>',
author_email='<EMAIL>',
packages=['CustomAwdRnn', ],
zip_safe=False) | [
"setuptools.setup"
] | [((31, 196), 'setuptools.setup', 'setup', ([], {'name': '"""CustomAwdRnn"""', 'version': '"""0.0.0"""', 'description': '"""Custom awd rnn"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['CustomAwdRnn']", 'zip_safe': '(False)'}), "(name='CustomAwdRnn', version='0.0.0', description='Custom awd rnn',\n author='<NAME>', author_email='<EMAIL>', packages=['CustomAwdRnn'],\n zip_safe=False)\n", (36, 196), False, 'from setuptools import setup\n')] |
from optparse import OptionParser
from ztf_fit import SN_fit_tab
from ztf_hdf5 import Read_LightCurve
import astropy
from ztf_util import multiproc, dump_in_yaml, checkDir
from ztf_util import make_dict_from_config, make_dict_from_optparse
def fit(metaTable, params={}, j=0, output_q=None):
"""
    Function to perform LC fits,
    with optional multiprocessing support
Parameters
----------------
metaTable: astropy table
array of data to process
params: dict
parameters of the function
j: int, opt
tag for multiprocessing (default: 0)
output_q: multiprocessing.queue,opt
queue for multiprocessing
Returns
----------
resfit: astropy table
result of the fit
"""
sn_fit = SN_fit_tab(metaTable)
resfit = sn_fit()
if output_q is not None:
return output_q.put({j: resfit})
else:
return 0
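# Worker contract sketch for `fit` above: when dispatched via multiproc (see the
# __main__ block below, which calls multiproc(metaTable, params, fit, nproc)),
# each worker receives a tag `j` and a shared queue and publishes its result as
# output_q.put({j: resfit}); merging the tagged results is left to ztf_util.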
# get all possible simulation parameters and put in a dict
# path = simu_fit.__path__
confDict = make_dict_from_config(
'ztf_stage/script/fit_lc', 'config_fit_lc.txt')
parser = OptionParser()
# parser: options are generated dynamically from the config dict
for key, vals in confDict.items():
vv = vals[1]
if vals[0] != 'str':
vv = eval('{}({})'.format(vals[0], vals[1]))
parser.add_option('--{}'.format(key), help='{} [%default]'.format(
vals[2]), default=vv, type=vals[0], metavar='')
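# Illustrative sketch (hypothetical entry): a confDict item such as
#   {'nprocFit': ('int', '8', 'number of processes for the fit')}
# makes eval("int(8)") == 8 the default and registers an --nprocFit option.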
opts, args = parser.parse_args()
metaFileInput = opts.metaFileInput
metaDirInput = opts.metaDirInput
metaFileOutput = opts.metaFileOutput
metaDirOutput = opts.metaDirOutput
nproc = opts.nprocFit
# dump parameters in yaml file
checkDir(metaDirOutput)
nameOut = metaFileOutput.replace('.hdf5', '.yaml')
dump_in_yaml(opts, confDict, metaDirOutput, nameOut, 'fit')
if __name__ == '__main__':
meta = Read_LightCurve(file_name=metaFileInput, inputDir=metaDirInput)
metaTable = meta.get_table(path='meta')
params = {}
resfit = multiproc(metaTable, params, fit, nproc)
print(len(metaTable), len(resfit))
    # write results to file
fOut = '{}/{}'.format(metaDirOutput, metaFileOutput)
astropy.io.misc.hdf5.write_table_hdf5(
resfit, fOut, path='meta', overwrite=True, serialize_meta=False)
| [
"ztf_fit.SN_fit_tab",
"ztf_util.make_dict_from_config",
"optparse.OptionParser",
"ztf_hdf5.Read_LightCurve",
"astropy.io.misc.hdf5.write_table_hdf5",
"ztf_util.multiproc",
"ztf_util.checkDir",
"ztf_util.dump_in_yaml"
] | [((1000, 1069), 'ztf_util.make_dict_from_config', 'make_dict_from_config', (['"""ztf_stage/script/fit_lc"""', '"""config_fit_lc.txt"""'], {}), "('ztf_stage/script/fit_lc', 'config_fit_lc.txt')\n", (1021, 1069), False, 'from ztf_util import make_dict_from_config, make_dict_from_optparse\n'), ((1085, 1099), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (1097, 1099), False, 'from optparse import OptionParser\n'), ((1620, 1643), 'ztf_util.checkDir', 'checkDir', (['metaDirOutput'], {}), '(metaDirOutput)\n', (1628, 1643), False, 'from ztf_util import multiproc, dump_in_yaml, checkDir\n'), ((1695, 1754), 'ztf_util.dump_in_yaml', 'dump_in_yaml', (['opts', 'confDict', 'metaDirOutput', 'nameOut', '"""fit"""'], {}), "(opts, confDict, metaDirOutput, nameOut, 'fit')\n", (1707, 1754), False, 'from ztf_util import multiproc, dump_in_yaml, checkDir\n'), ((758, 779), 'ztf_fit.SN_fit_tab', 'SN_fit_tab', (['metaTable'], {}), '(metaTable)\n', (768, 779), False, 'from ztf_fit import SN_fit_tab\n'), ((1795, 1858), 'ztf_hdf5.Read_LightCurve', 'Read_LightCurve', ([], {'file_name': 'metaFileInput', 'inputDir': 'metaDirInput'}), '(file_name=metaFileInput, inputDir=metaDirInput)\n', (1810, 1858), False, 'from ztf_hdf5 import Read_LightCurve\n'), ((1933, 1973), 'ztf_util.multiproc', 'multiproc', (['metaTable', 'params', 'fit', 'nproc'], {}), '(metaTable, params, fit, nproc)\n', (1942, 1973), False, 'from ztf_util import multiproc, dump_in_yaml, checkDir\n'), ((2104, 2211), 'astropy.io.misc.hdf5.write_table_hdf5', 'astropy.io.misc.hdf5.write_table_hdf5', (['resfit', 'fOut'], {'path': '"""meta"""', 'overwrite': '(True)', 'serialize_meta': '(False)'}), "(resfit, fOut, path='meta', overwrite=\n True, serialize_meta=False)\n", (2141, 2211), False, 'import astropy\n')] |